after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __call__(self, a, index, shape):
return self.new_tensor([a], shape, indexes=index)
|
def __call__(self, a, index, shape):
return self.new_tensor([a, index], shape)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def tile(cls, op):
from ..merge.concatenate import TensorConcatenate
in_tensor = op.input
tensor = op.outputs[0]
indexes = list(op.indexes)
axis = 0
output_axis = 0
output_chunk_shape = []
to_concat_axis_index = []
for i, index in enumerate(indexes):
if isinstance(index, TENSOR_TYPE) and index.dtype == np.bool_:
# bool indexing
# do unify chunk here
in_tensor, index = unify_chunks(
in_tensor, (index, tuple(axis + ii for ii in range(index.ndim)))
)
output_chunk_shape.append(reduce(operator.mul, index.chunk_shape))
indexes[i] = index.chunks
axis += index.ndim
output_axis += 1
elif isinstance(index, np.ndarray):
# fancy indexing
if index.ndim > 1:
# currently we only support tensor from numpy.ndarray
raise NotImplementedError
try:
np_index = np.sort(index)
splits = split_index_into_chunks(in_tensor.nsplits[axis], np_index)
except IndexError as e:
idx, size = e.idx, e.size
raise IndexError(
"index {0} is out of bounds for axis {1} with size {2}".format(
idx, axis, size
)
)
non_empty_idx_splits = [(j, s) for j, s in enumerate(splits) if s.size > 0]
non_empty_splits, _ = tuple(zip(*non_empty_idx_splits))
try:
indexes[i] = non_empty_idx_splits
except:
raise
if not is_asc_sorted(index):
pos_index = np.searchsorted(np_index, index)
to_concat_axis_index.append((output_axis, pos_index))
axis += 1
output_axis += 1
output_chunk_shape.append(len(non_empty_splits))
elif isinstance(index, (slice, Integral)):
indexes[i] = sorted(
slice_split(index, in_tensor.nsplits[axis]).items(),
key=operator.itemgetter(0),
)
if isinstance(index, slice) and index.step is not None and index.step < 0:
indexes[i] = list(reversed(indexes[i]))
if isinstance(index, slice):
output_chunk_shape.append(len(indexes[i]))
axis += 1
if isinstance(index, slice):
output_axis += 1
elif isinstance(index, TENSOR_TYPE):
raise NotImplementedError(
"Mars currently does not support fancy index from tensor"
)
else:
assert index is None
output_chunk_shape.append(1)
output_axis += 1
out_chunks = []
for output_idx in itertools.product(*(irange(s) for s in output_chunk_shape)):
chunk_idx = []
chunk_index = [] # chunk[index]
chunk_shape = []
axis = 0
output_axis = 0
for raw_index, index in izip(op.indexes, indexes):
if isinstance(raw_index, TENSOR_TYPE) and raw_index.dtype == np.bool_:
indexed = index[output_idx[output_axis]]
chunk_index.append(indexed)
chunk_idx.extend(indexed.index)
chunk_shape.append(np.nan)
axis += raw_index.ndim
output_axis += 1
elif isinstance(raw_index, np.ndarray):
input_index, indexed = index[output_idx[output_axis]]
chunk_index.append(indexed)
chunk_idx.append(input_index)
chunk_shape.append(len(indexed))
axis += 1
output_axis += 1
elif isinstance(raw_index, slice):
sliceobj = index[output_idx[output_axis]][1]
chunk_index.append(sliceobj)
ix = index[output_idx[output_axis]][0]
chunk_idx.append(ix)
chunk_shape.append(
calc_sliced_size(in_tensor.nsplits[axis][ix], sliceobj)
)
axis += 1
output_axis += 1
elif isinstance(raw_index, Integral):
input_index, sliceobj = index[0]
chunk_index.append(sliceobj)
chunk_idx.append(input_index)
axis += 1
else:
chunk_index.append(index)
chunk_shape.append(1)
output_axis += 1
chunk_input = in_tensor.cix[tuple(chunk_idx)]
chunk_op = op.copy().reset_key()
chunk = chunk_op.new_chunk(
[chunk_input], tuple(chunk_shape), indexes=chunk_index, index=output_idx
)
out_chunks.append(chunk)
nsplits = [
tuple(
c.shape[i]
for c in out_chunks
if all(idx == 0 for j, idx in enumerate(c.index) if j != i)
)
for i in range(len(out_chunks[0].shape))
]
new_op = op.copy().reset_key()
tensor = new_op.new_tensor(
[op.input], tensor.shape, indexes=op.indexes, chunks=out_chunks, nsplits=nsplits
)
if len(to_concat_axis_index) > 1:
raise NotImplementedError
if to_concat_axis_index:
axis, output_index = to_concat_axis_index[0]
indexobj = (
[slice(None)] * axis
+ [output_index]
+ [slice(None)] * (tensor.ndim - axis - 1)
)
output_shape = list(tensor.shape)
output_shape[axis] = len(output_index)
output_nsplits = list(nsplits)
output_nsplits[axis] = (output_shape[axis],)
output_chunks = []
for idx in itertools.product(
*(range(len(it)) for it in (nsplits[:axis] + nsplits[axis + 1 :]))
):
new_idx = idx[:axis] + (0,) + idx[axis:]
chunk_idxes = (
idx[:axis] + (i,) + idx[axis:] for i in range(len(nsplits[axis]))
)
chunks = [tensor.cix[chunk_idx] for chunk_idx in chunk_idxes]
s = list(chunks[0].shape)
s[axis] = len(output_index)
concat_chunk_op = TensorConcatenate(
axis=axis, dtype=chunks[0].dtype, sparse=chunks[0].op.sparse
)
concat_chunk = concat_chunk_op.new_chunk(chunks, tuple(s), index=new_idx)
out_chunk_op = TensorIndex(
dtype=concat_chunk.dtype, sparse=concat_chunk.op.sparse
)
out_chunk = out_chunk_op.new_chunk(
[concat_chunk], tuple(s), indexes=indexobj, index=new_idx
)
output_chunks.append(out_chunk)
new_op = tensor.op.copy()
tensor = new_op.new_tensor(
[op.input],
tuple(output_shape),
indexes=op.indexes,
chunks=output_chunks,
nsplits=output_nsplits,
)
return [tensor]
|
def tile(cls, op):
from ..merge.concatenate import TensorConcatenate
in_tensor = op.input
tensor = op.outputs[0]
indexes = list(op.indexes)
axis = 0
output_axis = 0
output_chunk_shape = []
to_concat_axis_index = []
for i, index in enumerate(indexes):
if isinstance(index, TENSOR_TYPE) and index.dtype == np.bool_:
# bool indexing
# do unify chunk here
in_tensor, index = unify_chunks(
in_tensor, (index, tuple(axis + ii for ii in range(index.ndim)))
)
output_chunk_shape.append(reduce(operator.mul, index.chunk_shape))
indexes[i] = index.chunks
axis += index.ndim
output_axis += 1
elif isinstance(index, np.ndarray):
# fancy indexing
if index.ndim > 1:
# currently we only support tensor from numpy.ndarray
raise NotImplementedError
try:
np_index = np.sort(index)
splits = split_index_into_chunks(in_tensor.nsplits[axis], np_index)
except IndexError as e:
idx, size = e.idx, e.size
raise IndexError(
"index {0} is out of bounds for axis {1} with size {2}".format(
idx, axis, size
)
)
non_empty_idx_splits = [(j, s) for j, s in enumerate(splits) if s.size > 0]
non_empty_splits, _ = tuple(zip(*non_empty_idx_splits))
try:
indexes[i] = non_empty_idx_splits
except:
raise
if not is_asc_sorted(index):
pos_index = np.searchsorted(np_index, index)
to_concat_axis_index.append((output_axis, pos_index))
axis += 1
output_axis += 1
output_chunk_shape.append(len(non_empty_splits))
elif isinstance(index, (slice, Integral)):
indexes[i] = sorted(
slice_split(index, in_tensor.nsplits[axis]).items(),
key=operator.itemgetter(0),
)
if isinstance(index, slice) and index.step is not None and index.step < 0:
indexes[i] = list(reversed(indexes[i]))
if isinstance(index, slice):
output_chunk_shape.append(len(indexes[i]))
axis += 1
if isinstance(index, slice):
output_axis += 1
elif isinstance(index, TENSOR_TYPE):
raise NotImplementedError(
"Mars currently does not support fancy index from tensor"
)
else:
assert index is None
output_chunk_shape.append(1)
output_axis += 1
out_chunks = []
for output_idx in itertools.product(*(irange(s) for s in output_chunk_shape)):
chunk_idx = []
chunk_index = [] # chunk[index]
chunk_shape = []
axis = 0
output_axis = 0
for raw_index, index in izip(op.indexes, indexes):
if isinstance(raw_index, TENSOR_TYPE) and raw_index.dtype == np.bool_:
indexed = index[output_idx[output_axis]]
chunk_index.append(indexed)
chunk_idx.extend(indexed.index)
chunk_shape.append(np.nan)
axis += raw_index.ndim
output_axis += 1
elif isinstance(raw_index, np.ndarray):
input_index, indexed = index[output_idx[output_axis]]
chunk_index.append(indexed)
chunk_idx.append(input_index)
chunk_shape.append(len(indexed))
axis += 1
output_axis += 1
elif isinstance(raw_index, slice):
sliceobj = index[output_idx[output_axis]][1]
chunk_index.append(sliceobj)
ix = index[output_idx[output_axis]][0]
chunk_idx.append(ix)
chunk_shape.append(
calc_sliced_size(in_tensor.nsplits[axis][ix], sliceobj)
)
axis += 1
output_axis += 1
elif isinstance(raw_index, Integral):
input_index, sliceobj = index[0]
chunk_index.append(sliceobj)
chunk_idx.append(input_index)
axis += 1
else:
chunk_index.append(index)
chunk_shape.append(1)
output_axis += 1
chunk_input = in_tensor.cix[tuple(chunk_idx)]
chunk_op = op.copy().reset_key()
chunk = chunk_op.new_chunk(
[chunk_input, chunk_index], tuple(chunk_shape), index=output_idx
)
out_chunks.append(chunk)
nsplits = [
tuple(
c.shape[i]
for c in out_chunks
if all(idx == 0 for j, idx in enumerate(c.index) if j != i)
)
for i in range(len(out_chunks[0].shape))
]
new_op = op.copy().reset_key()
tensor = new_op.new_tensor(
[op.input, op.indexes], tensor.shape, chunks=out_chunks, nsplits=nsplits
)
if len(to_concat_axis_index) > 1:
raise NotImplementedError
if to_concat_axis_index:
axis, output_index = to_concat_axis_index[0]
indexobj = (
[slice(None)] * axis
+ [output_index]
+ [slice(None)] * (tensor.ndim - axis - 1)
)
output_shape = list(tensor.shape)
output_shape[axis] = len(output_index)
output_nsplits = list(nsplits)
output_nsplits[axis] = (output_shape[axis],)
output_chunks = []
for idx in itertools.product(
*(range(len(it)) for it in (nsplits[:axis] + nsplits[axis + 1 :]))
):
new_idx = idx[:axis] + (0,) + idx[axis:]
chunk_idxes = (
idx[:axis] + (i,) + idx[axis:] for i in range(len(nsplits[axis]))
)
chunks = [tensor.cix[chunk_idx] for chunk_idx in chunk_idxes]
s = list(chunks[0].shape)
s[axis] = len(output_index)
concat_chunk_op = TensorConcatenate(
axis=axis, dtype=chunks[0].dtype, sparse=chunks[0].op.sparse
)
concat_chunk = concat_chunk_op.new_chunk(chunks, tuple(s), index=new_idx)
out_chunk_op = TensorIndex(
dtype=concat_chunk.dtype, sparse=concat_chunk.op.sparse
)
out_chunk = out_chunk_op.new_chunk(
[concat_chunk, indexobj], tuple(s), index=new_idx
)
output_chunks.append(out_chunk)
new_op = tensor.op.copy()
tensor = new_op.new_tensor(
[op.input, op.indexes],
tuple(output_shape),
chunks=output_chunks,
nsplits=output_nsplits,
)
return [tensor]
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def new_tensors(self, inputs, shape, **kw):
indexes = kw.pop("indexes", None)
value = kw.pop("value", None)
with self._handle_params(inputs, indexes, value) as mix_inputs:
return super(TensorIndexSetValue, self).new_tensors(mix_inputs, shape, **kw)
|
def new_tensors(self, inputs, shape, **kw):
tensor, indexes, value = inputs
self._indexes = indexes
self._value = value
inputs = self._handle_inputs(inputs)
return super(TensorIndexSetValue, self).new_tensors(inputs, shape, **kw)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def new_chunks(self, inputs, shape, **kw):
indexes = kw.pop("indexes", None)
value = kw.pop("value", None)
with self._handle_params(inputs, indexes, value) as mix_inputs:
return super(TensorIndexSetValue, self).new_chunks(mix_inputs, shape, **kw)
|
def new_chunks(self, inputs, shape, **kw):
chunk, indexes, value = inputs
self._indexes = indexes
self._value = value
inputs = self._handle_inputs(inputs)
return super(TensorIndexSetValue, self).new_chunks(inputs, shape, **kw)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def __call__(self, a, index, value):
return self.new_tensor([a], a.shape, indexes=index, value=value)
|
def __call__(self, a, index, value):
return self.new_tensor([a, index, value], a.shape)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def tile(cls, op):
tensor = op.outputs[0]
value = op.value
is_value_tensor = isinstance(value, TENSOR_TYPE)
index_tensor_op = TensorIndex(dtype=tensor.dtype, sparse=op.sparse)
index_tensor = index_tensor_op.new_tensor(
[op.input], tensor.shape, indexes=op.indexes
).single_tiles()
nsplits = index_tensor.nsplits
if any(any(np.isnan(ns) for ns in nsplit) for nsplit in nsplits):
raise NotImplementedError
if is_value_tensor:
value = op.value.rechunk(nsplits).single_tiles()
chunk_mapping = {c.op.input.index: c for c in index_tensor.chunks}
out_chunks = []
for chunk in op.input.chunks:
index_chunk = chunk_mapping.get(chunk.index)
if index_chunk is None:
out_chunks.append(chunk)
continue
value_chunk = value.cix[index_chunk.index] if is_value_tensor else value
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk(
[chunk],
chunk.shape,
indexes=index_chunk.op.indexes,
value=value_chunk,
index=chunk.index,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors(
[op.input],
tensor.shape,
indexes=op.indexes,
value=op.value,
chunks=out_chunks,
nsplits=op.input.nsplits,
)
|
def tile(cls, op):
tensor = op.outputs[0]
value = op.value
is_value_tensor = isinstance(value, TENSOR_TYPE)
index_tensor_op = TensorIndex(dtype=tensor.dtype, sparse=op.sparse)
index_tensor = index_tensor_op.new_tensor(
[op.input, op.indexes], tensor.shape
).single_tiles()
nsplits = index_tensor.nsplits
if any(any(np.isnan(ns) for ns in nsplit) for nsplit in nsplits):
raise NotImplementedError
if is_value_tensor:
value = op.value.rechunk(nsplits).single_tiles()
chunk_mapping = {c.op.input.index: c for c in index_tensor.chunks}
out_chunks = []
for chunk in op.input.chunks:
index_chunk = chunk_mapping.get(chunk.index)
if index_chunk is None:
out_chunks.append(chunk)
continue
value_chunk = value.cix[index_chunk.index] if is_value_tensor else value
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk(
[chunk, index_chunk.op.indexes, value_chunk], chunk.shape, index=chunk.index
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors(
[op.input, op.indexes, op.value],
tensor.shape,
chunks=out_chunks,
nsplits=op.input.nsplits,
)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def delete_meta(self, session_id, chunk_key):
"""
Delete metadata from store and cache
"""
query_key = (session_id, chunk_key)
try:
del self._meta_store[query_key]
if self._kv_store_ref is not None:
self._kv_store_ref.delete(
"/sessions/%s/chunks/%s" % (session_id, chunk_key),
recursive=True,
_tell=True,
_wait=False,
)
except KeyError:
pass
try:
del self._meta_cache[query_key]
except KeyError:
pass
# broadcast deletion into pre-determined destinations
if query_key in self._meta_broadcasts:
for dest in self._meta_broadcasts[query_key]:
self.ctx.actor_ref(self.default_name(), address=dest).delete_meta(
session_id, chunk_key, _wait=False, _tell=True
)
|
def delete_meta(self, session_id, chunk_key):
"""
Delete metadata from store and cache
"""
query_key = (session_id, chunk_key)
try:
del self._meta_store[query_key]
except KeyError:
pass
try:
del self._meta_cache[query_key]
except KeyError:
pass
# broadcast deletion into pre-determined destinations
futures = []
if query_key in self._meta_broadcasts:
for dest in self._meta_broadcasts[query_key]:
futures.append(
self.ctx.actor_ref(self.default_name(), address=dest).delete_meta(
session_id, chunk_key, _wait=False, _tell=True
)
)
if self._kv_store_ref is not None:
futures.append(
self._kv_store_ref.delete(
"/sessions/%s/chunks/%s" % (session_id, chunk_key),
recursive=True,
_tell=True,
_wait=False,
)
)
[f.result() for f in futures]
|
https://github.com/mars-project/mars/issues/72
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/home/admin/wenjun.swj/mars/mars/scheduler/kvstore.py", line 54, in delete
return self._store.delete(key, dir=dir, recursive=recursive)
File "/home/admin/wenjun.swj/mars/mars/kvstore.py", line 265, in delete
raise KeyError(key)
KeyError: '/sessions/c3c9d252-09bc-11e9-bae9-97dedca03eb9/chunks/6f02ffa9ca5cf0b82362b37418d29ccc'
2018-12-27T10:17:46Z <Greenlet "Greenlet-0" at 0x7fed5e0f6268: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x7fed5f8bbf48>> failed with KeyError
|
KeyError
|
def execute(self, session=None, **kw):
from ..session import Session
if session is None:
session = Session.default_or_local()
return session.run(*self, **kw)
|
def execute(self, session=None, n_parallel=None):
from ..session import Session
if session is None:
session = Session.default_or_local()
return session.run(*self, n_parallel=n_parallel)
|
https://github.com/mars-project/mars/issues/63
|
n [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.linalg.svd(t).execute()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-7615968c966e> in <module>()
----> 1 mt.linalg.svd(t).execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, n_parallel)
466 if session is None:
467 session = Session.default_or_local()
--> 468 return session.run(*self, n_parallel=n_parallel)
469
470
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
81
82 tensors = tuple(mt.tensor(t) for t in tensors)
---> 83 result = self._sess.run(*tensors, **kw)
84 self._executed_keys.update(t.key for t in tensors)
85 for t in tensors:
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
47 timeout = kw.pop('timeout', -1)
48 if kw:
---> 49 raise TypeError('run got unexpected key arguments {0}'.format(', '.join(kw.keys())))
50
51 graph = DirectedGraph()
TypeError: run got unexpected key arguments n_parallel
|
TypeError
|
def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True):
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = list(self.op.outputs)
visited = set()
while len(nodes) > 0:
chunk = nodes.pop()
visited.add(chunk)
if not graph.contains(chunk):
graph.add_node(chunk)
children = chunk.inputs or []
for c in children:
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, chunk):
graph.add_edge(c, chunk)
nodes.extend([c for c in children if c not in visited])
if tiled and compose:
graph.compose(keys=keys)
return graph
|
def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True):
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = [self]
visited = set()
while len(nodes) > 0:
chunk = nodes.pop()
visited.add(chunk)
if not graph.contains(chunk):
graph.add_node(chunk)
children = chunk.inputs or []
for c in children:
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, chunk):
graph.add_edge(c, chunk)
nodes.extend([c for c in children if c not in visited])
if tiled and compose:
graph.compose(keys=keys)
return graph
|
https://github.com/mars-project/mars/issues/8
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/qinxuye/Workspace/mars/mars/scheduler/graph.py", line 153, in execute_graph
self.prepare_graph()
File "/Users/qinxuye/Workspace/mars/mars/scheduler/graph.py", line 207, in prepare_graph
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 403, in deserialize_graph
return DirectedGraph.from_json(json_obj)
File "mars/graph.pyx", line 415, in mars.graph.DirectedGraph.from_json
return cls.deserialize(SerialiableGraph.from_json(json_obj))
File "mars/serialize/core.pyx", line 537, in mars.serialize.core.Serializable.from_json
return cls.deserialize(provider, obj)
File "mars/serialize/core.pyx", line 510, in mars.serialize.core.Serializable.deserialize
[cb(key_to_instance) for cb in callbacks]
File "mars/serialize/jsonserializer.pyx", line 176, in mars.serialize.jsonserializer.JsonSerializeProvider._deserialize_list.cb.inner
o = subs[v.key, v.id]
KeyError: ('9e2478de2695b727435601490ebaa999', '5219323976')
2018-12-07T11:14:35Z <Greenlet "Greenlet-0" at 0x138f5a448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x139002cc8>> failed with KeyError
|
KeyError
|
def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True):
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = list(self.op.outputs)
visited = set()
while len(nodes) > 0:
chunk = nodes.pop()
visited.add(chunk)
if not graph.contains(chunk):
graph.add_node(chunk)
children = chunk.inputs or []
for c in children:
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, chunk):
graph.add_edge(c, chunk)
nodes.extend(
[
c
for c in itertools.chain(
*[inp.op.outputs for inp in chunk.inputs or []]
)
if c not in visited
]
)
if tiled and compose:
graph.compose(keys=keys)
return graph
|
def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True):
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = list(self.op.outputs)
visited = set()
while len(nodes) > 0:
chunk = nodes.pop()
visited.add(chunk)
if not graph.contains(chunk):
graph.add_node(chunk)
children = chunk.inputs or []
for c in children:
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, chunk):
graph.add_edge(c, chunk)
nodes.extend([c for c in children if c not in visited])
if tiled and compose:
graph.compose(keys=keys)
return graph
|
https://github.com/mars-project/mars/issues/8
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/qinxuye/Workspace/mars/mars/scheduler/graph.py", line 153, in execute_graph
self.prepare_graph()
File "/Users/qinxuye/Workspace/mars/mars/scheduler/graph.py", line 207, in prepare_graph
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 403, in deserialize_graph
return DirectedGraph.from_json(json_obj)
File "mars/graph.pyx", line 415, in mars.graph.DirectedGraph.from_json
return cls.deserialize(SerialiableGraph.from_json(json_obj))
File "mars/serialize/core.pyx", line 537, in mars.serialize.core.Serializable.from_json
return cls.deserialize(provider, obj)
File "mars/serialize/core.pyx", line 510, in mars.serialize.core.Serializable.deserialize
[cb(key_to_instance) for cb in callbacks]
File "mars/serialize/jsonserializer.pyx", line 176, in mars.serialize.jsonserializer.JsonSerializeProvider._deserialize_list.cb.inner
o = subs[v.key, v.id]
KeyError: ('9e2478de2695b727435601490ebaa999', '5219323976')
2018-12-07T11:14:35Z <Greenlet "Greenlet-0" at 0x138f5a448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x139002cc8>> failed with KeyError
|
KeyError
|
def rand(random_state, *dn, **kw):
"""
Random values in a given shape.
Create a tensor of the given shape and populate it with
random samples from a uniform distributionc
over ``[0, 1)``.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned tensor, should all be positive.
If no argument is given a single Python float is returned.
Returns
-------
out : Tensor, shape ``(d0, d1, ..., dn)``
Random values.
See Also
--------
random
Notes
-----
This is a convenience function. If you want an interface that
takes a shape-tuple as the first argument, refer to
mt.random.random_sample .
Examples
--------
>>> import mars.tensor as mt
>>> mt.random.rand(3,2).execute()
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
"""
if len(dn) == 1 and isinstance(dn[0], (tuple, list)):
raise TypeError("'tuple' object cannot be interpreted as an integer")
if "dtype" not in kw:
kw["dtype"] = np.dtype("f8")
chunks = kw.pop("chunks", None)
op = TensorRand(state=random_state._state, size=dn, **kw)
return op(chunks=chunks)
|
def rand(random_state, *dn, **kw):
"""
Random values in a given shape.
Create a tensor of the given shape and populate it with
random samples from a uniform distributionc
over ``[0, 1)``.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned tensor, should all be positive.
If no argument is given a single Python float is returned.
Returns
-------
out : Tensor, shape ``(d0, d1, ..., dn)``
Random values.
See Also
--------
random
Notes
-----
This is a convenience function. If you want an interface that
takes a shape-tuple as the first argument, refer to
mt.random.random_sample .
Examples
--------
>>> import mars.tensor as mt
>>> mt.random.rand(3,2).execute()
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
"""
if "dtype" not in kw:
kw["dtype"] = np.dtype("f8")
chunks = kw.pop("chunks", None)
op = TensorRand(state=random_state._state, size=dn, **kw)
return op(chunks=chunks)
|
https://github.com/mars-project/mars/issues/1
|
In [1]: import numpy as np
In [2]: np.random.rand((2, 3))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-e49eb55bb286> in <module>()
----> 1 np.random.rand((2, 3))
mtrand.pyx in mtrand.RandomState.rand()
mtrand.pyx in mtrand.RandomState.random_sample()
mtrand.pyx in mtrand.cont0_array()
TypeError: 'tuple' object cannot be interpreted as an integer
|
TypeError
|
def randn(random_state, *dn, **kw):
"""
Return a sample (or samples) from the "standard normal" distribution.
If positive, int_like or int-convertible arguments are provided,
`randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
floats, they are first converted to integers by truncation). A single
float randomly sampled from the distribution is returned if no
argument is provided.
This is a convenience function. If you want an interface that takes a
tuple as the first argument, use `numpy.random.standard_normal` instead.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned tensor, should be all positive.
If no argument is given a single Python float is returned.
Returns
-------
Z : Tensor or float
A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
the standard normal distribution, or a single such float if
no parameters were supplied.
See Also
--------
random.standard_normal : Similar, but takes a tuple as its argument.
Notes
-----
For random samples from :math:`N(\mu, \sigma^2)`, use:
``sigma * mt.random.randn(...) + mu``
Examples
--------
>>> import mars.tensor as mt
>>> mt.random.randn().execute()
2.1923875335537315 #random
Two-by-four tensor of samples from N(3, 6.25):
>>> (2.5 * mt.random.randn(2, 4) + 3).execute()
array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random
"""
if len(dn) == 1 and isinstance(dn[0], (tuple, list)):
raise TypeError("'tuple' object cannot be interpreted as an integer")
if "dtype" not in kw:
kw["dtype"] = np.dtype("f8")
chunks = kw.pop("chunks", None)
op = TensorRandn(state=random_state._state, size=dn, **kw)
return op(chunks=chunks)
|
def randn(random_state, *dn, **kw):
"""
Return a sample (or samples) from the "standard normal" distribution.
If positive, int_like or int-convertible arguments are provided,
`randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
floats, they are first converted to integers by truncation). A single
float randomly sampled from the distribution is returned if no
argument is provided.
This is a convenience function. If you want an interface that takes a
tuple as the first argument, use `numpy.random.standard_normal` instead.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned tensor, should be all positive.
If no argument is given a single Python float is returned.
Returns
-------
Z : Tensor or float
A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
the standard normal distribution, or a single such float if
no parameters were supplied.
See Also
--------
random.standard_normal : Similar, but takes a tuple as its argument.
Notes
-----
For random samples from :math:`N(\mu, \sigma^2)`, use:
``sigma * mt.random.randn(...) + mu``
Examples
--------
>>> import mars.tensor as mt
>>> mt.random.randn().execute()
2.1923875335537315 #random
Two-by-four tensor of samples from N(3, 6.25):
>>> (2.5 * mt.random.randn(2, 4) + 3).execute()
array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random
"""
if "dtype" not in kw:
kw["dtype"] = np.dtype("f8")
chunks = kw.pop("chunks", None)
op = TensorRandn(state=random_state._state, size=dn, **kw)
return op(chunks=chunks)
|
https://github.com/mars-project/mars/issues/1
|
In [1]: import numpy as np
In [2]: np.random.rand((2, 3))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-e49eb55bb286> in <module>()
----> 1 np.random.rand((2, 3))
mtrand.pyx in mtrand.RandomState.rand()
mtrand.pyx in mtrand.RandomState.random_sample()
mtrand.pyx in mtrand.cont0_array()
TypeError: 'tuple' object cannot be interpreted as an integer
|
TypeError
|
def assign_average_vars(self, var_list):
"""Assign variables in var_list with their respective averages.
Args:
var_list: List of model variables to be assigned to their average.
Returns:
assign_op: The op corresponding to the assignment operation of
variables to their average.
Example:
```python
model = tf.Sequential([...])
opt = tfa.optimizers.SWA(
tf.keras.optimizers.SGD(lr=2.0), 100, 10)
model.compile(opt, ...)
model.fit(x, y, ...)
# Update the weights to their mean before saving
opt.assign_average_vars(model.variables)
model.save('model.h5')
```
"""
assign_ops = []
for var in var_list:
try:
assign_ops.append(
var.assign(self.get_slot(var, "average"), use_locking=self._use_locking)
)
except Exception as e:
warnings.warn("Unable to assign average slot to {} : {}".format(var, e))
return tf.group(assign_ops)
|
def assign_average_vars(self, var_list):
"""Assign variables in var_list with their respective averages.
Args:
var_list: List of model variables to be assigned to their average.
Returns:
assign_op: The op corresponding to the assignment operation of
variables to their average.
Example:
```python
model = tf.Sequential([...])
opt = tfa.optimizers.SWA(
tf.keras.optimizers.SGD(lr=2.0), 100, 10)
model.compile(opt, ...)
model.fit(x, y, ...)
# Update the weights to their mean before saving
opt.assign_average_vars(model.variables)
model.save('model.h5')
```
"""
assign_op = tf.group(
[
var.assign(self.get_slot(var, "average"), use_locking=self._use_locking)
for var in var_list
if var.trainable
]
)
return assign_op
|
https://github.com/tensorflow/addons/issues/2255
|
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/root/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/__main__.py", line 45, in <module>
cli.main()
File "/root/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/root/.vscode-server/extensions/ms-python.python-2020.11.371526539/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/usr/lib/python3.6/runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "/usr/lib/python3.6/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/srv/automl/efficientdet/nlp.py", line 65, in <module>
tfa.callbacks.AverageModelCheckpoint(update_weights=True, filepath=os.path.join(MODEL_DIR, 'ckpt_moving_average'))])
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 1137, in fit
callbacks.on_epoch_end(epoch, epoch_logs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/callbacks.py", line 412, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/callbacks.py", line 1249, in on_epoch_end
self._save_model(epoch=epoch, logs=logs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_addons/callbacks/average_model_checkpoint.py", line 76, in _save_model
self.model.optimizer.assign_average_vars(self.model.trainable_weights)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_addons/optimizers/average_wrapper.py", line 125, in assign_average_vars
for var in var_list
File "/usr/local/lib/python3.6/dist-packages/tensorflow_addons/optimizers/average_wrapper.py", line 126, in <listcomp>
if var.trainable
AttributeError: 'TrackableWeightHandler' object has no attribute 'trainable'
|
AttributeError
|
def shear_x(image: TensorLike, level: float, replace: TensorLike) -> TensorLike:
"""Perform shear operation on an image (x-axis).
Args:
image: A 3D image `Tensor`.
level: A float denoting shear element along y-axis
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
Transformed image along X or Y axis, with space outside image
filled with replace.
"""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
return unwrap(image, replace)
|
def shear_x(image: TensorLike, level: float, replace: int) -> TensorLike:
"""Perform shear operation on an image (x-axis).
Args:
image: A 3D image Tensor.
level: A float denoting shear element along y-axis
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
Transformed image along X or Y axis, with space outside image
filled with replace.
"""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
return unwrap(image, replace)
|
https://github.com/tensorflow/addons/issues/2092
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-50c68fed8a6d> in <module>()
----> 1 sheared_image3 = tfa.image.shear_x(float_image, 0.3, replace=0.5)
2 sheared_image4 = tfa.image.shear_y(float_image, 0.4, replace=0.4)
3
4 plt.imshow(sheared_image3)
5 plt.show()
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/transform_ops.py in shear_x(image, level, replace)
324 # 0 1].
325 image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
--> 326 return unwrap(image, replace)
327
328
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/utils.py in unwrap(image, replace)
135 alpha_channel = flattened_image[:, 3]
136
--> 137 replace = tf.constant(replace, tf.uint8)
138 if tf.rank(replace) == 0:
139 replace = tf.expand_dims(replace, 0)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
262 """
263 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264 allow_broadcast=True)
265
266
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
273 with trace.Trace("tf.constant"):
274 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
276
277 g = ops.get_default_graph()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
299 """Implementation of eager constant."""
--> 300 t = convert_to_eager_tensor(value, ctx, dtype)
301 if shape is None:
302 return t
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
TypeError: Cannot convert 0.5 to EagerTensor of dtype uint8
|
TypeError
|
def shear_y(image: TensorLike, level: float, replace: TensorLike) -> TensorLike:
"""Perform shear operation on an image (y-axis).
Args:
image: A 3D image `Tensor`.
level: A float denoting shear element along x-axis
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
Transformed image along X or Y axis, with space outside image
filled with replace.
"""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = transform(wrap(image), [1.0, 0.0, 0.0, level, 1.0, 0.0, 0.0, 0.0])
return unwrap(image, replace)
|
def shear_y(image: TensorLike, level: float, replace: int) -> TensorLike:
"""Perform shear operation on an image (y-axis).
Args:
image: A 3D image `Tensor`.
level: A float denoting shear element along x-axis
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
Transformed image along X or Y axis, with space outside image
filled with replace.
"""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = transform(wrap(image), [1.0, 0.0, 0.0, level, 1.0, 0.0, 0.0, 0.0])
return unwrap(image, replace)
|
https://github.com/tensorflow/addons/issues/2092
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-50c68fed8a6d> in <module>()
----> 1 sheared_image3 = tfa.image.shear_x(float_image, 0.3, replace=0.5)
2 sheared_image4 = tfa.image.shear_y(float_image, 0.4, replace=0.4)
3
4 plt.imshow(sheared_image3)
5 plt.show()
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/transform_ops.py in shear_x(image, level, replace)
324 # 0 1].
325 image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
--> 326 return unwrap(image, replace)
327
328
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/utils.py in unwrap(image, replace)
135 alpha_channel = flattened_image[:, 3]
136
--> 137 replace = tf.constant(replace, tf.uint8)
138 if tf.rank(replace) == 0:
139 replace = tf.expand_dims(replace, 0)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
262 """
263 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264 allow_broadcast=True)
265
266
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
273 with trace.Trace("tf.constant"):
274 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
276
277 g = ops.get_default_graph()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
299 """Implementation of eager constant."""
--> 300 t = convert_to_eager_tensor(value, ctx, dtype)
301 if shape is None:
302 return t
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
TypeError: Cannot convert 0.5 to EagerTensor of dtype uint8
|
TypeError
|
def translate_xy(
image: TensorLike, translate_to: TensorLike, replace: TensorLike
) -> TensorLike:
"""Translates image in X or Y dimension.
Args:
image: A 3D image `Tensor`.
translate_to: A 1D `Tensor` to translate `[x, y]`.
replace: A one or three value 1D `Tensor` to fill empty pixels.
Returns:
Translated image along X or Y axis, with space outside image
filled with replace.
Raises:
ValueError: if axis is neither 0 nor 1.
"""
image = tf.convert_to_tensor(image)
image = wrap(image)
trans = tf.convert_to_tensor(translate_to)
image = translate(image, [trans[0], trans[1]])
return unwrap(image, replace)
|
def translate_xy(
image: TensorLike, translate_to: TensorLike, replace: int
) -> TensorLike:
"""Translates image in X or Y dimension.
Args:
image: A 3D image `Tensor`.
translate_to: A 1D `Tensor` to translate [x, y]
replace: A one or three value 1D `Tensor` to fill empty pixels.
Returns:
Translated image along X or Y axis, with space outside image
filled with replace.
Raises:
ValueError: if axis is neither 0 nor 1.
"""
image = tf.convert_to_tensor(image)
image = wrap(image)
trans = tf.convert_to_tensor(translate_to)
image = translate(image, [trans[0], trans[1]])
return unwrap(image, replace)
|
https://github.com/tensorflow/addons/issues/2092
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-50c68fed8a6d> in <module>()
----> 1 sheared_image3 = tfa.image.shear_x(float_image, 0.3, replace=0.5)
2 sheared_image4 = tfa.image.shear_y(float_image, 0.4, replace=0.4)
3
4 plt.imshow(sheared_image3)
5 plt.show()
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/transform_ops.py in shear_x(image, level, replace)
324 # 0 1].
325 image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
--> 326 return unwrap(image, replace)
327
328
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/utils.py in unwrap(image, replace)
135 alpha_channel = flattened_image[:, 3]
136
--> 137 replace = tf.constant(replace, tf.uint8)
138 if tf.rank(replace) == 0:
139 replace = tf.expand_dims(replace, 0)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
262 """
263 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264 allow_broadcast=True)
265
266
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
273 with trace.Trace("tf.constant"):
274 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
276
277 g = ops.get_default_graph()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
299 """Implementation of eager constant."""
--> 300 t = convert_to_eager_tensor(value, ctx, dtype)
301 if shape is None:
302 return t
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
TypeError: Cannot convert 0.5 to EagerTensor of dtype uint8
|
TypeError
|
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D image `Tensor` with 4 channels.
replace: A one or three value 1D `Tensor` to fill empty pixels.
Returns:
image: A 3D image `Tensor` with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.cast(replace, image.dtype)
if tf.rank(replace) == 0:
replace = tf.expand_dims(replace, 0)
replace = tf.concat([replace, replace, replace], 0)
replace = tf.concat([replace, tf.ones([1], dtype=replace.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
cond = tf.equal(alpha_channel, 1)
cond = tf.expand_dims(cond, 1)
cond = tf.concat([cond, cond, cond, cond], 1)
flattened_image = tf.where(cond, flattened_image, replace)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
|
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D image `Tensor` with 4 channels.
replace: A one or three value 1D `Tensor` to fill empty pixels.
Returns:
image: A 3D image `Tensor` with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.constant(replace, tf.uint8)
if tf.rank(replace) == 0:
replace = tf.expand_dims(replace, 0)
replace = tf.concat([replace, replace, replace], 0)
replace = tf.concat([replace, tf.ones([1], dtype=image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
cond = tf.equal(alpha_channel, 1)
cond = tf.expand_dims(cond, 1)
cond = tf.concat([cond, cond, cond, cond], 1)
flattened_image = tf.where(cond, flattened_image, replace)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
|
https://github.com/tensorflow/addons/issues/2092
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-50c68fed8a6d> in <module>()
----> 1 sheared_image3 = tfa.image.shear_x(float_image, 0.3, replace=0.5)
2 sheared_image4 = tfa.image.shear_y(float_image, 0.4, replace=0.4)
3
4 plt.imshow(sheared_image3)
5 plt.show()
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/transform_ops.py in shear_x(image, level, replace)
324 # 0 1].
325 image = transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
--> 326 return unwrap(image, replace)
327
328
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/image/utils.py in unwrap(image, replace)
135 alpha_channel = flattened_image[:, 3]
136
--> 137 replace = tf.constant(replace, tf.uint8)
138 if tf.rank(replace) == 0:
139 replace = tf.expand_dims(replace, 0)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
262 """
263 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264 allow_broadcast=True)
265
266
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
273 with trace.Trace("tf.constant"):
274 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
276
277 g = ops.get_default_graph()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
299 """Implementation of eager constant."""
--> 300 t = convert_to_eager_tensor(value, ctx, dtype)
301 if shape is None:
302 return t
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
TypeError: Cannot convert 0.5 to EagerTensor of dtype uint8
|
TypeError
|
def __init__(self, weight_decay: Union[FloatTensorLike, Callable], **kwargs):
"""Extension class that adds weight decay to an optimizer.
Args:
weight_decay: A `Tensor`, a floating point value, or a schedule
that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
to decay the variable by, in the update step.
**kwargs: Optional list or tuple or set of `Variable` objects to
decay.
"""
wd = kwargs.pop("weight_decay", weight_decay)
super().__init__(**kwargs)
self._decay_var_list = None # is set in minimize or apply_gradients
self._set_hyper("weight_decay", wd)
|
def __init__(self, weight_decay: Union[FloatTensorLike, Callable], **kwargs):
"""Extension class that adds weight decay to an optimizer.
Args:
weight_decay: A `Tensor` or a floating point value, the factor by
which a variable is decayed in the update step.
**kwargs: Optional list or tuple or set of `Variable` objects to
decay.
"""
wd = kwargs.pop("weight_decay", weight_decay)
super().__init__(**kwargs)
self._decay_var_list = None # is set in minimize or apply_gradients
self._set_hyper("weight_decay", wd)
|
https://github.com/tensorflow/addons/issues/844
|
Traceback (most recent call last):
File "tmp2.py", line 42, in <module>
model.fit(x_train, y_train, epochs=40, validation_split=0.1)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
total_epochs=epochs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
batch_outs = execution_function(iterator)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
distributed_function(input_fn))
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
result = self._call(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 520, in _call
return self._stateless_fn(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1823, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1141, in _filtered_call
self.captured_inputs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1224, in _call_flat
ctx, args, cancellation_manager=cancellation_manager)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 511, in call
ctx=ctx)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 2, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot assign a device for operation sequential/conv2d/Conv2D/ReadVariableOp: Could not satisfy explicit device specification '/job:localhost/replica:0/task:0/device:GPU:0' because no supported kernel for GPU devices is available.
Colocation Debug Info:
Colocation group had the following types and supported devices:
Root Member(assigned_device_name_index_=2 requested_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' assigned_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' resource_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]
RealDiv: GPU CPU XLA_CPU XLA_GPU
LogicalAnd: GPU CPU XLA_CPU XLA_GPU
_Arg: GPU CPU XLA_CPU XLA_GPU
ReadVariableOp: GPU CPU XLA_CPU XLA_GPU
Greater: GPU CPU XLA_CPU XLA_GPU
Sub: GPU CPU XLA_CPU XLA_GPU
Const: GPU CPU XLA_CPU XLA_GPU
Pack: GPU CPU XLA_CPU XLA_GPU
LessEqual: GPU CPU XLA_CPU XLA_GPU
Identity: GPU CPU XLA_CPU XLA_GPU
Cast: GPU CPU XLA_CPU XLA_GPU
Sum: GPU CPU XLA_CPU XLA_GPU
ResourceApplyAdam: GPU CPU XLA_CPU XLA_GPU
Mul: GPU CPU XLA_CPU XLA_GPU
Sqrt: GPU CPU XLA_CPU XLA_GPU
AssignSubVariableOp: GPU CPU XLA_CPU XLA_GPU
AddV2: GPU CPU XLA_CPU XLA_GPU
Pow: GPU CPU XLA_CPU XLA_GPU
Colocation members, user-requested devices, and framework assigned devices, if any:
sequential_conv2d_conv2d_readvariableop_resource (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_m (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_v (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
sequential/conv2d/Conv2D/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Read/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_1 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_2 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_3 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_4 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater_1 (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual_1 (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/and (LogicalAnd) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/preds_c (Pack) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/num_true_conds (Sum) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/n_true_conds (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/AssertGuard/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/cond/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/AssignSubVariableOp (AssignSubVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_1 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add/y (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add (AddV2) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_2 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_3 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow_1 (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Sqrt (Sqrt) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/truediv (RealDiv) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_2 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/ResourceApplyAdam (ResourceApplyAdam) /job:localhost/replica:0/task:0/device:GPU:0
Op: ReadVariableOp
Node attrs: dtype=DT_FLOAT
Registered kernels:
device='XLA_CPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='XLA_GPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='GPU'
device='CPU'
device='XLA_CPU'
device='XLA_GPU'
[[{{node sequential/conv2d/Conv2D/ReadVariableOp}}]] [Op:__inference_distributed_function_2137]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def _decay_weights_op(self, var):
if not self._decay_var_list or var.ref() in self._decay_var_list:
return var.assign_sub(self._decayed_wd(var.dtype) * var, self._use_locking)
return tf.no_op()
|
def _decay_weights_op(self, var):
if not self._decay_var_list or var.ref() in self._decay_var_list:
return var.assign_sub(
self._get_hyper("weight_decay", var.dtype) * var, self._use_locking
)
return tf.no_op()
|
https://github.com/tensorflow/addons/issues/844
|
Traceback (most recent call last):
File "tmp2.py", line 42, in <module>
model.fit(x_train, y_train, epochs=40, validation_split=0.1)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
total_epochs=epochs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
batch_outs = execution_function(iterator)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
distributed_function(input_fn))
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
result = self._call(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 520, in _call
return self._stateless_fn(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1823, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1141, in _filtered_call
self.captured_inputs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1224, in _call_flat
ctx, args, cancellation_manager=cancellation_manager)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 511, in call
ctx=ctx)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 2, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot assign a device for operation sequential/conv2d/Conv2D/ReadVariableOp: Could not satisfy explicit device specification '/job:localhost/replica:0/task:0/device:GPU:0' because no supported kernel for GPU devices is available.
Colocation Debug Info:
Colocation group had the following types and supported devices:
Root Member(assigned_device_name_index_=2 requested_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' assigned_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' resource_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]
RealDiv: GPU CPU XLA_CPU XLA_GPU
LogicalAnd: GPU CPU XLA_CPU XLA_GPU
_Arg: GPU CPU XLA_CPU XLA_GPU
ReadVariableOp: GPU CPU XLA_CPU XLA_GPU
Greater: GPU CPU XLA_CPU XLA_GPU
Sub: GPU CPU XLA_CPU XLA_GPU
Const: GPU CPU XLA_CPU XLA_GPU
Pack: GPU CPU XLA_CPU XLA_GPU
LessEqual: GPU CPU XLA_CPU XLA_GPU
Identity: GPU CPU XLA_CPU XLA_GPU
Cast: GPU CPU XLA_CPU XLA_GPU
Sum: GPU CPU XLA_CPU XLA_GPU
ResourceApplyAdam: GPU CPU XLA_CPU XLA_GPU
Mul: GPU CPU XLA_CPU XLA_GPU
Sqrt: GPU CPU XLA_CPU XLA_GPU
AssignSubVariableOp: GPU CPU XLA_CPU XLA_GPU
AddV2: GPU CPU XLA_CPU XLA_GPU
Pow: GPU CPU XLA_CPU XLA_GPU
Colocation members, user-requested devices, and framework assigned devices, if any:
sequential_conv2d_conv2d_readvariableop_resource (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_m (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_v (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
sequential/conv2d/Conv2D/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Read/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_1 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_2 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_3 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_4 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater_1 (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual_1 (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/and (LogicalAnd) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/preds_c (Pack) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/num_true_conds (Sum) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/n_true_conds (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/AssertGuard/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/cond/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/AssignSubVariableOp (AssignSubVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_1 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add/y (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add (AddV2) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_2 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_3 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow_1 (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Sqrt (Sqrt) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/truediv (RealDiv) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_2 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/ResourceApplyAdam (ResourceApplyAdam) /job:localhost/replica:0/task:0/device:GPU:0
Op: ReadVariableOp
Node attrs: dtype=DT_FLOAT
Registered kernels:
device='XLA_CPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='XLA_GPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='GPU'
device='CPU'
device='XLA_CPU'
device='XLA_GPU'
[[{{node sequential/conv2d/Conv2D/ReadVariableOp}}]] [Op:__inference_distributed_function_2137]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def _decay_weights_sparse_op(self, var, indices):
if not self._decay_var_list or var.ref() in self._decay_var_list:
update = -self._decayed_wd(var.dtype) * tf.gather(var, indices)
return self._resource_scatter_add(var, indices, update)
return tf.no_op()
|
def _decay_weights_sparse_op(self, var, indices):
if not self._decay_var_list or var.ref() in self._decay_var_list:
update = -self._get_hyper("weight_decay", var.dtype) * tf.gather(var, indices)
return self._resource_scatter_add(var, indices, update)
return tf.no_op()
|
https://github.com/tensorflow/addons/issues/844
|
Traceback (most recent call last):
File "tmp2.py", line 42, in <module>
model.fit(x_train, y_train, epochs=40, validation_split=0.1)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
total_epochs=epochs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
batch_outs = execution_function(iterator)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
distributed_function(input_fn))
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
result = self._call(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/def_function.py", line 520, in _call
return self._stateless_fn(*args, **kwds)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1823, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1141, in _filtered_call
self.captured_inputs)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 1224, in _call_flat
ctx, args, cancellation_manager=cancellation_manager)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/function.py", line 511, in call
ctx=ctx)
File "/home/yetao/.local/lib/python3.5/site-packages/tensorflow_core/python/eager/execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 2, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot assign a device for operation sequential/conv2d/Conv2D/ReadVariableOp: Could not satisfy explicit device specification '/job:localhost/replica:0/task:0/device:GPU:0' because no supported kernel for GPU devices is available.
Colocation Debug Info:
Colocation group had the following types and supported devices:
Root Member(assigned_device_name_index_=2 requested_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' assigned_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' resource_device_name_='/job:localhost/replica:0/task:0/device:GPU:0' supported_device_types_=[CPU] possible_devices_=[]
RealDiv: GPU CPU XLA_CPU XLA_GPU
LogicalAnd: GPU CPU XLA_CPU XLA_GPU
_Arg: GPU CPU XLA_CPU XLA_GPU
ReadVariableOp: GPU CPU XLA_CPU XLA_GPU
Greater: GPU CPU XLA_CPU XLA_GPU
Sub: GPU CPU XLA_CPU XLA_GPU
Const: GPU CPU XLA_CPU XLA_GPU
Pack: GPU CPU XLA_CPU XLA_GPU
LessEqual: GPU CPU XLA_CPU XLA_GPU
Identity: GPU CPU XLA_CPU XLA_GPU
Cast: GPU CPU XLA_CPU XLA_GPU
Sum: GPU CPU XLA_CPU XLA_GPU
ResourceApplyAdam: GPU CPU XLA_CPU XLA_GPU
Mul: GPU CPU XLA_CPU XLA_GPU
Sqrt: GPU CPU XLA_CPU XLA_GPU
AssignSubVariableOp: GPU CPU XLA_CPU XLA_GPU
AddV2: GPU CPU XLA_CPU XLA_GPU
Pow: GPU CPU XLA_CPU XLA_GPU
Colocation members, user-requested devices, and framework assigned devices, if any:
sequential_conv2d_conv2d_readvariableop_resource (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_m (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
adamw_adamw_update_resourceapplyadam_v (_Arg) framework assigned device=/job:localhost/replica:0/task:0/device:GPU:0
sequential/conv2d/Conv2D/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Read/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_1 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_2 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_3 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Const_4 (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/Greater_1 (Greater) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/LessEqual_1 (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/and (LogicalAnd) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/preds_c (Pack) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/num_true_conds (Sum) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/n_true_conds (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/LessEqual (LessEqual) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/Assert/AssertGuard/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/PiecewiseConstant/case/cond/Identity (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1/ReadVariableOp (ReadVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_1 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/AssignSubVariableOp (AssignSubVariableOp) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_1 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add/y (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/add (AddV2) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Cast (Cast) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_2 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Identity_3 (Identity) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Pow_1 (Pow) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Sqrt (Sqrt) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_1 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/truediv (RealDiv) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/mul_2 (Mul) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/Const (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_2 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3/x (Const) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/sub_3 (Sub) /job:localhost/replica:0/task:0/device:GPU:0
AdamW/AdamW/update/ResourceApplyAdam (ResourceApplyAdam) /job:localhost/replica:0/task:0/device:GPU:0
Op: ReadVariableOp
Node attrs: dtype=DT_FLOAT
Registered kernels:
device='XLA_CPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='XLA_GPU_JIT'; dtype in [DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT8, ..., DT_BFLOAT16, DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64]
device='GPU'
device='CPU'
device='XLA_CPU'
device='XLA_GPU'
[[{{node sequential/conv2d/Conv2D/ReadVariableOp}}]] [Op:__inference_distributed_function_2137]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def _update_confusion_matrix(self, y_true, y_pred, sample_weight):
y_true = self._safe_squeeze(y_true)
y_pred = self._safe_squeeze(y_pred)
new_conf_mtx = tf.math.confusion_matrix(
labels=y_true,
predictions=y_pred,
num_classes=self.num_classes,
weights=sample_weight,
dtype=tf.float32,
)
return self.conf_mtx.assign_add(new_conf_mtx)
|
def _update_confusion_matrix(self, y_true, y_pred, sample_weight):
y_true = tf.squeeze(y_true)
y_pred = tf.squeeze(y_pred)
new_conf_mtx = tf.math.confusion_matrix(
labels=y_true,
predictions=y_pred,
num_classes=self.num_classes,
weights=sample_weight,
dtype=tf.float32,
)
return self.conf_mtx.assign_add(new_conf_mtx)
|
https://github.com/tensorflow/addons/issues/1962
|
ValueError Traceback (most recent call last)
<ipython-input-12-7ea8164c36b0> in <module>
9
10 # Batch-size = 1: this will raise an exception due to tf.squeeze
---> 11 kappa.update_state(tf.ones(1), tf.ones(1))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/metrics_utils.py in decorated(metric_obj, *args, **kwargs)
88
89 with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
---> 90 update_op = update_state_fn(*args, **kwargs)
91 if update_op is not None: # update_op will be None in eager execution.
92 metric_obj.add_update(update_op)
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/metrics/cohens_kappa.py in update_state(self, y_true, y_pred, sample_weight)
137 Update op.
138 """
--> 139 return self._update(y_true, y_pred, sample_weight)
140
141 def _update_binary_class_model(self, y_true, y_pred, sample_weight=None):
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/metrics/cohens_kappa.py in _update_multi_class_model(self, y_true, y_pred, sample_weight)
153 y_pred = self._cast_ypred(y_pred)
154
--> 155 return self._update_confusion_matrix(y_true, y_pred, sample_weight)
156
157 @tf.function
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/metrics/cohens_kappa.py in _update_confusion_matrix(self, y_true, y_pred, sample_weight)
176 num_classes=self.num_classes,
177 weights=sample_weight,
--> 178 dtype=tf.float32,
179 )
180
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/confusion_matrix.py in confusion_matrix(labels, predictions, num_classes, weights, dtype, name)
190
191 shape = array_ops.stack([num_classes, num_classes])
--> 192 indices = array_ops.stack([labels, predictions], axis=1)
193 values = (array_ops.ones_like(predictions, dtype)
194 if weights is None else weights)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py in stack(values, axis, name)
1338 if axis < -expanded_num_dims or axis >= expanded_num_dims:
1339 raise ValueError("axis = %d not in [%d, %d)" %
-> 1340 (axis, -expanded_num_dims, expanded_num_dims))
1341
1342 return gen_array_ops.pack(values, axis=axis, name=name)
ValueError: axis = 1 not in [-1, 1)
|
ValueError
|
def apply_gradients(self, grads_and_vars, name=None, **kwargs):
self._optimizer._iterations = self.iterations # pylint: disable=protected-access
return super().apply_gradients(grads_and_vars, name, **kwargs)
|
def apply_gradients(self, grads_and_vars, name=None):
self._optimizer._iterations = self.iterations # pylint: disable=protected-access
return super().apply_gradients(grads_and_vars, name)
|
https://github.com/tensorflow/addons/issues/1920
|
Epoch 1/5
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-19-841302a83165> in <module>
----> 1 model.fit(
2 train_ds,
3 epochs=1 if lr_finder else 5,
4 callbacks=callbacks,
5 steps_per_epoch=findlr_steps if lr_finder else None,
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
--> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
507
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
-> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
~/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
TypeError: in user code:
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:540 train_step **
_minimize(self.distribute_strategy, tape, self.optimizer, loss,
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1810 _minimize
optimizer.apply_gradients(
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py:245 apply_gradients
return distribution_strategy_context.get_replica_context().merge_call(
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2420 merge_call
return self._merge_call(merge_fn, args, kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2427 _merge_call
return merge_fn(self._strategy, *args, **kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py:269 _apply_gradients_cross_replica **
maybe_apply_op = smart_cond.smart_cond(should_apply_grads,
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/framework/smart_cond.py:58 smart_cond
return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:507 new_func
return func(*args, **kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/ops/control_flow_ops.py:1177 cond
return cond_v2.cond_v2(pred, true_fn, false_fn, name)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/ops/cond_v2.py:78 cond_v2
true_graph = func_graph_module.func_graph_from_py_func(
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:981 func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py:261 apply_fn
return distribution.extended.call_for_each_replica(
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/home/alex/.local/lib/python3.8/site-packages/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py:279 _apply_gradients
return self._optimizer.apply_gradients(
TypeError: apply_gradients() got an unexpected keyword argument 'experimental_aggregate_gradients'
|
TypeError
|
def cutout(
images: TensorLike,
mask_size: TensorLike,
offset: TensorLike = (0, 0),
constant_values: Number = 0,
data_format: str = "channels_last",
) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to images.
This operation applies a (mask_height x mask_width) mask of zeros to
a location within `img` specified by the offset. The pixel values filled in will be of the
value `replace`. The located where the mask will be applied is randomly
chosen uniformly over the whole images.
Args:
images: A tensor of shape (batch_size, height, width, channels)
(NHWC), (batch_size, channels, height, width)(NCHW).
mask_size: Specifies how big the zero mask that will be generated is that
is applied to the images. The mask will be of size
(mask_height x mask_width). Note: mask_size should be divisible by 2.
offset: A tuple of (height, width) or (batch_size, 2)
constant_values: What pixel value to fill in the images in the area that has
the cutout mask applied to it.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
Returns:
An image Tensor.
Raises:
InvalidArgumentError: if mask_size can't be divisible by 2.
"""
with tf.name_scope("cutout"):
origin_shape = images.shape
offset = tf.convert_to_tensor(offset)
mask_size, data_format, image_height, image_width = _norm_params(
images, mask_size, data_format
)
mask_size = mask_size // 2
if tf.rank(offset) == 1:
offset = tf.expand_dims(offset, 0)
cutout_center_heights = offset[:, 0]
cutout_center_widths = offset[:, 1]
lower_pads = tf.maximum(0, cutout_center_heights - mask_size[0])
upper_pads = tf.maximum(0, image_height - cutout_center_heights - mask_size[0])
left_pads = tf.maximum(0, cutout_center_widths - mask_size[1])
right_pads = tf.maximum(0, image_width - cutout_center_widths - mask_size[1])
cutout_shape = tf.transpose(
[
image_height - (lower_pads + upper_pads),
image_width - (left_pads + right_pads),
],
[1, 0],
)
masks = tf.TensorArray(images.dtype, 0, dynamic_size=True)
for i in tf.range(tf.shape(cutout_shape)[0]):
padding_dims = [
[lower_pads[i], upper_pads[i]],
[left_pads[i], right_pads[i]],
]
mask = tf.pad(
tf.zeros(cutout_shape[i], dtype=images.dtype),
padding_dims,
constant_values=1,
)
masks = masks.write(i, mask)
if data_format == "channels_last":
mask_4d = tf.expand_dims(masks.stack(), -1)
mask = tf.tile(mask_4d, [1, 1, 1, tf.shape(images)[-1]])
else:
mask_4d = tf.expand_dims(masks.stack(), 1)
mask = tf.tile(mask_4d, [1, tf.shape(images)[1], 1, 1])
images = tf.where(
mask == 0,
tf.ones_like(images, dtype=images.dtype) * constant_values,
images,
)
images.set_shape(origin_shape)
return images
|
def cutout(
images: TensorLike,
mask_size: TensorLike,
offset: TensorLike = (0, 0),
constant_values: Number = 0,
data_format: str = "channels_last",
) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to images.
This operation applies a (mask_height x mask_width) mask of zeros to
a location within `img` specified by the offset. The pixel values filled in will be of the
value `replace`. The located where the mask will be applied is randomly
chosen uniformly over the whole images.
Args:
images: A tensor of shape (batch_size, height, width, channels)
(NHWC), (batch_size, channels, height, width)(NCHW).
mask_size: Specifies how big the zero mask that will be generated is that
is applied to the images. The mask will be of size
(mask_height x mask_width). Note: mask_size should be divisible by 2.
offset: A tuple of (height, width) or (batch_size, 2)
constant_values: What pixel value to fill in the images in the area that has
the cutout mask applied to it.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
Returns:
An image Tensor.
Raises:
InvalidArgumentError: if mask_size can't be divisible by 2.
"""
with tf.name_scope("cutout"):
offset = tf.convert_to_tensor(offset)
mask_size, data_format, image_height, image_width = _norm_params(
images, mask_size, data_format
)
mask_size = mask_size // 2
if tf.rank(offset) == 1:
offset = tf.expand_dims(offset, 0)
cutout_center_heights = offset[:, 0]
cutout_center_widths = offset[:, 1]
lower_pads = tf.maximum(0, cutout_center_heights - mask_size[0])
upper_pads = tf.maximum(0, image_height - cutout_center_heights - mask_size[0])
left_pads = tf.maximum(0, cutout_center_widths - mask_size[1])
right_pads = tf.maximum(0, image_width - cutout_center_widths - mask_size[1])
cutout_shape = tf.transpose(
[
image_height - (lower_pads + upper_pads),
image_width - (left_pads + right_pads),
],
[1, 0],
)
masks = tf.TensorArray(images.dtype, 0, dynamic_size=True)
for i in tf.range(tf.shape(cutout_shape)[0]):
padding_dims = [
[lower_pads[i], upper_pads[i]],
[left_pads[i], right_pads[i]],
]
mask = tf.pad(
tf.zeros(cutout_shape[i], dtype=images.dtype),
padding_dims,
constant_values=1,
)
masks = masks.write(i, mask)
if data_format == "channels_last":
mask_4d = tf.expand_dims(masks.stack(), -1)
mask = tf.tile(mask_4d, [1, 1, 1, tf.shape(images)[-1]])
else:
mask_4d = tf.expand_dims(masks.stack(), 1)
mask = tf.tile(mask_4d, [1, tf.shape(images)[1], 1, 1])
images = tf.where(
mask == 0,
tf.ones_like(images, dtype=images.dtype) * constant_values,
images,
)
return images
|
https://github.com/tensorflow/addons/issues/1824
|
Traceback (most recent call last):
File "test.py", line 16, in <module>
model.fit(dataset)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 819, in fit
use_multiprocessing=use_multiprocessing)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 235, in fit
use_multiprocessing=use_multiprocessing)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 593, in _process_training_inputs
use_multiprocessing=use_multiprocessing)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 706, in _process_inputs
use_multiprocessing=use_multiprocessing)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/data_adapter.py", line 702, in __init__
x = standardize_function(x)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 684, in standardize_function
return dataset.map(map_fn, num_parallel_calls=dataset_ops.AUTOTUNE)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 1591, in map
self, map_func, num_parallel_calls, preserve_cardinality=True)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3926, in __init__
use_legacy_function=use_legacy_function)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3147, in __init__
self._function = wrapper_fn._get_concrete_function_internal()
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2395, in _get_concrete_function_internal
*args, **kwargs)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2389, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2703, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2593, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3140, in wrapper_fn
ret = _wrapper_helper(*args)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3082, in _wrapper_helper
ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
File "/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py", line 237, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in converted code:
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py:677 map_fn
batch_size=None)
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py:2410 _standardize_tensors
exception_prefix='input')
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_utils.py:529 standardize_input_data
data = [standardize_single_array(x) for x in data]
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_utils.py:529 <listcomp>
data = [standardize_single_array(x) for x in data]
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_utils.py:451 standardize_single_array
if (x.shape is not None and len(x.shape) == 1 and
/home/clementw/Keras-FewShotLearning/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_shape.py:822 __len__
raise ValueError("Cannot take the length of shape with unknown rank.")
ValueError: Cannot take the length of shape with unknown rank.
|
ValueError
|
def pinball_loss(
y_true: TensorLike, y_pred: TensorLike, tau: FloatTensorLike = 0.5
) -> tf.Tensor:
"""Computes the pinball loss between `y_true` and `y_pred`.
`loss = maximum(tau * (y_true - y_pred), (tau - 1) * (y_true - y_pred))`
In the context of regression this, loss yields an estimator of the tau
conditional quantile.
See: https://en.wikipedia.org/wiki/Quantile_regression
Usage:
```python
loss = pinball_loss([0., 0., 1., 1.], [1., 1., 1., 0.], tau=.1)
# loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))
# = (0.9 + 0.9 + 0 + 0.1) / 4
print('Loss: ', loss.numpy()) # Loss: 0.475
```
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
tau: (Optional) Float in [0, 1] or a tensor taking values in [0, 1] and
shape = `[d0,..., dn]`. It defines the slope of the pinball loss. In
the context of quantile regression, the value of tau determines the
conditional quantile level. When tau = 0.5, this amounts to l1
regression, an estimator of the conditional median (0.5 quantile).
Returns:
pinball_loss: 1-D float `Tensor` with shape [batch_size].
References:
- https://en.wikipedia.org/wiki/Quantile_regression
- https://projecteuclid.org/download/pdfview_1/euclid.bj/1297173840
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
# Broadcast the pinball slope along the batch dimension
tau = tf.expand_dims(tf.cast(tau, y_pred.dtype), 0)
one = tf.cast(1, tau.dtype)
delta_y = y_true - y_pred
pinball = tf.math.maximum(tau * delta_y, (tau - one) * delta_y)
return tf.reduce_mean(pinball, axis=-1)
|
def pinball_loss(
y_true: TensorLike, y_pred: TensorLike, tau: FloatTensorLike = 0.5
) -> tf.Tensor:
"""Computes the pinball loss between `y_true` and `y_pred`.
`loss = maximum(tau * (y_true - y_pred), (tau - 1) * (y_true - y_pred))`
In the context of regression this, loss yields an estimator of the tau
conditional quantile.
See: https://en.wikipedia.org/wiki/Quantile_regression
Usage:
```python
loss = pinball_loss([0., 0., 1., 1.], [1., 1., 1., 0.], tau=.1)
# loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))
# = (0.9 + 0.9 + 0 + 0.1) / 4
print('Loss: ', loss.numpy()) # Loss: 0.475
```
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
tau: (Optional) Float in [0, 1] or a tensor taking values in [0, 1] and
shape = `[d0,..., dn]`. It defines the slope of the pinball loss. In
the context of quantile regression, the value of tau determines the
conditional quantile level. When tau = 0.5, this amounts to l1
regression, an estimator of the conditional median (0.5 quantile).
Returns:
pinball_loss: 1-D float `Tensor` with shape [batch_size].
References:
- https://en.wikipedia.org/wiki/Quantile_regression
- https://projecteuclid.org/download/pdfview_1/euclid.bj/1297173840
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
# broadcast the pinball slope along the batch dimension, and clip to
# acceptable values
tau = tf.expand_dims(tf.cast(tau, y_pred.dtype), 0)
one = tf.cast(1, tau.dtype)
delta_y = y_true - y_pred
pinball = tf.math.maximum(tau * delta_y, (tau - one) * delta_y)
return tf.reduce_mean(tf.keras.backend.batch_flatten(pinball), axis=-1)
|
https://github.com/tensorflow/addons/issues/1202
|
(proof_of_concept_pytest) /mnt/c/Users/gdemarmi/Desktop/projects/addons $ pytest -v tensorflow_addons/losses/quantiles_test.py
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/pep8.py:110: FutureWarning: Possible nested set at position 1
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
================================================================= test session starts ==================================================================
platform linux -- Python 3.7.5, pytest-5.0.1, py-1.8.0, pluggy-0.13.0 -- /home/gdemarmi/softwares/python/anaconda/bin/python
cachedir: .pytest_cache
rootdir: /mnt/c/Users/gdemarmi/Desktop/projects/addons
plugins: arraydiff-0.3, cov-2.7.1, doctestplus-0.2.0, flake8-1.0.4, forked-1.0.2, openfiles-0.4.0, pep8-1.0.6, remotedata-0.3.2, xdist-1.27.0, typeguard-2.7.1
collected 15 items
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_all_correct_unweighted PASSED [ 6%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_config PASSED [ 13%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_invalid_sample_weight FAILED [ 20%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_no_reduction PASSED [ 26%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_sample_weighted PASSED [ 33%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_scalar_weighted PASSED [ 40%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_session SKIPPED [ 46%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_sum_reduction PASSED [ 53%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_timestep_weighted FAILED [ 60%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_unweighted PASSED [ 66%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_unweighted_quantile_0pc PASSED [ 73%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_unweighted_quantile_100pc PASSED [ 80%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_unweighted_quantile_10pc PASSED [ 86%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_unweighted_quantile_90pc PASSED [ 93%]
tensorflow_addons/losses/quantiles_test.py::PinballLossTest::test_zero_weighted PASSED [100%]
======================================================================= FAILURES =======================================================================
______________________________________________________ PinballLossTest.test_invalid_sample_weight ______________________________________________________
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
E tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[1], expected a dimension of 1, got 2 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,2].
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1619: InvalidArgumentError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1619, in _create_c_op
c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[1], expected a dimension of 1, got 2 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,2].
During handling of the above exception, another exception occurred:
ValueError: Can not squeeze dim[1], expected a dimension of 1, got 2 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,2].
During handling of the above exception, another exception occurred:
self = <tensorflow_addons.losses.quantiles_test.PinballLossTest testMethod=test_invalid_sample_weight>, args = (), kwargs = {}
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tensorflow_addons/losses/quantiles_test.py:113: in test_invalid_sample_weight
pin_obj(y_true, y_pred, sample_weight=sample_weight)
E AssertionError: "weights can not be broadcast to values" does not match "Can not squeeze dim[1], expected a dimension of 1, got 2 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,2]."
________________________________________________________ PinballLossTest.test_timestep_weighted ________________________________________________________
graph = <tensorflow.python.framework.ops.Graph object at 0x7f562c26e950>
node_def = name: "pinball_loss/weighted_loss/Squeeze"
op: "Squeeze"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "squeeze_dims"
value {
list {
i: -1
}
}
}
inputs = [<tf.Tensor 'pinball_loss/weighted_loss/Cast:0' shape=(2, 3) dtype=float32>], control_inputs = []
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
E tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[1], expected a dimension of 1, got 3 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,3].
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1619: InvalidArgumentError
During handling of the above exception, another exception occurred:
self = <tensorflow_addons.losses.quantiles_test.PinballLossTest testMethod=test_timestep_weighted>, args = (), kwargs = {}
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tensorflow_addons/losses/quantiles_test.py:95: in test_timestep_weighted
loss = pin_obj(y_true, y_pred, sample_weight=sample_weight)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/keras/losses.py:128: in __call__
losses, sample_weight, reduction=self._get_reduction())
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/keras/utils/losses_utils.py:107: in compute_weighted_loss
losses, sample_weight)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/ops/losses/util.py:145: in scale_losses_by_sample_weight
losses, None, sample_weight)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/ops/losses/util.py:97: in squeeze_or_expand_dimensions
sample_weight = array_ops.squeeze(sample_weight, [-1])
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/util/dispatch.py:180: in wrapper
return target(*args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/util/deprecation.py:507: in new_func
return func(*args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py:3780: in squeeze
return gen_array_ops.squeeze(input, axis, name)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_array_ops.py:9231: in squeeze
"Squeeze", input=input, squeeze_dims=axis, name=name)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/op_def_library.py:742: in _apply_op_helper
attrs=attr_protos, op_def=op_def)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:3322: in _create_op_internal
op_def=op_def)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1786: in __init__
control_input_ops)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
graph = <tensorflow.python.framework.ops.Graph object at 0x7f562c26e950>
node_def = name: "pinball_loss/weighted_loss/Squeeze"
op: "Squeeze"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "squeeze_dims"
value {
list {
i: -1
}
}
}
inputs = [<tf.Tensor 'pinball_loss/weighted_loss/Cast:0' shape=(2, 3) dtype=float32>], control_inputs = []
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
E ValueError: Can not squeeze dim[1], expected a dimension of 1, got 3 for 'pinball_loss/weighted_loss/Squeeze' (op: 'Squeeze') with input shapes: [2,3].
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1622: ValueError
----------------------------------------------------------------- Captured stderr call -----------------------------------------------------------------
W0302 15:37:30.724358 140009831860032 def_function.py:586] 8 out of the last 11 calls to <function pinball_loss at 0x7f562f7ecb00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings is likely due to passing python objects instead of tensors. Also, tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. Please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.
------------------------------------------------------------------ Captured log call -------------------------------------------------------------------
WARNING tensorflow:def_function.py:586 8 out of the last 11 calls to <function pinball_loss at 0x7f562f7ecb00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings is likely due to passing python objects instead of tensors. Also, tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. Please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.
=================================================================== warnings summary ===================================================================
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
from google.protobuf.pyext import _message
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
from collections import Mapping, MutableMapping
-- Docs: https://docs.pytest.org/en/latest/warnings.html
============================================= 2 failed, 12 passed, 1 skipped, 3 warnings in 17.15 seconds ==============================================
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def __init__(
self,
metrics_separator: str = " - ",
overall_bar_format: str = "{l_bar}{bar} {n_fmt}/{total_fmt} ETA: "
"{remaining}s, {rate_fmt}{postfix}",
epoch_bar_format: str = "{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}",
metrics_format: str = "{name}: {value:0.4f}",
update_per_second: int = 10,
leave_epoch_progress: bool = True,
leave_overall_progress: bool = True,
show_epoch_progress: bool = True,
show_overall_progress: bool = True,
):
try:
# import tqdm here because tqdm is not a required package
# for addons
import tqdm
version_message = "Please update your TQDM version to >= 4.36.1, "
"you have version {}. To update, run !pip install -U tqdm"
assert tqdm.__version__ >= "4.36.1", version_message.format(tqdm.__version__)
from tqdm.auto import tqdm
self.tqdm = tqdm
except ImportError:
raise ImportError("Please install tqdm via pip install tqdm")
self.metrics_separator = metrics_separator
self.overall_bar_format = overall_bar_format
self.epoch_bar_format = epoch_bar_format
self.leave_epoch_progress = leave_epoch_progress
self.leave_overall_progress = leave_overall_progress
self.show_epoch_progress = show_epoch_progress
self.show_overall_progress = show_overall_progress
self.metrics_format = metrics_format
# compute update interval (inverse of update per second)
self.update_interval = 1 / update_per_second
self.last_update_time = time.time()
self.overall_progress_tqdm = None
self.epoch_progress_tqdm = None
self.num_epochs = None
self.logs = None
super().__init__()
|
def __init__(
self,
metrics_separator: str = " - ",
overall_bar_format: str = "{l_bar}{bar} {n_fmt}/{total_fmt} ETA: "
"{remaining}s, {rate_fmt}{postfix}",
epoch_bar_format: str = "{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}",
metrics_format: str = "{name}: {value:0.4f}",
update_per_second: int = 10,
leave_epoch_progress: bool = True,
leave_overall_progress: bool = True,
show_epoch_progress: bool = True,
show_overall_progress: bool = True,
):
try:
# import tqdm here because tqdm is not a required package
# for addons
import tqdm
version_message = "Please update your TQDM version to >= 4.36.1, "
"you have version {}. To update, run !pip install -U tqdm"
assert tqdm.__version__ >= "4.36.1", version_message.format(tqdm.__version__)
from tqdm.auto import tqdm
self.tqdm = tqdm
except ImportError:
raise ImportError("Please install tqdm via pip install tqdm")
self.metrics_separator = metrics_separator
self.overall_bar_format = overall_bar_format
self.epoch_bar_format = epoch_bar_format
self.leave_epoch_progress = leave_epoch_progress
self.leave_overall_progress = leave_overall_progress
self.show_epoch_progress = show_epoch_progress
self.show_overall_progress = show_overall_progress
self.metrics_format = metrics_format
# compute update interval (inverse of update per second)
self.update_interval = 1 / update_per_second
self.last_update_time = time.time()
self.overall_progress_tqdm = None
self.epoch_progress_tqdm = None
self.num_epochs = None
self.logs = None
self.metrics = None
|
https://github.com/tensorflow/addons/issues/1495
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-23-fdbb03f574a1> in <module>
48 # class_weight=class_weights,
49 verbose=VERBOSE,
---> 50 callbacks=model_callbacks,
51 )
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
63 def _method_wrapper(self, *args, **kwargs):
64 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 65 return method(self, *args, **kwargs)
66
67 # Running inside `run_distribute_coordinator` already.
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
763 self.stop_training = False
764 train_function = self.make_train_function()
--> 765 callbacks.on_train_begin()
766 # Handle fault-tolerance for multi-worker.
767 # TODO(omalleyt): Fix the ordering issues that mean this has to
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/callbacks.py in on_train_begin(self, logs)
445 logs = self._process_logs(logs)
446 for callback in self.callbacks:
--> 447 callback.on_train_begin(logs)
448
449 def on_train_end(self, logs=None):
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow_addons/callbacks/tqdm_progress_bar.py in on_train_begin(self, logs)
100 def on_train_begin(self, logs=None):
101 self.num_epochs = self.params["epochs"]
--> 102 self.metrics = self.params["metrics"]
103
104 if self.show_overall_progress:
KeyError: 'metrics'
|
KeyError
|
def on_train_begin(self, logs=None):
self.num_epochs = self.params["epochs"]
if self.show_overall_progress:
self.overall_progress_tqdm = self.tqdm(
desc="Training",
total=self.num_epochs,
bar_format=self.overall_bar_format,
leave=self.leave_overall_progress,
dynamic_ncols=True,
unit="epochs",
)
# set counting mode
self.mode = "steps"
self.total_steps = self.params["steps"]
|
def on_train_begin(self, logs=None):
self.num_epochs = self.params["epochs"]
self.metrics = self.params["metrics"]
if self.show_overall_progress:
self.overall_progress_tqdm = self.tqdm(
desc="Training",
total=self.num_epochs,
bar_format=self.overall_bar_format,
leave=self.leave_overall_progress,
dynamic_ncols=True,
unit="epochs",
)
# set counting mode
if "samples" in self.params:
self.mode = "samples"
self.total_steps = self.params["samples"]
else:
self.mode = "steps"
self.total_steps = self.params["steps"]
|
https://github.com/tensorflow/addons/issues/1495
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-23-fdbb03f574a1> in <module>
48 # class_weight=class_weights,
49 verbose=VERBOSE,
---> 50 callbacks=model_callbacks,
51 )
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
63 def _method_wrapper(self, *args, **kwargs):
64 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 65 return method(self, *args, **kwargs)
66
67 # Running inside `run_distribute_coordinator` already.
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
763 self.stop_training = False
764 train_function = self.make_train_function()
--> 765 callbacks.on_train_begin()
766 # Handle fault-tolerance for multi-worker.
767 # TODO(omalleyt): Fix the ordering issues that mean this has to
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/callbacks.py in on_train_begin(self, logs)
445 logs = self._process_logs(logs)
446 for callback in self.callbacks:
--> 447 callback.on_train_begin(logs)
448
449 def on_train_end(self, logs=None):
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow_addons/callbacks/tqdm_progress_bar.py in on_train_begin(self, logs)
100 def on_train_begin(self, logs=None):
101 self.num_epochs = self.params["epochs"]
--> 102 self.metrics = self.params["metrics"]
103
104 if self.show_overall_progress:
KeyError: 'metrics'
|
KeyError
|
def format_metrics(self, logs={}, factor=1):
"""Format metrics in logs into a string.
Arguments:
logs: dictionary of metrics and their values. Defaults to
empty dictionary.
factor (int): The factor we want to divide the metrics in logs
by, useful when we are computing the logs after each batch.
Defaults to 1.
Returns:
metrics_string: a string displaying metrics using the given
formators passed in through the constructor.
"""
metric_value_pairs = []
for key, value in logs.items():
if key in ["batch", "size"]:
continue
pair = self.metrics_format.format(name=key, value=value / factor)
metric_value_pairs.append(pair)
metrics_string = self.metrics_separator.join(metric_value_pairs)
return metrics_string
|
def format_metrics(self, logs={}, factor=1):
"""Format metrics in logs into a string.
Arguments:
logs: dictionary of metrics and their values. Defaults to
empty dictionary.
factor (int): The factor we want to divide the metrics in logs
by, useful when we are computing the logs after each batch.
Defaults to 1.
Returns:
metrics_string: a string displaying metrics using the given
formators passed in through the constructor.
"""
metric_value_pairs = []
for metric in self.metrics:
if metric in logs:
value = logs[metric] / factor
pair = self.metrics_format.format(name=metric, value=value)
metric_value_pairs.append(pair)
metrics_string = self.metrics_separator.join(metric_value_pairs)
return metrics_string
|
https://github.com/tensorflow/addons/issues/1495
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-23-fdbb03f574a1> in <module>
48 # class_weight=class_weights,
49 verbose=VERBOSE,
---> 50 callbacks=model_callbacks,
51 )
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
63 def _method_wrapper(self, *args, **kwargs):
64 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 65 return method(self, *args, **kwargs)
66
67 # Running inside `run_distribute_coordinator` already.
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
763 self.stop_training = False
764 train_function = self.make_train_function()
--> 765 callbacks.on_train_begin()
766 # Handle fault-tolerance for multi-worker.
767 # TODO(omalleyt): Fix the ordering issues that mean this has to
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/callbacks.py in on_train_begin(self, logs)
445 logs = self._process_logs(logs)
446 for callback in self.callbacks:
--> 447 callback.on_train_begin(logs)
448
449 def on_train_end(self, logs=None):
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow_addons/callbacks/tqdm_progress_bar.py in on_train_begin(self, logs)
100 def on_train_begin(self, logs=None):
101 self.num_epochs = self.params["epochs"]
--> 102 self.metrics = self.params["metrics"]
103
104 if self.show_overall_progress:
KeyError: 'metrics'
|
KeyError
|
def __init__(
self,
learning_rate: Union[FloatTensorLike, Callable] = 0.001,
beta_1: FloatTensorLike = 0.9,
beta_2: FloatTensorLike = 0.999,
epsilon: FloatTensorLike = 1e-7,
weight_decay: FloatTensorLike = 0.0,
amsgrad: bool = False,
sma_threshold: FloatTensorLike = 5.0,
# float for total_steps is here to be able to load models created before
# https://github.com/tensorflow/addons/pull/1375 was merged. It should be
# removed for Addons 0.11.
total_steps: Union[int, float] = 0,
warmup_proportion: FloatTensorLike = 0.1,
min_lr: FloatTensorLike = 0.0,
name: str = "RectifiedAdam",
**kwargs,
):
r"""Construct a new RAdam optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. or a schedule
that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
The learning rate.
beta_1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
weight_decay: A floating point value. Weight decay for each param.
amsgrad: boolean. Whether to apply AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
beyond".
sma_threshold. A float value.
The threshold for simple mean average.
total_steps: An integer. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value.
The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying
gradients. Defaults to "RectifiedAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self._set_hyper("decay", self._initial_decay)
self._set_hyper("weight_decay", weight_decay)
self._set_hyper("sma_threshold", sma_threshold)
if isinstance(total_steps, float):
warnings.warn(
"The parameter `total_steps` passed to the __init__ of RectifiedAdam "
"is a float. This behavior is deprecated and in Addons 0.11, this "
"will raise an error. Use an int instead. If you get this message "
"when loading a model, save it again and the `total_steps` parameter "
"will automatically be converted to a int.",
DeprecationWarning,
)
self._set_hyper("total_steps", int(total_steps))
self._set_hyper("warmup_proportion", warmup_proportion)
self._set_hyper("min_lr", min_lr)
self.epsilon = epsilon or tf.keras.backend.epsilon()
self.amsgrad = amsgrad
self._initial_weight_decay = weight_decay
self._initial_total_steps = total_steps
|
def __init__(
self,
learning_rate: Union[FloatTensorLike, Callable] = 0.001,
beta_1: FloatTensorLike = 0.9,
beta_2: FloatTensorLike = 0.999,
epsilon: FloatTensorLike = 1e-7,
weight_decay: FloatTensorLike = 0.0,
amsgrad: bool = False,
sma_threshold: FloatTensorLike = 5.0,
total_steps: int = 0,
warmup_proportion: FloatTensorLike = 0.1,
min_lr: FloatTensorLike = 0.0,
name: str = "RectifiedAdam",
**kwargs,
):
r"""Construct a new RAdam optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. or a schedule
that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
The learning rate.
beta_1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
weight_decay: A floating point value. Weight decay for each param.
amsgrad: boolean. Whether to apply AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
beyond".
sma_threshold. A float value.
The threshold for simple mean average.
total_steps: An integer. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value.
The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying
gradients. Defaults to "RectifiedAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self._set_hyper("decay", self._initial_decay)
self._set_hyper("weight_decay", weight_decay)
self._set_hyper("sma_threshold", sma_threshold)
self._set_hyper("total_steps", float(total_steps))
self._set_hyper("warmup_proportion", warmup_proportion)
self._set_hyper("min_lr", min_lr)
self.epsilon = epsilon or tf.keras.backend.epsilon()
self.amsgrad = amsgrad
self._initial_weight_decay = weight_decay
self._initial_total_steps = total_steps
|
https://github.com/tensorflow/addons/issues/1373
|
Traceback (most recent call last):
File "Boucle_IA.py", line 24, in <module>
model = tf.keras.models.load_model('inference_complete-2020-03-18-Mobilenet.h5', custom_objects={'Lookahead': ranger})
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 146, in load_model
return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py", line 184, in load_model_from_hdf5
training_config, custom_objects))
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saving_utils.py", line 229, in compile_args_from_training_config
optimizer_config, custom_objects=custom_objects)
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\optimizers.py", line 819, in deserialize
printable_module_name='optimizer')
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\utils\generic_utils.py", line 303, in deserialize_keras_object
list(custom_objects.items())))
File "C:\Program Files\Python37\lib\site-packages\tensorflow_addons\optimizers\lookahead.py", line 185, in from_config
custom_objects=custom_objects,
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\optimizers.py", line 819, in deserialize
printable_module_name='optimizer')
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\utils\generic_utils.py", line 303, in deserialize_keras_object
list(custom_objects.items())))
File "C:\Program Files\Python37\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 733, in from_config
return cls(**config)
File "C:\Program Files\Python37\lib\site-packages\typeguard\__init__.py", line 809, in wrapper
check_argument_types(memo)
File "C:\Program Files\Python37\lib\site-packages\typeguard\__init__.py", line 670, in check_argument_types
raise exc from None
File "C:\Program Files\Python37\lib\site-packages\typeguard\__init__.py", line 668, in check_argument_types
check_type(description, value, expected_type, memo)
File "C:\Program Files\Python37\lib\site-packages\typeguard\__init__.py", line 598, in check_type
format(argname, qualified_name(expected_type), qualified_name(value)))
TypeError: type of argument "total_steps" must be int; got float instead
|
TypeError
|
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used
by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
attention=self._get_attention_layer_size(),
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms
),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms
),
) # sometimes a TensorArray
|
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used
by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tf.TensorShape([]),
attention=self._get_attention_layer_size(),
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms
),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms
),
) # sometimes a TensorArray
|
https://github.com/tensorflow/addons/issues/1194
|
Test 1 passed
Traceback (most recent call last):
File "/run/media/zenbook/work/phd/work/__/pb1.py", line 26, in <module>
test(masked=True); print('Test 2 passed')
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 611, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2419, in __call__
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 968, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
/run/media/zenbook/work/phd/work/__/pb1.py:22 test *
return layer(data, mask=mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:652 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:926 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:793 call
zero_output_for_mask=self.zero_output_for_mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/backend.py:4205 rnn
**while_loop_kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/control_flow_ops.py:2688 while_loop
back_prop=back_prop)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:229 while_loop
len_orig_loop_vars], expand_composites=True))
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:1143 _check_shapes_compat
"specify a less-specific shape." % (input_t.name, shape, t.shape))
ValueError: Input tensor 'rnn/AttentionWrapperZeroState/zeros_3:0' enters the loop with shape (), but has shape (4, 1) after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape.
Process finished with exit code 1
|
ValueError
|
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `get_initial_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
inputs: The inputs that will be fed to this cell.
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
if inputs is not None:
batch_size = tf.shape(inputs)[0]
dtype = inputs.dtype
with tf.name_scope(type(self).__name__ + "ZeroState"): # pylint: disable=bad-continuation
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.get_initial_state(
batch_size=batch_size, dtype=dtype
)
error_message = (
"When calling get_initial_state of AttentionWrapper %s: "
% self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output "
"has been tiled to beam_width via "
"tfa.seq2seq.tile_batch, and the batch_size= argument "
"passed to get_initial_state is batch_size * beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)
): # pylint: disable=bad-continuation
cell_state = tf.nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"), cell_state
)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
]
return AttentionWrapperState(
cell_state=cell_state,
attention=tf.zeros(
[batch_size, self._get_attention_layer_size()], dtype=dtype
),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype, size=0, dynamic_size=True, element_shape=alignment.shape
)
if self._alignment_history
else ()
for alignment in initial_alignments
),
)
|
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `get_initial_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
inputs: The inputs that will be fed to this cell.
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
if inputs is not None:
batch_size = tf.shape(inputs)[0]
dtype = inputs.dtype
with tf.name_scope(type(self).__name__ + "ZeroState"): # pylint: disable=bad-continuation
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.get_initial_state(
batch_size=batch_size, dtype=dtype
)
error_message = (
"When calling get_initial_state of AttentionWrapper %s: "
% self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output "
"has been tiled to beam_width via "
"tfa.seq2seq.tile_batch, and the batch_size= argument "
"passed to get_initial_state is batch_size * beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)
): # pylint: disable=bad-continuation
cell_state = tf.nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"), cell_state
)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
]
return AttentionWrapperState(
cell_state=cell_state,
time=tf.zeros([], dtype=tf.int32),
attention=tf.zeros(
[batch_size, self._get_attention_layer_size()], dtype=dtype
),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype, size=0, dynamic_size=True, element_shape=alignment.shape
)
if self._alignment_history
else ()
for alignment in initial_alignments
),
)
|
https://github.com/tensorflow/addons/issues/1194
|
Test 1 passed
Traceback (most recent call last):
File "/run/media/zenbook/work/phd/work/__/pb1.py", line 26, in <module>
test(masked=True); print('Test 2 passed')
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 611, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2419, in __call__
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 968, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
/run/media/zenbook/work/phd/work/__/pb1.py:22 test *
return layer(data, mask=mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:652 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:926 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:793 call
zero_output_for_mask=self.zero_output_for_mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/backend.py:4205 rnn
**while_loop_kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/control_flow_ops.py:2688 while_loop
back_prop=back_prop)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:229 while_loop
len_orig_loop_vars], expand_composites=True))
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:1143 _check_shapes_compat
"specify a less-specific shape." % (input_t.name, shape, t.shape))
ValueError: Input tensor 'rnn/AttentionWrapperZeroState/zeros_3:0' enters the loop with shape (), but has shape (4, 1) after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape.
Process finished with exit code 1
|
ValueError
|
def call(self, inputs, state, **kwargs):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous
state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell
output and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time
step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
**kwargs: Dict, other keyword arguments for the cell call method.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
try:
state = AttentionWrapperState(*state)
except TypeError:
raise TypeError(
"Expected state to be instance of AttentionWrapperState or "
"values that can construct AttentionWrapperState. "
"Received type %s instead." % type(state)
)
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state, **kwargs)
cell_batch_size = (
tf.compat.dimension_value(cell_output.shape[0]) or tf.shape(cell_output)[0]
)
error_message = (
"When applying AttentionWrapper %s: "
% self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input "
"via the tfa.seq2seq.tile_batch function with argument "
"multiple=beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)
): # pylint: disable=bad-continuation
cell_output = tf.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = self._attention_fn(
attention_mechanism,
cell_output,
previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None,
)
alignment_history = (
previous_alignment_history[i].write(
previous_alignment_history[i].size(), alignments
)
if self._alignment_history
else ()
)
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = tf.concat(all_attentions, 1)
next_state = AttentionWrapperState(
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories),
)
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
|
def call(self, inputs, state, **kwargs):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous
state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell
output and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time
step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
**kwargs: Dict, other keyword arguments for the cell call method.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
try:
state = AttentionWrapperState(*state)
except TypeError:
raise TypeError(
"Expected state to be instance of AttentionWrapperState or "
"values that can construct AttentionWrapperState. "
"Received type %s instead." % type(state)
)
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state, **kwargs)
cell_batch_size = (
tf.compat.dimension_value(cell_output.shape[0]) or tf.shape(cell_output)[0]
)
error_message = (
"When applying AttentionWrapper %s: "
% self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input "
"via the tfa.seq2seq.tile_batch function with argument "
"multiple=beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)
): # pylint: disable=bad-continuation
cell_output = tf.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = self._attention_fn(
attention_mechanism,
cell_output,
previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None,
)
alignment_history = (
previous_alignment_history[i].write(state.time, alignments)
if self._alignment_history
else ()
)
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = tf.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories),
)
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
|
https://github.com/tensorflow/addons/issues/1194
|
Test 1 passed
Traceback (most recent call last):
File "/run/media/zenbook/work/phd/work/__/pb1.py", line 26, in <module>
test(masked=True); print('Test 2 passed')
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 611, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2419, in __call__
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 968, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
/run/media/zenbook/work/phd/work/__/pb1.py:22 test *
return layer(data, mask=mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:652 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:926 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/layers/recurrent.py:793 call
zero_output_for_mask=self.zero_output_for_mask)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/keras/backend.py:4205 rnn
**while_loop_kwargs)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/control_flow_ops.py:2688 while_loop
back_prop=back_prop)
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:229 while_loop
len_orig_loop_vars], expand_composites=True))
/home/zenbook/miniconda3/envs/tfn/lib/python3.7/site-packages/tensorflow/python/ops/while_v2.py:1143 _check_shapes_compat
"specify a less-specific shape." % (input_t.name, shape, t.shape))
ValueError: Input tensor 'rnn/AttentionWrapperZeroState/zeros_3:0' enters the loop with shape (), but has shape (4, 1) after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape.
Process finished with exit code 1
|
ValueError
|
def sigmoid_focal_crossentropy(
y_true, y_pred, alpha=0.25, gamma=2.0, from_logits=False
):
"""
Args
y_true: true targets tensor.
y_pred: predictions tensor.
alpha: balancing factor.
gamma: modulating factor.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
same shape as `y_true`; otherwise, it is scalar.
"""
if gamma and gamma < 0:
raise ValueError("Value of gamma should be greater than or equal to zero")
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
# Get the cross_entropy for each entry
ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
# If logits are provided then convert the predictions into probabilities
if from_logits:
pred_prob = tf.sigmoid(y_pred)
else:
pred_prob = y_pred
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = 1.0
modulating_factor = 1.0
if alpha:
alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
if gamma:
gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
modulating_factor = tf.pow((1.0 - p_t), gamma)
# compute the final loss and return
return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
|
def sigmoid_focal_crossentropy(
y_true, y_pred, alpha=0.25, gamma=2.0, from_logits=False
):
"""
Args
y_true: true targets tensor.
y_pred: predictions tensor.
alpha: balancing factor.
gamma: modulating factor.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
same shape as `y_true`; otherwise, it is scalar.
"""
if gamma and gamma < 0:
raise ValueError("Value of gamma should be greater than or equal to zero")
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
if y_true.shape != y_pred.shape:
raise ValueError(
"Shape mismatch for y_true: {} and y_pred: {}".format(
tf.shape(y_true), tf.shape(y_pred)
)
)
# Get the cross_entropy for each entry
ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
# If logits are provided then convert the predictions into probabilities
if from_logits:
pred_prob = tf.sigmoid(y_pred)
else:
pred_prob = y_pred
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = 1.0
modulating_factor = 1.0
if alpha:
alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
if gamma:
gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
modulating_factor = tf.pow((1.0 - p_t), gamma)
# compute the final loss and return
return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
|
https://github.com/tensorflow/addons/issues/876
|
Model: "mlp"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
hidden (Dense) (None, 100) 100100
_________________________________________________________________
output (Dense) (None, 5) 505
=================================================================
Total params: 100,605
Trainable params: 100,605
Non-trainable params: 0
_________________________________________________________________
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-10-2891a1b09888> in <module>()
----> 1 mlp = create_mlp_classifier(1000, 5)
14 frames
<ipython-input-8-686d7bad7efc> in create_mlp_classifier(input_size, num_classes)
20
21 model.summary()
---> 22 model.compile(opt, loss)
23
24 return model
/tensorflow-2.1.0/python3.6/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs)
444
445 # Creates the model loss and weighted metrics sub-graphs.
--> 446 self._compile_weights_loss_and_weighted_metrics()
447
448 # Functions for train, test and predict will
/tensorflow-2.1.0/python3.6/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/training.py in _compile_weights_loss_and_weighted_metrics(self, sample_weights)
1590 # loss_weight_2 * output_2_loss_fn(...) +
1591 # layer losses.
-> 1592 self.total_loss = self._prepare_total_loss(masks)
1593
1594 def _prepare_skip_target_masks(self):
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/training.py in _prepare_total_loss(self, masks)
1650
1651 if hasattr(loss_fn, 'reduction'):
-> 1652 per_sample_losses = loss_fn.call(y_true, y_pred)
1653 weighted_losses = losses_utils.compute_weighted_loss(
1654 per_sample_losses,
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/losses/focal_loss.py in call(self, y_true, y_pred)
86 alpha=self.alpha,
87 gamma=self.gamma,
---> 88 from_logits=self.from_logits)
89
90 def get_config(self):
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
566 xla_context.Exit()
567 else:
--> 568 result = self._call(*args, **kwds)
569
570 if tracing_count == self._get_tracing_count():
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
604 # In this case we have not created variables on the first call. So we can
605 # run the first trace but we should fail if variables are created.
--> 606 results = self._stateful_fn(*args, **kwds)
607 if self._created_variables:
608 raise ValueError("Creating variables on a non-first call to a function"
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/function.py in __call__(self, *args, **kwargs)
2360 """Calls a graph function specialized to the inputs."""
2361 with self._lock:
-> 2362 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
2363 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2364
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/tensorflow-2.1.0/python3.6/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/tensorflow-2.1.0/python3.6/tensorflow_core/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in converted code:
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/losses/focal_loss.py:126 sigmoid_focal_crossentropy *
raise ValueError("Shape mismatch for y_true: {} and y_pred: {}".format(
ValueError: Shape mismatch for y_true: Tensor("Shape:0", shape=(2,), dtype=int32) and y_pred: Tensor("Shape_1:0", shape=(2,),
|
ValueError
|
def build(self, input_shape):
"""Build `Layer`"""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = tf.keras.layers.InputSpec(shape=[None] + input_shape[1:])
if not self.layer.built:
self.layer.build(input_shape)
if not hasattr(self.layer, "kernel"):
raise ValueError(
"`WeightNormalization` must wrap a layer that"
" contains a `kernel` for weights"
)
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))
self.g = self.add_weight(
name="g",
shape=(self.layer_depth,),
initializer="ones",
dtype=self.layer.kernel.dtype,
trainable=True,
)
self.v = self.layer.kernel
self._initialized = self.add_weight(
name="initialized",
shape=None,
initializer="zeros",
dtype=tf.dtypes.bool,
trainable=False,
)
if self.data_init:
# Used for data initialization in self._data_dep_init.
with tf.name_scope("data_dep_init"):
layer_config = tf.keras.layers.serialize(self.layer)
layer_config["config"]["trainable"] = False
self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)
self._naked_clone_layer.build(input_shape)
self._naked_clone_layer.set_weights(self.layer.get_weights())
self._naked_clone_layer.activation = None
self.built = True
|
def build(self, input_shape):
"""Build `Layer`"""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = tf.keras.layers.InputSpec(shape=[None] + input_shape[1:])
if not self.layer.built:
self.layer.build(input_shape)
if not hasattr(self.layer, "kernel"):
raise ValueError(
"`WeightNormalization` must wrap a layer that"
" contains a `kernel` for weights"
)
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))
self.g = self.add_weight(
name="g",
shape=(self.layer_depth,),
initializer="ones",
dtype=self.layer.kernel.dtype,
trainable=True,
)
self.v = self.layer.kernel
self._initialized = self.add_weight(
name="initialized",
shape=None,
initializer="zeros",
dtype=tf.dtypes.bool,
trainable=False,
)
if self.data_init:
# Used for data initialization in self._data_dep_init.
layer_config = tf.keras.layers.serialize(self.layer)
layer_config["config"]["trainable"] = False
self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)
self._naked_clone_layer.build(input_shape)
self._naked_clone_layer.set_weights(self.layer.get_weights())
self._naked_clone_layer.activation = None
self.built = True
|
https://github.com/tensorflow/addons/issues/624
|
['weight_normalization/g:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0', 'weight_normalization/initialized:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0']
Traceback (most recent call last):
File "/home/ubuntu/hankcs/laser/tests/playground/wn_bug.py", line 14, in <module>
model.save_weights('model.h5')
File "/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py", line 1074, in save_weights
saving.save_weights_to_hdf5_group(f, self.layers)
File "/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 631, in save_weights_to_hdf5_group
param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
File "/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py", line 139, in create_dataset
self[name] = dset
File "/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py", line 371, in __setitem__
h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5o.pyx", line 202, in h5py.h5o.link
RuntimeError: Unable to create link (name already exists)
|
RuntimeError
|
def build(self, input_shape):
super(LayerNormLSTMCell, self).build(input_shape)
self.kernel_norm.build([input_shape[0], self.units * 4])
self.recurrent_norm.build([input_shape[0], self.units * 4])
self.state_norm.build([input_shape[0], self.units])
|
def build(self, input_shape):
super(LayerNormLSTMCell, self).build(input_shape)
norm_input_shape = [input_shape[0], self.units]
self.kernel_norm.build(norm_input_shape)
self.recurrent_norm.build(norm_input_shape)
self.state_norm.build(norm_input_shape)
|
https://github.com/tensorflow/addons/issues/321
|
ERROR: testCellOutput (__main__.LayerNormLSTMCellTest)
testCellOutput (__main__.LayerNormLSTMCellTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 37, in testPartExecutor
yield
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 162, in run
testMethod()
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/test_util.py", line 1053, in decorated
f(self, *args, **kwargs)
File "/tmpfs/tmp/bazel/sandbox/linux-sandbox/8/execroot/__main__/bazel-out/k8-opt/bin/tensorflow_addons/rnn/cell_test.runfiles/__main__/tensorflow_addons/rnn/cell_test.py", line 199, in testCellOutput
output, output_states = cell(x, state)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 716, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/recurrent.py", line 135, in call
inputs, states = cell.call(inputs, states, **kwargs)
File "/tmpfs/tmp/bazel/sandbox/linux-sandbox/8/execroot/__main__/bazel-out/k8-opt/bin/tensorflow_addons/rnn/cell_test.runfiles/__main__/tensorflow_addons/rnn/cell.py", line 338, in call
z = self.kernel_norm(keras.backend.dot(inputs, self.kernel))
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 716, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/normalization.py", line 1022, in call
variance_epsilon=self.epsilon)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/ops/nn_impl.py", line 1395, in batch_normalization
return x * math_ops.cast(inv, x.dtype) + math_ops.cast(
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/ops/math_ops.py", line 890, in binary_op_wrapper
return func(x, y, name=name)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/ops/math_ops.py", line 1197, in _mul_dispatch
return gen_math_ops.mul(x, y, name=name)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/ops/gen_math_ops.py", line 6592, in mul
"Mul", x=x, y=y, name=name)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/op_def_library.py", line 793, in _apply_op_helper
op_def=op_def)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/ops.py", line 3279, in create_op
attrs, op_def, compute_device)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/ops.py", line 3348, in _create_op_internal
op_def=op_def)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/ops.py", line 1716, in __init__
control_input_ops)
File "/tmpfs/src/github/tensorflow_addons/tf/local/lib/python2.7/site-packages/tensorflow_core/python/framework/ops.py", line 1556, in _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 8 and 2 for 'stacked_rnn_cells/kernel_norm/batchnorm/mul_1' (op: 'Mul') with input shapes: [1,8], [1,2].
|
ValueError
|
def triplet_semihard_loss(y_true, y_pred, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
"""
labels, embeddings = y_true, y_pred
# Reshape label tensor to [batch_size, 1].
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = metric_learning.pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
batch_size = tf.size(labels)
# Compute the mask.
pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
mask = tf.math.logical_and(
tf.tile(adjacency_not, [batch_size, 1]),
tf.math.greater(
pdist_matrix_tile, tf.reshape(tf.transpose(pdist_matrix), [-1, 1])
),
)
mask_final = tf.reshape(
tf.math.greater(
tf.math.reduce_sum(
tf.cast(mask, dtype=tf.dtypes.float32), 1, keepdims=True
),
0.0,
),
[batch_size, batch_size],
)
mask_final = tf.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
mask = tf.cast(mask, dtype=tf.dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = tf.reshape(
_masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size]
)
negatives_outside = tf.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = tf.tile(
_masked_maximum(pdist_matrix, adjacency_not), [1, batch_size]
)
semi_hard_negatives = tf.where(mask_final, negatives_outside, negatives_inside)
loss_mat = tf.math.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
# In lifted-struct, the authors multiply 0.5 for upper triangular
# in semihard, they take all positive pairs except the diagonal.
num_positives = tf.math.reduce_sum(mask_positives)
triplet_loss = tf.math.truediv(
tf.math.reduce_sum(
tf.math.maximum(tf.math.multiply(loss_mat, mask_positives), 0.0)
),
num_positives,
)
return triplet_loss
|
def triplet_semihard_loss(y_true, y_pred, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
"""
labels, embeddings = y_true, y_pred
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = metric_learning.pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
batch_size = tf.size(labels)
# Compute the mask.
pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
mask = tf.math.logical_and(
tf.tile(adjacency_not, [batch_size, 1]),
tf.math.greater(
pdist_matrix_tile, tf.reshape(tf.transpose(pdist_matrix), [-1, 1])
),
)
mask_final = tf.reshape(
tf.math.greater(
tf.math.reduce_sum(
tf.cast(mask, dtype=tf.dtypes.float32), 1, keepdims=True
),
0.0,
),
[batch_size, batch_size],
)
mask_final = tf.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
mask = tf.cast(mask, dtype=tf.dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = tf.reshape(
_masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size]
)
negatives_outside = tf.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = tf.tile(
_masked_maximum(pdist_matrix, adjacency_not), [1, batch_size]
)
semi_hard_negatives = tf.where(mask_final, negatives_outside, negatives_inside)
loss_mat = tf.math.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
# In lifted-struct, the authors multiply 0.5 for upper triangular
# in semihard, they take all positive pairs except the diagonal.
num_positives = tf.math.reduce_sum(mask_positives)
triplet_loss = tf.math.truediv(
tf.math.reduce_sum(
tf.math.maximum(tf.math.multiply(loss_mat, mask_positives), 0.0)
),
num_positives,
)
return triplet_loss
|
https://github.com/tensorflow/addons/issues/295
|
AssertionError Traceback (most recent call last)
<ipython-input-13-f35ffd674f2d> in <module>
5 model = Sequential()
6 model.add(Dense(32, input_dim=784))
----> 7 model.compile(loss=tfa.losses.triplet_semihard_loss, optimizer=tf.keras.optimizers.Adam())
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
456 self._self_setattr_tracking = False # pylint: disable=protected-access
457 try:
--> 458 result = method(self, *args, **kwargs)
459 finally:
460 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs)
335
336 # Creates the model loss and weighted metrics sub-graphs.
--> 337 self._compile_weights_loss_and_weighted_metrics()
338
339 # Functions for train, test and predict will
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
456 self._self_setattr_tracking = False # pylint: disable=protected-access
457 try:
--> 458 result = method(self, *args, **kwargs)
459 finally:
460 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py in _compile_weights_loss_and_weighted_metrics(self, sample_weights)
1492 # loss_weight_2 * output_2_loss_fn(...) +
1493 # layer losses.
-> 1494 self.total_loss = self._prepare_total_loss(masks)
1495
1496 def _prepare_skip_target_masks(self):
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py in _prepare_total_loss(self, masks)
1552
1553 if hasattr(loss_fn, 'reduction'):
-> 1554 per_sample_losses = loss_fn.call(y_true, y_pred)
1555 weighted_losses = losses_utils.compute_weighted_loss(
1556 per_sample_losses,
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/keras/losses.py in call(self, y_true, y_pred)
213 Loss values per sample.
214 """
--> 215 return self.fn(y_true, y_pred, **self._fn_kwargs)
216
217 def get_config(self):
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
406 # In this case we have not created variables on the first call. So we can
407 # run the first trace but we should fail if variables are created.
--> 408 results = self._stateful_fn(*args, **kwds)
409 if self._created_variables:
410 raise ValueError("Creating variables on a non-first call to a function"
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
1332 def __call__(self, *args, **kwargs):
1333 """Calls a graph function specialized to the inputs."""
-> 1334 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
1335 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
1336
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
1646 graph_function = self._function_cache.primary.get(cache_key, None)
1647 if graph_function is None:
-> 1648 graph_function = self._create_graph_function(args, kwargs)
1649 self._function_cache.primary[cache_key] = graph_function
1650 return graph_function, args, kwargs
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
1539 arg_names=arg_names,
1540 override_flat_arg_shapes=override_flat_arg_shapes,
-> 1541 capture_by_value=self._capture_by_value),
1542 self._function_attributes)
1543
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
714 converted_func)
715
--> 716 func_outputs = python_func(*func_args, **func_kwargs)
717
718 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
307 # __wrapped__ allows AutoGraph to swap in a converted function. We give
308 # the function a weak reference to itself to avoid a reference cycle.
--> 309 return weak_wrapped_fn().__wrapped__(*args, **kwds)
310 weak_wrapped_fn = weakref.ref(wrapped_fn)
311
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
704 except Exception as e: # pylint:disable=broad-except
705 if hasattr(e, "ag_error_metadata"):
--> 706 raise e.ag_error_metadata.to_exception(type(e))
707 else:
708 raise
AssertionError: in converted code:
/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow_addons/losses/triplet.py:78 triplet_semihard_loss *
assert lshape.shape == 1
AssertionError:
|
AssertionError
|
def __init__(self, model, autobalance=False):
super(ComputeLoss, self).__init__()
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=0.0)
# Focal loss
g = h["fl_gamma"] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = (
model.module.model[-1] if is_parallel(model) else model.model[-1]
) # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(
det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]
) # P3-P7
self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = (
BCEcls,
BCEobj,
model.gr,
h,
autobalance,
)
for k in "na", "nc", "nl", "anchors":
setattr(self, k, getattr(det, k))
|
def __init__(self, model, autobalance=False):
super(ComputeLoss, self).__init__()
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=0.0)
# Focal loss
g = h["fl_gamma"] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = (
model.module.model[-1] if is_parallel(model) else model.model[-1]
) # Detect() module
self.balance = {
3: [4.0, 1.0, 0.4],
4: [4.0, 1.0, 0.25, 0.06],
5: [4.0, 1.0, 0.25, 0.06, 0.02],
}[det.nl]
self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = (
BCEcls,
BCEobj,
model.gr,
h,
autobalance,
)
for k in "na", "nc", "nl", "anchors":
setattr(self, k, getattr(det, k))
|
https://github.com/ultralytics/yolov5/issues/2255
|
Traceback (most recent call last):
File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-5-be04c762b799>", line 5, in <module>
c = a / 0
RuntimeError: ZeroDivisionError
|
RuntimeError
|
def train(hyp, opt, device, tb_writer=None, wandb=None):
logger.info(
colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items())
)
save_dir, epochs, batch_size, total_batch_size, weights, rank = (
Path(opt.save_dir),
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# Directories
wdir = save_dir / "weights"
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / "last.pt"
best = wdir / "best.pt"
results_file = save_dir / "results.txt"
# Save run settings
with open(save_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(save_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict["train"]
test_path = data_dict["val"]
nc = 1 if opt.single_cls else int(data_dict["nc"]) # number of classes
names = (
["item"]
if opt.single_cls and len(data_dict["names"]) != 1
else data_dict["names"]
) # class names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
if hyp.get("anchors"):
ckpt["model"].yaml["anchors"] = round(hyp["anchors"]) # force autoanchor
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg or hyp.get("anchors") else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print("freezing %s" % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
logger.info(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = (
lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp["lrf"]) + hyp["lrf"]
) # linear
else:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
if rank in [-1, 0] and wandb and wandb.run is None:
opt.hyp = hyp # add hyperparameters
wandb_run = wandb.init(
config=opt,
resume="allow",
project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem,
name=save_dir.stem,
id=ckpt.get("wandb_id") if "ckpt" in locals() else None,
)
loggers = {"wandb": wandb} # loggers dict
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if opt.resume:
assert start_epoch > 0, (
"%s training to %g epochs is finished, nothing to resume."
% (weights, epochs)
)
if epochs < start_epoch:
logger.info(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info("Using SyncBatchNorm()")
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
rank=rank,
world_size=opt.world_size,
workers=opt.workers,
image_weights=opt.image_weights,
quad=opt.quad,
prefix=colorstr("train: "),
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Process 0
if rank in [-1, 0]:
ema.updates = start_epoch * nb // accumulate # set EMA updates
testloader = create_dataloader(
test_path,
imgsz_test,
batch_size * 2,
gs,
opt, # testloader
hyp=hyp,
cache=opt.cache_images and not opt.notest,
rect=True,
rank=-1,
world_size=opt.world_size,
workers=opt.workers,
pad=0.5,
prefix=colorstr("val: "),
)[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram("classes", c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Model parameters
hyp["box"] *= 3.0 / nl # scale to layers
hyp["cls"] *= nc / 80.0 * 3.0 / nl # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3.0 / nl # scale to image size and layers
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = (
labels_to_class_weights(dataset.labels, nc).to(device) * nc
) # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(
round(hyp["warmup_epochs"] * nb), 1000
) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(
f"Image sizes {imgsz} train, {imgsz_test} test\n"
f"Using {dataloader.num_workers} dataloader workers\n"
f"Logging results to {save_dir}\n"
f"Starting training for {epochs} epochs..."
)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = (
model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc
) # class weights
iw = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=cw
) # image weights
dataset.indices = random.choices(
range(dataset.n), weights=iw, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (
torch.tensor(dataset.indices)
if rank == 0
else torch.zeros(dataset.n)
).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(
("\n" + "%10s" * 8)
% ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "targets", "img_size")
)
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni,
xi,
[
hyp["warmup_bias_lr"] if j == 2 else 0.0,
x["initial_lr"] * lf(epoch),
],
)
if "momentum" in x:
x["momentum"] = np.interp(
ni, xi, [hyp["warmup_momentum"], hyp["momentum"]]
)
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(
pred, targets.to(device)
) # loss scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.0
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f"train_batch{ni}.jpg" # filename
Thread(
target=plot_images, args=(imgs, targets, paths, f), daemon=True
).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
elif plots and ni == 10 and wandb:
wandb.log(
{
"Mosaics": [
wandb.Image(str(x), caption=x.name)
for x in save_dir.glob("train*.jpg")
if x.exists()
]
},
commit=False,
)
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema:
ema.update_attr(
model,
include=[
"yaml",
"nc",
"hyp",
"gr",
"names",
"stride",
"class_weights",
],
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
log_imgs=opt.log_imgs if wandb else 0,
compute_loss=compute_loss,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Log
tags = [
"train/box_loss",
"train/obj_loss",
"train/cls_loss", # train loss
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/box_loss",
"val/obj_loss",
"val/cls_loss", # val loss
"x/lr0",
"x/lr1",
"x/lr2",
] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb:
wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
"wandb_id": wandb_run.id if wandb else None,
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
final = best if best.exists() else last # final model
for f in [last, best]:
if f.exists():
strip_optimizer(f) # strip optimizers
if opt.bucket:
os.system(f"gsutil cp {final} gs://{opt.bucket}/weights") # upload
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb:
files = [
"results.png",
"confusion_matrix.png",
*[f"{x}_curve.png" for x in ("F1", "PR", "P", "R")],
]
wandb.log(
{
"Results": [
wandb.Image(str(save_dir / f), caption=f)
for f in files
if (save_dir / f).exists()
]
}
)
if opt.log_artifacts:
wandb.log_artifact(
artifact_or_path=str(final), type="model", name=save_dir.stem
)
# Test best.pt
logger.info(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
if opt.data.endswith("coco.yaml") and nc == 80: # if COCO
for conf, iou, save_json in (
[0.25, 0.45, False],
[0.001, 0.65, True],
): # speed, mAP tests
results, _, _ = test.test(
opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=conf,
iou_thres=iou,
model=attempt_load(final, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=save_json,
plots=False,
)
else:
dist.destroy_process_group()
wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
|
def train(hyp, opt, device, tb_writer=None, wandb=None):
logger.info(
colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items())
)
save_dir, epochs, batch_size, total_batch_size, weights, rank = (
Path(opt.save_dir),
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# Directories
wdir = save_dir / "weights"
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / "last.pt"
best = wdir / "best.pt"
results_file = save_dir / "results.txt"
# Save run settings
with open(save_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(save_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict["train"]
test_path = data_dict["val"]
nc = 1 if opt.single_cls else int(data_dict["nc"]) # number of classes
names = (
["item"]
if opt.single_cls and len(data_dict["names"]) != 1
else data_dict["names"]
) # class names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
if hyp.get("anchors"):
ckpt["model"].yaml["anchors"] = round(hyp["anchors"]) # force autoanchor
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg or hyp.get("anchors") else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print("freezing %s" % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
logger.info(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = (
lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp["lrf"]) + hyp["lrf"]
) # linear
else:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
if rank in [-1, 0] and wandb and wandb.run is None:
opt.hyp = hyp # add hyperparameters
wandb_run = wandb.init(
config=opt,
resume="allow",
project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem,
name=save_dir.stem,
id=ckpt.get("wandb_id") if "ckpt" in locals() else None,
)
loggers = {"wandb": wandb} # loggers dict
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if opt.resume:
assert start_epoch > 0, (
"%s training to %g epochs is finished, nothing to resume."
% (weights, epochs)
)
if epochs < start_epoch:
logger.info(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = int(model.stride.max()) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info("Using SyncBatchNorm()")
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
rank=rank,
world_size=opt.world_size,
workers=opt.workers,
image_weights=opt.image_weights,
quad=opt.quad,
prefix=colorstr("train: "),
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Process 0
if rank in [-1, 0]:
ema.updates = start_epoch * nb // accumulate # set EMA updates
testloader = create_dataloader(
test_path,
imgsz_test,
batch_size * 2,
gs,
opt, # testloader
hyp=hyp,
cache=opt.cache_images and not opt.notest,
rect=True,
rank=-1,
world_size=opt.world_size,
workers=opt.workers,
pad=0.5,
prefix=colorstr("val: "),
)[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram("classes", c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Model parameters
hyp["box"] *= 3.0 / nl # scale to layers
hyp["cls"] *= nc / 80.0 * 3.0 / nl # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3.0 / nl # scale to image size and layers
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = (
labels_to_class_weights(dataset.labels, nc).to(device) * nc
) # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(
round(hyp["warmup_epochs"] * nb), 1000
) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(
f"Image sizes {imgsz} train, {imgsz_test} test\n"
f"Using {dataloader.num_workers} dataloader workers\n"
f"Logging results to {save_dir}\n"
f"Starting training for {epochs} epochs..."
)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = (
model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc
) # class weights
iw = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=cw
) # image weights
dataset.indices = random.choices(
range(dataset.n), weights=iw, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (
torch.tensor(dataset.indices)
if rank == 0
else torch.zeros(dataset.n)
).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(
("\n" + "%10s" * 8)
% ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "targets", "img_size")
)
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni,
xi,
[
hyp["warmup_bias_lr"] if j == 2 else 0.0,
x["initial_lr"] * lf(epoch),
],
)
if "momentum" in x:
x["momentum"] = np.interp(
ni, xi, [hyp["warmup_momentum"], hyp["momentum"]]
)
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(
pred, targets.to(device)
) # loss scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.0
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f"train_batch{ni}.jpg" # filename
Thread(
target=plot_images, args=(imgs, targets, paths, f), daemon=True
).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
elif plots and ni == 10 and wandb:
wandb.log(
{
"Mosaics": [
wandb.Image(str(x), caption=x.name)
for x in save_dir.glob("train*.jpg")
if x.exists()
]
},
commit=False,
)
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema:
ema.update_attr(
model,
include=[
"yaml",
"nc",
"hyp",
"gr",
"names",
"stride",
"class_weights",
],
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
log_imgs=opt.log_imgs if wandb else 0,
compute_loss=compute_loss,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Log
tags = [
"train/box_loss",
"train/obj_loss",
"train/cls_loss", # train loss
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/box_loss",
"val/obj_loss",
"val/cls_loss", # val loss
"x/lr0",
"x/lr1",
"x/lr2",
] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb:
wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
"wandb_id": wandb_run.id if wandb else None,
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
final = best if best.exists() else last # final model
for f in [last, best]:
if f.exists():
strip_optimizer(f) # strip optimizers
if opt.bucket:
os.system(f"gsutil cp {final} gs://{opt.bucket}/weights") # upload
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb:
files = [
"results.png",
"confusion_matrix.png",
*[f"{x}_curve.png" for x in ("F1", "PR", "P", "R")],
]
wandb.log(
{
"Results": [
wandb.Image(str(save_dir / f), caption=f)
for f in files
if (save_dir / f).exists()
]
}
)
if opt.log_artifacts:
wandb.log_artifact(
artifact_or_path=str(final), type="model", name=save_dir.stem
)
# Test best.pt
logger.info(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
if opt.data.endswith("coco.yaml") and nc == 80: # if COCO
for conf, iou, save_json in (
[0.25, 0.45, False],
[0.001, 0.65, True],
): # speed, mAP tests
results, _, _ = test.test(
opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=conf,
iou_thres=iou,
model=attempt_load(final, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=save_json,
plots=False,
)
else:
dist.destroy_process_group()
wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/2255
|
Traceback (most recent call last):
File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-5-be04c762b799>", line 5, in <module>
c = a / 0
RuntimeError: ZeroDivisionError
|
RuntimeError
|
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=720, width=1280, RGB images example inputs are:
# filename: imgs = 'data/samples/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
# numpy: = np.zeros((720,1280,3)) # HWC
# torch: = torch.zeros(16,3,720,1280) # BCHW
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (
(len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])
) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
if isinstance(im, str): # filename or uri
im, f = (
Image.open(
requests.get(im, stream=True).raw if im.startswith("http") else im
),
im,
) # open
im.filename = f # for uri
files.append(
Path(im.filename).with_suffix(".jpg").name
if isinstance(im, Image.Image)
else f"image{i}.jpg"
)
im = np.array(im) # to numpy
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = (
im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)
) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = size / max(s) # gain
shape1.append([y * g for y in s])
imgs[i] = im # update
shape1 = [
make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)
] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0 # uint8 to fp16/32
# Inference
with torch.no_grad():
y = self.model(x, augment, profile)[0] # forward
y = non_max_suppression(
y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes
) # NMS
# Post-process
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
return Detections(imgs, y, files, self.names)
|
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=720, width=1280, RGB images example inputs are:
# filename: imgs = 'data/samples/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
# numpy: = np.zeros((720,1280,3)) # HWC
# torch: = torch.zeros(16,3,720,1280) # BCHW
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (
(len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])
) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
if isinstance(im, str): # filename or uri
im = Image.open(
requests.get(im, stream=True).raw if im.startswith("http") else im
) # open
files.append(
Path(im.filename).with_suffix(".jpg").name
if isinstance(im, Image.Image)
else f"image{i}.jpg"
)
im = np.array(im) # to numpy
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = (
im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)
) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = size / max(s) # gain
shape1.append([y * g for y in s])
imgs[i] = im # update
shape1 = [
make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)
] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0 # uint8 to fp16/32
# Inference
with torch.no_grad():
y = self.model(x, augment, profile)[0] # forward
y = non_max_suppression(
y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes
) # NMS
# Post-process
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
return Detections(imgs, y, files, self.names)
|
https://github.com/ultralytics/yolov5/issues/2246
|
ValueError Traceback (most recent call last)
<ipython-input-1-86a3d667ec69> in <module>()
9
10 # Inference
---> 11 results = model(imgs)
12
13 # Results
2 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/root/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in forward(self, imgs, size, augment, profile)
201 if isinstance(im, str): # filename or uri
202 im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open
--> 203 files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg')
204 im = np.array(im) # to numpy
205 if im.shape[0] < 5: # image in CHW
/usr/lib/python3.6/pathlib.py in with_suffix(self, suffix)
835 name = self.name
836 if not name:
--> 837 raise ValueError("%r has an empty name" % (self,))
838 old_suffix = self.suffix
839 if not old_suffix:
ValueError: PosixPath('.') has an empty name
|
ValueError
|
def display(self, pprint=False, show=False, save=False, render=False, save_dir=""):
colors = color_list()
for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
str = f"image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} "
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render:
img = (
Image.fromarray(img) if isinstance(img, np.ndarray) else img
) # from np
for *box, conf, cls in pred: # xyxy, confidence, class
# str += '%s %.2f, ' % (names[int(cls)], conf) # label
ImageDraw.Draw(img).rectangle(
box, width=4, outline=colors[int(cls) % 10]
) # plot
if pprint:
print(str.rstrip(", "))
if show:
img.show(self.files[i]) # show
if save:
f = Path(save_dir) / self.files[i]
img.save(f) # save
print(
f"{'Saving' * (i == 0)} {f},", end="" if i < self.n - 1 else " done.\n"
)
if render:
self.imgs[i] = np.asarray(img)
|
def display(self, pprint=False, show=False, save=False, render=False, save_dir=""):
colors = color_list()
for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
str = f"image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} "
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render:
img = (
Image.fromarray(img.astype(np.uint8))
if isinstance(img, np.ndarray)
else img
) # from np
for *box, conf, cls in pred: # xyxy, confidence, class
# str += '%s %.2f, ' % (names[int(cls)], conf) # label
ImageDraw.Draw(img).rectangle(
box, width=4, outline=colors[int(cls) % 10]
) # plot
if pprint:
print(str.rstrip(", "))
if show:
img.show(self.files[i]) # show
if save:
f = Path(save_dir) / self.files[i]
img.save(f) # save
print(
f"{'Saving' * (i == 0)} {f},", end="" if i < self.n - 1 else " done.\n"
)
if render:
self.imgs[i] = np.asarray(img)
|
https://github.com/ultralytics/yolov5/issues/2246
|
ValueError Traceback (most recent call last)
<ipython-input-1-86a3d667ec69> in <module>()
9
10 # Inference
---> 11 results = model(imgs)
12
13 # Results
2 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/root/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in forward(self, imgs, size, augment, profile)
201 if isinstance(im, str): # filename or uri
202 im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open
--> 203 files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg')
204 im = np.array(im) # to numpy
205 if im.shape[0] < 5: # image in CHW
/usr/lib/python3.6/pathlib.py in with_suffix(self, suffix)
835 name = self.name
836 if not name:
--> 837 raise ValueError("%r has an empty name" % (self,))
838 old_suffix = self.suffix
839 if not old_suffix:
ValueError: PosixPath('.') has an empty name
|
ValueError
|
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(
GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False),
) # pw-linear
self.shortcut = (
nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False))
if s == 2
else nn.Identity()
)
|
def __init__(self, c1, c2, k, s):
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(
GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False),
) # pw-linear
self.shortcut = (
nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False))
if s == 2
else nn.Identity()
)
|
https://github.com/ultralytics/yolov5/issues/2081
|
TypeError Traceback (most recent call last)
<ipython-input-2-7facafecdabb> in <module>()
----> 1 model = Model("./models/yolov5s.yaml")
2
<ipython-input-1-7b447b7ed90c> in __init__(self, cfg, ch, nc)
78 logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
79 self.yaml['nc'] = nc # override yaml value
---> 80 self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
81 self.names = [str(i) for i in range(self.yaml['nc'])] # default names
82 # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
<ipython-input-1-7b447b7ed90c> in parse_model(d, ch)
252
253 print(m, args)
--> 254 m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
255 t = str(m)[8:-2].replace('__main__.', '') # module type
256 np = sum([x.numel() for x in m_.parameters()]) # number params
TypeError: __init__() missing 1 required positional argument: 's'
|
TypeError
|
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info(
"\n%3s%18s%3s%10s %-40s%-30s"
% ("", "from", "n", "params", "module", "arguments")
)
anchors, nc, gd, gw = (
d["anchors"],
d["nc"],
d["depth_multiple"],
d["width_multiple"],
)
na = (
(len(anchors[0]) // 2) if isinstance(anchors, list) else anchors
) # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(
d["backbone"] + d["head"]
): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [
Conv,
GhostConv,
Bottleneck,
GhostBottleneck,
SPP,
DWConv,
MixConv2d,
Focus,
CrossConv,
BottleneckCSP,
C3,
]:
c1, c2 = ch[f], args[0]
# Normal
# if i > 0 and args[0] != no: # channel expansion factor
# ex = 1.75 # exponential (default 2.0)
# e = math.log(c2 / ch[1]) / math.log(2)
# c2 = int(ch[1] * ex ** e)
# if m != Focus:
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
# Experimental
# if i > 0 and args[0] != no: # channel expansion factor
# ex = 1 + gw # exponential (default 2.0)
# ch1 = 32 # ch[1]
# e = math.log(c2 / ch1) / math.log(2) # level 1-n
# c2 = int(ch1 * ex ** e)
# if m != Focus:
# c2 = make_divisible(c2, 8) if c2 != no else c2
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3]:
args.insert(2, n)
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x if x < 0 else x + 1] for x in f])
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f if f < 0 else f + 1] * args[0] ** 2
elif m is Expand:
c2 = ch[f if f < 0 else f + 1] // args[0] ** 2
else:
c2 = ch[f if f < 0 else f + 1]
m_ = (
nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)
) # module
t = str(m)[8:-2].replace("__main__.", "") # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = (
i,
f,
t,
np,
) # attach index, 'from' index, type, number params
logger.info("%3s%18s%3s%10.0f %-40s%-30s" % (i, f, n, np, t, args)) # print
save.extend(
x % i for x in ([f] if isinstance(f, int) else f) if x != -1
) # append to savelist
layers.append(m_)
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
|
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info(
"\n%3s%18s%3s%10s %-40s%-30s"
% ("", "from", "n", "params", "module", "arguments")
)
anchors, nc, gd, gw = (
d["anchors"],
d["nc"],
d["depth_multiple"],
d["width_multiple"],
)
na = (
(len(anchors[0]) // 2) if isinstance(anchors, list) else anchors
) # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(
d["backbone"] + d["head"]
): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [
Conv,
Bottleneck,
SPP,
DWConv,
MixConv2d,
Focus,
CrossConv,
BottleneckCSP,
C3,
]:
c1, c2 = ch[f], args[0]
# Normal
# if i > 0 and args[0] != no: # channel expansion factor
# ex = 1.75 # exponential (default 2.0)
# e = math.log(c2 / ch[1]) / math.log(2)
# c2 = int(ch[1] * ex ** e)
# if m != Focus:
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
# Experimental
# if i > 0 and args[0] != no: # channel expansion factor
# ex = 1 + gw # exponential (default 2.0)
# ch1 = 32 # ch[1]
# e = math.log(c2 / ch1) / math.log(2) # level 1-n
# c2 = int(ch1 * ex ** e)
# if m != Focus:
# c2 = make_divisible(c2, 8) if c2 != no else c2
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3]:
args.insert(2, n)
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x if x < 0 else x + 1] for x in f])
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f if f < 0 else f + 1] * args[0] ** 2
elif m is Expand:
c2 = ch[f if f < 0 else f + 1] // args[0] ** 2
else:
c2 = ch[f if f < 0 else f + 1]
m_ = (
nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)
) # module
t = str(m)[8:-2].replace("__main__.", "") # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = (
i,
f,
t,
np,
) # attach index, 'from' index, type, number params
logger.info("%3s%18s%3s%10.0f %-40s%-30s" % (i, f, n, np, t, args)) # print
save.extend(
x % i for x in ([f] if isinstance(f, int) else f) if x != -1
) # append to savelist
layers.append(m_)
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
|
https://github.com/ultralytics/yolov5/issues/2081
|
TypeError Traceback (most recent call last)
<ipython-input-2-7facafecdabb> in <module>()
----> 1 model = Model("./models/yolov5s.yaml")
2
<ipython-input-1-7b447b7ed90c> in __init__(self, cfg, ch, nc)
78 logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
79 self.yaml['nc'] = nc # override yaml value
---> 80 self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
81 self.names = [str(i) for i in range(self.yaml['nc'])] # default names
82 # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
<ipython-input-1-7b447b7ed90c> in parse_model(d, ch)
252
253 print(m, args)
--> 254 m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
255 t = str(m)[8:-2].replace('__main__.', '') # module type
256 np = sum([x.numel() for x in m_.parameters()]) # number params
TypeError: __init__() missing 1 required positional argument: 's'
|
TypeError
|
def color_list():
# Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
def hex2rgb(h):
return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))
return [
hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()
] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
|
def color_list():
# Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
def hex2rgb(h):
return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))
return [hex2rgb(h) for h in plt.rcParams["axes.prop_cycle"].by_key()["color"]]
|
https://github.com/ultralytics/yolov5/issues/2066
|
Traceback (most recent call last):
File "D:\software\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "D:\software\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "D:\yolov5\utils\plots.py", line 124, in plot_images
colors = color_list() # list of colors
File "D:\yolov5\utils\plots.py", line 34, in color_list
return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
File "D:\yolov5\utils\plots.py", line 34, in <listcomp>
return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
File "D:\yolov5\utils\plots.py", line 32, in hex2rgb
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
File "D:\yolov5\utils\plots.py", line 32, in <genexpr>
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
TypeError: int() can't convert non-string with explicit base
|
TypeError
|
def cache_labels(self, path=Path("./labels.cache"), prefix=""):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(
zip(self.img_files, self.label_files),
desc="Scanning images",
total=len(self.img_files),
)
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in img_formats, f"invalid image format {im.format}"
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, "r") as f:
l = np.array(
[x.split() for x in f.read().strip().splitlines()],
dtype=np.float32,
) # labels
if len(l):
assert l.shape[1] == 5, "labels require 5 columns each"
assert (l >= 0).all(), "negative labels"
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels"
)
assert np.unique(l, axis=0).shape[0] == l.shape[0], (
"duplicate labels"
)
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(
f"{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}"
)
pbar.desc = (
f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... "
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
)
if nf == 0:
print(f"{prefix}WARNING: No labels found in {path}. See {help_url}")
x["hash"] = get_hash(self.label_files + self.img_files)
x["results"] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f"{prefix}New cache created: {path}")
return x
|
def cache_labels(self, path=Path("./labels.cache"), prefix=""):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(
zip(self.img_files, self.label_files),
desc="Scanning images",
total=len(self.img_files),
)
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), "image size <10 pixels"
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, "r") as f:
l = np.array(
[x.split() for x in f.read().strip().splitlines()],
dtype=np.float32,
) # labels
if len(l):
assert l.shape[1] == 5, "labels require 5 columns each"
assert (l >= 0).all(), "negative labels"
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels"
)
assert np.unique(l, axis=0).shape[0] == l.shape[0], (
"duplicate labels"
)
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(
f"{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}"
)
pbar.desc = (
f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... "
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
)
if nf == 0:
print(f"{prefix}WARNING: No labels found in {path}. See {help_url}")
x["hash"] = get_hash(self.label_files + self.img_files)
x["results"] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f"{prefix}New cache created: {path}")
return x
|
https://github.com/ultralytics/yolov5/issues/195
|
Unable to init server: Could not connect: Connection refused
Unable to init server: Could not connect: Connection refused
(train.py:19670): Gdk-CRITICAL **: 18:33:23.890: gdk_cursor_new_for_display: assertion 'GDK_IS_DISPLAY (display)' failed
Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex
{'lr0': 0.01, 'momentum': 0.937, 'weight_decay': 0.0005, 'giou': 0.05, 'cls': 0.58, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.014, 'hsv_s': 0.68, 'hsv_v': 0.36, 'degrees': 0.0, 'translate': 0.0, 'scale': 0.5, 'shear': 0.0}
Your branch is behind 'origin/master' by 1 commit, and can be fast-forwarded.
(use "git pull" to update your local branch)
Namespace(adam=False, batch_size=16, bucket='', cache_images=False, cfg='./models/yolov5s.yaml', data='./data/dataset.yaml', device='', epochs=5, evolve=False, img_size=[640], multi_scale=False, name='', noautoanchor=False, nosave=False, notest=False, rect=False, resume=False, single_cls=False, weights='weights/yolov5s.pt')
Using CUDA device0 _CudaDeviceProperties(name='GeForce RTX 2080 Ti', total_memory=11019MB)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/
from n params module arguments
0 -1 1 3520 models.common.Focus [3, 32, 3]
1 -1 1 18560 models.common.Conv [32, 64, 3, 2]
2 -1 1 19904 models.common.BottleneckCSP [64, 64, 1]
3 -1 1 73984 models.common.Conv [64, 128, 3, 2]
4 -1 1 161152 models.common.BottleneckCSP [128, 128, 3]
5 -1 1 295424 models.common.Conv [128, 256, 3, 2]
6 -1 1 641792 models.common.BottleneckCSP [256, 256, 3]
7 -1 1 1180672 models.common.Conv [256, 512, 3, 2]
8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]]
9 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False]
10 -1 1 131584 models.common.Conv [512, 256, 1, 1]
11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
12 [-1, 6] 1 0 models.common.Concat [1]
13 -1 1 378624 models.common.BottleneckCSP [512, 256, 1, False]
14 -1 1 33024 models.common.Conv [256, 128, 1, 1]
15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
16 [-1, 4] 1 0 models.common.Concat [1]
17 -1 1 95104 models.common.BottleneckCSP [256, 128, 1, False]
18 -1 1 3483 torch.nn.modules.conv.Conv2d [128, 27, 1, 1]
19 -2 1 147712 models.common.Conv [128, 128, 3, 2]
20 [-1, 14] 1 0 models.common.Concat [1]
21 -1 1 313088 models.common.BottleneckCSP [256, 256, 1, False]
22 -1 1 6939 torch.nn.modules.conv.Conv2d [256, 27, 1, 1]
23 -2 1 590336 models.common.Conv [256, 256, 3, 2]
24 [-1, 10] 1 0 models.common.Concat [1]
25 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False]
26 -1 1 13851 torch.nn.modules.conv.Conv2d [512, 27, 1, 1]
27 [-1, 22, 18] 1 0 models.yolo.Detect [4, [[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119], [10, 13, 16, 30, 33, 23]]]
Model Summary: 191 layers, 7.26318e+06 parameters, 7.26318e+06 gradients
Optimizer groups: 62 .bias, 70 conv.weight, 59 other
Caching labels ../dataset/labels/train.npy (8582 found, 0 missing, 0 empty, 674 duplicate, for 8582 images): 100%|████| 8582/8582 [00:00<00:00, 24748.73it/s]
Caching labels ../dataset/labels/val.npy (1958 found, 0 missing, 0 empty, 135 duplicate, for 1958 images): 100%|██████| 1958/1958 [00:00<00:00, 25395.42it/s]
Analyzing anchors... Best Possible Recall (BPR) = 0.9977
Image sizes 640 train, 640 test
Using 1 dataloader workers
Starting training for 5 epochs...
Epoch gpu_mem GIoU obj cls total targets img_size
0/4 4.72G 0.1325 0.03671 0.05608 0.2252 60 640: 2%|▊ | 12/537 [00:07<03:35, 2.44it/s]Traceback (most recent call last):
File "train.py", line 407, in <module>
train(hyp)
File "train.py", line 237, in train
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
File "/usr/local/lib/python3.6/dist-packages/tqdm/std.py", line 1081, in __iter__
for obj in iterable:
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 345, in __next__
data = self._next_data()
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 856, in _next_data
return self._process_data(data)
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 881, in _process_data
data.reraise()
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/_utils.py", line 395, in reraise
raise self.exc_type(msg)
AssertionError: Caught AssertionError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/_utils/worker.py", line 178, in _worker_loop
data = fetcher.fetch(index)
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/fyp2020s1/.local/lib/python3.6/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/fyp2020s1/YoLo_v5/yolov5/utils/datasets.py", line 446, in __getitem__
img, labels = load_mosaic(self, index)
File "/home/fyp2020s1/YoLo_v5/yolov5/utils/datasets.py", line 573, in load_mosaic
img, _, (h, w) = load_image(self, index)
File "/home/fyp2020s1/YoLo_v5/yolov5/utils/datasets.py", line 534, in load_image
assert img is not None, 'Image Not Found ' + path
AssertionError: Image Not Found ../dataset/images/train/4501.jpeg
0/4 4.72G 0.1325 0.03671 0.05608 0.2252 60 640: 2%|▊
|
AssertionError
|
def select_device(device="", batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == "cpu"
if device and not cpu_request: # if device requested other than 'cpu'
os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable
assert torch.cuda.is_available(), (
f"CUDA unavailable, invalid device {device} requested"
) # check availablity
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024**2 # bytes to MB
ng = torch.cuda.device_count()
if (
ng > 1 and batch_size
): # check that batch_size is compatible with device_count
assert batch_size % ng == 0, (
f"batch-size {batch_size} not multiple of GPU count {ng}"
)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = f"Using torch {torch.__version__} "
for i, d in enumerate((device or "0").split(",")):
if i == 1:
s = " " * len(s)
logger.info(f"{s}CUDA:{d} ({x[i].name}, {x[i].total_memory / c}MB)")
else:
logger.info(f"Using torch {torch.__version__} CPU")
logger.info("") # skip a line
return torch.device("cuda:0" if cuda else "cpu")
|
def select_device(device="", batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == "cpu"
if device and not cpu_request: # if device requested other than 'cpu'
os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable
assert torch.cuda.is_available(), (
"CUDA unavailable, invalid device %s requested" % device
) # check availablity
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024**2 # bytes to MB
ng = torch.cuda.device_count()
if (
ng > 1 and batch_size
): # check that batch_size is compatible with device_count
assert batch_size % ng == 0, (
"batch-size %g not multiple of GPU count %g" % (batch_size, ng)
)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = f"Using torch {torch.__version__} "
for i in range(0, ng):
if i == 1:
s = " " * len(s)
logger.info(
"%sCUDA:%g (%s, %dMB)" % (s, i, x[i].name, x[i].total_memory / c)
)
else:
logger.info(f"Using torch {torch.__version__} CPU")
logger.info("") # skip a line
return torch.device("cuda:0" if cuda else "cpu")
|
https://github.com/ultralytics/yolov5/issues/1760
|
$ python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5x.pt --device "2"
Using torch 1.7.1 CUDA:0 (GeForce RTX 2080 Ti, 11019MB)
Namespace(adam=False, batch_size=16, bucket='', cache_images=False, cfg='', data='./data/coco128.yaml', device='2', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', rect=False, resume=False, save_dir='runs/train/exp22', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5x.pt', workers=8, world_size=1)
Start Tensorboard with "tensorboard --logdir runs/train", view at http://localhost:6006/
Hyperparameters {'lr0': 0.01, 'lrf': 0.2, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0, 'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0, 'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0}
……
Model Summary: 607 layers, 88965245 parameters, 88965245 gradients, 221.7 GFLOPS
Transferred 802/802 items from yolov5x.pt
Optimizer groups: 134 .bias, 142 conv.weight, 131 other
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:00<?, ?it/s]
Plotting labels...
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:01<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:01<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:02<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:02<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:02<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:02<?, ?it/s]
Scanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100%|███████████████████████████████████████████████████████████████████| 128/128 [00:02<?, ?it/s]
Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946
Image sizes 640 train, 640 test
Using 8 dataloader workers
Logging results to runs/train/exp22
Starting training for 3 epochs...
Epoch gpu_mem box obj cls total targets img_size
0%| | 0/8 [00:01<?, ?it/s]
Traceback (most recent call last):
File "train.py", line 512, in <module>
train(hyp, opt, device, tb_writer, wandb)
File "train.py", line 289, in train
pred = model(imgs) # forward
File "/home/elfin/anaconda3/envs/yolov5/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/elfin/utils/yolov5-master/models/yolo.py", line 122, in forward
return self.forward_once(x, profile) # single-scale inference, train
File "/home/elfin/utils/yolov5-master/models/yolo.py", line 138, in forward_once
x = m(x) # run
File "/home/elfin/anaconda3/envs/yolov5/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/elfin/utils/yolov5-master/models/common.py", line 119, in forward
return torch.cat(x, self.d)
RuntimeError: CUDA out of memory. Tried to allocate 126.00 MiB (GPU 0; 10.76 GiB total capacity; 9.69 GiB already allocated; 11.56 MiB free; 9.74 GiB reserved in total by PyTorch)
|
RuntimeError
|
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = (
opt.source,
opt.weights,
opt.view_img,
opt.save_txt,
opt.img_size,
)
webcam = (
source.isnumeric()
or source.endswith(".txt")
or source.lower().startswith(("rtsp://", "rtmp://", "http://"))
)
# Directories
save_dir = Path(
increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)
) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(
parents=True, exist_ok=True
) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != "cpu" # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name="resnet101", n=2) # initialize
modelc.load_state_dict(
torch.load("weights/resnet101.pt", map_location=device)["model"]
).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, "module") else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != "cpu" else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(
pred,
opt.conf_thres,
opt.iou_thres,
classes=opt.classes,
agnostic=opt.agnostic_nms,
)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = (
Path(path[i]),
"%g: " % i,
im0s[i].copy(),
dataset.count,
)
else:
p, s, im0, frame = Path(path), "", im0s, getattr(dataset, "frame", 0)
save_path = str(save_dir / p.name)
txt_path = str(save_dir / "labels" / p.stem) + (
"" if dataset.mode == "image" else f"_{frame}"
)
s += "%gx%g " % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}s, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn)
.view(-1)
.tolist()
) # normalized xywh
line = (
(cls, *xywh, conf) if opt.save_conf else (cls, *xywh)
) # label format
with open(txt_path + ".txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or view_img: # Add bbox to image
label = f"{names[int(cls)]} {conf:.2f}"
plot_one_box(
xyxy,
im0,
label=label,
color=colors[int(cls)],
line_thickness=3,
)
# Print time (inference + NMS)
print(f"{s}Done. ({t2 - t1:.3f}s)")
# Stream results
if view_img:
cv2.imshow(str(p), im0)
if cv2.waitKey(1) == ord("q"): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = "mp4v" # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)
)
vid_writer.write(im0)
if save_txt or save_img:
s = (
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
if save_txt
else ""
)
print(f"Results saved to {save_dir}{s}")
print(f"Done. ({time.time() - t0:.3f}s)")
|
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = (
opt.source,
opt.weights,
opt.view_img,
opt.save_txt,
opt.img_size,
)
webcam = (
source.isnumeric()
or source.endswith(".txt")
or source.lower().startswith(("rtsp://", "rtmp://", "http://"))
)
# Directories
save_dir = Path(
increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)
) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(
parents=True, exist_ok=True
) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != "cpu" # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name="resnet101", n=2) # initialize
modelc.load_state_dict(
torch.load("weights/resnet101.pt", map_location=device)["model"]
).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, "module") else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != "cpu" else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(
pred,
opt.conf_thres,
opt.iou_thres,
classes=opt.classes,
agnostic=opt.agnostic_nms,
)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0 = Path(path[i]), "%g: " % i, im0s[i].copy()
else:
p, s, im0 = Path(path), "", im0s
save_path = str(save_dir / p.name)
txt_path = str(save_dir / "labels" / p.stem) + (
"_%g" % dataset.frame if dataset.mode == "video" else ""
)
s += "%gx%g " % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += "%g %ss, " % (n, names[int(c)]) # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn)
.view(-1)
.tolist()
) # normalized xywh
line = (
(cls, *xywh, conf) if opt.save_conf else (cls, *xywh)
) # label format
with open(txt_path + ".txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or view_img: # Add bbox to image
label = "%s %.2f" % (names[int(cls)], conf)
plot_one_box(
xyxy,
im0,
label=label,
color=colors[int(cls)],
line_thickness=3,
)
# Print time (inference + NMS)
print("%sDone. (%.3fs)" % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(str(p), im0)
if cv2.waitKey(1) == ord("q"): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == "images":
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = "mp4v" # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)
)
vid_writer.write(im0)
if save_txt or save_img:
s = (
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
if save_txt
else ""
)
print(f"Results saved to {save_dir}{s}")
print("Done. (%.3fs)" % (time.time() - t0))
|
https://github.com/ultralytics/yolov5/issues/1625
|
** On entry to DGEBAL parameter number 3 had an illegal value
** On entry to DGEHRD parameter number 2 had an illegal value
** On entry to DORGHR DORGQR parameter number 2 had an illegal value
** On entry to DHSEQR parameter number 4 had an illegal value
Traceback (most recent call last):
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 305, in <module>
_win_os_check()
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 302, in _win_os_check
raise RuntimeError(msg.format(__file__)) from None
RuntimeError: The current Numpy installation ('D:\\Softwares\\Python\\lib\\site-packages\\numpy\\__init__.py') fails to pass a sanity check due to a bug in the windows runtime. See this issue for more information: https://tinyurl.com/y3dm3h86
Traceback (most recent call last):
File ".\detect.py", line 5, in <module>
import cv2
File "D:\Softwares\Python\lib\site-packages\cv2\__init__.py", line 5, in <module>
from .cv2 import *
ImportError: numpy.core.multiarray failed to import
|
RuntimeError
|
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if "*" in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, "*.*"))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception("ERROR: %s does not exist" % p)
images = [x for x in files if x.split(".")[-1].lower() in img_formats]
videos = [x for x in files if x.split(".")[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = "image"
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, (
"No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s"
% (p, img_formats, vid_formats)
)
|
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if "*" in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, "*.*"))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception("ERROR: %s does not exist" % p)
images = [x for x in files if x.split(".")[-1].lower() in img_formats]
videos = [x for x in files if x.split(".")[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = "images"
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, (
"No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s"
% (p, img_formats, vid_formats)
)
|
https://github.com/ultralytics/yolov5/issues/1625
|
** On entry to DGEBAL parameter number 3 had an illegal value
** On entry to DGEHRD parameter number 2 had an illegal value
** On entry to DORGHR DORGQR parameter number 2 had an illegal value
** On entry to DHSEQR parameter number 4 had an illegal value
Traceback (most recent call last):
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 305, in <module>
_win_os_check()
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 302, in _win_os_check
raise RuntimeError(msg.format(__file__)) from None
RuntimeError: The current Numpy installation ('D:\\Softwares\\Python\\lib\\site-packages\\numpy\\__init__.py') fails to pass a sanity check due to a bug in the windows runtime. See this issue for more information: https://tinyurl.com/y3dm3h86
Traceback (most recent call last):
File ".\detect.py", line 5, in <module>
import cv2
File "D:\Softwares\Python\lib\site-packages\cv2\__init__.py", line 5, in <module>
from .cv2 import *
ImportError: numpy.core.multiarray failed to import
|
RuntimeError
|
def __init__(self, sources="streams.txt", img_size=640):
self.mode = "stream"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [
x.strip() for x in f.read().strip().splitlines() if len(x.strip())
]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
|
def __init__(self, sources="streams.txt", img_size=640):
self.mode = "images"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [
x.strip() for x in f.read().strip().splitlines() if len(x.strip())
]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
|
https://github.com/ultralytics/yolov5/issues/1625
|
** On entry to DGEBAL parameter number 3 had an illegal value
** On entry to DGEHRD parameter number 2 had an illegal value
** On entry to DORGHR DORGQR parameter number 2 had an illegal value
** On entry to DHSEQR parameter number 4 had an illegal value
Traceback (most recent call last):
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 305, in <module>
_win_os_check()
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 302, in _win_os_check
raise RuntimeError(msg.format(__file__)) from None
RuntimeError: The current Numpy installation ('D:\\Softwares\\Python\\lib\\site-packages\\numpy\\__init__.py') fails to pass a sanity check due to a bug in the windows runtime. See this issue for more information: https://tinyurl.com/y3dm3h86
Traceback (most recent call last):
File ".\detect.py", line 5, in <module>
import cv2
File "D:\Softwares\Python\lib\site-packages\cv2\__init__.py", line 5, in <module>
from .cv2 import *
ImportError: numpy.core.multiarray failed to import
|
RuntimeError
|
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = (
opt.source,
opt.weights,
opt.view_img,
opt.save_txt,
opt.img_size,
)
webcam = (
source.isnumeric()
or source.endswith(".txt")
or source.lower().startswith(("rtsp://", "rtmp://", "http://"))
)
# Directories
save_dir = Path(
increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)
) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(
parents=True, exist_ok=True
) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != "cpu" # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name="resnet101", n=2) # initialize
modelc.load_state_dict(
torch.load("weights/resnet101.pt", map_location=device)["model"]
).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, "module") else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != "cpu" else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(
pred,
opt.conf_thres,
opt.iou_thres,
classes=opt.classes,
agnostic=opt.agnostic_nms,
)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], "%g: " % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, "", im0s, getattr(dataset, "frame", 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / "labels" / p.stem) + (
"" if dataset.mode == "image" else f"_{frame}"
) # img.txt
s += "%gx%g " % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}s, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn)
.view(-1)
.tolist()
) # normalized xywh
line = (
(cls, *xywh, conf) if opt.save_conf else (cls, *xywh)
) # label format
with open(txt_path + ".txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or view_img: # Add bbox to image
label = f"{names[int(cls)]} {conf:.2f}"
plot_one_box(
xyxy,
im0,
label=label,
color=colors[int(cls)],
line_thickness=3,
)
# Print time (inference + NMS)
print(f"{s}Done. ({t2 - t1:.3f}s)")
# Stream results
if view_img:
cv2.imshow(str(p), im0)
if cv2.waitKey(1) == ord("q"): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = "mp4v" # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)
)
vid_writer.write(im0)
if save_txt or save_img:
s = (
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
if save_txt
else ""
)
print(f"Results saved to {save_dir}{s}")
print(f"Done. ({time.time() - t0:.3f}s)")
|
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = (
opt.source,
opt.weights,
opt.view_img,
opt.save_txt,
opt.img_size,
)
webcam = (
source.isnumeric()
or source.endswith(".txt")
or source.lower().startswith(("rtsp://", "rtmp://", "http://"))
)
# Directories
save_dir = Path(
increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)
) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(
parents=True, exist_ok=True
) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != "cpu" # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name="resnet101", n=2) # initialize
modelc.load_state_dict(
torch.load("weights/resnet101.pt", map_location=device)["model"]
).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, "module") else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != "cpu" else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(
pred,
opt.conf_thres,
opt.iou_thres,
classes=opt.classes,
agnostic=opt.agnostic_nms,
)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = (
Path(path[i]),
"%g: " % i,
im0s[i].copy(),
dataset.count,
)
else:
p, s, im0, frame = Path(path), "", im0s, getattr(dataset, "frame", 0)
save_path = str(save_dir / p.name)
txt_path = str(save_dir / "labels" / p.stem) + (
"" if dataset.mode == "image" else f"_{frame}"
)
s += "%gx%g " % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}s, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn)
.view(-1)
.tolist()
) # normalized xywh
line = (
(cls, *xywh, conf) if opt.save_conf else (cls, *xywh)
) # label format
with open(txt_path + ".txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or view_img: # Add bbox to image
label = f"{names[int(cls)]} {conf:.2f}"
plot_one_box(
xyxy,
im0,
label=label,
color=colors[int(cls)],
line_thickness=3,
)
# Print time (inference + NMS)
print(f"{s}Done. ({t2 - t1:.3f}s)")
# Stream results
if view_img:
cv2.imshow(str(p), im0)
if cv2.waitKey(1) == ord("q"): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = "mp4v" # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)
)
vid_writer.write(im0)
if save_txt or save_img:
s = (
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
if save_txt
else ""
)
print(f"Results saved to {save_dir}{s}")
print(f"Done. ({time.time() - t0:.3f}s)")
|
https://github.com/ultralytics/yolov5/issues/1625
|
** On entry to DGEBAL parameter number 3 had an illegal value
** On entry to DGEHRD parameter number 2 had an illegal value
** On entry to DORGHR DORGQR parameter number 2 had an illegal value
** On entry to DHSEQR parameter number 4 had an illegal value
Traceback (most recent call last):
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 305, in <module>
_win_os_check()
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 302, in _win_os_check
raise RuntimeError(msg.format(__file__)) from None
RuntimeError: The current Numpy installation ('D:\\Softwares\\Python\\lib\\site-packages\\numpy\\__init__.py') fails to pass a sanity check due to a bug in the windows runtime. See this issue for more information: https://tinyurl.com/y3dm3h86
Traceback (most recent call last):
File ".\detect.py", line 5, in <module>
import cv2
File "D:\Softwares\Python\lib\site-packages\cv2\__init__.py", line 5, in <module>
from .cv2 import *
ImportError: numpy.core.multiarray failed to import
|
RuntimeError
|
def __init__(self, sources="streams.txt", img_size=640):
self.mode = "stream"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [
x.strip() for x in f.read().strip().splitlines() if len(x.strip())
]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
|
def __init__(self, sources="streams.txt", img_size=640):
self.mode = "stream"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [
x.strip() for x in f.read().strip().splitlines() if len(x.strip())
]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
|
https://github.com/ultralytics/yolov5/issues/1625
|
** On entry to DGEBAL parameter number 3 had an illegal value
** On entry to DGEHRD parameter number 2 had an illegal value
** On entry to DORGHR DORGQR parameter number 2 had an illegal value
** On entry to DHSEQR parameter number 4 had an illegal value
Traceback (most recent call last):
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 305, in <module>
_win_os_check()
File "D:\Softwares\Python\lib\site-packages\numpy\__init__.py", line 302, in _win_os_check
raise RuntimeError(msg.format(__file__)) from None
RuntimeError: The current Numpy installation ('D:\\Softwares\\Python\\lib\\site-packages\\numpy\\__init__.py') fails to pass a sanity check due to a bug in the windows runtime. See this issue for more information: https://tinyurl.com/y3dm3h86
Traceback (most recent call last):
File ".\detect.py", line 5, in <module>
import cv2
File "D:\Softwares\Python\lib\site-packages\cv2\__init__.py", line 5, in <module>
from .cv2 import *
ImportError: numpy.core.multiarray failed to import
|
RuntimeError
|
def non_max_suppression(
prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False, labels=()
):
"""Performs Non-Maximum Suppression (NMS) on inference results
Returns:
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(
1, keepdim=True
) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
break # time limit exceeded
return output
|
def non_max_suppression(
prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False, labels=()
):
"""Performs Non-Maximum Suppression (NMS) on inference results
Returns:
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
"""
nc = prediction[0].shape[1] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros(0, 6)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(
1, keepdim=True
) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
break # time limit exceeded
return output
|
https://github.com/ultralytics/yolov5/issues/1617
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-5-e00d4d91b5be> in <module>
11 ]
12
---> 13 det = yolo(images)
~/miniconda3/envs/wstal/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in forward(self, imgs, size, augment, profile)
171 y[i][:, :4] = scale_coords(shape1, y[i][:, :4], shape0[i])
172
--> 173 return Detections(imgs, y, self.names)
174
175
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in __init__(self, imgs, pred, names)
185 d = pred[0].device # device
186 gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
--> 187 self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
188 self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
189 self.n = len(self.pred)
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in <listcomp>(.0)
185 d = pred[0].device # device
186 gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
--> 187 self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
188 self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
189 self.n = len(self.pred)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
|
RuntimeError
|
def cache_labels(self, path=Path("./labels.cache")):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(
zip(self.img_files, self.label_files),
desc="Scanning images",
total=len(self.img_files),
)
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), "image size <10 pixels"
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, "r") as f:
l = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
) # labels
if len(l):
assert l.shape[1] == 5, "labels require 5 columns each"
assert (l >= 0).all(), "negative labels"
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels"
)
assert np.unique(l, axis=0).shape[0] == l.shape[0], (
"duplicate labels"
)
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(
"WARNING: Ignoring corrupted image and/or label %s: %s" % (im_file, e)
)
pbar.desc = (
f"Scanning '{path.parent / path.stem}' for images and labels... "
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
)
if nf == 0:
print(f"WARNING: No labels found in {path}. See {help_url}")
x["hash"] = get_hash(self.label_files + self.img_files)
x["results"] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f"New cache created: {path}")
return x
|
def cache_labels(self, path=Path("./labels.cache")):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(
zip(self.img_files, self.label_files),
desc="Scanning images",
total=len(self.img_files),
)
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), "image size <10 pixels"
# verify labels
l = []
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, "r") as f:
l = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
) # labels
if len(l):
assert l.shape[1] == 5, "labels require 5 columns each"
assert (l >= 0).all(), "negative labels"
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels"
)
assert np.unique(l, axis=0).shape[0] == l.shape[0], (
"duplicate labels"
)
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(
"WARNING: Ignoring corrupted image and/or label %s: %s" % (im_file, e)
)
pbar.desc = (
f"Scanning '{path.parent / path.stem}' for images and labels... "
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
)
if nf == 0:
print(f"WARNING: No labels found in {path}. See {help_url}")
x["hash"] = get_hash(self.label_files + self.img_files)
x["results"] = [nf, nm, ne, nc, i]
torch.save(x, path) # save for next time
logging.info(f"New cache created: {path}")
return x
|
https://github.com/ultralytics/yolov5/issues/1507
|
Traceback (most recent call last):
File "train.py", line 491, in <module>
train(hyp, opt, device, tb_writer, wandb)
File "train.py", line 186, in train
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
File "<__array_function__ internals>", line 6, in concatenate
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 114824 has 1 dimension(s)
|
ValueError
|
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
|
def output_to_target(output, width, height):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
if isinstance(output, torch.Tensor):
output = output.cpu().numpy()
targets = []
for i, o in enumerate(output):
if o is not None:
for pred in o:
box = pred[:4]
w = (box[2] - box[0]) / width
h = (box[3] - box[1]) / height
x = box[0] / width + w / 2
y = box[1] / height + h / 2
conf = pred[4]
cls = int(pred[5])
targets.append([i, cls, x, y, w, h, conf])
return np.array(targets)
|
https://github.com/ultralytics/yolov5/issues/1507
|
Traceback (most recent call last):
File "train.py", line 491, in <module>
train(hyp, opt, device, tb_writer, wandb)
File "train.py", line 186, in train
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
File "<__array_function__ internals>", line 6, in concatenate
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 114824 has 1 dimension(s)
|
ValueError
|
def plot_images(
images,
targets,
paths=None,
fname="images.jpg",
names=None,
max_size=640,
max_subplots=16,
):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs**0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
colors = color_list() # list of colors
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
mosaic[block_y : block_y + h, block_x : block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype("int")
labels = image_targets.shape[1] == 6 # labels if no conf column
conf = (
None if labels else image_targets[:, 6]
) # check for confidence presence (label vs pred)
boxes[[0, 2]] += block_x
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = colors[cls % len(colors)]
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = "%s" % cls if labels else "%s %.1f" % (cls, conf[j])
plot_one_box(
box, mosaic, label=label, color=color, line_thickness=tl
)
# Draw image filename labels
if paths:
label = Path(paths[i]).name[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(
mosaic,
label,
(block_x + 5, block_y + t_size[1] + 5),
0,
tl / 3,
[220, 220, 220],
thickness=tf,
lineType=cv2.LINE_AA,
)
# Image border
cv2.rectangle(
mosaic,
(block_x, block_y),
(block_x + w, block_y + h),
(255, 255, 255),
thickness=3,
)
if fname:
r = min(1280.0 / max(h, w) / ns, 1.0) # ratio to limit image size
mosaic = cv2.resize(
mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA
)
# cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
Image.fromarray(mosaic).save(fname) # PIL save
return mosaic
|
def plot_images(
images,
targets,
paths=None,
fname="images.jpg",
names=None,
max_size=640,
max_subplots=16,
):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs**0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
colors = color_list() # list of colors
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
mosaic[block_y : block_y + h, block_x : block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype("int")
labels = image_targets.shape[1] == 6 # labels if no conf column
conf = (
None if labels else image_targets[:, 6]
) # check for confidence presence (label vs pred)
boxes[[0, 2]] *= w
boxes[[0, 2]] += block_x
boxes[[1, 3]] *= h
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = colors[cls % len(colors)]
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = "%s" % cls if labels else "%s %.1f" % (cls, conf[j])
plot_one_box(
box, mosaic, label=label, color=color, line_thickness=tl
)
# Draw image filename labels
if paths:
label = Path(paths[i]).name[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(
mosaic,
label,
(block_x + 5, block_y + t_size[1] + 5),
0,
tl / 3,
[220, 220, 220],
thickness=tf,
lineType=cv2.LINE_AA,
)
# Image border
cv2.rectangle(
mosaic,
(block_x, block_y),
(block_x + w, block_y + h),
(255, 255, 255),
thickness=3,
)
if fname:
r = min(1280.0 / max(h, w) / ns, 1.0) # ratio to limit image size
mosaic = cv2.resize(
mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA
)
# cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
Image.fromarray(mosaic).save(fname) # PIL save
return mosaic
|
https://github.com/ultralytics/yolov5/issues/1507
|
Traceback (most recent call last):
File "train.py", line 491, in <module>
train(hyp, opt, device, tb_writer, wandb)
File "train.py", line 186, in train
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
File "<__array_function__ internals>", line 6, in concatenate
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 114824 has 1 dimension(s)
|
ValueError
|
def __init__(self, imgs, pred, names=None):
super(Detections, self).__init__()
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
d = pred[0].device # device
gn = [
torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1.0, 1.0], device=d)
for im in imgs
] # normalizations
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred)
|
def __init__(self, imgs, pred, names=None):
super(Detections, self).__init__()
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
gn = [
torch.Tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1.0, 1.0]) for im in imgs
] # normalization gains
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred)
|
https://github.com/ultralytics/yolov5/issues/1454
|
Using cache found in /home/appuser/.cache/torch/hub/ultralytics_yolov5_master
from n params module arguments
0 -1 1 7040 models.common.Focus [3, 64, 3]
1 -1 1 73984 models.common.Conv [64, 128, 3, 2]
2 -1 1 161152 models.common.BottleneckCSP [128, 128, 3]
3 -1 1 295424 models.common.Conv [128, 256, 3, 2]
4 -1 1 1627904 models.common.BottleneckCSP [256, 256, 9]
5 -1 1 1180672 models.common.Conv [256, 512, 3, 2]
6 -1 1 6499840 models.common.BottleneckCSP [512, 512, 9]
7 -1 1 4720640 models.common.Conv [512, 1024, 3, 2]
8 -1 1 2624512 models.common.SPP [1024, 1024, [5, 9, 13]]
9 -1 1 10234880 models.common.BottleneckCSP [1024, 1024, 3, False]
10 -1 1 525312 models.common.Conv [1024, 512, 1, 1]
11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
12 [-1, 6] 1 0 models.common.Concat [1]
13 -1 1 2823680 models.common.BottleneckCSP [1024, 512, 3, False]
14 -1 1 131584 models.common.Conv [512, 256, 1, 1]
15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
16 [-1, 4] 1 0 models.common.Concat [1]
17 -1 1 707328 models.common.BottleneckCSP [512, 256, 3, False]
18 -1 1 590336 models.common.Conv [256, 256, 3, 2]
19 [-1, 14] 1 0 models.common.Concat [1]
20 -1 1 2561536 models.common.BottleneckCSP [512, 512, 3, False]
21 -1 1 2360320 models.common.Conv [512, 512, 3, 2]
22 [-1, 10] 1 0 models.common.Concat [1]
23 -1 1 10234880 models.common.BottleneckCSP [1024, 1024, 3, False]
24 [17, 20, 23] 1 457725 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [256, 512, 1024]]
Model Summary: 499 layers, 47818749 parameters, 47818749 gradients
Fusing layers...
Model Summary: 400 layers, 47790077 parameters, 47790077 gradients
Adding autoShape...
RuntimeError Traceback (most recent call last)
<ipython-input-3-5dc3294ee725> in <module>
3 image_path = 'data/images/bus.jpg'
4 img = cv2.imread(image_path)
----> 5 r = model(img)
~/.conda/envs/pytorch/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in forward(self, imgs, size, augment, profile)
172 y[i][:, :4] = scale_coords(shape1, y[i][:, :4], shape0[i])
173
--> 174 return Detections(imgs, y, self.names)
175
176
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in __init__(self, imgs, pred, names)
185 self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
186 gn = [torch.Tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.]) for im in imgs] # normalization gains
--> 187 self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
188 self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
189 self.n = len(self.pred)
~/.cache/torch/hub/ultralytics_yolov5_master/models/common.py in <listcomp>(.0)
185 self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
186 gn = [torch.Tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.]) for im in imgs] # normalization gains
--> 187 self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
188 self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
189 self.n = len(self.pred)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
|
RuntimeError
|
def train(hyp, opt, device, tb_writer=None, wandb=None):
logger.info(f"Hyperparameters {hyp}")
log_dir = (
Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / "evolve"
) # logging directory
wdir = log_dir / "weights" # weights directory
wdir.mkdir(parents=True, exist_ok=True)
last = wdir / "last.pt"
best = wdir / "best.pt"
results_file = log_dir / "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# Save run settings
with open(log_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
if hyp.get("anchors"):
ckpt["model"].yaml["anchors"] = round(hyp["anchors"]) # force autoanchor
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg or hyp.get("anchors") else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print("freezing %s" % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
logger.info(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp["lrf"])
+ hyp["lrf"]
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
if wandb and wandb.run is None:
id = ckpt.get("wandb_id") if "ckpt" in locals() else None
wandb_run = wandb.init(
config=opt, resume="allow", project="YOLOv5", name=log_dir.stem, id=id
)
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if opt.resume:
assert start_epoch > 0, (
"%s training to %g epochs is finished, nothing to resume."
% (weights, epochs)
)
shutil.copytree(
wdir, wdir.parent / f"weights_backup_epoch{start_epoch - 1}"
) # save previous weights
if epochs < start_epoch:
logger.info(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info("Using SyncBatchNorm()")
# Exponential moving average
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
rank=rank,
world_size=opt.world_size,
workers=opt.workers,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Process 0
if rank in [-1, 0]:
ema.updates = start_epoch * nb // accumulate # set EMA updates
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images and not opt.notest,
rect=True,
rank=-1,
world_size=opt.world_size,
workers=opt.workers,
)[0] # testloader
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(
round(hyp["warmup_epochs"] * nb), 1e3
) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
logger.info(
"Image sizes %g train, %g test\n"
"Using %g dataloader workers\nLogging results to %s\n"
"Starting training for %g epochs..."
% (imgsz, imgsz_test, dataloader.num_workers, log_dir, epochs)
)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = (
model.class_weights.cpu().numpy() * (1 - maps) ** 2
) # class weights
iw = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=cw
) # image weights
dataset.indices = random.choices(
range(dataset.n), weights=iw, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (
torch.tensor(dataset.indices)
if rank == 0
else torch.zeros(dataset.n)
).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(
("\n" + "%10s" * 8)
% ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "targets", "img_size")
)
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni,
xi,
[
hyp["warmup_bias_lr"] if j == 2 else 0.0,
x["initial_lr"] * lf(epoch),
],
)
if "momentum" in x:
x["momentum"] = np.interp(
ni, xi, [hyp["warmup_momentum"], hyp["momentum"]]
)
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # loss scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(log_dir / f"train_batch{ni}.jpg") # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
# if tb_writer and result is not None:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
plots=epoch == 0 or final_epoch, # plot first and last
log_imgs=opt.log_imgs if wandb else 0,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Log
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss", # train loss
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss", # val loss
"x/lr0",
"x/lr1",
"x/lr2",
] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb:
wandb.log({tag: x}) # W&B
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
"wandb_id": wandb_run.id if wandb else None,
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = opt.name if opt.name.isnumeric() else ""
fresults, flast, fbest = (
log_dir / f"results{n}.txt",
wdir / f"last{n}.pt",
wdir / f"best{n}.pt",
)
for f1, f2 in zip(
[wdir / "last.pt", wdir / "best.pt", results_file], [flast, fbest, fresults]
):
if f1.exists():
os.rename(f1, f2) # rename
if str(f2).endswith(".pt"): # is *.pt
strip_optimizer(f2) # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
logger.info(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
def train(hyp, opt, device, tb_writer=None, wandb=None):
logger.info(f"Hyperparameters {hyp}")
log_dir = (
Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / "evolve"
) # logging directory
wdir = log_dir / "weights" # weights directory
wdir.mkdir(parents=True, exist_ok=True)
last = wdir / "last.pt"
best = wdir / "best.pt"
results_file = log_dir / "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# Save run settings
with open(log_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
if hyp.get("anchors"):
ckpt["model"].yaml["anchors"] = round(hyp["anchors"]) # force autoanchor
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg or hyp.get("anchors") else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print("freezing %s" % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
logger.info(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp["lrf"])
+ hyp["lrf"]
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
if wandb and wandb.run is None:
id = ckpt.get("wandb_id") if "ckpt" in locals() else None
wandb_run = wandb.init(
config=opt, resume="allow", project="YOLOv5", name=log_dir.stem, id=id
)
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if opt.resume:
assert start_epoch > 0, (
"%s training to %g epochs is finished, nothing to resume."
% (weights, epochs)
)
shutil.copytree(
wdir, wdir.parent / f"weights_backup_epoch{start_epoch - 1}"
) # save previous weights
if epochs < start_epoch:
logger.info(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info("Using SyncBatchNorm()")
# Exponential moving average
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
rank=rank,
world_size=opt.world_size,
workers=opt.workers,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Process 0
if rank in [-1, 0]:
ema.updates = start_epoch * nb // accumulate # set EMA updates
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images and not opt.notest,
rect=True,
rank=-1,
world_size=opt.world_size,
workers=opt.workers,
)[0] # testloader
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(
round(hyp["warmup_epochs"] * nb), 1e3
) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
logger.info(
"Image sizes %g train, %g test\n"
"Using %g dataloader workers\nLogging results to %s\n"
"Starting training for %g epochs..."
% (imgsz, imgsz_test, dataloader.num_workers, log_dir, epochs)
)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = (
model.class_weights.cpu().numpy() * (1 - maps) ** 2
) # class weights
iw = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=cw
) # image weights
dataset.indices = random.choices(
range(dataset.n), weights=iw, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (
torch.tensor(dataset.indices)
if rank == 0
else torch.zeros(dataset.n)
).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(
("\n" + "%10s" * 8)
% ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "targets", "img_size")
)
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni,
xi,
[
hyp["warmup_bias_lr"] if j == 2 else 0.0,
x["initial_lr"] * lf(epoch),
],
)
if "momentum" in x:
x["momentum"] = np.interp(
ni, xi, [hyp["warmup_momentum"], hyp["momentum"]]
)
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # loss scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(log_dir / f"train_batch{ni}.jpg") # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
# if tb_writer and result is not None:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
plots=epoch == 0 or final_epoch, # plot first and last
log_imgs=opt.log_imgs,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Log
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss", # train loss
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss", # val loss
"x/lr0",
"x/lr1",
"x/lr2",
] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb:
wandb.log({tag: x}) # W&B
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
"wandb_id": wandb_run.id if wandb else None,
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = opt.name if opt.name.isnumeric() else ""
fresults, flast, fbest = (
log_dir / f"results{n}.txt",
wdir / f"last{n}.pt",
wdir / f"best{n}.pt",
)
for f1, f2 in zip(
[wdir / "last.pt", wdir / "best.pt", results_file], [flast, fbest, fresults]
):
if f1.exists():
os.rename(f1, f2) # rename
if str(f2).endswith(".pt"): # is *.pt
strip_optimizer(f2) # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
logger.info(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/1356
|
Traceback (most recent call last):
File "train.py", line 555, in <module>
results = train(hyp.copy(), opt, device)
File "train.py", line 326, in train
log_imgs=opt.log_imgs)
File "/home/dagasan/Documents/yolov5_source/test.py", line 214, in test
wandb.log({"outputs": wandb_images})
File "/home/dagasany/anaconda3/envs/pytorch/lib/python3.7/site-packages/wandb/sdk/lib/preinit.py", line 37, in preinit_wrapper
raise wandb.Error("You must call wandb.init() before {}()".format(name))
wandb.errors.error.Error: You must call wandb.init() before wandb.log()
Internal process exited
|
wandb.errors.error.Error
|
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed=seed)
|
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
init_seeds(seed=seed)
|
https://github.com/ultralytics/yolov5/issues/822
|
RecursionError Traceback (most recent call last)
<ipython-input-2-f6caabb30a03> in <module>
----> 1 init_seeds()
~/git/yolov5/utils/general.py in init_seeds(seed)
57 random.seed(seed)
58 np.random.seed(seed)
---> 59 init_seeds(seed=seed)
60
61
... last 1 frames repeated, from the frame below ...
~/git/yolov5/utils/general.py in init_seeds(seed)
57 random.seed(seed)
58 np.random.seed(seed)
---> 59 init_seeds(seed=seed)
60
61
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = (
Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / "evolve"
) # logging directory
wdir = str(log_dir / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = str(log_dir / "results.txt")
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(log_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
print(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
v.requires_grad = True
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(log_dir / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = (
Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / "evolve"
) # logging directory
wdir = str(log_dir / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = str(log_dir / "results.txt")
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.global_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(log_dir / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Model
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create
exclude = ["anchor"] if opt.cfg else [] # exclude keys
state_dict = ckpt["model"].float().state_dict() # to FP32
state_dict = intersect_dicts(
state_dict, model.state_dict(), exclude=exclude
) # intersect
model.load_state_dict(state_dict, strict=False) # load
print(
"Transferred %g/%g items from %s"
% (len(state_dict), len(model.state_dict()), weights)
) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
v.requires_grad = True
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# Results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# Epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt, state_dict
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(log_dir / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/682
|
Transferred 370/370 items from yolov5s.pt
Optimizer groups: 62 .bias, 70 conv.weight, 59 other
Transferred 370/370 items from yolov5s.pt
Optimizer groups: 62 .bias, 70 conv.weight, 59 other
Traceback (most recent call last):
File "train.py", line 439, in <module>
Traceback (most recent call last):
File "train.py", line 439, in <module>
train(hyp, opt, device, tb_writer)
File "train.py", line 144, in train
gs = int(max(model.stride)) # grid size (max stride)
File ".conda/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 772, in __getattr__
train(hyp, opt, device, tb_writer)
File "train.py", line 144, in train
gs = int(max(model.stride)) # grid size (max stride)
File ".conda/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 772, in __getattr__
type(self).__name__, name))
torch.nn.modules.module.ModuleAttributeError: 'DistributedDataParallel' object has no attribute 'stride'
type(self).__name__, name))
torch.nn.modules.module.ModuleAttributeError: 'DistributedDataParallel' object has no attribute 'stride'
Traceback (most recent call last):
File ".conda/envs/py37/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File ".conda/envs/py37/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File ".conda/envs/py37/lib/python3.7/site-packages/torch/distributed/launch.py", line 261, in <module>
main()
File ".conda/envs/py37/lib/python3.7/site-packages/torch/distributed/launch.py", line 257, in main
cmd=cmd)
subprocess.CalledProcessError: Command '['.conda/envs/py37/bin/python', '-u', 'train.py', '--local_rank=1', '--weights', 'yolov5s.pt', '--epochs', '3', '--img', '320', '--device', '0,1']' returned non-zero exit status 1.
|
torch.nn.modules.module.ModuleAttributeError
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolution" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
def train(hyp, tb_writer, opt, device):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolution" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if hyp["optimizer"] == "Adam":
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast():
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolve" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolution" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
|
def print_mutation(hyp, results, bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = "%10.4g" * len(results) % results # results (P, R, mAP, F1, test_loss)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
np.savetxt(
"evolve.txt", x[np.argsort(-fitness(x))], "%10.3g"
) # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def plot_evolution_results(
yaml_file="hyp_evolved.yaml",
): # from utils.utils import *; plot_evolution_results()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
hyp = yaml.load(f, Loader=yaml.FullLoader)
x = np.loadtxt("evolve.txt", ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
plt.figure(figsize=(14, 10), tight_layout=True)
matplotlib.rc("font", **{"size": 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(4, 6, i + 1)
plt.plot(mu, f.max(), "o", markersize=10)
plt.plot(y, f, ".")
plt.title("%s = %.3g" % (k, mu), fontdict={"size": 9}) # limit to 40 characters
print("%15s: %.3g" % (k, mu))
plt.savefig("evolve.png", dpi=200)
print("\nPlot saved as evolve.png")
|
def plot_evolution_results(
hyp,
): # from utils.utils import *; plot_evolution_results(hyp)
# Plot hyperparameter evolution results in evolve.txt
x = np.loadtxt("evolve.txt", ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
plt.figure(figsize=(12, 10), tight_layout=True)
matplotlib.rc("font", **{"size": 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(4, 5, i + 1)
plt.plot(mu, f.max(), "o", markersize=10)
plt.plot(y, f, ".")
plt.title("%s = %.3g" % (k, mu), fontdict={"size": 9}) # limit to 40 characters
print("%15s: %.3g" % (k, mu))
plt.savefig("evolve.png", dpi=200)
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
|
def hist2d(x, y, n=100):
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def plot_labels(labels, save_dir=""):
# plot dataset labels
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
ax[0].set_xlabel("classes")
ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap="jet")
ax[1].set_xlabel("x")
ax[1].set_ylabel("y")
ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap="jet")
ax[2].set_xlabel("width")
ax[2].set_ylabel("height")
plt.savefig(Path(save_dir) / "labels.png", dpi=200)
plt.close()
|
def plot_labels(labels, save_dir=""):
# plot dataset labels
def hist2d(x, y, n=100):
xedges, yedges = (
np.linspace(x.min(), x.max(), n),
np.linspace(y.min(), y.max(), n),
)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
ax[0].set_xlabel("classes")
ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap="jet")
ax[1].set_xlabel("x")
ax[1].set_ylabel("y")
ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap="jet")
ax[2].set_xlabel("width")
ax[2].set_ylabel("height")
plt.savefig(Path(save_dir) / "labels.png", dpi=200)
plt.close()
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolve" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
def train(hyp, opt, device, tb_writer=None):
print(f"Hyperparameters {hyp}")
log_dir = tb_writer.log_dir if tb_writer else "runs/evolve" # run directory
wdir = str(Path(log_dir) / "weights") + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + "last.pt"
best = wdir + "best.pt"
results_file = log_dir + os.sep + "results.txt"
epochs, batch_size, total_batch_size, weights, rank = (
opt.epochs,
opt.batch_size,
opt.total_batch_size,
opt.weights,
opt.local_rank,
)
# TODO: Use DDP logging. Only the first process is allowed to log.
# Save run settings
with open(Path(log_dir) / "hyp.yaml", "w") as f:
yaml.dump(hyp, f, sort_keys=False)
with open(Path(log_dir) / "opt.yaml", "w") as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != "cpu"
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict["train"]
test_path = data_dict["val"]
nc, names = (
(1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"])
) # number classes, names
assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (
len(names),
nc,
opt.data,
) # check
# Remove previous results
if rank in [-1, 0]:
for f in glob.glob("*_batch*.jpg") + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.cfg, nc=nc).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [
check_img_size(x, gs) for x in opt.img_size
] # verify imgsz are gs-multiples
# Optimizer
nbs = 64 # nominal batch size
# default DDP implementation is slow for accumulation according to: https://pytorch.org/docs/stable/notes/ddp.html
# all-reduce operation is carried out during loss.backward().
# Thus, there would be redundant all-reduce communications in a accumulation procedure,
# which means, the result is still right but the training speed gets slower.
# TODO: If acceleration is needed, there is an implementation of allreduce_post_accumulation
# in https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
accumulate = max(
round(nbs / total_batch_size), 1
) # accumulate loss before optimizing
hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if ".bias" in k:
pg2.append(v) # biases
elif ".weight" in k and ".bn" not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(
pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
) # adjust beta1 to momentum
else:
optimizer = optim.SGD(
pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": hyp["weight_decay"]}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2}) # add pg2 (biases)
print(
"Optimizer groups: %g .bias, %g conv.weight, %g other"
% (len(pg2), len(pg1), len(pg0))
)
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = (
lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2
) # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Load Model
with torch_distributed_zero_first(rank):
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith(".pt"): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
exclude = ["anchor"] # exclude keys
ckpt["model"] = {
k: v
for k, v in ckpt["model"].float().state_dict().items()
if k in model.state_dict()
and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape
}
model.load_state_dict(ckpt["model"], strict=False)
print(
"Transferred %g/%g items from %s"
% (len(ckpt["model"]), len(model.state_dict()), weights)
)
except KeyError as e:
s = (
"%s is not compatible with %s. This may be due to model differences or %s may be out of date. "
"Please delete or update %s and try again, or use --weights '' to train from scratch."
% (weights, opt.cfg, weights, weights)
)
raise KeyError(s) from e
# load optimizer
if ckpt["optimizer"] is not None:
optimizer.load_state_dict(ckpt["optimizer"])
best_fitness = ckpt["best_fitness"]
# load results
if ckpt.get("training_results") is not None:
with open(results_file, "w") as file:
file.write(ckpt["training_results"]) # write results.txt
# epochs
start_epoch = ckpt["epoch"] + 1
if epochs < start_epoch:
print(
"%s has been trained for %g epochs. Fine-tuning for %g additional epochs."
% (weights, ckpt["epoch"], epochs)
)
epochs += ckpt["epoch"] # finetune additional epochs
del ckpt
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
print("Using SyncBatchNorm()")
# Exponential moving average
ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[rank], output_device=rank)
# Trainloader
dataloader, dataset = create_dataloader(
train_path,
imgsz,
batch_size,
gs,
opt,
hyp=hyp,
augment=True,
cache=opt.cache_images,
rect=opt.rect,
local_rank=rank,
world_size=opt.world_size,
)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, (
"Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g"
% (mlc, nc, opt.data, nc - 1)
)
# Testloader
if rank in [-1, 0]:
# local_rank is set to -1. Because only the first process is expected to do evaluation.
testloader = create_dataloader(
test_path,
imgsz_test,
total_batch_size,
gs,
opt,
hyp=hyp,
augment=False,
cache=opt.cache_images,
rect=True,
local_rank=-1,
world_size=opt.world_size,
)[0]
# Model parameters
hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
device
) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram("classes", c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (
0,
0,
0,
0,
0,
0,
0,
) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
if rank in [0, -1]:
print("Image sizes %g train, %g test" % (imgsz, imgsz_test))
print("Using %g dataloader workers" % dataloader.num_workers)
print("Starting training for %g epochs..." % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(
start_epoch, epochs
): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(
dataset.labels, nc=nc, class_weights=w
)
dataset.indices = random.choices(
range(dataset.n), weights=image_weights, k=dataset.n
) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
indices[:] = torch.from_tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
if rank in [-1, 0]:
print(
("\n" + "%10s" * 8)
% (
"Epoch",
"gpu_mem",
"GIoU",
"obj",
"cls",
"total",
"targets",
"img_size",
)
)
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for (
i,
(imgs, targets, paths, _),
) in (
pbar
): # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = (
imgs.to(device, non_blocking=True).float() / 255.0
) # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(
1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()
)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni, xi, [0.1 if j == 2 else 0.0, x["initial_lr"] * lf(epoch)]
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [0.9, hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = F.interpolate(
imgs, size=ns, mode="bilinear", align_corners=False
)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(
pred, targets.to(device), model
) # scaled by batch_size
if rank != -1:
loss *= (
opt.world_size
) # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# print('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema is not None:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = "%.3gG" % (
torch.cuda.memory_reserved() / 1e9
if torch.cuda.is_available()
else 0
) # (GB)
s = ("%10s" * 2 + "%10.4g" * 6) % (
"%g/%g" % (epoch, epochs - 1),
mem,
*mloss,
targets.shape[0],
imgs.shape[-1],
)
pbar.set_description(s)
# Plot
if ni < 3:
f = str(Path(log_dir) / ("train_batch%g.jpg" % ni)) # filename
result = plot_images(
images=imgs, targets=targets, paths=paths, fname=f
)
if tb_writer and result is not None:
tb_writer.add_image(
f, result, dataformats="HWC", global_step=epoch
)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema is not None:
ema.update_attr(
model, include=["yaml", "nc", "hyp", "gr", "names", "stride"]
)
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(
opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.data.endswith(os.sep + "coco.yaml"),
model=ema.ema.module if hasattr(ema.ema, "module") else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir,
)
# Write
with open(results_file, "a") as f:
f.write(
s + "%10.4g" * 7 % results + "\n"
) # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system(
"gsutil cp %s gs://%s/results/results%s.txt"
% (results_file, opt.bucket, opt.name)
)
# Tensorboard
if tb_writer:
tags = [
"train/giou_loss",
"train/obj_loss",
"train/cls_loss",
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/giou_loss",
"val/obj_loss",
"val/cls_loss",
]
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(
np.array(results).reshape(1, -1)
) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, "r") as f: # create checkpoint
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"training_results": f.read(),
"model": ema.ema.module if hasattr(ema, "module") else ema.ema,
"optimizer": None if final_epoch else optimizer.state_dict(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ("_" if len(opt.name) and not opt.name.isnumeric() else "") + opt.name
fresults, flast, fbest = (
"results%s.txt" % n,
wdir + "last%s.pt" % n,
wdir + "best%s.pt" % n,
)
for f1, f2 in zip(
[wdir + "last.pt", wdir + "best.pt", "results.txt"],
[flast, fbest, fresults],
):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith(".pt") # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system(
"gsutil cp %s gs://%s/weights" % (f2, opt.bucket)
) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
print(
"%g epochs completed in %.3f hours.\n"
% (epoch - start_epoch + 1, (time.time() - t0) / 3600)
)
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
results = x[0, :7]
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
|
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
results = tuple(x[0, :7])
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
|
def print_mutation(hyp, results, yaml_file="hyp_evolved.yaml", bucket=""):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = "%10s" * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = "%10.3g" * len(hyp) % tuple(hyp.values()) # hyperparam values
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print("\n%s\n%s\nEvolved fitness: %s\n" % (a, b, c))
if bucket:
os.system("gsutil cp gs://%s/evolve.txt ." % bucket) # download evolve.txt
with open("evolve.txt", "a") as f: # append result
f.write(c + b + "\n")
x = np.unique(np.loadtxt("evolve.txt", ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt("evolve.txt", x, "%10.3g") # save sort by fitness
if bucket:
os.system("gsutil cp evolve.txt gs://%s" % bucket) # upload evolve.txt
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, "w") as f:
results = x[0, :7]
c = (
"%10.4g" * len(results) % results
) # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write(
"# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: "
% len(x)
+ c
+ "\n\n"
)
yaml.dump(hyp, f, sort_keys=False)
|
https://github.com/ultralytics/yolov5/issues/566
|
Traceback (most recent call last):
File "yolov5/train.py", line 516, in <module>
print_mutation(hyp, results, opt.bucket)
File "/content/drive/.shortcut-targets-by-id/1hQ281-8ogL2dJzHbXZSWEYDIYei6RRbe/YOLOv5/yolov5/utils/utils.py", line 830, in print_mutation
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
TypeError: must be real number, not str
|
TypeError
|
def load_solution(
self,
solution,
allow_consistent_values_for_fixed_vars=False,
comparison_tolerance_for_fixed_vars=1e-5,
):
"""
Load a solution.
Args:
solution: A :class:`pyomo.opt.Solution` object with a
symbol map. Optionally, the solution can be tagged
with a default variable value (e.g., 0) that will be
applied to those variables in the symbol map that do
not have a value in the solution.
allow_consistent_values_for_fixed_vars:
Indicates whether a solution can specify
consistent values for variables that are
fixed.
comparison_tolerance_for_fixed_vars: The
tolerance used to define whether or not a
value in the solution is consistent with the
value of a fixed variable.
"""
from pyomo.core.kernel.suffix import import_suffix_generator
symbol_map = solution.symbol_map
default_variable_value = getattr(solution, "default_variable_value", None)
# Generate the list of active import suffixes on
# this top level model
valid_import_suffixes = {
obj.storage_key: obj for obj in import_suffix_generator(self)
}
# To ensure that import suffix data gets properly
# overwritten (e.g., the case where nonzero dual
# values exist on the suffix and but only sparse
# dual values exist in the results object) we clear
# all active import suffixes.
for suffix in six.itervalues(valid_import_suffixes):
suffix.clear()
# Load problem (model) level suffixes. These would
# only come from ampl interfaced solution suffixes
# at this point in time.
for _attr_key, attr_value in six.iteritems(solution.problem):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][self] = attr_value
#
# Load variable data
#
from pyomo.core.kernel.variable import IVariable
for var in self.components(ctype=IVariable):
var.stale = True
var_skip_attrs = ["id", "canonical_label"]
seen_var_ids = set()
for label, entry in six.iteritems(solution.variable):
var = symbol_map.getObject(label)
if (var is None) or (var is SymbolMap.UnknownSymbol):
# NOTE: the following is a hack, to handle
# the ONE_VAR_CONSTANT variable that is
# necessary for the objective
# constant-offset terms. probably should
# create a dummy variable in the model
# map at the same time the objective
# expression is being constructed.
if "ONE_VAR_CONST" in label:
continue
else:
raise KeyError(
"Variable associated with symbol '%s' "
"is not found on this block" % (label)
)
seen_var_ids.add(id(var))
if (not allow_consistent_values_for_fixed_vars) and var.fixed:
raise ValueError(
"Variable '%s' is currently fixed. "
"A new value is not expected "
"in solution" % (var.name)
)
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key == "value":
if (
allow_consistent_values_for_fixed_vars
and var.fixed
and (
math.fabs(attr_value - var.value)
> comparison_tolerance_for_fixed_vars
)
):
raise ValueError(
"Variable %s is currently fixed. "
"A value of '%s' in solution is "
"not within tolerance=%s of the current "
"value of '%s'"
% (
var.name,
attr_value,
comparison_tolerance_for_fixed_vars,
var.value,
)
)
var.value = attr_value
var.stale = False
elif attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][var] = attr_value
# start to build up the set of unseen variable ids
unseen_var_ids = set(symbol_map.byObject.keys())
# at this point it contains ids for non-variable types
unseen_var_ids.difference_update(seen_var_ids)
#
# Load objective solution (should simply be suffixes if
# they exist)
#
objective_skip_attrs = ["id", "canonical_label", "value"]
for label, entry in six.iteritems(solution.objective):
obj = symbol_map.getObject(label)
if (obj is None) or (obj is SymbolMap.UnknownSymbol):
raise KeyError(
"Objective associated with symbol '%s' "
"is not found on this block" % (label)
)
# Because of __default_objective__, an objective might
# appear twice in the objective dictionary.
unseen_var_ids.discard(id(obj))
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][obj] = attr_value
#
# Load constraint solution
#
con_skip_attrs = ["id", "canonical_label"]
for label, entry in six.iteritems(solution.constraint):
con = symbol_map.getObject(label)
if con is SymbolMap.UnknownSymbol:
#
# This is a hack - see above.
#
if "ONE_VAR_CONST" in label:
continue
else:
raise KeyError(
"Constraint associated with symbol '%s' "
"is not found on this block" % (label)
)
unseen_var_ids.remove(id(con))
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][con] = attr_value
#
# Load sparse variable solution
#
if default_variable_value is not None:
for var_id in unseen_var_ids:
var = symbol_map.getObject(symbol_map.byObject[var_id])
if var.ctype is not IVariable:
continue
if (not allow_consistent_values_for_fixed_vars) and var.fixed:
raise ValueError(
"Variable '%s' is currently fixed. "
"A new value is not expected "
"in solution" % (var.name)
)
if (
allow_consistent_values_for_fixed_vars
and var.fixed
and (
math.fabs(default_variable_value - var.value)
> comparison_tolerance_for_fixed_vars
)
):
raise ValueError(
"Variable %s is currently fixed. "
"A value of '%s' in solution is "
"not within tolerance=%s of the current "
"value of '%s'"
% (
var.name,
default_variable_value,
comparison_tolerance_for_fixed_vars,
var.value,
)
)
var.value = default_variable_value
var.stale = False
|
def load_solution(
self,
solution,
allow_consistent_values_for_fixed_vars=False,
comparison_tolerance_for_fixed_vars=1e-5,
):
"""
Load a solution.
Args:
solution: A :class:`pyomo.opt.Solution` object with a
symbol map. Optionally, the solution can be tagged
with a default variable value (e.g., 0) that will be
applied to those variables in the symbol map that do
not have a value in the solution.
allow_consistent_values_for_fixed_vars:
Indicates whether a solution can specify
consistent values for variables that are
fixed.
comparison_tolerance_for_fixed_vars: The
tolerance used to define whether or not a
value in the solution is consistent with the
value of a fixed variable.
"""
from pyomo.core.kernel.suffix import import_suffix_generator
symbol_map = solution.symbol_map
default_variable_value = getattr(solution, "default_variable_value", None)
# Generate the list of active import suffixes on
# this top level model
valid_import_suffixes = {
obj.storage_key: obj for obj in import_suffix_generator(self)
}
# To ensure that import suffix data gets properly
# overwritten (e.g., the case where nonzero dual
# values exist on the suffix and but only sparse
# dual values exist in the results object) we clear
# all active import suffixes.
for suffix in six.itervalues(valid_import_suffixes):
suffix.clear()
# Load problem (model) level suffixes. These would
# only come from ampl interfaced solution suffixes
# at this point in time.
for _attr_key, attr_value in six.iteritems(solution.problem):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][self] = attr_value
#
# Load variable data
#
from pyomo.core.kernel.variable import IVariable
for var in self.components(ctype=IVariable):
var.stale = True
var_skip_attrs = ["id", "canonical_label"]
seen_var_ids = set()
for label, entry in six.iteritems(solution.variable):
var = symbol_map.getObject(label)
if (var is None) or (var is SymbolMap.UnknownSymbol):
# NOTE: the following is a hack, to handle
# the ONE_VAR_CONSTANT variable that is
# necessary for the objective
# constant-offset terms. probably should
# create a dummy variable in the model
# map at the same time the objective
# expression is being constructed.
if "ONE_VAR_CONST" in label:
continue
else:
raise KeyError(
"Variable associated with symbol '%s' "
"is not found on this block" % (label)
)
seen_var_ids.add(id(var))
if (not allow_consistent_values_for_fixed_vars) and var.fixed:
raise ValueError(
"Variable '%s' is currently fixed. "
"A new value is not expected "
"in solution" % (var.name)
)
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key == "value":
if (
allow_consistent_values_for_fixed_vars
and var.fixed
and (
math.fabs(attr_value - var.value)
> comparison_tolerance_for_fixed_vars
)
):
raise ValueError(
"Variable %s is currently fixed. "
"A value of '%s' in solution is "
"not within tolerance=%s of the current "
"value of '%s'"
% (
var.name,
attr_value,
comparison_tolerance_for_fixed_vars,
var.value,
)
)
var.value = attr_value
var.stale = False
elif attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][var] = attr_value
# start to build up the set of unseen variable ids
unseen_var_ids = set(symbol_map.byObject.keys())
# at this point it contains ids for non-variable types
unseen_var_ids.difference_update(seen_var_ids)
#
# Load objective solution (should simply be suffixes if
# they exist)
#
objective_skip_attrs = ["id", "canonical_label", "value"]
for label, entry in six.iteritems(solution.objective):
obj = symbol_map.getObject(label)
if (obj is None) or (obj is SymbolMap.UnknownSymbol):
raise KeyError(
"Objective associated with symbol '%s' "
"is not found on this block" % (label)
)
unseen_var_ids.remove(id(obj))
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][obj] = attr_value
#
# Load constraint solution
#
con_skip_attrs = ["id", "canonical_label"]
for label, entry in six.iteritems(solution.constraint):
con = symbol_map.getObject(label)
if con is SymbolMap.UnknownSymbol:
#
# This is a hack - see above.
#
if "ONE_VAR_CONST" in label:
continue
else:
raise KeyError(
"Constraint associated with symbol '%s' "
"is not found on this block" % (label)
)
unseen_var_ids.remove(id(con))
for _attr_key, attr_value in six.iteritems(entry):
attr_key = _attr_key[0].lower() + _attr_key[1:]
if attr_key in valid_import_suffixes:
valid_import_suffixes[attr_key][con] = attr_value
#
# Load sparse variable solution
#
if default_variable_value is not None:
for var_id in unseen_var_ids:
var = symbol_map.getObject(symbol_map.byObject[var_id])
if var.ctype is not IVariable:
continue
if (not allow_consistent_values_for_fixed_vars) and var.fixed:
raise ValueError(
"Variable '%s' is currently fixed. "
"A new value is not expected "
"in solution" % (var.name)
)
if (
allow_consistent_values_for_fixed_vars
and var.fixed
and (
math.fabs(default_variable_value - var.value)
> comparison_tolerance_for_fixed_vars
)
):
raise ValueError(
"Variable %s is currently fixed. "
"A value of '%s' in solution is "
"not within tolerance=%s of the current "
"value of '%s'"
% (
var.name,
default_variable_value,
comparison_tolerance_for_fixed_vars,
var.value,
)
)
var.value = default_variable_value
var.stale = False
|
https://github.com/Pyomo/pyomo/issues/1766
|
Welcome to IBM(R) ILOG(R) CPLEX(R) Interactive Optimizer 20.1.0.0
with Simplex, Mixed Integer & Barrier Optimizers
5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
Copyright IBM Corp. 1988, 2020. All Rights Reserved.
Type 'help' for a list of available commands.
Type 'help' followed by a command name for more
information on commands.
CPLEX> Logfile 'cplex.log' closed.
Logfile '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmp3ca49s13.cplex.log' open.
CPLEX> Problem '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmpsefek9qr.pyomo.lp' read.
Read time = 0.00 sec. (0.00 ticks)
CPLEX> Problem name : /var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmpsefek9qr.pyomo.lp
Objective sense : Minimize
Variables : 3 [Nneg: 2, Binary: 1]
Objective nonzeros : 2
Linear constraints : 4 [Less: 1, Greater: 2, Equal: 1]
Nonzeros : 6
RHS nonzeros : 4
Variables : Min LB: 0.000000 Max UB: 1.000000
Objective nonzeros : Min : 1.000000 Max : 3.000000
Linear constraints :
Nonzeros : Min : 1.000000 Max : 1.000000
RHS nonzeros : Min : 1.000000 Max : 10.00000
CPLEX> Version identifier: 20.1.0.0 | 2020-11-10 | 9bedb6d68
Found incumbent of value 13.000000 after 0.00 sec. (0.00 ticks)
Tried aggregator 1 time.
MIP Presolve eliminated 4 rows and 3 columns.
All rows and columns eliminated.
Presolve time = 0.00 sec. (0.00 ticks)
Root node processing (before b&c):
Real time = 0.00 sec. (0.00 ticks)
Parallel b&c, 12 threads:
Real time = 0.00 sec. (0.00 ticks)
Sync time (average) = 0.00 sec.
Wait time (average) = 0.00 sec.
------------
Total (root+branch&cut) = 0.00 sec. (0.00 ticks)
Solution pool: 2 solutions saved.
MIP - Integer optimal solution: Objective = 6.0000000000e+00
Solution time = 0.00 sec. Iterations = 0 Nodes = 0 (1)
Deterministic time = 0.00 ticks (1.79 ticks/sec)
CPLEX> Incumbent solution written to file '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmp8xwq6lvo.cplex.sol'.
CPLEX> Traceback (most recent call last):
File "/Users/zedongpeng/Github/pyomo-MINLP-benchmarking/test.py", line 115, in <module>
results = opt.solve(model,tee=True) #,warmstart=True)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/base/solvers.py", line 603, in solve
result = self._postsolve()
File "/Users/zedongpeng/Github/pyomo/pyomo/solvers/plugins/solvers/CPLEX.py", line 894, in _postsolve
results = ILMLicensedSystemCallSolver._postsolve(self)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/solver/shellcmd.py", line 269, in _postsolve
results = self.process_output(self._rc)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/solver/shellcmd.py", line 331, in process_output
self.process_soln_file(results)
File "/Users/zedongpeng/Github/pyomo/pyomo/solvers/plugins/solvers/CPLEX.py", line 780, in process_soln_file
objective_value = (tokens[0].split('=')[1].strip()).lstrip("\"").rstrip("\"")
IndexError: list index out of range
|
IndexError
|
def process_soln_file(self, results):
# the only suffixes that we extract from CPLEX are
# constraint duals, constraint slacks, and variable
# reduced-costs. scan through the solver suffix list
# and throw an exception if the user has specified
# any others.
extract_duals = False
extract_slacks = False
extract_reduced_costs = False
extract_rc = False
extract_lrc = False
extract_urc = False
for suffix in self._suffixes:
flag = False
if re.match(suffix, "dual"):
extract_duals = True
flag = True
if re.match(suffix, "slack"):
extract_slacks = True
flag = True
if re.match(suffix, "rc"):
extract_reduced_costs = True
extract_rc = True
flag = True
if re.match(suffix, "lrc"):
extract_reduced_costs = True
extract_lrc = True
flag = True
if re.match(suffix, "urc"):
extract_reduced_costs = True
extract_urc = True
flag = True
if not flag:
raise RuntimeError(
"***The CPLEX solver plugin cannot extract solution suffix=" + suffix
)
# check for existence of the solution file
# not sure why we just return - would think that we
# would want to indicate some sort of error
if not os.path.exists(self._soln_file):
return
range_duals = {}
range_slacks = {}
soln = Solution()
soln.objective["__default_objective__"] = {"Value": None}
# caching for efficiency
soln_variables = soln.variable
soln_constraints = soln.constraint
INPUT = open(self._soln_file, "r")
results.problem.number_of_objectives = 1
time_limit_exceeded = False
mip_problem = False
for line in INPUT:
line = line.strip()
line = line.lstrip("<?/")
line = line.rstrip("/>?")
tokens = line.split(" ")
if tokens[0] == "variable":
variable_name = None
variable_value = None
variable_reduced_cost = None
variable_status = None
for i in xrange(1, len(tokens)):
field_name = tokens[i].split("=")[0]
field_value = tokens[i].split("=")[1].lstrip('"').rstrip('"')
if field_name == "name":
variable_name = field_value
elif field_name == "value":
variable_value = field_value
elif (extract_reduced_costs is True) and (field_name == "reducedCost"):
variable_reduced_cost = field_value
elif (extract_reduced_costs is True) and (field_name == "status"):
variable_status = field_value
# skip the "constant-one" variable, used to capture/retain objective offsets in the CPLEX LP format.
if variable_name != "ONE_VAR_CONSTANT":
variable = soln_variables[variable_name] = {
"Value": float(variable_value)
}
if (variable_reduced_cost is not None) and (
extract_reduced_costs is True
):
try:
if extract_rc is True:
variable["Rc"] = float(variable_reduced_cost)
if variable_status is not None:
if extract_lrc is True:
if variable_status == "LL":
variable["Lrc"] = float(variable_reduced_cost)
else:
variable["Lrc"] = 0.0
if extract_urc is True:
if variable_status == "UL":
variable["Urc"] = float(variable_reduced_cost)
else:
variable["Urc"] = 0.0
except:
raise ValueError(
"Unexpected reduced-cost value="
+ str(variable_reduced_cost)
+ " encountered for variable="
+ variable_name
)
elif (tokens[0] == "constraint") and (
(extract_duals is True) or (extract_slacks is True)
):
is_range = False
rlabel = None
rkey = None
for i in xrange(1, len(tokens)):
field_name = tokens[i].split("=")[0]
field_value = tokens[i].split("=")[1].lstrip('"').rstrip('"')
if field_name == "name":
if field_value.startswith("c_"):
constraint = soln_constraints[field_value] = {}
elif field_value.startswith("r_l_"):
is_range = True
rlabel = field_value[4:]
rkey = 0
elif field_value.startswith("r_u_"):
is_range = True
rlabel = field_value[4:]
rkey = 1
elif (extract_duals is True) and (field_name == "dual"): # for LPs
if is_range is False:
constraint["Dual"] = float(field_value)
else:
range_duals.setdefault(rlabel, [0, 0])[rkey] = float(
field_value
)
elif (extract_slacks is True) and (field_name == "slack"): # for MIPs
if is_range is False:
constraint["Slack"] = float(field_value)
else:
range_slacks.setdefault(rlabel, [0, 0])[rkey] = float(
field_value
)
elif tokens[0].startswith("problemName"):
filename = (tokens[0].split("=")[1].strip()).lstrip('"').rstrip('"')
results.problem.name = os.path.basename(filename)
if "." in results.problem.name:
results.problem.name = results.problem.name.split(".")[0]
tINPUT = open(filename, "r")
for tline in tINPUT:
tline = tline.strip()
if tline == "":
continue
tokens = re.split("[\t ]+", tline)
if tokens[0][0] in ["\\", "*"]:
continue
elif tokens[0] == "NAME":
results.problem.name = tokens[1]
else:
sense = tokens[0].lower()
if sense in ["max", "maximize"]:
results.problem.sense = ProblemSense.maximize
if sense in ["min", "minimize"]:
results.problem.sense = ProblemSense.minimize
break
tINPUT.close()
elif tokens[0].startswith("objectiveValue") and tokens[0] != "objectiveValues":
# prior to 12.10.0, the objective value came back as an
# attribute on the <header> tag
objective_value = (tokens[0].split("=")[1].strip()).lstrip('"').rstrip('"')
soln.objective["__default_objective__"]["Value"] = float(objective_value)
elif tokens[0] == "objective":
# beginning in 12.10.0, CPLEX supports multiple
# objectives in an <objectiveValue> tag
fields = {}
for field in tokens[1:]:
k, v = field.split("=")
fields[k] = v.strip('"')
soln.objective.setdefault(fields["name"], {})["Value"] = float(
fields["value"]
)
elif tokens[0].startswith("solutionStatusValue"):
pieces = tokens[0].split("=")
solution_status = eval(pieces[1])
# solution status = 1 => optimal
# solution status = 3 => infeasible
if soln.status == SolutionStatus.unknown:
if solution_status == 1:
soln.status = SolutionStatus.optimal
elif solution_status == 3:
soln.status = SolutionStatus.infeasible
soln.gap = None
else:
# we are flagging anything with a solution status >= 4 as an error, to possibly
# be over-ridden as we learn more about the status (e.g., due to time limit exceeded).
soln.status = SolutionStatus.error
soln.gap = None
elif tokens[0].startswith("solutionStatusString"):
solution_status = (
((" ".join(tokens).split("=")[1]).strip()).lstrip('"').rstrip('"')
)
if solution_status in [
"optimal",
"integer optimal solution",
"integer optimal, tolerance",
]:
soln.status = SolutionStatus.optimal
soln.gap = 0.0
results.problem.lower_bound = soln.objective["__default_objective__"][
"Value"
]
results.problem.upper_bound = soln.objective["__default_objective__"][
"Value"
]
if "integer" in solution_status:
mip_problem = True
elif solution_status in ["infeasible"]:
soln.status = SolutionStatus.infeasible
soln.gap = None
elif solution_status in ["time limit exceeded"]:
# we need to know if the solution is primal feasible, and if it is, set the solution status accordingly.
# for now, just set the flag so we can trigger the logic when we see the primalFeasible keyword.
time_limit_exceeded = True
elif tokens[0].startswith("MIPNodes"):
if mip_problem:
n = eval(
eval((" ".join(tokens).split("=")[1]).strip())
.lstrip('"')
.rstrip('"')
)
results.solver.statistics.branch_and_bound.number_of_created_subproblems = n
results.solver.statistics.branch_and_bound.number_of_bounded_subproblems = n
elif tokens[0].startswith("primalFeasible") and (time_limit_exceeded is True):
primal_feasible = int(
((" ".join(tokens).split("=")[1]).strip()).lstrip('"').rstrip('"')
)
if primal_feasible == 1:
soln.status = SolutionStatus.feasible
if results.problem.sense == ProblemSense.minimize:
results.problem.upper_bound = soln.objective[
"__default_objective__"
]["Value"]
else:
results.problem.lower_bound = soln.objective[
"__default_objective__"
]["Value"]
else:
soln.status = SolutionStatus.infeasible
if self._best_bound is not None:
if results.problem.sense == ProblemSense.minimize:
results.problem.lower_bound = self._best_bound
else:
results.problem.upper_bound = self._best_bound
if self._gap is not None:
soln.gap = self._gap
# For the range constraints, supply only the dual with the largest
# magnitude (at least one should always be numerically zero)
for key, (ld, ud) in iteritems(range_duals):
if abs(ld) > abs(ud):
soln_constraints["r_l_" + key] = {"Dual": ld}
else:
soln_constraints["r_l_" + key] = {"Dual": ud} # Use the same key
# slacks
for key, (ls, us) in iteritems(range_slacks):
if abs(ls) > abs(us):
soln_constraints.setdefault("r_l_" + key, {})["Slack"] = ls
else:
soln_constraints.setdefault("r_l_" + key, {})["Slack"] = (
us # Use the same key
)
if not results.solver.status is SolverStatus.error:
if results.solver.termination_condition in [
TerminationCondition.unknown,
# TerminationCondition.maxIterations,
# TerminationCondition.minFunctionValue,
# TerminationCondition.minStepLength,
TerminationCondition.globallyOptimal,
TerminationCondition.locallyOptimal,
TerminationCondition.optimal,
# TerminationCondition.maxEvaluations,
TerminationCondition.other,
]:
results.solution.insert(soln)
elif (
results.solver.termination_condition is TerminationCondition.maxTimeLimit
) and (soln.status is not SolutionStatus.infeasible):
results.solution.insert(soln)
INPUT.close()
|
def process_soln_file(self, results):
# the only suffixes that we extract from CPLEX are
# constraint duals, constraint slacks, and variable
# reduced-costs. scan through the solver suffix list
# and throw an exception if the user has specified
# any others.
extract_duals = False
extract_slacks = False
extract_reduced_costs = False
extract_rc = False
extract_lrc = False
extract_urc = False
for suffix in self._suffixes:
flag = False
if re.match(suffix, "dual"):
extract_duals = True
flag = True
if re.match(suffix, "slack"):
extract_slacks = True
flag = True
if re.match(suffix, "rc"):
extract_reduced_costs = True
extract_rc = True
flag = True
if re.match(suffix, "lrc"):
extract_reduced_costs = True
extract_lrc = True
flag = True
if re.match(suffix, "urc"):
extract_reduced_costs = True
extract_urc = True
flag = True
if not flag:
raise RuntimeError(
"***The CPLEX solver plugin cannot extract solution suffix=" + suffix
)
# check for existence of the solution file
# not sure why we just return - would think that we
# would want to indicate some sort of error
if not os.path.exists(self._soln_file):
return
range_duals = {}
range_slacks = {}
soln = Solution()
soln.objective["__default_objective__"] = {"Value": None}
# caching for efficiency
soln_variables = soln.variable
soln_constraints = soln.constraint
INPUT = open(self._soln_file, "r")
results.problem.number_of_objectives = 1
time_limit_exceeded = False
mip_problem = False
for line in INPUT:
line = line.strip()
line = line.lstrip("<?/")
line = line.rstrip("/>?")
tokens = line.split(" ")
if tokens[0] == "variable":
variable_name = None
variable_value = None
variable_reduced_cost = None
variable_status = None
for i in xrange(1, len(tokens)):
field_name = tokens[i].split("=")[0]
field_value = tokens[i].split("=")[1].lstrip('"').rstrip('"')
if field_name == "name":
variable_name = field_value
elif field_name == "value":
variable_value = field_value
elif (extract_reduced_costs is True) and (field_name == "reducedCost"):
variable_reduced_cost = field_value
elif (extract_reduced_costs is True) and (field_name == "status"):
variable_status = field_value
# skip the "constant-one" variable, used to capture/retain objective offsets in the CPLEX LP format.
if variable_name != "ONE_VAR_CONSTANT":
variable = soln_variables[variable_name] = {
"Value": float(variable_value)
}
if (variable_reduced_cost is not None) and (
extract_reduced_costs is True
):
try:
if extract_rc is True:
variable["Rc"] = float(variable_reduced_cost)
if variable_status is not None:
if extract_lrc is True:
if variable_status == "LL":
variable["Lrc"] = float(variable_reduced_cost)
else:
variable["Lrc"] = 0.0
if extract_urc is True:
if variable_status == "UL":
variable["Urc"] = float(variable_reduced_cost)
else:
variable["Urc"] = 0.0
except:
raise ValueError(
"Unexpected reduced-cost value="
+ str(variable_reduced_cost)
+ " encountered for variable="
+ variable_name
)
elif (tokens[0] == "constraint") and (
(extract_duals is True) or (extract_slacks is True)
):
is_range = False
rlabel = None
rkey = None
for i in xrange(1, len(tokens)):
field_name = tokens[i].split("=")[0]
field_value = tokens[i].split("=")[1].lstrip('"').rstrip('"')
if field_name == "name":
if field_value.startswith("c_"):
constraint = soln_constraints[field_value] = {}
elif field_value.startswith("r_l_"):
is_range = True
rlabel = field_value[4:]
rkey = 0
elif field_value.startswith("r_u_"):
is_range = True
rlabel = field_value[4:]
rkey = 1
elif (extract_duals is True) and (field_name == "dual"): # for LPs
if is_range is False:
constraint["Dual"] = float(field_value)
else:
range_duals.setdefault(rlabel, [0, 0])[rkey] = float(
field_value
)
elif (extract_slacks is True) and (field_name == "slack"): # for MIPs
if is_range is False:
constraint["Slack"] = float(field_value)
else:
range_slacks.setdefault(rlabel, [0, 0])[rkey] = float(
field_value
)
elif tokens[0].startswith("problemName"):
filename = (tokens[0].split("=")[1].strip()).lstrip('"').rstrip('"')
results.problem.name = os.path.basename(filename)
if "." in results.problem.name:
results.problem.name = results.problem.name.split(".")[0]
tINPUT = open(filename, "r")
for tline in tINPUT:
tline = tline.strip()
if tline == "":
continue
tokens = re.split("[\t ]+", tline)
if tokens[0][0] in ["\\", "*"]:
continue
elif tokens[0] == "NAME":
results.problem.name = tokens[1]
else:
sense = tokens[0].lower()
if sense in ["max", "maximize"]:
results.problem.sense = ProblemSense.maximize
if sense in ["min", "minimize"]:
results.problem.sense = ProblemSense.minimize
break
tINPUT.close()
elif tokens[0].startswith("objectiveValue"):
objective_value = (tokens[0].split("=")[1].strip()).lstrip('"').rstrip('"')
soln.objective["__default_objective__"]["Value"] = float(objective_value)
elif tokens[0].startswith("solutionStatusValue"):
pieces = tokens[0].split("=")
solution_status = eval(pieces[1])
# solution status = 1 => optimal
# solution status = 3 => infeasible
if soln.status == SolutionStatus.unknown:
if solution_status == 1:
soln.status = SolutionStatus.optimal
elif solution_status == 3:
soln.status = SolutionStatus.infeasible
soln.gap = None
else:
# we are flagging anything with a solution status >= 4 as an error, to possibly
# be over-ridden as we learn more about the status (e.g., due to time limit exceeded).
soln.status = SolutionStatus.error
soln.gap = None
elif tokens[0].startswith("solutionStatusString"):
solution_status = (
((" ".join(tokens).split("=")[1]).strip()).lstrip('"').rstrip('"')
)
if solution_status in [
"optimal",
"integer optimal solution",
"integer optimal, tolerance",
]:
soln.status = SolutionStatus.optimal
soln.gap = 0.0
results.problem.lower_bound = soln.objective["__default_objective__"][
"Value"
]
results.problem.upper_bound = soln.objective["__default_objective__"][
"Value"
]
if "integer" in solution_status:
mip_problem = True
elif solution_status in ["infeasible"]:
soln.status = SolutionStatus.infeasible
soln.gap = None
elif solution_status in ["time limit exceeded"]:
# we need to know if the solution is primal feasible, and if it is, set the solution status accordingly.
# for now, just set the flag so we can trigger the logic when we see the primalFeasible keyword.
time_limit_exceeded = True
elif tokens[0].startswith("MIPNodes"):
if mip_problem:
n = eval(
eval((" ".join(tokens).split("=")[1]).strip())
.lstrip('"')
.rstrip('"')
)
results.solver.statistics.branch_and_bound.number_of_created_subproblems = n
results.solver.statistics.branch_and_bound.number_of_bounded_subproblems = n
elif tokens[0].startswith("primalFeasible") and (time_limit_exceeded is True):
primal_feasible = int(
((" ".join(tokens).split("=")[1]).strip()).lstrip('"').rstrip('"')
)
if primal_feasible == 1:
soln.status = SolutionStatus.feasible
if results.problem.sense == ProblemSense.minimize:
results.problem.upper_bound = soln.objective[
"__default_objective__"
]["Value"]
else:
results.problem.lower_bound = soln.objective[
"__default_objective__"
]["Value"]
else:
soln.status = SolutionStatus.infeasible
if self._best_bound is not None:
if results.problem.sense == ProblemSense.minimize:
results.problem.lower_bound = self._best_bound
else:
results.problem.upper_bound = self._best_bound
if self._gap is not None:
soln.gap = self._gap
# For the range constraints, supply only the dual with the largest
# magnitude (at least one should always be numerically zero)
for key, (ld, ud) in iteritems(range_duals):
if abs(ld) > abs(ud):
soln_constraints["r_l_" + key] = {"Dual": ld}
else:
soln_constraints["r_l_" + key] = {"Dual": ud} # Use the same key
# slacks
for key, (ls, us) in iteritems(range_slacks):
if abs(ls) > abs(us):
soln_constraints.setdefault("r_l_" + key, {})["Slack"] = ls
else:
soln_constraints.setdefault("r_l_" + key, {})["Slack"] = (
us # Use the same key
)
if not results.solver.status is SolverStatus.error:
if results.solver.termination_condition in [
TerminationCondition.unknown,
# TerminationCondition.maxIterations,
# TerminationCondition.minFunctionValue,
# TerminationCondition.minStepLength,
TerminationCondition.globallyOptimal,
TerminationCondition.locallyOptimal,
TerminationCondition.optimal,
# TerminationCondition.maxEvaluations,
TerminationCondition.other,
]:
results.solution.insert(soln)
elif (
results.solver.termination_condition is TerminationCondition.maxTimeLimit
) and (soln.status is not SolutionStatus.infeasible):
results.solution.insert(soln)
INPUT.close()
|
https://github.com/Pyomo/pyomo/issues/1766
|
Welcome to IBM(R) ILOG(R) CPLEX(R) Interactive Optimizer 20.1.0.0
with Simplex, Mixed Integer & Barrier Optimizers
5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
Copyright IBM Corp. 1988, 2020. All Rights Reserved.
Type 'help' for a list of available commands.
Type 'help' followed by a command name for more
information on commands.
CPLEX> Logfile 'cplex.log' closed.
Logfile '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmp3ca49s13.cplex.log' open.
CPLEX> Problem '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmpsefek9qr.pyomo.lp' read.
Read time = 0.00 sec. (0.00 ticks)
CPLEX> Problem name : /var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmpsefek9qr.pyomo.lp
Objective sense : Minimize
Variables : 3 [Nneg: 2, Binary: 1]
Objective nonzeros : 2
Linear constraints : 4 [Less: 1, Greater: 2, Equal: 1]
Nonzeros : 6
RHS nonzeros : 4
Variables : Min LB: 0.000000 Max UB: 1.000000
Objective nonzeros : Min : 1.000000 Max : 3.000000
Linear constraints :
Nonzeros : Min : 1.000000 Max : 1.000000
RHS nonzeros : Min : 1.000000 Max : 10.00000
CPLEX> Version identifier: 20.1.0.0 | 2020-11-10 | 9bedb6d68
Found incumbent of value 13.000000 after 0.00 sec. (0.00 ticks)
Tried aggregator 1 time.
MIP Presolve eliminated 4 rows and 3 columns.
All rows and columns eliminated.
Presolve time = 0.00 sec. (0.00 ticks)
Root node processing (before b&c):
Real time = 0.00 sec. (0.00 ticks)
Parallel b&c, 12 threads:
Real time = 0.00 sec. (0.00 ticks)
Sync time (average) = 0.00 sec.
Wait time (average) = 0.00 sec.
------------
Total (root+branch&cut) = 0.00 sec. (0.00 ticks)
Solution pool: 2 solutions saved.
MIP - Integer optimal solution: Objective = 6.0000000000e+00
Solution time = 0.00 sec. Iterations = 0 Nodes = 0 (1)
Deterministic time = 0.00 ticks (1.79 ticks/sec)
CPLEX> Incumbent solution written to file '/var/folders/pj/1113b24j7b3083zbt19pjxqm0000gn/T/tmp8xwq6lvo.cplex.sol'.
CPLEX> Traceback (most recent call last):
File "/Users/zedongpeng/Github/pyomo-MINLP-benchmarking/test.py", line 115, in <module>
results = opt.solve(model,tee=True) #,warmstart=True)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/base/solvers.py", line 603, in solve
result = self._postsolve()
File "/Users/zedongpeng/Github/pyomo/pyomo/solvers/plugins/solvers/CPLEX.py", line 894, in _postsolve
results = ILMLicensedSystemCallSolver._postsolve(self)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/solver/shellcmd.py", line 269, in _postsolve
results = self.process_output(self._rc)
File "/Users/zedongpeng/Github/pyomo/pyomo/opt/solver/shellcmd.py", line 331, in process_output
self.process_soln_file(results)
File "/Users/zedongpeng/Github/pyomo/pyomo/solvers/plugins/solvers/CPLEX.py", line 780, in process_soln_file
objective_value = (tokens[0].split('=')[1].strip()).lstrip("\"").rstrip("\"")
IndexError: list index out of range
|
IndexError
|
def _apply_solver(self):
if not self._save_results:
for block in self._pyomo_model.block_data_objects(
descend_into=True, active=True
):
for var in block.component_data_objects(
ctype=pyomo.core.base.var.Var,
descend_into=False,
active=True,
sort=False,
):
var.stale = True
# In recent versions of CPLEX it is helpful to manually open the
# log file and then explicitly close it after CPLEX is finished.
# This ensures that the file is closed (and unlocked) on Windows
# before the TempfileManager (or user) attempts to delete the
# log file. Passing in an opened file object is supported at
# least as far back as CPLEX 12.5.1 [the oldest version
# supported by IBM as of 1 Oct 2020]
if self.version() >= (12, 5, 1) and isinstance(self._log_file, six.string_types):
_log_file = (open(self._log_file, "a"),)
_close_log_file = True
else:
_log_file = (self._log_file,)
_close_log_file = False
if self._tee:
def _process_stream(arg):
sys.stdout.write(arg)
return arg
_log_file += (_process_stream,)
try:
self._solver_model.set_results_stream(*_log_file)
if self._keepfiles:
print("Solver log file: " + self._log_file)
obj_degree = self._objective.expr.polynomial_degree()
if obj_degree is None or obj_degree > 2:
raise DegreeError(
"CPLEXDirect does not support expressions of degree {0}.".format(
obj_degree
)
)
elif obj_degree == 2:
quadratic_objective = True
else:
quadratic_objective = False
num_integer_vars = self._solver_model.variables.get_num_integer()
num_binary_vars = self._solver_model.variables.get_num_binary()
num_sos = self._solver_model.SOS.get_num()
if self._solver_model.quadratic_constraints.get_num() != 0:
quadratic_cons = True
else:
quadratic_cons = False
if (num_integer_vars + num_binary_vars + num_sos) > 0:
integer = True
else:
integer = False
if integer:
if quadratic_cons:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MIQCP
)
elif quadratic_objective:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MIQP
)
else:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MILP
)
else:
if quadratic_cons:
self._solver_model.set_problem_type(self._solver_model.problem_type.QCP)
elif quadratic_objective:
self._solver_model.set_problem_type(self._solver_model.problem_type.QP)
else:
self._solver_model.set_problem_type(self._solver_model.problem_type.LP)
for key, option in self.options.items():
opt_cmd = self._solver_model.parameters
key_pieces = key.split("_")
for key_piece in key_pieces:
opt_cmd = getattr(opt_cmd, key_piece)
# When options come from the pyomo command, all
# values are string types, so we try to cast
# them to a numeric value in the event that
# setting the parameter fails.
try:
opt_cmd.set(option)
except self._cplex.exceptions.CplexError:
# we place the exception handling for
# checking the cast of option to a float in
# another function so that we can simply
# call raise here instead of except
# TypeError as e / raise e, because the
# latter does not preserve the Cplex stack
# trace
if not _is_numeric(option):
raise
opt_cmd.set(float(option))
t0 = time.time()
self._solver_model.solve()
t1 = time.time()
self._wallclock_time = t1 - t0
finally:
self._solver_model.set_results_stream(None)
if _close_log_file:
_log_file[0].close()
# FIXME: can we get a return code indicating if CPLEX had a significant failure?
return Bunch(rc=None, log=None)
|
def _apply_solver(self):
if not self._save_results:
for block in self._pyomo_model.block_data_objects(
descend_into=True, active=True
):
for var in block.component_data_objects(
ctype=pyomo.core.base.var.Var,
descend_into=False,
active=True,
sort=False,
):
var.stale = True
_log_file = self._log_file
if self.version() >= (12, 10):
_log_file = open(self._log_file, "w")
try:
if self._tee:
def _process_stream(arg):
sys.stdout.write(arg)
return arg
self._solver_model.set_results_stream(_log_file, _process_stream)
else:
self._solver_model.set_results_stream(_log_file)
if self._keepfiles:
print("Solver log file: " + self._log_file)
obj_degree = self._objective.expr.polynomial_degree()
if obj_degree is None or obj_degree > 2:
raise DegreeError(
"CPLEXDirect does not support expressions of degree {0}.".format(
obj_degree
)
)
elif obj_degree == 2:
quadratic_objective = True
else:
quadratic_objective = False
num_integer_vars = self._solver_model.variables.get_num_integer()
num_binary_vars = self._solver_model.variables.get_num_binary()
num_sos = self._solver_model.SOS.get_num()
if self._solver_model.quadratic_constraints.get_num() != 0:
quadratic_cons = True
else:
quadratic_cons = False
if (num_integer_vars + num_binary_vars + num_sos) > 0:
integer = True
else:
integer = False
if integer:
if quadratic_cons:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MIQCP
)
elif quadratic_objective:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MIQP
)
else:
self._solver_model.set_problem_type(
self._solver_model.problem_type.MILP
)
else:
if quadratic_cons:
self._solver_model.set_problem_type(self._solver_model.problem_type.QCP)
elif quadratic_objective:
self._solver_model.set_problem_type(self._solver_model.problem_type.QP)
else:
self._solver_model.set_problem_type(self._solver_model.problem_type.LP)
for key, option in self.options.items():
opt_cmd = self._solver_model.parameters
key_pieces = key.split("_")
for key_piece in key_pieces:
opt_cmd = getattr(opt_cmd, key_piece)
# When options come from the pyomo command, all
# values are string types, so we try to cast
# them to a numeric value in the event that
# setting the parameter fails.
try:
opt_cmd.set(option)
except self._cplex.exceptions.CplexError:
# we place the exception handling for
# checking the cast of option to a float in
# another function so that we can simply
# call raise here instead of except
# TypeError as e / raise e, because the
# latter does not preserve the Cplex stack
# trace
if not _is_numeric(option):
raise
opt_cmd.set(float(option))
t0 = time.time()
self._solver_model.solve()
t1 = time.time()
self._wallclock_time = t1 - t0
finally:
if self.version() >= (12, 10):
_log_file.close()
# FIXME: can we get a return code indicating if CPLEX had a significant failure?
return Bunch(rc=None, log=None)
|
https://github.com/Pyomo/pyomo/issues/285
|
CPXPARAM_Read_DataCheck 1
CPXPARAM_Read_APIEncoding "UTF-8"
CPXPARAM_MIP_Strategy_CallbackReducedLP 0
Tried aggregator 1 time.
LP Presolve eliminated 200 rows and 101 columns.
All rows and columns eliminated.
Presolve time = 0.00 sec. (0.05 ticks)
Traceback (most recent call last):
File "C:\Anaconda3\envs\python35\lib\site-packages\pyutilib\component\config\tempfiles.py", line 171, in pop
os.remove(filename)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\Jasper\\AppData\\Local\\Temp\\tmp770of057.log'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "error.py", line 20, in <module>
results = opt.solve(m, tee=True, keepfiles=False)
File "C:\Anaconda3\envs\python35\lib\site-packages\pyomo\solvers\plugins\solvers\direct_solver.py", line 149, in solve
result = self._postsolve()
File "C:\Anaconda3\envs\python35\lib\site-packages\pyomo\solvers\plugins\solvers\cplex_direct.py", line 639, in _postsolve
pyutilib.services.TempfileManager.pop(remove=not self._keepfiles)
File "C:\Anaconda3\envs\python35\lib\site-packages\pyutilib\component\config\tempfiles.py", line 179, in pop
os.remove(filename)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\Jasper\\AppData\\Local\\Temp\\tmp770of057.log'
|
PermissionError
|
def _Q_opt(
self,
ThetaVals=None,
solver="ef_ipopt",
return_values=[],
bootlist=None,
calc_cov=False,
):
"""
Set up all thetas as first stage Vars, return resulting theta
values as well as the objective function value.
NOTE: If thetavals is present it will be attached to the
scenario tree so it can be used by the scenario creation
callback. Side note (feb 2018, dlw): if you later decide to
construct the tree just once and reuse it, then remember to
remove thetavals from it when none is desired.
"""
assert solver != "k_aug" or ThetaVals == None
# Create a tree with dummy scenarios (callback will supply when needed).
# Which names to use (i.e., numbers) depends on if it is for bootstrap.
# (Bootstrap scenarios will use indirection through the bootlist)
if bootlist is None:
tree_model = _treemaker(self._numbers_list)
else:
tree_model = _treemaker(range(len(self._numbers_list)))
stage1 = tree_model.Stages[1]
stage2 = tree_model.Stages[2]
tree_model.StageVariables[stage1] = self.theta_names
tree_model.StageVariables[stage2] = []
tree_model.StageCost[stage1] = "FirstStageCost"
tree_model.StageCost[stage2] = "SecondStageCost"
# Now attach things to the tree_model to pass them to the callback
tree_model.CallbackModule = None
tree_model.CallbackFunction = self._instance_creation_callback
if ThetaVals is not None:
tree_model.ThetaVals = ThetaVals
if bootlist is not None:
tree_model.BootList = bootlist
tree_model.cb_data = self.callback_data # None is OK
stsolver = st.StochSolver(
fsfile="pyomo.contrib.parmest.parmest",
fsfct="_pysp_instance_creation_callback",
tree_model=tree_model,
)
# Solve the extensive form with ipopt
if solver == "ef_ipopt":
# Generate the extensive form of the stochastic program using pysp
self.ef_instance = stsolver.make_ef()
# need_gap is a holdover from solve_ef in rapper.py. Would we ever want
# need_gap = True with parmest?
need_gap = False
assert not (need_gap and self.calc_cov), (
"Calculating both the gap and reduced hessian (covariance) is not currently supported."
)
if not calc_cov:
# Do not calculate the reduced hessian
solver = SolverFactory("ipopt")
if self.solver_options is not None:
for key in self.solver_options:
solver.options[key] = self.solver_options[key]
if need_gap:
solve_result = solver.solve(
self.ef_instance, tee=self.tee, load_solutions=False
)
if len(solve_result.solution) > 0:
absgap = solve_result.solution(0).gap
else:
absgap = None
self.ef_instance.solutions.load_from(solve_result)
else:
solve_result = solver.solve(self.ef_instance, tee=self.tee)
elif not asl_available:
raise ImportError(
"parmest requires ASL to calculate the covariance matrix with solver 'ipopt'"
)
else:
# parmest makes the fitted parameters stage 1 variables
# thus we need to convert from var names (string) to
# Pyomo vars
ind_vars = []
for v in self.theta_names:
# ind_vars.append(eval('ef.'+v))
ind_vars.append(self.ef_instance.MASTER_BLEND_VAR_RootNode[v])
# calculate the reduced hessian
solve_result, inv_red_hes = inv_reduced_hessian_barrier(
self.ef_instance,
independent_variables=ind_vars,
solver_options=self.solver_options,
tee=self.tee,
)
# Extract solution from pysp
stsolver.scenario_tree.pullScenarioSolutionsFromInstances()
stsolver.scenario_tree.snapshotSolutionFromScenarios() # update nodes
if self.diagnostic_mode:
print(
" Solver termination condition = ",
str(solve_result.solver.termination_condition),
)
# assume all first stage are thetas...
thetavals = {}
for name, solval in stsolver.root_Var_solution():
thetavals[name] = solval
objval = stsolver.root_E_obj()
if calc_cov:
# Calculate the covariance matrix
# Extract number of data points considered
n = len(self.callback_data)
# Extract number of fitted parameters
l = len(thetavals)
# Assumption: Objective value is sum of squared errors
sse = objval
"""Calculate covariance assuming experimental observation errors are
independent and follow a Gaussian
distribution with constant variance.
The formula used in parmest was verified against equations (7-5-15) and
(7-5-16) in "Nonlinear Parameter Estimation", Y. Bard, 1974.
This formula is also applicable if the objective is scaled by a constant;
the constant cancels out. (PySP scaled by 1/n because it computes an
expected value.)
"""
cov = 2 * sse / (n - l) * inv_red_hes
if len(return_values) > 0:
var_values = []
for exp_i in self.ef_instance.component_objects(Block, descend_into=False):
vals = {}
for var in return_values:
exp_i_var = eval("exp_i." + str(var))
temp = [pyo.value(_) for _ in exp_i_var.itervalues()]
if len(temp) == 1:
vals[var] = temp[0]
else:
vals[var] = temp
var_values.append(vals)
var_values = pd.DataFrame(var_values)
if calc_cov:
return objval, thetavals, var_values, cov
else:
return objval, thetavals, var_values
if calc_cov:
return objval, thetavals, cov
else:
return objval, thetavals
# Solve with sipopt and k_aug
elif solver == "k_aug":
# Just hope for the best with respect to degrees of freedom.
model = stsolver.make_ef()
stream_solver = True
ipopt = SolverFactory("ipopt")
sipopt = SolverFactory("ipopt_sens")
kaug = SolverFactory("k_aug")
#: ipopt suffixes REQUIRED FOR K_AUG!
model.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT)
model.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
model.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
model.ipopt_zL_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
model.ipopt_zU_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
# declare the suffix to be imported by the solver
model.red_hessian = pyo.Suffix(direction=pyo.Suffix.EXPORT)
#: K_AUG SUFFIXES
model.dof_v = pyo.Suffix(direction=pyo.Suffix.EXPORT)
model.rh_name = pyo.Suffix(direction=pyo.Suffix.IMPORT)
for vstrindex in range(len(self.theta_names)):
vstr = self.theta_names[vstrindex]
varobject = _ef_ROOT_node_Object_from_string(model, vstr)
varobject.set_suffix_value(model.red_hessian, vstrindex + 1)
varobject.set_suffix_value(model.dof_v, 1)
#: rh_name will tell us which position the corresponding variable has on the reduced hessian text file.
#: be sure to declare the suffix value (order)
# dof_v is "degree of freedom variable"
kaug.options["compute_inv"] = "" #: if the reduced hessian is desired.
#: please check the inv_.in file if the compute_inv option was used
#: write some options for ipopt sens
with open("ipopt.opt", "w") as f:
f.write(
"compute_red_hessian yes\n"
) #: computes the reduced hessian (sens_ipopt)
f.write("output_file my_ouput.txt\n")
f.write("rh_eigendecomp yes\n")
f.close()
#: Solve
sipopt.solve(model, tee=stream_solver)
with open("ipopt.opt", "w") as f:
f.close()
ipopt.solve(model, tee=stream_solver)
model.ipopt_zL_in.update(model.ipopt_zL_out)
model.ipopt_zU_in.update(model.ipopt_zU_out)
#: k_aug
print("k_aug \n\n\n")
# m.write('problem.nl', format=ProblemFormat.nl)
kaug.solve(model, tee=stream_solver)
HessDict = {}
thetavals = {}
print("k_aug red_hess")
with open("result_red_hess.txt", "r") as f:
lines = f.readlines()
# asseble the return values
objval = model.MASTER_OBJECTIVE_EXPRESSION.expr()
for i in range(len(lines)):
HessDict[self.theta_names[i]] = {}
linein = lines[i]
print(linein)
parts = linein.split()
for j in range(len(parts)):
HessDict[self.theta_names[i]][self.theta_names[j]] = float(parts[j])
# Get theta value (there is probably a better way...)
vstr = self.theta_names[i]
varobject = _ef_ROOT_node_Object_from_string(model, vstr)
thetavals[self.theta_names[i]] = pyo.value(varobject)
return objval, thetavals, HessDict
else:
raise RuntimeError("Unknown solver in Q_Opt=" + solver)
|
def _Q_opt(
self,
ThetaVals=None,
solver="ef_ipopt",
return_values=[],
bootlist=None,
calc_cov=False,
):
"""
Set up all thetas as first stage Vars, return resulting theta
values as well as the objective function value.
NOTE: If thetavals is present it will be attached to the
scenario tree so it can be used by the scenario creation
callback. Side note (feb 2018, dlw): if you later decide to
construct the tree just once and reuse it, then remember to
remove thetavals from it when none is desired.
"""
assert solver != "k_aug" or ThetaVals == None
# Create a tree with dummy scenarios (callback will supply when needed).
# Which names to use (i.e., numbers) depends on if it is for bootstrap.
# (Bootstrap scenarios will use indirection through the bootlist)
if bootlist is None:
tree_model = _treemaker(self._numbers_list)
else:
tree_model = _treemaker(range(len(self._numbers_list)))
stage1 = tree_model.Stages[1]
stage2 = tree_model.Stages[2]
tree_model.StageVariables[stage1] = self.theta_names
tree_model.StageVariables[stage2] = []
tree_model.StageCost[stage1] = "FirstStageCost"
tree_model.StageCost[stage2] = "SecondStageCost"
# Now attach things to the tree_model to pass them to the callback
tree_model.CallbackModule = None
tree_model.CallbackFunction = self._instance_creation_callback
if ThetaVals is not None:
tree_model.ThetaVals = ThetaVals
if bootlist is not None:
tree_model.BootList = bootlist
tree_model.cb_data = self.callback_data # None is OK
stsolver = st.StochSolver(
fsfile="pyomo.contrib.parmest.parmest",
fsfct="_pysp_instance_creation_callback",
tree_model=tree_model,
)
# Solve the extensive form with ipopt
if solver == "ef_ipopt":
# Generate the extensive form of the stochastic program using pysp
self.ef_instance = stsolver.make_ef()
# need_gap is a holdover from solve_ef in rapper.py. Would we ever want
# need_gap = True with parmest?
need_gap = False
assert not (need_gap and self.calc_cov), (
"Calculating both the gap and reduced hessian (covariance) is not currently supported."
)
if not calc_cov:
# Do not calculate the reduced hessian
solver = SolverFactory("ipopt")
if self.solver_options is not None:
for key in self.solver_options:
solver.options[key] = self.solver_options[key]
if need_gap:
solve_result = solver.solve(
self.ef_instance, tee=self.tee, load_solutions=False
)
if len(solve_result.solution) > 0:
absgap = solve_result.solution(0).gap
else:
absgap = None
self.ef_instance.solutions.load_from(solve_result)
else:
solve_result = solver.solve(self.ef_instance, tee=self.tee)
elif not asl_available:
raise ImportError(
"parmest requires ASL to calculate the covariance matrix with solver 'ipopt'"
)
else:
# parmest makes the fitted parameters stage 1 variables
# thus we need to convert from var names (string) to
# Pyomo vars
ind_vars = []
for v in self.theta_names:
# ind_vars.append(eval('ef.'+v))
ind_vars.append(self.ef_instance.MASTER_BLEND_VAR_RootNode[v])
# calculate the reduced hessian
solve_result, inv_red_hes = inv_reduced_hessian_barrier(
self.ef_instance,
independent_variables=ind_vars,
solver_options=self.solver_options,
tee=self.tee,
)
# Extract solution from pysp
stsolver.scenario_tree.pullScenarioSolutionsFromInstances()
stsolver.scenario_tree.snapshotSolutionFromScenarios() # update nodes
if self.diagnostic_mode:
print(
" Solver termination condition = ",
str(solve_result.solver.termination_condition),
)
# assume all first stage are thetas...
thetavals = {}
for name, solval in stsolver.root_Var_solution():
thetavals[name] = solval
objval = stsolver.root_E_obj()
if calc_cov:
# Calculate the covariance matrix
# Extract number of data points considered
n = len(self.callback_data)
# Extract number of fitted parameters
l = len(thetavals)
# Assumption: Objective value is sum of squared errors
sse = objval
"""Calculate covariance assuming experimental observation errors are
independent and follow a Gaussian
distribution with constant variance.
The formula used in parmest was verified against equations (7-5-15) and
(7-5-16) in "Nonlinear Parameter Estimation", Y. Bard, 1974.
This formula is also applicable if the objective is scaled by a constant;
the constant cancels out. (PySP scaled by 1/n because it computes an
expected value.)
"""
cov = 2 * sse / (n - l) * inv_red_hes
if len(return_values) > 0:
var_values = []
for exp_i in stsolver.ef_instance.component_objects(
Block, descend_into=False
):
vals = {}
for var in return_values:
exp_i_var = eval("exp_i." + str(var))
temp = [_.value for _ in exp_i_var.itervalues()]
if len(temp) == 1:
vals[var] = temp[0]
else:
vals[var] = temp
var_values.append(vals)
var_values = pd.DataFrame(var_values)
if calc_cov:
return objval, thetavals, var_values, cov
else:
return objval, thetavals, var_values
if calc_cov:
return objval, thetavals, cov
else:
return objval, thetavals
# Solve with sipopt and k_aug
elif solver == "k_aug":
# Just hope for the best with respect to degrees of freedom.
model = stsolver.make_ef()
stream_solver = True
ipopt = SolverFactory("ipopt")
sipopt = SolverFactory("ipopt_sens")
kaug = SolverFactory("k_aug")
#: ipopt suffixes REQUIRED FOR K_AUG!
model.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT)
model.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
model.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
model.ipopt_zL_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
model.ipopt_zU_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
# declare the suffix to be imported by the solver
model.red_hessian = pyo.Suffix(direction=pyo.Suffix.EXPORT)
#: K_AUG SUFFIXES
model.dof_v = pyo.Suffix(direction=pyo.Suffix.EXPORT)
model.rh_name = pyo.Suffix(direction=pyo.Suffix.IMPORT)
for vstrindex in range(len(self.theta_names)):
vstr = self.theta_names[vstrindex]
varobject = _ef_ROOT_node_Object_from_string(model, vstr)
varobject.set_suffix_value(model.red_hessian, vstrindex + 1)
varobject.set_suffix_value(model.dof_v, 1)
#: rh_name will tell us which position the corresponding variable has on the reduced hessian text file.
#: be sure to declare the suffix value (order)
# dof_v is "degree of freedom variable"
kaug.options["compute_inv"] = "" #: if the reduced hessian is desired.
#: please check the inv_.in file if the compute_inv option was used
#: write some options for ipopt sens
with open("ipopt.opt", "w") as f:
f.write(
"compute_red_hessian yes\n"
) #: computes the reduced hessian (sens_ipopt)
f.write("output_file my_ouput.txt\n")
f.write("rh_eigendecomp yes\n")
f.close()
#: Solve
sipopt.solve(model, tee=stream_solver)
with open("ipopt.opt", "w") as f:
f.close()
ipopt.solve(model, tee=stream_solver)
model.ipopt_zL_in.update(model.ipopt_zL_out)
model.ipopt_zU_in.update(model.ipopt_zU_out)
#: k_aug
print("k_aug \n\n\n")
# m.write('problem.nl', format=ProblemFormat.nl)
kaug.solve(model, tee=stream_solver)
HessDict = {}
thetavals = {}
print("k_aug red_hess")
with open("result_red_hess.txt", "r") as f:
lines = f.readlines()
# asseble the return values
objval = model.MASTER_OBJECTIVE_EXPRESSION.expr()
for i in range(len(lines)):
HessDict[self.theta_names[i]] = {}
linein = lines[i]
print(linein)
parts = linein.split()
for j in range(len(parts)):
HessDict[self.theta_names[i]][self.theta_names[j]] = float(parts[j])
# Get theta value (there is probably a better way...)
vstr = self.theta_names[i]
varobject = _ef_ROOT_node_Object_from_string(model, vstr)
thetavals[self.theta_names[i]] = pyo.value(varobject)
return objval, thetavals, HessDict
else:
raise RuntimeError("Unknown solver in Q_Opt=" + solver)
|
https://github.com/Pyomo/pyomo/issues/1642
|
Ipopt 3.12.10:
******************************************************************************
This program contains Ipopt, a library for large-scale nonlinear optimization.
Ipopt is released as open source code under the Eclipse Public License (EPL).
For more information visit http://projects.coin-or.org/Ipopt
******************************************************************************
This is Ipopt version 3.12.10, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).
Number of nonzeros in equality constraint Jacobian...: 24
Number of nonzeros in inequality constraint Jacobian.: 0
Number of nonzeros in Lagrangian Hessian.............: 18
Total number of variables............................: 14
variables with only lower bounds: 0
variables with lower and upper bounds: 0
variables with only upper bounds: 0
Total number of equality constraints.................: 12
Total number of inequality constraints...............: 0
inequality constraints with only lower bounds: 0
inequality constraints with lower and upper bounds: 0
inequality constraints with only upper bounds: 0
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 1.6754267e+01 1.50e+01 7.48e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0
1 4.4025852e+00 1.11e-16 2.09e+00 -1.0 1.91e+01 - 1.00e+00 1.00e+00f 1
2 4.3387273e+00 0.00e+00 5.38e-01 -1.0 1.95e-01 - 1.00e+00 1.00e+00f 1
3 4.3317515e+00 0.00e+00 5.35e-02 -1.7 1.28e-01 - 1.00e+00 1.00e+00f 1
4 4.3317112e+00 0.00e+00 2.78e-04 -2.5 6.94e-03 - 1.00e+00 1.00e+00f 1
5 4.3317112e+00 0.00e+00 1.27e-08 -5.7 5.68e-05 - 1.00e+00 1.00e+00f 1
6 4.3317112e+00 0.00e+00 8.88e-15 -9.0 1.88e-09 - 1.00e+00 1.00e+00f 1
Number of Iterations....: 6
(scaled) (unscaled)
Objective...............: 4.3317112136568863e+00 4.3317112136568863e+00
Dual infeasibility......: 8.8817841970012523e-15 8.8817841970012523e-15
Constraint violation....: 0.0000000000000000e+00 0.0000000000000000e+00
Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00
Overall NLP error.......: 8.8817841970012523e-15 8.8817841970012523e-15
Number of objective function evaluations = 7
Number of objective gradient evaluations = 7
Number of equality constraint evaluations = 7
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 7
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 6
Total CPU secs in IPOPT (w/o function evaluations) = 0.024
Total CPU secs in NLP function evaluations = 0.001
EXIT: Optimal Solution Found.
Traceback (most recent call last):
File "parmest_return_values_fail.py", line 28, in <module>
obj, theta = pest.theta_est(return_values=['response_function'])
File "/Users/adowling/DowlingLab/pyomo/pyomo/contrib/parmest/parmest.py", line 798, in theta_est
return self._Q_opt(solver=solver, return_values=return_values,
File "/Users/adowling/DowlingLab/pyomo/pyomo/contrib/parmest/parmest.py", line 557, in _Q_opt
for exp_i in stsolver.ef_instance.component_objects(Block, descend_into=False):
AttributeError: 'StochSolver' object has no attribute 'ef_instance'
|
AttributeError
|
def _component_data_iter(self, ctype=None, active=None, sort=False):
"""
Generator that returns a 3-tuple of (component name, index value,
and _ComponentData) for every component data in the block.
"""
_sort_indices = SortComponents.sort_indices(sort)
_subcomp = PseudoMap(self, ctype, active, sort)
for name, comp in _subcomp.iteritems():
# NOTE: Suffix has a dict interface (something other derived
# non-indexed Components may do as well), so we don't want
# to test the existence of iteritems as a check for
# component datas. We will rely on is_indexed() to catch
# all the indexed components. Then we will do special
# processing for the scalar components to catch the case
# where there are "sparse scalar components"
if comp.is_indexed():
_items = comp.iteritems()
elif hasattr(comp, "_data"):
# This may be an empty Scalar component (e.g., from
# Constraint.Skip on a scalar Constraint)
assert len(comp._data) <= 1
_items = iteritems(comp._data)
else:
_items = ((None, comp),)
if _sort_indices:
_items = sorted(_items, key=itemgetter(0))
if active is None or not isinstance(comp, ActiveIndexedComponent):
for idx, compData in _items:
yield (name, idx), compData
else:
for idx, compData in _items:
if compData.active == active:
yield (name, idx), compData
|
def _component_data_iter(self, ctype=None, active=None, sort=False):
"""
Generator that returns a 3-tuple of (component name, index value,
and _ComponentData) for every component data in the block.
"""
_sort_indices = SortComponents.sort_indices(sort)
_subcomp = PseudoMap(self, ctype, active, sort)
for name, comp in _subcomp.iteritems():
# _NOTE_: Suffix has a dict interface (something other
# derived non-indexed Components may do as well),
# so we don't want to test the existence of
# iteritems as a check for components. Also,
# the case where we test len(comp) after seeing
# that comp.is_indexed is False is a hack for a
# SimpleConstraint whose expression resolved to
# Constraint.skip or Constraint.feasible (in which
# case its data is empty and iteritems would have
# been empty as well)
# try:
# _items = comp.iteritems()
# except AttributeError:
# _items = [ (None, comp) ]
if comp.is_indexed():
_items = comp.iteritems()
# This is a hack (see _NOTE_ above).
elif len(comp) or not hasattr(comp, "_data"):
_items = ((None, comp),)
else:
_items = tuple()
if _sort_indices:
_items = sorted(_items, key=itemgetter(0))
if active is None or not isinstance(comp, ActiveIndexedComponent):
for idx, compData in _items:
yield (name, idx), compData
else:
for idx, compData in _items:
if compData.active == active:
yield (name, idx), compData
|
https://github.com/Pyomo/pyomo/issues/1435
|
Traceback (most recent call last):
File "infinite_set_bug.py", line 8, in <module>
for comp in m.component_data_objects():
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 1415, in component_data_objects
sort=sort):
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 1339, in _component_data_iter
elif len(comp) or not hasattr(comp, '_data'):
File "/home/esjohn/src/pyomo/pyomo/core/base/set.py", line 2955, in __len__
"The length of a non-finite Set is Inf; however, Python "
OverflowError: The length of a non-finite Set is Inf; however, Python requires len() to return a non-negative integer value. Check isfinite() before calling len() for possibly infinite Sets
|
OverflowError
|
def _add_temporary_set(self, val):
"""TODO: This method has known issues (see tickets) and needs to be
reviewed. [JDS 9/2014]"""
_component_sets = getattr(val, "_implicit_subsets", None)
#
# FIXME: The name attribute should begin with "_", and None
# should replace "_unknown_"
#
if _component_sets is not None:
for ctr, tset in enumerate(_component_sets):
if tset.parent_component()._name == "_unknown_":
self._construct_temporary_set(
tset, val.local_name + "_index_" + str(ctr)
)
if (
isinstance(val._index, _SetDataBase)
and val._index.parent_component().local_name == "_unknown_"
):
self._construct_temporary_set(val._index, val.local_name + "_index")
if (
isinstance(getattr(val, "initialize", None), _SetDataBase)
and val.initialize.parent_component().local_name == "_unknown_"
):
self._construct_temporary_set(val.initialize, val.local_name + "_index_init")
if (
getattr(val, "domain", None) is not None
and getattr(val.domain, "local_name", None) == "_unknown_"
):
self._construct_temporary_set(val.domain, val.local_name + "_domain")
|
def _add_temporary_set(self, val):
"""TODO: This method has known issues (see tickets) and needs to be
reviewed. [JDS 9/2014]"""
_component_sets = getattr(val, "_implicit_subsets", None)
#
# FIXME: The name attribute should begin with "_", and None
# should replace "_unknown_"
#
if _component_sets is not None:
for ctr, tset in enumerate(_component_sets):
if tset._name == "_unknown_":
self._construct_temporary_set(
tset, val.local_name + "_index_" + str(ctr)
)
if (
isinstance(val._index, _SetDataBase)
and val._index.parent_component().local_name == "_unknown_"
):
self._construct_temporary_set(val._index, val.local_name + "_index")
if (
isinstance(getattr(val, "initialize", None), _SetDataBase)
and val.initialize.parent_component().local_name == "_unknown_"
):
self._construct_temporary_set(val.initialize, val.local_name + "_index_init")
if (
getattr(val, "domain", None) is not None
and getattr(val.domain, "local_name", None) == "_unknown_"
):
self._construct_temporary_set(val.domain, val.local_name + "_domain")
|
https://github.com/Pyomo/pyomo/issues/191
|
m.v_1 = Var(m.s['s1'], m.s2, initialize=10)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/blnicho/Research/pyomo/pyomo/core/base/var.py", line 471, in __init__
IndexedComponent.__init__(self, *args, **kwd)
File "/home/blnicho/Research/pyomo/pyomo/core/base/indexed_component.py", line 308, in __init__
self._index = tmp[0].cross(*tmp[1:])
AttributeError: '_IndexedSetData' object has no attribute 'cross'
m.v_1 = Var(m.s2, m.s['s1'], initialize=10)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 542, in __setattr__
self.add_component(name, val)
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 870, in add_component
self._add_temporary_set(val)
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 686, in _add_temporary_set
if tset._name == "_unknown_":
AttributeError: '_IndexedSetData' object has no attribute '_name'
|
AttributeError
|
def _transform_constraint(self, obj, disjunct, bigMargs, suffix_list):
# add constraint to the transformation block, we'll transform it there.
transBlock = disjunct._transformation_block()
bigm_src = transBlock.bigm_src
constraintMap = self._get_constraint_map_dict(transBlock)
disjunctionRelaxationBlock = transBlock.parent_block()
# Though rare, it is possible to get naming conflicts here
# since constraints from all blocks are getting moved onto the
# same block. So we get a unique name
cons_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
name = unique_component_name(transBlock, cons_name)
if obj.is_indexed():
try:
newConstraint = Constraint(obj.index_set(), disjunctionRelaxationBlock.lbub)
# HACK: We get burned by #191 here... When set rewrite is merged we
# can stop catching the AttributeError.
except (TypeError, AttributeError):
# The original constraint may have been indexed by a
# non-concrete set (like an Any). We will give up on
# strict index verification and just blindly proceed.
newConstraint = Constraint(Any)
else:
newConstraint = Constraint(disjunctionRelaxationBlock.lbub)
transBlock.add_component(name, newConstraint)
# add mapping of original constraint to transformed constraint
constraintMap["srcConstraints"][newConstraint] = obj
constraintMap["transformedConstraints"][obj] = newConstraint
for i in sorted(iterkeys(obj)):
c = obj[i]
if not c.active:
continue
# first, we see if an M value was specified in the arguments.
# (This returns None if not)
M = self._get_M_from_args(c, bigMargs, bigm_src)
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"from the BigM argument is %s." % (cons_name, str(M))
)
# if we didn't get something from args, try suffixes:
if M is None:
M = self._get_M_from_suffixes(c, suffix_list, bigm_src)
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"after checking suffixes is %s." % (cons_name, str(M))
)
if not isinstance(M, (tuple, list)):
if M is None:
M = (None, None)
else:
try:
M = (-M, M)
except:
logger.error(
"Error converting scalar M-value %s "
"to (-M,M). Is %s not a numeric type?" % (M, type(M))
)
raise
if len(M) != 2:
raise GDP_Error(
"Big-M %s for constraint %s is not of "
"length two. "
"Expected either a single value or "
"tuple or list of length two for M." % (str(M), name)
)
if c.lower is not None and M[0] is None:
M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
bigm_src[c] = M
if c.upper is not None and M[1] is None:
M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)
bigm_src[c] = M
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"after estimating (if needed) is %s." % (cons_name, str(M))
)
# Handle indices for both SimpleConstraint and IndexedConstraint
if i.__class__ is tuple:
i_lb = i + ("lb",)
i_ub = i + ("ub",)
elif obj.is_indexed():
i_lb = (
i,
"lb",
)
i_ub = (
i,
"ub",
)
else:
i_lb = "lb"
i_ub = "ub"
if c.lower is not None:
if M[0] is None:
raise GDP_Error(
"Cannot relax disjunctive constraint %s "
"because M is not defined." % name
)
M_expr = M[0] * (1 - disjunct.indicator_var)
newConstraint.add(i_lb, c.lower <= c.body - M_expr)
if c.upper is not None:
if M[1] is None:
raise GDP_Error(
"Cannot relax disjunctive constraint %s "
"because M is not defined." % name
)
M_expr = M[1] * (1 - disjunct.indicator_var)
newConstraint.add(i_ub, c.body - M_expr <= c.upper)
# deactivate because we relaxed
c.deactivate()
|
def _transform_constraint(self, obj, disjunct, bigMargs, suffix_list):
# add constraint to the transformation block, we'll transform it there.
transBlock = disjunct._transformation_block()
bigm_src = transBlock.bigm_src
constraintMap = self._get_constraint_map_dict(transBlock)
disjunctionRelaxationBlock = transBlock.parent_block()
# Though rare, it is possible to get naming conflicts here
# since constraints from all blocks are getting moved onto the
# same block. So we get a unique name
cons_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
name = unique_component_name(transBlock, cons_name)
if obj.is_indexed():
try:
newConstraint = Constraint(obj.index_set(), disjunctionRelaxationBlock.lbub)
except TypeError:
# The original constraint may have been indexed by a
# non-concrete set (like an Any). We will give up on
# strict index verification and just blindly proceed.
newConstraint = Constraint(Any)
else:
newConstraint = Constraint(disjunctionRelaxationBlock.lbub)
transBlock.add_component(name, newConstraint)
# add mapping of original constraint to transformed constraint
constraintMap["srcConstraints"][newConstraint] = obj
constraintMap["transformedConstraints"][obj] = newConstraint
for i in sorted(iterkeys(obj)):
c = obj[i]
if not c.active:
continue
# first, we see if an M value was specified in the arguments.
# (This returns None if not)
M = self._get_M_from_args(c, bigMargs, bigm_src)
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"from the BigM argument is %s." % (cons_name, str(M))
)
# if we didn't get something from args, try suffixes:
if M is None:
M = self._get_M_from_suffixes(c, suffix_list, bigm_src)
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"after checking suffixes is %s." % (cons_name, str(M))
)
if not isinstance(M, (tuple, list)):
if M is None:
M = (None, None)
else:
try:
M = (-M, M)
except:
logger.error(
"Error converting scalar M-value %s "
"to (-M,M). Is %s not a numeric type?" % (M, type(M))
)
raise
if len(M) != 2:
raise GDP_Error(
"Big-M %s for constraint %s is not of "
"length two. "
"Expected either a single value or "
"tuple or list of length two for M." % (str(M), name)
)
if c.lower is not None and M[0] is None:
M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
bigm_src[c] = M
if c.upper is not None and M[1] is None:
M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)
bigm_src[c] = M
if __debug__ and logger.isEnabledFor(logging.DEBUG):
_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
logger.debug(
"GDP(BigM): The value for M for constraint %s "
"after estimating (if needed) is %s." % (cons_name, str(M))
)
# Handle indices for both SimpleConstraint and IndexedConstraint
if i.__class__ is tuple:
i_lb = i + ("lb",)
i_ub = i + ("ub",)
elif obj.is_indexed():
i_lb = (
i,
"lb",
)
i_ub = (
i,
"ub",
)
else:
i_lb = "lb"
i_ub = "ub"
if c.lower is not None:
if M[0] is None:
raise GDP_Error(
"Cannot relax disjunctive constraint %s "
"because M is not defined." % name
)
M_expr = M[0] * (1 - disjunct.indicator_var)
newConstraint.add(i_lb, c.lower <= c.body - M_expr)
if c.upper is not None:
if M[1] is None:
raise GDP_Error(
"Cannot relax disjunctive constraint %s "
"because M is not defined." % name
)
M_expr = M[1] * (1 - disjunct.indicator_var)
newConstraint.add(i_ub, c.body - M_expr <= c.upper)
# deactivate because we relaxed
c.deactivate()
|
https://github.com/Pyomo/pyomo/issues/191
|
m.v_1 = Var(m.s['s1'], m.s2, initialize=10)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/blnicho/Research/pyomo/pyomo/core/base/var.py", line 471, in __init__
IndexedComponent.__init__(self, *args, **kwd)
File "/home/blnicho/Research/pyomo/pyomo/core/base/indexed_component.py", line 308, in __init__
self._index = tmp[0].cross(*tmp[1:])
AttributeError: '_IndexedSetData' object has no attribute 'cross'
m.v_1 = Var(m.s2, m.s['s1'], initialize=10)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 542, in __setattr__
self.add_component(name, val)
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 870, in add_component
self._add_temporary_set(val)
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 686, in _add_temporary_set
if tset._name == "_unknown_":
AttributeError: '_IndexedSetData' object has no attribute '_name'
|
AttributeError
|
def __call__(self, parent, index):
_val = self._init(parent, index)
if self._dimen in {1, None, UnknownSetDimen}:
return _val
elif _val is Set.Skip:
return _val
elif not _val:
return _val
if not isinstance(_val, collections_Sequence):
_val = tuple(_val)
if len(_val) == 0:
return _val
if isinstance(_val[0], tuple):
return _val
return self._tuplize(_val, parent, index)
|
def __call__(self, parent, index):
_val = self._init(parent, index)
if self._dimen in {1, None, UnknownSetDimen}:
return _val
elif _val is Set.Skip:
return _val
elif not _val:
return _val
if not isinstance(_val, collections_Sequence):
_val = tuple(_val)
if isinstance(_val[0], tuple):
return _val
return self._tuplize(_val, parent, index)
|
https://github.com/Pyomo/pyomo/issues/1375
|
a : Size=1, Index=None, Ordered=Insertion
Key : Dimen : Domain : Size : Members
None : 1 : Any : 0 : {}
ERROR: Constructing component 'b' from data=None failed: IndexError: tuple
index out of range
Traceback (most recent call last):
File "blah.py", line 18, in <module>
m.b = pe.Set(initialize=b_rule, dimen=2)
File "/.../pyomo/pyomo/core/base/block.py", line 543, in __setattr__
self.add_component(name, val)
File "/.../pyomo/pyomo/core/base/block.py", line 1079, in add_component
val.construct(data)
File "/.../pyomo/pyomo/core/base/util.py", line 125, in construct
return base.construct(self, data)
File "/.../pyomo/pyomo/core/base/set.py", line 1976, in construct
self._getitem_when_not_present(index)
File "/.../pyomo/pyomo/core/base/set.py", line 2019, in _getitem_when_not_present
_values = self._init_values(_block, index)
File "/.../pyomo/pyomo/core/base/set.py", line 419, in __call__
if isinstance(_val[0], tuple):
IndexError: tuple index out of range
|
IndexError
|
def set_value(self, val):
raise RuntimeError(
textwrap.dedent(
"""\
Block components do not support assignment or set_value().
Use the transfer_attributes_from() method to transfer the
components and public attributes from one block to another:
model.b[1].transfer_attributes_from(other_block)
"""
)
)
|
def set_value(self, val):
for k in list(getattr(self, "_decl", {})):
self.del_component(k)
self._ctypes = {}
self._decl = {}
self._decl_order = []
if val:
for k in sorted(iterkeys(val)):
self.add_component(k, val[k])
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def add_component(self, name, val):
"""
Add a component 'name' to the block.
This method assumes that the attribute is not in the model.
"""
#
# Error checks
#
if not val.valid_model_component():
raise RuntimeError("Cannot add '%s' as a component to a block" % str(type(val)))
if name in self._Block_reserved_words and hasattr(self, name):
raise ValueError(
"Attempting to declare a block component using "
"the name of a reserved attribute:\n\t%s" % (name,)
)
if name in self.__dict__:
raise RuntimeError(
"Cannot add component '%s' (type %s) to block '%s': a "
"component by that name (type %s) is already defined."
% (name, type(val), self.name, type(getattr(self, name)))
)
#
# Skip the add_component() logic if this is a
# component type that is suppressed.
#
_component = self.parent_component()
_type = val.type()
if _type in _component._suppress_ctypes:
return
#
# Raise an exception if the component already has a parent.
#
if (val._parent is not None) and (val._parent() is not None):
if val._parent() is self:
msg = """
Attempting to re-assign the component '%s' to the same
block under a different name (%s).""" % (val.name, name)
else:
msg = """
Re-assigning the component '%s' from block '%s' to
block '%s' as '%s'.""" % (val._name, val._parent().name, self.name, name)
raise RuntimeError(
"""%s
This behavior is not supported by Pyomo; components must have a
single owning block (or model), and a component may not appear
multiple times in a block. If you want to re-name or move this
component, use the block del_component() and add_component() methods.
"""
% (msg.strip(),)
)
#
# If the new component is a Block, then there is the chance that
# it is the model(), and assigning it would create a circular
# hierarchy. Note that we only have to check the model as the
# check immediately above would catch any "internal" blocks in
# the block hierarchy
#
if isinstance(val, Block) and val is self.model():
raise ValueError(
"Cannot assign the top-level block as a subblock of one of "
"its children (%s): creates a circular hierarchy" % (self,)
)
#
# Set the name and parent pointer of this component.
#
val._name = name
val._parent = weakref.ref(self)
#
# We want to add the temporary / implicit sets first so that
# they get constructed before this component
#
# FIXME: This is sloppy and wasteful (most components trigger
# this, even when there is no need for it). We should
# reconsider the whole _implicit_subsets logic to defer this
# kind of thing to an "update_parent()" method on the
# components.
#
if hasattr(val, "_index"):
self._add_temporary_set(val)
#
# Add the component to the underlying Component store
#
_new_idx = len(self._decl_order)
self._decl[name] = _new_idx
self._decl_order.append((val, None))
#
# Add the component as an attribute. Note that
#
# self.__dict__[name]=val
#
# is inappropriate here. The correct way to add the attribute
# is to delegate the work to the next class up the MRO.
#
super(_BlockData, self).__setattr__(name, val)
#
# Update the ctype linked lists
#
if _type in self._ctypes:
idx_info = self._ctypes[_type]
tmp = idx_info[1]
self._decl_order[tmp] = (self._decl_order[tmp][0], _new_idx)
idx_info[1] = _new_idx
idx_info[2] += 1
else:
self._ctypes[_type] = [_new_idx, _new_idx, 1]
#
# Propagate properties to sub-blocks:
# suppressed ctypes
#
if _type is Block:
val._suppress_ctypes |= _component._suppress_ctypes
#
# Error, for disabled support implicit rule names
#
if "_rule" in val.__dict__ and val._rule is None:
_found = False
try:
_test = val.local_name + "_rule"
for i in (1, 2):
frame = sys._getframe(i)
_found |= _test in frame.f_locals
except:
pass
if _found:
# JDS: Do not blindly reformat this message. The
# formatter inserts arbitrarily-long names(), which can
# cause the resulting logged message to be very poorly
# formatted due to long lines.
logger.warning(
"""As of Pyomo 4.0, Pyomo components no longer support implicit rules.
You defined a component (%s) that appears
to rely on an implicit rule (%s).
Components must now specify their rules explicitly using 'rule=' keywords."""
% (val.name, _test)
)
#
# Don't reconstruct if this component has already been constructed.
# This allows a user to move a component from one block to
# another.
#
if val._constructed is True:
return
#
# If the block is Concrete, construct the component
# Note: we are explicitly using getattr because (Scalar)
# classes that derive from Block may want to declare components
# within their __init__() [notably, pyomo.gdp's Disjunct).
# Those components are added *before* the _constructed flag is
# added to the class by Block.__init__()
#
if getattr(_component, "_constructed", False):
# NB: we don't have to construct the temporary / implicit
# sets here: if necessary, that happens when
# _add_temporary_set() calls add_component().
if id(self) in _BlockConstruction.data:
data = _BlockConstruction.data[id(self)].get(name, None)
else:
data = None
if __debug__ and logger.isEnabledFor(logging.DEBUG):
# This is tricky: If we are in the middle of
# constructing an indexed block, the block component
# already has _constructed=True. Now, if the
# _BlockData.__init__() defines any local variables
# (like pyomo.gdp.Disjunct's indicator_var), name(True)
# will fail: this block data exists and has a parent(),
# but it has not yet been added to the parent's _data
# (so the idx lookup will fail in name).
if self.parent_block() is None:
_blockName = "[Model]"
else:
try:
_blockName = "Block '%s'" % self.name
except:
_blockName = "Block '%s[...]'" % self.parent_component().name
logger.debug(
"Constructing %s '%s' on %s from data=%s",
val.__class__.__name__,
val.name,
_blockName,
str(data),
)
try:
val.construct(data)
except:
err = sys.exc_info()[1]
logger.error(
"Constructing component '%s' from data=%s failed:\n%s: %s",
str(val.name),
str(data).strip(),
type(err).__name__,
err,
)
raise
if __debug__ and logger.isEnabledFor(logging.DEBUG):
if _blockName[-1] == "'":
_blockName = _blockName[:-1] + "." + val.name + "'"
else:
_blockName = "'" + _blockName + "." + val.name + "'"
_out = StringIO()
val.pprint(ostream=_out)
logger.debug(
"Constructed component '%s':\n%s" % (_blockName, _out.getvalue())
)
|
def add_component(self, name, val):
"""
Add a component 'name' to the block.
This method assumes that the attribute is not in the model.
"""
#
# Error checks
#
if not val.valid_model_component():
raise RuntimeError("Cannot add '%s' as a component to a block" % str(type(val)))
if name in self._Block_reserved_words:
raise ValueError(
"Attempting to declare a block component using "
"the name of a reserved attribute:\n\t%s" % (name,)
)
if name in self.__dict__:
raise RuntimeError(
"Cannot add component '%s' (type %s) to block '%s': a "
"component by that name (type %s) is already defined."
% (name, type(val), self.name, type(getattr(self, name)))
)
#
# Skip the add_component() logic if this is a
# component type that is suppressed.
#
_component = self.parent_component()
_type = val.type()
if _type in _component._suppress_ctypes:
return
#
# Raise an exception if the component already has a parent.
#
if (val._parent is not None) and (val._parent() is not None):
if val._parent() is self:
msg = """
Attempting to re-assign the component '%s' to the same
block under a different name (%s).""" % (val.name, name)
else:
msg = """
Re-assigning the component '%s' from block '%s' to
block '%s' as '%s'.""" % (val._name, val._parent().name, self.name, name)
raise RuntimeError(
"""%s
This behavior is not supported by Pyomo; components must have a
single owning block (or model), and a component may not appear
multiple times in a block. If you want to re-name or move this
component, use the block del_component() and add_component() methods.
"""
% (msg.strip(),)
)
#
# Set the name and parent pointer of this component.
#
val._name = name
val._parent = weakref.ref(self)
#
# We want to add the temporary / implicit sets first so that
# they get constructed before this component
#
# FIXME: This is sloppy and wasteful (most components trigger
# this, even when there is no need for it). We should
# reconsider the whole _implicit_subsets logic to defer this
# kind of thing to an "update_parent()" method on the
# components.
#
if hasattr(val, "_index"):
self._add_temporary_set(val)
#
# Add the component to the underlying Component store
#
_new_idx = len(self._decl_order)
self._decl[name] = _new_idx
self._decl_order.append((val, None))
#
# Add the component as an attribute. Note that
#
# self.__dict__[name]=val
#
# is inappropriate here. The correct way to add the attribute
# is to delegate the work to the next class up the MRO.
#
super(_BlockData, self).__setattr__(name, val)
#
# Update the ctype linked lists
#
if _type in self._ctypes:
idx_info = self._ctypes[_type]
tmp = idx_info[1]
self._decl_order[tmp] = (self._decl_order[tmp][0], _new_idx)
idx_info[1] = _new_idx
idx_info[2] += 1
else:
self._ctypes[_type] = [_new_idx, _new_idx, 1]
#
# Propagate properties to sub-blocks:
# suppressed ctypes
#
if _type is Block:
val._suppress_ctypes |= _component._suppress_ctypes
#
# Error, for disabled support implicit rule names
#
if "_rule" in val.__dict__ and val._rule is None:
_found = False
try:
_test = val.local_name + "_rule"
for i in (1, 2):
frame = sys._getframe(i)
_found |= _test in frame.f_locals
except:
pass
if _found:
# JDS: Do not blindly reformat this message. The
# formatter inserts arbitrarily-long names(), which can
# cause the resulting logged message to be very poorly
# formatted due to long lines.
logger.warning(
"""As of Pyomo 4.0, Pyomo components no longer support implicit rules.
You defined a component (%s) that appears
to rely on an implicit rule (%s).
Components must now specify their rules explicitly using 'rule=' keywords."""
% (val.name, _test)
)
#
# Don't reconstruct if this component has already been constructed.
# This allows a user to move a component from one block to
# another.
#
if val._constructed is True:
return
#
# If the block is Concrete, construct the component
# Note: we are explicitly using getattr because (Scalar)
# classes that derive from Block may want to declare components
# within their __init__() [notably, pyomo.gdp's Disjunct).
# Those components are added *before* the _constructed flag is
# added to the class by Block.__init__()
#
if getattr(_component, "_constructed", False):
# NB: we don't have to construct the temporary / implicit
# sets here: if necessary, that happens when
# _add_temporary_set() calls add_component().
if id(self) in _BlockConstruction.data:
data = _BlockConstruction.data[id(self)].get(name, None)
else:
data = None
if __debug__ and logger.isEnabledFor(logging.DEBUG):
# This is tricky: If we are in the middle of
# constructing an indexed block, the block component
# already has _constructed=True. Now, if the
# _BlockData.__init__() defines any local variables
# (like pyomo.gdp.Disjunct's indicator_var), name(True)
# will fail: this block data exists and has a parent(),
# but it has not yet been added to the parent's _data
# (so the idx lookup will fail in name).
if self.parent_block() is None:
_blockName = "[Model]"
else:
try:
_blockName = "Block '%s'" % self.name
except:
_blockName = "Block '%s[...]'" % self.parent_component().name
logger.debug(
"Constructing %s '%s' on %s from data=%s",
val.__class__.__name__,
val.name,
_blockName,
str(data),
)
try:
val.construct(data)
except:
err = sys.exc_info()[1]
logger.error(
"Constructing component '%s' from data=%s failed:\n%s: %s",
str(val.name),
str(data).strip(),
type(err).__name__,
err,
)
raise
if __debug__ and logger.isEnabledFor(logging.DEBUG):
if _blockName[-1] == "'":
_blockName = _blockName[:-1] + "." + val.name + "'"
else:
_blockName = "'" + _blockName + "." + val.name + "'"
_out = StringIO()
val.pprint(ostream=_out)
logger.debug(
"Constructed component '%s':\n%s" % (_blockName, _out.getvalue())
)
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def _getitem_when_not_present(self, idx):
return self._setitem_when_not_present(idx)
|
def _getitem_when_not_present(self, idx):
return self._setitem_when_not_present(idx, None)
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def construct(self, data=None):
"""
Initialize the block
"""
if __debug__ and logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Constructing %s '%s', from data=%s",
self.__class__.__name__,
self.name,
str(data),
)
if self._constructed:
return
timer = ConstructionTimer(self)
self._constructed = True
# We must check that any pre-existing components are
# constructed. This catches the case where someone is building
# a Concrete model by building (potentially pseudo-abstract)
# sub-blocks and then adding them to a Concrete model block.
for idx in self._data:
_block = self[idx]
for name, obj in iteritems(_block.component_map()):
if not obj._constructed:
if data is None:
_data = None
else:
_data = data.get(name, None)
obj.construct(_data)
if self._rule is None:
# Ensure the _data dictionary is populated for singleton
# blocks
if not self.is_indexed():
self[None]
timer.report()
return
# If we have a rule, fire the rule for all indices.
# Notes:
# - Since this block is now concrete, any components added to
# it will be immediately constructed by
# block.add_component().
# - Since the rule does not pass any "data" on, we build a
# scalar "stack" of pointers to block data
# (_BlockConstruction.data) that the individual blocks'
# add_component() can refer back to to handle component
# construction.
for idx in self._index:
_block = self[idx]
if data is not None and idx in data:
_BlockConstruction.data[id(_block)] = data[idx]
obj = apply_indexed_rule(self, self._rule, _block, idx, self._options)
if id(_block) in _BlockConstruction.data:
del _BlockConstruction.data[id(_block)]
if obj is not _block and isinstance(obj, _BlockData):
# If the user returns a block, transfer over everything
# they defined into the empty one we created.
_block.transfer_attributes_from(obj)
# TBD: Should we allow skipping Blocks???
# if obj is Block.Skip and idx is not None:
# del self._data[idx]
timer.report()
|
def construct(self, data=None):
"""
Initialize the block
"""
if __debug__ and logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Constructing %s '%s', from data=%s",
self.__class__.__name__,
self.name,
str(data),
)
if self._constructed:
return
timer = ConstructionTimer(self)
self._constructed = True
# We must check that any pre-existing components are
# constructed. This catches the case where someone is building
# a Concrete model by building (potentially pseudo-abstract)
# sub-blocks and then adding them to a Concrete model block.
for idx in self._data:
_block = self[idx]
for name, obj in iteritems(_block.component_map()):
if not obj._constructed:
if data is None:
_data = None
else:
_data = data.get(name, None)
obj.construct(_data)
if self._rule is None:
# Ensure the _data dictionary is populated for singleton
# blocks
if not self.is_indexed():
self[None]
timer.report()
return
# If we have a rule, fire the rule for all indices.
# Notes:
# - Since this block is now concrete, any components added to
# it will be immediately constructed by
# block.add_component().
# - Since the rule does not pass any "data" on, we build a
# scalar "stack" of pointers to block data
# (_BlockConstruction.data) that the individual blocks'
# add_component() can refer back to to handle component
# construction.
for idx in self._index:
_block = self[idx]
if data is not None and idx in data:
_BlockConstruction.data[id(_block)] = data[idx]
obj = apply_indexed_rule(self, self._rule, _block, idx, self._options)
if id(_block) in _BlockConstruction.data:
del _BlockConstruction.data[id(_block)]
if isinstance(obj, _BlockData) and obj is not _block:
# If the user returns a block, use their block instead
# of the empty one we just created.
for c in list(obj.component_objects(descend_into=False)):
obj.del_component(c)
_block.add_component(c.local_name, c)
# transfer over any other attributes that are not components
for name, val in iteritems(obj.__dict__):
if not hasattr(_block, name) and not hasattr(self, name):
super(_BlockData, _block).__setattr__(name, val)
# TBD: Should we allow skipping Blocks???
# if obj is Block.Skip and idx is not None:
# del self._data[idx]
timer.report()
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def _setitem_when_not_present(self, index, value=_NotSpecified):
"""Perform the fundamental component item creation and storage.
Components that want to implement a nonstandard storage mechanism
should override this method.
Implementations may assume that the index has already been
validated and is a legitimate entry in the _data dict.
"""
#
# If we are a scalar, then idx will be None (_validate_index ensures
# this)
if index is None and not self.is_indexed():
obj = self._data[index] = self
else:
obj = self._data[index] = self._ComponentDataClass(component=self)
try:
if value is not _NotSpecified:
obj.set_value(value)
except:
del self._data[index]
raise
return obj
|
def _setitem_when_not_present(self, index, value):
"""Perform the fundamental component item creation and storage.
Components that want to implement a nonstandard storage mechanism
should override this method.
Implementations may assume that the index has already been
validated and is a legitimate entry in the _data dict.
"""
#
# If we are a scalar, then idx will be None (_validate_index ensures
# this)
if index is None and not self.is_indexed():
obj = self._data[index] = self
else:
obj = self._data[index] = self._ComponentDataClass(component=self)
try:
obj.set_value(value)
return obj
except:
del self._data[index]
raise
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def set_value(self, val, guarantee_components=set()):
# Copy over everything from the other block. If the other
# block has an indicator_var, it should override this block's.
# Otherwise restore this block's indicator_var.
guarantee_components.add("indicator_var")
super(_DisjunctData, self).set_value(val, guarantee_components)
|
def set_value(self, val):
_indicator_var = self.indicator_var
# Remove everything
for k in list(getattr(self, "_decl", {})):
self.del_component(k)
self._ctypes = {}
self._decl = {}
self._decl_order = []
# Now copy over everything from the other block. If the other
# block has an indicator_var, it should override this block's.
# Otherwise restore this block's indicator_var.
if val:
if "indicator_var" not in val:
self.add_component("indicator_var", _indicator_var)
for k in sorted(iterkeys(val)):
self.add_component(k, val[k])
else:
self.add_component("indicator_var", _indicator_var)
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def _transform_block_components(self, block, disjunct, bigM, suffix_list):
# We first need to find any transformed disjunctions that might be here
# because we need to move their transformation blocks up onto the parent
# block before we transform anything else on this block
destinationBlock = disjunct._transformation_block().parent_block()
for obj in block.component_data_objects(
Disjunction, sort=SortComponents.deterministic, descend_into=(Block)
):
if not obj.algebraic_constraint:
# This could be bad if it's active, but we'll wait to yell
# until the next loop
continue
disjParentBlock = disjunct.parent_block()
# get this disjunction's relaxation block.
transBlock = None
for d in obj.disjuncts:
if d._transformation_block:
transBlock = d._transformation_block().parent_block()
if transBlock is None:
raise GDP_Error(
"Found transformed disjunction %s on disjunt %s, "
"but none of its disjuncts have been transformed. "
"This is very strange if not impossible" % (obj.name, disjunct.name)
)
# move transBlock up to parent component
transBlock.parent_block().del_component(transBlock)
self._transfer_transBlock_data(transBlock, destinationBlock)
# Now look through the component map of block and transform
# everything we have a handler for. Yell if we don't know how
# to handle it.
for name, obj in list(iteritems(block.component_map())):
if hasattr(obj, "active") and not obj.active:
continue
handler = self.handlers.get(obj.type(), None)
if not handler:
if handler is None:
raise GDP_Error(
"No BigM transformation handler registered "
"for modeling components of type %s. If your "
"disjuncts contain non-GDP Pyomo components that "
"require transformation, please transform them first." % obj.type()
)
continue
# obj is what we are transforming, we pass disjunct
# through so that we will have access to the indicator
# variables down the line.
handler(obj, disjunct, bigM, suffix_list)
|
def _transform_block_components(self, block, disjunct, bigM, suffix_list):
# We first need to find any transformed disjunctions that might be here
# because we need to move their transformation blocks up onto the parent
# block before we transform anything else on this block
destinationBlock = disjunct._transformation_block().parent_block()
for obj in block.component_data_objects(
Disjunction, sort=SortComponents.deterministic, descend_into=(Block)
):
print(obj)
if not obj.algebraic_constraint:
# This could be bad if it's active, but we'll wait to yell
# until the next loop
continue
disjParentBlock = disjunct.parent_block()
# get this disjunction's relaxation block.
transBlock = None
for d in obj.disjuncts:
if d._transformation_block:
transBlock = d._transformation_block().parent_block()
if transBlock is None:
raise GDP_Error(
"Found transformed disjunction %s on disjunt %s, "
"but none of its disjuncts have been transformed. "
"This is very strange if not impossible" % (obj.name, disjunct.name)
)
# move transBlock up to parent component
transBlock.parent_block().del_component(transBlock)
self._transfer_transBlock_data(transBlock, destinationBlock)
# Now look through the component map of block and transform
# everything we have a handler for. Yell if we don't know how
# to handle it.
for name, obj in list(iteritems(block.component_map())):
if hasattr(obj, "active") and not obj.active:
continue
handler = self.handlers.get(obj.type(), None)
if not handler:
if handler is None:
raise GDP_Error(
"No BigM transformation handler registered "
"for modeling components of type %s. If your "
"disjuncts contain non-GDP Pyomo components that "
"require transformation, please transform them first." % obj.type()
)
continue
# obj is what we are transforming, we pass disjunct
# through so that we will have access to the indicator
# variables down the line.
handler(obj, disjunct, bigM, suffix_list)
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def _transfer_transBlock_data(self, fromBlock, toBlock):
# We know that we have a list of transformed disjuncts on both. We need
# to move those over. Then there might be constraints on the block also
# (at this point only the diaggregation constraints from chull,
# but... I'll leave it general for now.
disjunctList = toBlock.relaxedDisjuncts
for idx, disjunctBlock in iteritems(fromBlock.relaxedDisjuncts):
# I think this should work when #1106 is resolved:
# disjunctList[len(disjunctList)] = disjunctBlock
# newblock = disjunctList[len(disjunctList)-1]
# HACK in the meantime:
newblock = disjunctList[len(disjunctList)]
self._copy_to_block(disjunctBlock, newblock)
# update the mappings
original = disjunctBlock._srcDisjunct()
original._transformation_block = weakref_ref(newblock)
newblock._srcDisjunct = weakref_ref(original)
# move any constraints. I'm assuming they are all just on the
# transformation block right now, because that is in our control and I
# can't think why we would do anything messier at the moment. (And I
# don't want to descend into Blocks because we already handled the
# above).
for cons in fromBlock.component_data_objects(Constraint):
toBlock.add_component(unique_component_name(cons.name, toBlock), cons)
|
def _transfer_transBlock_data(self, fromBlock, toBlock):
# We know that we have a list of transformed disjuncts on both. We need
# to move those over. Then there might be constraints on the block also
# (at this point only the diaggregation constraints from chull,
# but... I'll leave it general for now.
disjunctList = toBlock.relaxedDisjuncts
for idx, disjunctBlock in iteritems(fromBlock.relaxedDisjuncts):
# TODO [ESJ 07/18/2019] John! I thought you said something like this
# would work?
# newblock = disjunctList[len(disjunctList)] = disjunctBlock
# Try:
# disjunctList[len(disjunctList)] = disjunctBlock
# newblock = disjunctList[len(disjunctList)-1]
# I'm just hacking for now because I am confused:
newblock = disjunctList[len(disjunctList)]
self._copy_to_block(disjunctBlock, newblock)
# update the mappings
original = disjunctBlock._srcDisjunct()
original._transformation_block = weakref_ref(newblock)
newblock._srcDisjunct = weakref_ref(original)
# move any constraints. I'm assuming they are all just on the
# transformation block right now, because that is in our control and I
# can't think why we would do anything messier at the moment. (And I
# don't want to descend into Blocks because we already handled the
# above).
for cons in fromBlock.component_data_objects(Constraint):
toBlock.add_component(unique_component_name(cons.name, toBlock), cons)
|
https://github.com/Pyomo/pyomo/issues/1106
|
Traceback (most recent call last):
File "blockSetValue.py", line 11, in <module>
m.block_list[1] = movingBlock
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 412, in __setitem__
return self._setitem_when_not_present(index, val)
File "/home/esjohn/src/pyomo/pyomo/core/base/indexed_component.py", line 675, in _setitem_when_not_present
obj.set_value(value)
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 711, in set_value
self.add_component(k,val[k])
File "/home/esjohn/src/pyomo/pyomo/core/base/block.py", line 923, in add_component
super(_BlockData, self).__setattr__(name, val)
TypeError: attribute name must be string, not 'NoneType'
|
TypeError
|
def __call__(self, *idx, **kwds):
"""Special handling of the "()" operator for component slices.
Creating a slice of a component returns a _IndexedComponent_slice
object. Subsequent attempts to call items hit this method. We
handle the __call__ method separately based on the item (identifier
immediately before the "()") being called:
- if the item was 'component', then we defer resolution of this call
until we are actually iterating over the slice. This allows users
to do operations like `m.b[:].component('foo').bar[:]`
- if the item is anything else, then we will immediately iterate over
the slice and call the item. This allows "vector-like" operations
like: `m.x[:,1].fix(0)`.
"""
# There is a weird case in pypy3.6-7.2.0 where __name__ gets
# called after retrieving an attribute that will be called. I
# don't know why that happens, but we will trap it here and
# remove the getattr(__name__) from the call stack.
if (
self._call_stack[-1][0] == _IndexedComponent_slice.get_attribute
and self._call_stack[-1][1] == "__name__"
):
self._call_stack.pop()
self._call_stack.append((_IndexedComponent_slice.call, idx, kwds))
if self._call_stack[-2][1] == "component":
return self
else:
# Note: simply calling "list(self)" results in infinite
# recursion in python2.6
return list(i for i in self)
|
def __call__(self, *idx, **kwds):
"""Special handling of the "()" operator for component slices.
Creating a slice of a component returns a _IndexedComponent_slice
object. Subsequent attempts to call items hit this method. We
handle the __call__ method separately based on the item (identifier
immediately before the "()") being called:
- if the item was 'component', then we defer resolution of this call
until we are actually iterating over the slice. This allows users
to do operations like `m.b[:].component('foo').bar[:]`
- if the item is anything else, then we will immediately iterate over
the slice and call the item. This allows "vector-like" operations
like: `m.x[:,1].fix(0)`.
"""
self._call_stack.append((_IndexedComponent_slice.call, idx, kwds))
if self._call_stack[-2][1] == "component":
return self
else:
# Note: simply calling "list(self)" results in infinite
# recursion in python2.6
return list(i for i in self)
|
https://github.com/Pyomo/pyomo/issues/1138
|
======================================================================
ERROR: test_bundles (pyomo.pysp.tests.unit.test_scenariotree.TestScenarioTreeFromNetworkX)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/tests/unit/test_scenariotree.py", line 745, in test_bundles
edge_probability_attribute=None)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 313, in ScenarioTreeModelFromNetworkX
networkx.dfs_successors(tree, root))
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 296, in _setup
_setup(v, succ)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 311, in _setup
tree.node[u].get('bundle', None)
AttributeError: 'DiGraph' object has no attribute 'node'
|
AttributeError
|
def _disable_method(fcn, msg=None):
if msg is None:
msg = "access %s on" % (fcn.__name__,)
def impl(self, *args, **kwds):
raise RuntimeError(
"Cannot %s %s '%s' before it has been constructed (initialized)."
% (msg, type(self).__name__, self.name)
)
# functools.wraps doesn't preserve the function signature until
# Python 3.4. For backwards compatability with Python 2.x, we will
# create a temporary (lambda) function using eval that matches the
# function signature passed in and calls the generic impl() function
args = inspect.formatargspec(*getargspec(fcn))
impl_args = eval("lambda %s: impl%s" % (args[1:-1], args), {"impl": impl})
return functools.wraps(fcn)(impl_args)
|
def _disable_method(fcn, msg=None):
if msg is None:
msg = "access %s on" % (fcn.__name__,)
def impl(self, *args, **kwds):
raise RuntimeError(
"Cannot %s %s '%s' before it has been constructed (initialized)."
% (msg, type(self).__name__, self.name)
)
# functools.wraps doesn't preserve the function signature until
# Python 3.4. For backwards compatability with Python 2.x, we will
# create a temporary (lambda) function using eval that matches the
# function signature passed in and calls the generic impl() function
args = inspect.formatargspec(*inspect.getargspec(fcn))
impl_args = eval("lambda %s: impl%s" % (args[1:-1], args), {"impl": impl})
return functools.wraps(fcn)(impl_args)
|
https://github.com/Pyomo/pyomo/issues/1138
|
======================================================================
ERROR: test_bundles (pyomo.pysp.tests.unit.test_scenariotree.TestScenarioTreeFromNetworkX)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/tests/unit/test_scenariotree.py", line 745, in test_bundles
edge_probability_attribute=None)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 313, in ScenarioTreeModelFromNetworkX
networkx.dfs_successors(tree, root))
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 296, in _setup
_setup(v, succ)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 311, in _setup
tree.node[u].get('bundle', None)
AttributeError: 'DiGraph' object has no attribute 'node'
|
AttributeError
|
def ScenarioTreeModelFromNetworkX(
tree,
node_name_attribute=None,
edge_probability_attribute="weight",
stage_names=None,
scenario_name_attribute=None,
):
"""
Create a scenario tree model from a networkx tree. The
height of the tree must be at least 1 (meaning at least
2 stages).
Required node attributes:
- cost (str): A string identifying a component on
the model whose value indicates the cost at
the time stage of the node for any scenario
traveling through it.
Optional node attributes:
- variables (list): A list of variable identifiers
that will be tracked by the node. If the node
is not a leaf node, these indicate variables
with non-anticipativity constraints.
- derived_variables (list): A list of variable or
expression identifiers that will be tracked by
the node (but will never have
non-anticipativity constraints enforced).
- bundle: A bundle identifier for the scenario
defined by a leaf-stage node. This attribute
is ignored on non-terminal tree nodes. This
attribute appears on at least one leaf-stage
node (and is not set to :const:`None`), then
it must be set on all leaf-stage nodes (to
something other than :const:`None`);
otherwise, an exception will be raised.
Optional edge attributes:
- weight (float): Indicates the conditional
probability of moving from the parent node to
the child node in the directed edge. If not
present, it will be assumed that all edges
leaving the parent node have equal probability
(normalized to sum to one).
Args:
stage_names: Can define a list of stage names to use
(assumed in time order). The length of this list
much match the number of stages in the tree. The
default value of :const:`None` indicates that
stage names should be automatically generated in
with the form ['Stage1','Stage2',...].
node_name_attribute: By default, node names are the
same as the node hash in the networkx tree. This
keyword can be set to the name of some property
of nodes in the graph that will be used for their
name in the PySP scenario tree.
scenario_name_attribute: By default, scenario names
are the same as the leaf-node hash in the
networkx tree. This keyword can be set to the
name of some property of leaf-nodes in the graph
that will be used for their corresponding
scenario name in the PySP scenario tree.
edge_probability_attribute: Can be set to the name
of some property of edges in the graph that
defines the conditional probability of that
branch (default: 'weight'). If this keyword is
set to :const:`None`, then all branches leaving a
node are assigned equal conditional
probabilities.
Examples:
A 2-stage scenario tree with 10 scenarios grouped
into 2 bundles:
>>> G = networkx.DiGraph()
>>> G.add_node("root", variables=["x"])
>>> N = 10
>>> for i in range(N):
>>> node_name = "s"+str(i)
>>> bundle_name = "b"+str(i%2)
>>> G.add_node(node_name, bundle=bundle)
>>> G.add_edge("root", node_name, weight=1.0/N)
>>> model = ScenarioTreeModelFromNetworkX(G)
A 4-stage scenario tree with 125 scenarios:
>>> branching_factor = 5
>>> height = 3
>>> G = networkx.balanced_tree(
branching_factor,
height,
networkx.DiGraph())
>>> model = ScenarioTreeModelFromNetworkX(G)
"""
if not has_networkx: # pragma:nocover
raise ValueError("networkx>=2.0 module is not available")
if not networkx.is_tree(tree):
raise TypeError("Graph object is not a tree (see networkx.is_tree)")
if not networkx.is_directed(tree):
raise TypeError("Graph object is not directed (see networkx.is_directed)")
if not networkx.is_branching(tree):
raise TypeError("Grapn object is not a branching (see networkx.is_branching")
in_degree_items = tree.in_degree()
# Prior to networkx ~2.0, in_degree() returned a dictionary.
# Now it is a view on items, so only call .items() for the old case
if hasattr(in_degree_items, "items"):
in_degree_items = in_degree_items.items()
root = [u for u, d in in_degree_items if d == 0]
assert len(root) == 1
root = root[0]
num_stages = networkx.eccentricity(tree, v=root) + 1
if num_stages < 2:
raise ValueError("The number of stages must be at least 2")
m = CreateAbstractScenarioTreeModel()
if stage_names is not None:
unique_stage_names = set()
for cnt, stage_name in enumerate(stage_names, 1):
m.Stages.add(stage_name)
unique_stage_names.add(stage_name)
if cnt != num_stages:
raise ValueError(
"incorrect number of stages names (%s), should be %s"
% (cnt, num_stages)
)
if len(unique_stage_names) != cnt:
raise ValueError("all stage names were not unique")
else:
for i in range(num_stages):
m.Stages.add("Stage" + str(i + 1))
node_to_name = {}
node_to_scenario = {}
scenario_bundle = {}
def _setup(u, succ):
if node_name_attribute is not None:
if node_name_attribute not in tree.nodes[u]:
raise KeyError(
"node '%s' missing node name "
"attribute: '%s'" % (u, node_name_attribute)
)
node_name = tree.nodes[u][node_name_attribute]
else:
node_name = u
node_to_name[u] = node_name
m.Nodes.add(node_name)
if u in succ:
for v in succ[u]:
_setup(v, succ)
else:
# a leaf node
if scenario_name_attribute is not None:
if scenario_name_attribute not in tree.nodes[u]:
raise KeyError(
"node '%s' missing scenario name "
"attribute: '%s'" % (u, scenario_name_attribute)
)
scenario_name = tree.nodes[u][scenario_name_attribute]
else:
scenario_name = u
node_to_scenario[u] = scenario_name
m.Scenarios.add(scenario_name)
scenario_bundle[scenario_name] = tree.nodes[u].get("bundle", None)
_setup(root, networkx.dfs_successors(tree, root))
m = m.create_instance()
def _add_node(u, stage, succ, pred):
node_name = node_to_name[u]
m.NodeStage[node_name] = m.Stages[stage]
if u == root:
m.ConditionalProbability[node_name] = 1.0
else:
assert u in pred
# prior to networkx ~2.0, we used a .edge attribute on DiGraph,
# which no longer exists.
if hasattr(tree, "edge"):
edge = tree.edge[pred[u]][u]
else:
edge = tree.edges[pred[u], u]
probability = None
if edge_probability_attribute is not None:
if edge_probability_attribute not in edge:
raise KeyError(
"edge '(%s, %s)' missing probability attribute: '%s'"
% (pred[u], u, edge_probability_attribute)
)
probability = edge[edge_probability_attribute]
else:
probability = 1.0 / len(succ[pred[u]])
m.ConditionalProbability[node_name] = probability
# get node variables
if "variables" in tree.nodes[u]:
node_variables = tree.nodes[u]["variables"]
assert type(node_variables) in [tuple, list]
for varstring in node_variables:
m.NodeVariables[node_name].add(varstring)
if "derived_variables" in tree.nodes[u]:
node_derived_variables = tree.nodes[u]["derived_variables"]
assert type(node_derived_variables) in [tuple, list]
for varstring in node_derived_variables:
m.NodeDerivedVariables[node_name].add(varstring)
if "cost" in tree.nodes[u]:
assert isinstance(tree.nodes[u]["cost"], six.string_types)
m.NodeCost[node_name].value = tree.nodes[u]["cost"]
if u in succ:
child_names = []
for v in succ[u]:
child_names.append(_add_node(v, stage + 1, succ, pred))
total_probability = 0.0
for child_name in child_names:
m.Children[node_name].add(child_name)
total_probability += pyomo.core.value(
m.ConditionalProbability[child_name]
)
if abs(total_probability - 1.0) > 1e-5:
raise ValueError(
"edge probabilities leaving node '%s' "
"do not sum to 1 (total=%r)" % (u, total_probability)
)
else:
# a leaf node
scenario_name = node_to_scenario[u]
m.ScenarioLeafNode[scenario_name] = node_name
m.Children[node_name].clear()
return node_name
_add_node(
root,
1,
networkx.dfs_successors(tree, root),
networkx.dfs_predecessors(tree, root),
)
if any(_b is not None for _b in scenario_bundle.values()):
if any(_b is None for _b in scenario_bundle.values()):
raise ValueError(
"Incomplete bundle specification. "
"All scenarios require a bundle "
"identifier."
)
m.Bundling.value = True
bundle_scenarios = {}
for bundle_name in sorted(set(scenario_bundle.values())):
m.Bundles.add(bundle_name)
bundle_scenarios[bundle_name] = []
for scenario_name in m.Scenarios:
bundle_scenarios[scenario_bundle[scenario_name]].append(scenario_name)
for bundle_name in m.Bundles:
for scenario_name in sorted(bundle_scenarios[bundle_name]):
m.BundleScenarios[bundle_name].add(scenario_name)
return m
|
def ScenarioTreeModelFromNetworkX(
tree,
node_name_attribute=None,
edge_probability_attribute="weight",
stage_names=None,
scenario_name_attribute=None,
):
"""
Create a scenario tree model from a networkx tree. The
height of the tree must be at least 1 (meaning at least
2 stages).
Required node attributes:
- cost (str): A string identifying a component on
the model whose value indicates the cost at
the time stage of the node for any scenario
traveling through it.
Optional node attributes:
- variables (list): A list of variable identifiers
that will be tracked by the node. If the node
is not a leaf node, these indicate variables
with non-anticipativity constraints.
- derived_variables (list): A list of variable or
expression identifiers that will be tracked by
the node (but will never have
non-anticipativity constraints enforced).
- bundle: A bundle identifier for the scenario
defined by a leaf-stage node. This attribute
is ignored on non-terminal tree nodes. This
attribute appears on at least one leaf-stage
node (and is not set to :const:`None`), then
it must be set on all leaf-stage nodes (to
something other than :const:`None`);
otherwise, an exception will be raised.
Optional edge attributes:
- weight (float): Indicates the conditional
probability of moving from the parent node to
the child node in the directed edge. If not
present, it will be assumed that all edges
leaving the parent node have equal probability
(normalized to sum to one).
Args:
stage_names: Can define a list of stage names to use
(assumed in time order). The length of this list
much match the number of stages in the tree. The
default value of :const:`None` indicates that
stage names should be automatically generated in
with the form ['Stage1','Stage2',...].
node_name_attribute: By default, node names are the
same as the node hash in the networkx tree. This
keyword can be set to the name of some property
of nodes in the graph that will be used for their
name in the PySP scenario tree.
scenario_name_attribute: By default, scenario names
are the same as the leaf-node hash in the
networkx tree. This keyword can be set to the
name of some property of leaf-nodes in the graph
that will be used for their corresponding
scenario name in the PySP scenario tree.
edge_probability_attribute: Can be set to the name
of some property of edges in the graph that
defines the conditional probability of that
branch (default: 'weight'). If this keyword is
set to :const:`None`, then all branches leaving a
node are assigned equal conditional
probabilities.
Examples:
A 2-stage scenario tree with 10 scenarios grouped
into 2 bundles:
>>> G = networkx.DiGraph()
>>> G.add_node("root", variables=["x"])
>>> N = 10
>>> for i in range(N):
>>> node_name = "s"+str(i)
>>> bundle_name = "b"+str(i%2)
>>> G.add_node(node_name, bundle=bundle)
>>> G.add_edge("root", node_name, weight=1.0/N)
>>> model = ScenarioTreeModelFromNetworkX(G)
A 4-stage scenario tree with 125 scenarios:
>>> branching_factor = 5
>>> height = 3
>>> G = networkx.balanced_tree(
branching_factor,
height,
networkx.DiGraph())
>>> model = ScenarioTreeModelFromNetworkX(G)
"""
if not has_networkx: # pragma:nocover
raise ValueError("networkx module is not available")
if not networkx.is_tree(tree):
raise TypeError("Graph object is not a tree (see networkx.is_tree)")
if not networkx.is_directed(tree):
raise TypeError("Graph object is not directed (see networkx.is_directed)")
if not networkx.is_branching(tree):
raise TypeError("Grapn object is not a branching (see networkx.is_branching")
in_degree_items = tree.in_degree()
# Prior to networkx ~2.0, in_degree() returned a dictionary.
# Now it is a view on items, so only call .items() for the old case
if hasattr(in_degree_items, "items"):
in_degree_items = in_degree_items.items()
root = [u for u, d in in_degree_items if d == 0]
assert len(root) == 1
root = root[0]
num_stages = networkx.eccentricity(tree, v=root) + 1
if num_stages < 2:
raise ValueError("The number of stages must be at least 2")
m = CreateAbstractScenarioTreeModel()
if stage_names is not None:
unique_stage_names = set()
for cnt, stage_name in enumerate(stage_names, 1):
m.Stages.add(stage_name)
unique_stage_names.add(stage_name)
if cnt != num_stages:
raise ValueError(
"incorrect number of stages names (%s), should be %s"
% (cnt, num_stages)
)
if len(unique_stage_names) != cnt:
raise ValueError("all stage names were not unique")
else:
for i in range(num_stages):
m.Stages.add("Stage" + str(i + 1))
node_to_name = {}
node_to_scenario = {}
scenario_bundle = {}
def _setup(u, succ):
if node_name_attribute is not None:
if node_name_attribute not in tree.node[u]:
raise KeyError(
"node '%s' missing node name "
"attribute: '%s'" % (u, node_name_attribute)
)
node_name = tree.node[u][node_name_attribute]
else:
node_name = u
node_to_name[u] = node_name
m.Nodes.add(node_name)
if u in succ:
for v in succ[u]:
_setup(v, succ)
else:
# a leaf node
if scenario_name_attribute is not None:
if scenario_name_attribute not in tree.node[u]:
raise KeyError(
"node '%s' missing scenario name "
"attribute: '%s'" % (u, scenario_name_attribute)
)
scenario_name = tree.node[u][scenario_name_attribute]
else:
scenario_name = u
node_to_scenario[u] = scenario_name
m.Scenarios.add(scenario_name)
scenario_bundle[scenario_name] = tree.node[u].get("bundle", None)
_setup(root, networkx.dfs_successors(tree, root))
m = m.create_instance()
def _add_node(u, stage, succ, pred):
node_name = node_to_name[u]
m.NodeStage[node_name] = m.Stages[stage]
if u == root:
m.ConditionalProbability[node_name] = 1.0
else:
assert u in pred
# prior to networkx ~2.0, we used a .edge attribute on DiGraph,
# which no longer exists.
if hasattr(tree, "edge"):
edge = tree.edge[pred[u]][u]
else:
edge = tree.edges[pred[u], u]
probability = None
if edge_probability_attribute is not None:
if edge_probability_attribute not in edge:
raise KeyError(
"edge '(%s, %s)' missing probability attribute: '%s'"
% (pred[u], u, edge_probability_attribute)
)
probability = edge[edge_probability_attribute]
else:
probability = 1.0 / len(succ[pred[u]])
m.ConditionalProbability[node_name] = probability
# get node variables
if "variables" in tree.node[u]:
node_variables = tree.node[u]["variables"]
assert type(node_variables) in [tuple, list]
for varstring in node_variables:
m.NodeVariables[node_name].add(varstring)
if "derived_variables" in tree.node[u]:
node_derived_variables = tree.node[u]["derived_variables"]
assert type(node_derived_variables) in [tuple, list]
for varstring in node_derived_variables:
m.NodeDerivedVariables[node_name].add(varstring)
if "cost" in tree.node[u]:
assert isinstance(tree.node[u]["cost"], six.string_types)
m.NodeCost[node_name].value = tree.node[u]["cost"]
if u in succ:
child_names = []
for v in succ[u]:
child_names.append(_add_node(v, stage + 1, succ, pred))
total_probability = 0.0
for child_name in child_names:
m.Children[node_name].add(child_name)
total_probability += pyomo.core.value(
m.ConditionalProbability[child_name]
)
if abs(total_probability - 1.0) > 1e-5:
raise ValueError(
"edge probabilities leaving node '%s' "
"do not sum to 1 (total=%r)" % (u, total_probability)
)
else:
# a leaf node
scenario_name = node_to_scenario[u]
m.ScenarioLeafNode[scenario_name] = node_name
m.Children[node_name].clear()
return node_name
_add_node(
root,
1,
networkx.dfs_successors(tree, root),
networkx.dfs_predecessors(tree, root),
)
if any(_b is not None for _b in scenario_bundle.values()):
if any(_b is None for _b in scenario_bundle.values()):
raise ValueError(
"Incomplete bundle specification. "
"All scenarios require a bundle "
"identifier."
)
m.Bundling.value = True
bundle_scenarios = {}
for bundle_name in sorted(set(scenario_bundle.values())):
m.Bundles.add(bundle_name)
bundle_scenarios[bundle_name] = []
for scenario_name in m.Scenarios:
bundle_scenarios[scenario_bundle[scenario_name]].append(scenario_name)
for bundle_name in m.Bundles:
for scenario_name in sorted(bundle_scenarios[bundle_name]):
m.BundleScenarios[bundle_name].add(scenario_name)
return m
|
https://github.com/Pyomo/pyomo/issues/1138
|
======================================================================
ERROR: test_bundles (pyomo.pysp.tests.unit.test_scenariotree.TestScenarioTreeFromNetworkX)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/tests/unit/test_scenariotree.py", line 745, in test_bundles
edge_probability_attribute=None)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 313, in ScenarioTreeModelFromNetworkX
networkx.dfs_successors(tree, root))
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 296, in _setup
_setup(v, succ)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 311, in _setup
tree.node[u].get('bundle', None)
AttributeError: 'DiGraph' object has no attribute 'node'
|
AttributeError
|
def _setup(u, succ):
if node_name_attribute is not None:
if node_name_attribute not in tree.nodes[u]:
raise KeyError(
"node '%s' missing node name attribute: '%s'" % (u, node_name_attribute)
)
node_name = tree.nodes[u][node_name_attribute]
else:
node_name = u
node_to_name[u] = node_name
m.Nodes.add(node_name)
if u in succ:
for v in succ[u]:
_setup(v, succ)
else:
# a leaf node
if scenario_name_attribute is not None:
if scenario_name_attribute not in tree.nodes[u]:
raise KeyError(
"node '%s' missing scenario name "
"attribute: '%s'" % (u, scenario_name_attribute)
)
scenario_name = tree.nodes[u][scenario_name_attribute]
else:
scenario_name = u
node_to_scenario[u] = scenario_name
m.Scenarios.add(scenario_name)
scenario_bundle[scenario_name] = tree.nodes[u].get("bundle", None)
|
def _setup(u, succ):
if node_name_attribute is not None:
if node_name_attribute not in tree.node[u]:
raise KeyError(
"node '%s' missing node name attribute: '%s'" % (u, node_name_attribute)
)
node_name = tree.node[u][node_name_attribute]
else:
node_name = u
node_to_name[u] = node_name
m.Nodes.add(node_name)
if u in succ:
for v in succ[u]:
_setup(v, succ)
else:
# a leaf node
if scenario_name_attribute is not None:
if scenario_name_attribute not in tree.node[u]:
raise KeyError(
"node '%s' missing scenario name "
"attribute: '%s'" % (u, scenario_name_attribute)
)
scenario_name = tree.node[u][scenario_name_attribute]
else:
scenario_name = u
node_to_scenario[u] = scenario_name
m.Scenarios.add(scenario_name)
scenario_bundle[scenario_name] = tree.node[u].get("bundle", None)
|
https://github.com/Pyomo/pyomo/issues/1138
|
======================================================================
ERROR: test_bundles (pyomo.pysp.tests.unit.test_scenariotree.TestScenarioTreeFromNetworkX)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/tests/unit/test_scenariotree.py", line 745, in test_bundles
edge_probability_attribute=None)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 313, in ScenarioTreeModelFromNetworkX
networkx.dfs_successors(tree, root))
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 296, in _setup
_setup(v, succ)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 311, in _setup
tree.node[u].get('bundle', None)
AttributeError: 'DiGraph' object has no attribute 'node'
|
AttributeError
|
def _add_node(u, stage, succ, pred):
node_name = node_to_name[u]
m.NodeStage[node_name] = m.Stages[stage]
if u == root:
m.ConditionalProbability[node_name] = 1.0
else:
assert u in pred
# prior to networkx ~2.0, we used a .edge attribute on DiGraph,
# which no longer exists.
if hasattr(tree, "edge"):
edge = tree.edge[pred[u]][u]
else:
edge = tree.edges[pred[u], u]
probability = None
if edge_probability_attribute is not None:
if edge_probability_attribute not in edge:
raise KeyError(
"edge '(%s, %s)' missing probability attribute: '%s'"
% (pred[u], u, edge_probability_attribute)
)
probability = edge[edge_probability_attribute]
else:
probability = 1.0 / len(succ[pred[u]])
m.ConditionalProbability[node_name] = probability
# get node variables
if "variables" in tree.nodes[u]:
node_variables = tree.nodes[u]["variables"]
assert type(node_variables) in [tuple, list]
for varstring in node_variables:
m.NodeVariables[node_name].add(varstring)
if "derived_variables" in tree.nodes[u]:
node_derived_variables = tree.nodes[u]["derived_variables"]
assert type(node_derived_variables) in [tuple, list]
for varstring in node_derived_variables:
m.NodeDerivedVariables[node_name].add(varstring)
if "cost" in tree.nodes[u]:
assert isinstance(tree.nodes[u]["cost"], six.string_types)
m.NodeCost[node_name].value = tree.nodes[u]["cost"]
if u in succ:
child_names = []
for v in succ[u]:
child_names.append(_add_node(v, stage + 1, succ, pred))
total_probability = 0.0
for child_name in child_names:
m.Children[node_name].add(child_name)
total_probability += pyomo.core.value(m.ConditionalProbability[child_name])
if abs(total_probability - 1.0) > 1e-5:
raise ValueError(
"edge probabilities leaving node '%s' "
"do not sum to 1 (total=%r)" % (u, total_probability)
)
else:
# a leaf node
scenario_name = node_to_scenario[u]
m.ScenarioLeafNode[scenario_name] = node_name
m.Children[node_name].clear()
return node_name
|
def _add_node(u, stage, succ, pred):
node_name = node_to_name[u]
m.NodeStage[node_name] = m.Stages[stage]
if u == root:
m.ConditionalProbability[node_name] = 1.0
else:
assert u in pred
# prior to networkx ~2.0, we used a .edge attribute on DiGraph,
# which no longer exists.
if hasattr(tree, "edge"):
edge = tree.edge[pred[u]][u]
else:
edge = tree.edges[pred[u], u]
probability = None
if edge_probability_attribute is not None:
if edge_probability_attribute not in edge:
raise KeyError(
"edge '(%s, %s)' missing probability attribute: '%s'"
% (pred[u], u, edge_probability_attribute)
)
probability = edge[edge_probability_attribute]
else:
probability = 1.0 / len(succ[pred[u]])
m.ConditionalProbability[node_name] = probability
# get node variables
if "variables" in tree.node[u]:
node_variables = tree.node[u]["variables"]
assert type(node_variables) in [tuple, list]
for varstring in node_variables:
m.NodeVariables[node_name].add(varstring)
if "derived_variables" in tree.node[u]:
node_derived_variables = tree.node[u]["derived_variables"]
assert type(node_derived_variables) in [tuple, list]
for varstring in node_derived_variables:
m.NodeDerivedVariables[node_name].add(varstring)
if "cost" in tree.node[u]:
assert isinstance(tree.node[u]["cost"], six.string_types)
m.NodeCost[node_name].value = tree.node[u]["cost"]
if u in succ:
child_names = []
for v in succ[u]:
child_names.append(_add_node(v, stage + 1, succ, pred))
total_probability = 0.0
for child_name in child_names:
m.Children[node_name].add(child_name)
total_probability += pyomo.core.value(m.ConditionalProbability[child_name])
if abs(total_probability - 1.0) > 1e-5:
raise ValueError(
"edge probabilities leaving node '%s' "
"do not sum to 1 (total=%r)" % (u, total_probability)
)
else:
# a leaf node
scenario_name = node_to_scenario[u]
m.ScenarioLeafNode[scenario_name] = node_name
m.Children[node_name].clear()
return node_name
|
https://github.com/Pyomo/pyomo/issues/1138
|
======================================================================
ERROR: test_bundles (pyomo.pysp.tests.unit.test_scenariotree.TestScenarioTreeFromNetworkX)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/tests/unit/test_scenariotree.py", line 745, in test_bundles
edge_probability_attribute=None)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 313, in ScenarioTreeModelFromNetworkX
networkx.dfs_successors(tree, root))
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 296, in _setup
_setup(v, succ)
File "/home/travis/build/Pyomo/pyomo/pyomo/pysp/scenariotree/tree_structure_model.py", line 311, in _setup
tree.node[u].get('bundle', None)
AttributeError: 'DiGraph' object has no attribute 'node'
|
AttributeError
|
def normalize_index(index):
"""
Flatten a component index. If it has length 1, then
return just the element. If it has length > 1, then
return a tuple.
"""
ans = flatten(index)
if len(ans) == 1:
return ans[0]
return ans
|
def normalize_index(index):
"""
Flatten a component index. If it has length 1, then
return just the element. If it has length > 1, then
return a tuple.
"""
idx = pyutilib.misc.flatten(index)
if type(idx) is list:
if len(idx) == 1:
idx = idx[0]
else:
idx = tuple(idx)
return idx
|
https://github.com/Pyomo/pyomo/issues/116
|
qichen@QC-CMU-Tower:~$ pyothon
Python 2.7.11+ (default, Apr 17 2016, 14:00:29)
[GCC 5.3.1 20160413] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from pyomo.environ import *
m = ConcreteModel()
m.s = Set(initialize=['one'])
m.c = Constraint(m.s)
m.c[m.s].deactivate()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 492, in __getitem__
return self._default(ndx)
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 655, in _default
% (self.__class__.__name__,))
pyomo.util._config.DeveloperError: Internal Pyomo implementation error:
'Derived component IndexedConstraint failed to define _default().'
Please report this to the Pyomo Developers.
|
pyomo.util._config.DeveloperError
|
def _processUnhashableIndex(self, idx):
"""Process a call to __getitem__ with unhashable elements
There are three basic ways to get here:
1) the index contains one or more slices or ellipsis
2) the index contains an unhashable type (e.g., a Pyomo
(Simple)Component
3) the index contains an IndexTemplate
"""
from pyomo.core.expr import current as EXPR
#
# Iterate through the index and look for slices and constant
# components
#
fixed = {}
sliced = {}
ellipsis = None
_found_numeric = False
#
# Setup the slice template (in fixed)
#
idx = flatten(idx)
for i, val in enumerate(idx):
if type(val) is slice:
if val.start is not None or val.stop is not None:
raise IndexError(
"Indexed components can only be indexed with simple "
"slices: start and stop values are not allowed."
)
if val.step is not None:
logger.warning(
"DEPRECATION WARNING: The special wildcard slice "
"(::0) is deprecated. Please use an ellipsis (...) "
"to indicate '0 or more' indices"
)
val = Ellipsis
else:
if ellipsis is None:
sliced[i] = val
else:
sliced[i - len(idx)] = val
continue
if val is Ellipsis:
if ellipsis is not None:
raise IndexError(
"Indexed components can only be indexed with simple "
"slices: the Pyomo wildcard slice (Ellipsis; "
"e.g., '...') can only appear once"
)
ellipsis = i
continue
if hasattr(val, "is_expression_type"):
_num_val = val
# Attempt to retrieve the numeric value .. if this
# is a template expression generation, then it
# should raise a TemplateExpressionError
try:
val = EXPR.evaluate_expression(val, constant=True)
_found_numeric = True
except TemplateExpressionError:
#
# The index is a template expression, so return the
# templatized expression.
#
from pyomo.core.expr import current as EXPR
return EXPR.GetItemExpression(tuple(idx), self)
except EXPR.NonConstantExpressionError:
#
# The expression contains an unfixed variable
#
raise RuntimeError(
"""Error retrieving the value of an indexed item %s:
index %s is not a constant value. This is likely not what you meant to
do, as if you later change the fixed value of the object this lookup
will not change. If you understand the implications of using
non-constant values, you can get the current value of the object using
the value() function."""
% (self.name, i)
)
except EXPR.FixedExpressionError:
#
# The expression contains a fixed variable
#
raise RuntimeError(
"""Error retrieving the value of an indexed item %s:
index %s is a fixed but not constant value. This is likely not what you
meant to do, as if you later change the fixed value of the object this
lookup will not change. If you understand the implications of using
fixed but not constant values, you can get the current value using the
value() function."""
% (self.name, i)
)
#
# There are other ways we could get an exception such as
# evaluating a Param / Var that is not initialized.
# These exceptions will continue up the call stack.
#
# verify that the value is hashable
hash(val)
if ellipsis is None:
fixed[i] = val
else:
fixed[i - len(idx)] = val
if sliced or ellipsis is not None:
return _IndexedComponent_slice(self, fixed, sliced, ellipsis)
elif _found_numeric:
if len(idx) == 1:
return fixed[0]
else:
return tuple(fixed[i] for i in range(len(idx)))
else:
raise DeveloperError(
"Unknown problem encountered when trying to retrieve "
"index for component %s" % (self.name,)
)
|
def _processUnhashableIndex(self, idx):
"""Process a call to __getitem__ with unhashable elements
There are three basic ways to get here:
1) the index contains one or more slices or ellipsis
2) the index contains an unhashable type (e.g., a Pyomo
(Simple)Component
3) the index contains an IndexTemplate
"""
from pyomo.core.expr import current as EXPR
#
# Iterate through the index and look for slices and constant
# components
#
fixed = {}
sliced = {}
ellipsis = None
_found_numeric = False
#
# Setup the slice template (in fixed)
#
if type(idx) is tuple:
# We would normally do "flatten()" here, but the current
# (10/2016) implementation of flatten() is too aggressive:
# it will attempt to expand *any* iterable, including
# SimpleParam.
idx = pyutilib.misc.flatten_tuple(idx)
elif type(idx) is list:
idx = pyutilib.misc.flatten_tuple(tuple(idx))
else:
idx = (idx,)
for i, val in enumerate(idx):
if type(val) is slice:
if val.start is not None or val.stop is not None:
raise IndexError(
"Indexed components can only be indexed with simple "
"slices: start and stop values are not allowed."
)
if val.step is not None:
logger.warning(
"DEPRECATION WARNING: The special wildcard slice "
"(::0) is deprecated. Please use an ellipsis (...) "
"to indicate '0 or more' indices"
)
val = Ellipsis
else:
if ellipsis is None:
sliced[i] = val
else:
sliced[i - len(idx)] = val
continue
if val is Ellipsis:
if ellipsis is not None:
raise IndexError(
"Indexed components can only be indexed with simple "
"slices: the Pyomo wildcard slice (Ellipsis; "
"e.g., '...') can only appear once"
)
ellipsis = i
continue
if hasattr(val, "is_expression_type"):
_num_val = val
# Attempt to retrieve the numeric value .. if this
# is a template expression generation, then it
# should raise a TemplateExpressionError
try:
val = EXPR.evaluate_expression(val, constant=True)
_found_numeric = True
except TemplateExpressionError:
#
# The index is a template expression, so return the
# templatized expression.
#
from pyomo.core.expr import current as EXPR
return EXPR.GetItemExpression(tuple(idx), self)
except EXPR.NonConstantExpressionError:
#
# The expression contains an unfixed variable
#
raise RuntimeError(
"""Error retrieving the value of an indexed item %s:
index %s is not a constant value. This is likely not what you meant to
do, as if you later change the fixed value of the object this lookup
will not change. If you understand the implications of using
non-constant values, you can get the current value of the object using
the value() function."""
% (self.name, i)
)
except EXPR.FixedExpressionError:
#
# The expression contains a fixed variable
#
raise RuntimeError(
"""Error retrieving the value of an indexed item %s:
index %s is a fixed but not constant value. This is likely not what you
meant to do, as if you later change the fixed value of the object this
lookup will not change. If you understand the implications of using
fixed but not constant values, you can get the current value using the
value() function."""
% (self.name, i)
)
#
# There are other ways we could get an exception such as
# evaluating a Param / Var that is not initialized.
# These exceptions will continue up the call stack.
#
# verify that the value is hashable
hash(val)
if ellipsis is None:
fixed[i] = val
else:
fixed[i - len(idx)] = val
if sliced or ellipsis is not None:
return _IndexedComponent_slice(self, fixed, sliced, ellipsis)
elif _found_numeric:
if len(idx) == 1:
return fixed[0]
else:
return tuple(fixed[i] for i in range(len(idx)))
else:
raise DeveloperError(
"Unknown problem encountered when trying to retrieve "
"index for component %s" % (self.name,)
)
|
https://github.com/Pyomo/pyomo/issues/116
|
qichen@QC-CMU-Tower:~$ pyothon
Python 2.7.11+ (default, Apr 17 2016, 14:00:29)
[GCC 5.3.1 20160413] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from pyomo.environ import *
m = ConcreteModel()
m.s = Set(initialize=['one'])
m.c = Constraint(m.s)
m.c[m.s].deactivate()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 492, in __getitem__
return self._default(ndx)
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 655, in _default
% (self.__class__.__name__,))
pyomo.util._config.DeveloperError: Internal Pyomo implementation error:
'Derived component IndexedConstraint failed to define _default().'
Please report this to the Pyomo Developers.
|
pyomo.util._config.DeveloperError
|
def add(self, value):
if normalize_index.flatten:
if type(value) is tuple:
_value = flatten_tuple(value)
_d = len(_value)
if _d == 1:
_value = _value[0]
else:
_value = value
_d = 1
else:
# If we are not normalizing indices, then we cannot reliably
# infer the set dimen
_value = value
_d = None
if _value not in self._domain:
raise ValueError(
"Cannot add value %s to Set %s.\n"
"\tThe value is not in the domain %s" % (value, self.name, self._domain)
)
# We wrap this check in a try-except because some values (like lists)
# are not hashable and can raise exceptions.
try:
if _value in self:
logger.warning(
"Element %s already exists in Set %s; no action taken"
% (value, self.name)
)
return False
except:
exc = sys.exc_info()
raise TypeError(
"Unable to insert '%s' into Set %s:\n\t%s: %s"
% (value, self.name, exc[0].__name__, exc[1])
)
if self._filter is not None:
if not self._filter(self, _value):
return False
if self._validate is not None:
try:
flag = self._validate(self, _value)
except:
logger.error(
"Exception raised while validating element '%s' for Set %s"
% (value, self.name)
)
raise
if not flag:
raise ValueError(
"The value=%s violates the validation rule of Set %s"
% (value, self.name)
)
# If the Set has a fixed dimension, check that this element is
# compatible.
if self._dimen is not None:
if _d != self._dimen:
if self._dimen is _UnknownSetDimen:
# The first thing added to a Set with unknown
# dimension sets its dimension
self._dimen = _d
else:
raise ValueError(
"The value=%s has dimension %s and is not valid for "
"Set %s which has dimen=%s" % (value, _d, self.name, self._dimen)
)
# Add the value to this object (this last redirection allows
# derived classes to implement a different storage mmechanism)
self._add_impl(_value)
return True
|
def add(self, value):
if type(value) is tuple:
_value = flatten_tuple(value)
if len(_value) == 1:
_value = _value[0]
_d = 1
else:
_d = len(_value)
else:
_value = value
_d = 1
if _value not in self._domain:
raise ValueError(
"Cannot add value %s to Set %s.\n"
"\tThe value is not in the domain %s" % (value, self.name, self._domain)
)
# We wrap this check in a try-except because some values (like lists)
# are not hashable and can raise exceptions.
try:
if _value in self:
logger.warning(
"Element %s already exists in Set %s; no action taken"
% (value, self.name)
)
return False
except:
exc = sys.exc_info()
raise TypeError(
"Unable to insert '%s' into Set %s:\n\t%s: %s"
% (value, self.name, exc[0].__name__, exc[1])
)
if self._filter is not None:
if not self._filter(self, _value):
return False
if self._validate is not None:
try:
flag = self._validate(self, _value)
except:
logger.error(
"Exception raised while validating element '%s' for Set %s"
% (value, self.name)
)
raise
if not flag:
raise ValueError(
"The value=%s violates the validation rule of Set %s"
% (value, self.name)
)
# If the Set has a fixed dimension, check that this element is
# compatible.
if self._dimen is not None:
if _d != self._dimen:
if self._dimen is _UnknownSetDimen:
# The first thing added to a Set with unknown
# dimension sets its dimension
self._dimen = _d
else:
raise ValueError(
"The value=%s has dimension %s and is not valid for "
"Set %s which has dimen=%s" % (value, _d, self.name, self._dimen)
)
# Add the value to this object (this last redirection allows
# derived classes to implement a different storage mmechanism)
self._add_impl(_value)
return True
|
https://github.com/Pyomo/pyomo/issues/116
|
qichen@QC-CMU-Tower:~$ pyothon
Python 2.7.11+ (default, Apr 17 2016, 14:00:29)
[GCC 5.3.1 20160413] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from pyomo.environ import *
m = ConcreteModel()
m.s = Set(initialize=['one'])
m.c = Constraint(m.s)
m.c[m.s].deactivate()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 492, in __getitem__
return self._default(ndx)
File "/home/qichen/.solvers/pyomo/src/pyomo/pyomo/core/base/indexed_component.py", line 655, in _default
% (self.__class__.__name__,))
pyomo.util._config.DeveloperError: Internal Pyomo implementation error:
'Derived component IndexedConstraint failed to define _default().'
Please report this to the Pyomo Developers.
|
pyomo.util._config.DeveloperError
|
def load():
import pyomo.scripting.plugins.check
import pyomo.scripting.plugins.convert
import pyomo.scripting.plugins.solve
import pyomo.scripting.plugins.download
import pyomo.scripting.plugins.build_ext
import pyomo.scripting.plugins.extras
|
def load():
import pyomo.scripting.plugins.check
import pyomo.scripting.plugins.convert
import pyomo.scripting.plugins.solve
import pyomo.scripting.plugins.download
import pyomo.scripting.plugins.build_ext
|
https://github.com/Pyomo/pyomo/issues/243
|
Traceback (most recent call last):
File "/XXX/myvenv/bin/get_pyomo_extras", line 7, in <module>
from scripts.get_pyomo_extras import main
ModuleNotFoundError: No module named 'scripts'
|
ModuleNotFoundError
|
def __deepcopy__(self, memo):
# The problem we are addressing is when we want to clone a
# sub-block in a model. In that case, the block can have
# references to both child components and to external
# ComponentData (mostly through expressions pointing to Vars
# and Params outside this block). For everything stored beneath
# this block, we want to clone the Component (and all
# corresponding ComponentData objects). But for everything
# stored outside this Block, we want to do a simple shallow
# copy.
#
# Nominally, expressions only point to ComponentData
# derivatives. However, with the development of Expression
# Templates (and the corresponding _GetItemExpression object),
# expressions can refer to container (non-Simple) components, so
# we need to override __deepcopy__ for both Component and
# ComponentData.
# try:
# print("Component: %s" % (self.name,))
# except:
# print("DANGLING ComponentData: %s on %s" % (
# type(self),self.parent_component()))
# Note: there is an edge case when cloning a block: the initial
# call to deepcopy (on the target block) has __block_scope__
# defined, however, the parent block of self is either None, or
# is (by definition) out of scope. So we will check that
# id(self) is not in __block_scope__: if it is, then this is the
# top-level block and we need to do the normal deepcopy.
if "__block_scope__" in memo and id(self) not in memo["__block_scope__"]:
_known = memo["__block_scope__"]
_new = []
tmp = self.parent_block()
tmpId = id(tmp)
# Note: normally we would need to check that tmp does not
# end up being None. However, since clone() inserts
# id(None) into the __block_scope__ dictionary, we are safe
while tmpId not in _known:
_new.append(tmpId)
tmp = tmp.parent_block()
tmpId = id(tmp)
# Remember whether all newly-encountered blocks are in or
# out of scope (prevent duplicate work)
for _id in _new:
_known[_id] = _known[tmpId]
if not _known[tmpId]:
# component is out-of-scope. shallow copy only
ans = memo[id(self)] = self
return ans
#
# There is a particularly subtle bug with 'uncopyable'
# attributes: if the exception is thrown while copying a complex
# data structure, we can be in a state where objects have been
# created and assigned to the memo in the try block, but they
# haven't had their state set yet. When the exception moves us
# into the except block, we need to effectively "undo" those
# partially copied classes. The only way is to restore the memo
# to the state it was in before we started. Right now, our
# solution is to make a (shallow) copy of the memo before each
# operation and restoring it in the case of exception.
# Unfortunately that is a lot of usually unnecessary work.
# Since *most* classes are copyable, we will avoid that
# "paranoia" unless the naive clone generated an error - in
# which case Block.clone() will switch over to the more
# "paranoid" mode.
#
paranoid = memo.get("__paranoid__", None)
ans = memo[id(self)] = self.__class__.__new__(self.__class__)
# We can't do the "obvious", since this is a (partially)
# slot-ized class and the __dict__ structure is
# nonauthoritative:
#
# for key, val in self.__dict__.iteritems():
# object.__setattr__(ans, key, deepcopy(val, memo))
#
# Further, __slots__ is also nonauthoritative (this may be a
# singleton component -- in which case it also has a __dict__).
# Plus, as this may be a derived class with several layers of
# slots. So, we will resort to partially "pickling" the object,
# deepcopying the state dict, and then restoring the copy into
# the new instance.
#
# [JDS 7/7/14] I worry about the efficiency of using both
# getstate/setstate *and* deepcopy, but we need deepcopy to
# update the _parent refs appropriately, and since this is a
# slot-ized class, we cannot overwrite the __deepcopy__
# attribute to prevent infinite recursion.
state = self.__getstate__()
try:
if paranoid:
saved_memo = dict(memo)
new_state = deepcopy(state, memo)
except:
if paranoid:
# Note: memo is intentionally pass-by-reference. We
# need to clear and reset the object we were handed (and
# not overwrite it)
memo.clear()
memo.update(saved_memo)
elif paranoid is not None:
raise PickleError()
new_state = {}
for k, v in iteritems(state):
try:
if paranoid:
saved_memo = dict(memo)
new_state[k] = deepcopy(v, memo)
except:
if paranoid:
memo.clear()
memo.update(saved_memo)
elif paranoid is None:
logger.warning("""
Uncopyable field encountered when deep
copying outside the scope of Block.clone().
There is a distinct possibility that the new
copy is not complete. To avoid this
situation, either use Block.clone() or set
'paranoid' mode by adding '__paranoid__' ==
True to the memo before calling
copy.deepcopy.""")
if self.model() is self:
what = "Model"
else:
what = "Component"
logger.error(
"Unable to clone Pyomo component attribute.\n"
"%s '%s' contains an uncopyable field '%s' (%s)"
% (what, self.name, k, type(v))
)
ans.__setstate__(new_state)
return ans
|
def __deepcopy__(self, memo):
# The problem we are addressing is when we want to clone a
# sub-block in a model. In that case, the block can have
# references to both child components and to external
# ComponentData (mostly through expressions pointing to Vars
# and Params outside this block). For everything stored beneath
# this block, we want to clone the Component (and all
# corresponding ComponentData objects). But for everything
# stored outside this Block, we want to do a simple shallow
# copy.
#
# Nominally, expressions only point to ComponentData
# derivatives. However, with the development of Expression
# Templates (and the corresponding _GetItemExpression object),
# expressions can refer to container (non-Simple) components, so
# we need to override __deepcopy__ for both Component and
# ComponentData.
# try:
# print("Component: %s" % (self.name,))
# except:
# print("DANGLING ComponentData: %s on %s" % (
# type(self),self.parent_component()))
# Note: there is an edge case when cloning a block: the initial
# call to deepcopy (on the target block) has __block_scope__
# defined, however, the parent block of self is either None, or
# is (by definition) out of scope. So we will check that
# id(self) is not in __block_scope__: if it is, then this is the
# top-level block and we need to do the normal deepcopy.
if "__block_scope__" in memo and id(self) not in memo["__block_scope__"]:
_known = memo["__block_scope__"]
_new = []
tmp = self.parent_block()
tmpId = id(tmp)
# Note: normally we would need to check that tmp does not
# end up being None. However, since clone() inserts
# id(None) into the __block_scope__ dictionary, we are safe
while tmpId not in _known:
_new.append(tmpId)
tmp = tmp.parent_block()
tmpId = id(tmp)
# Remember whether all newly-encountered blocks are in or
# out of scope (prevent duplicate work)
for _id in _new:
_known[_id] = _known[tmpId]
if not _known[tmpId]:
# component is out-of-scope. shallow copy only
ans = memo[id(self)] = self
return ans
#
# There is a particularly subtle bug with 'uncopyable'
# attributes: if the exception is thrown while copying a complex
# data structure, we can be in a state where objects have been
# created and assigned to the memo in the try block, but they
# haven't had their state set yet. When the exception moves us
# into the except block, we need to effectively "undo" those
# partially copied classes. The only way is to restore the memo
# to the state it was in before we started. Right now, our
# solution is to make a (shallow) copy of the memo before each
# operation and restoring it in the case of exception.
# Unfortunately that is a lot of usually unnecessary work.
# Since *most* classes are copyable, we will avoid that
# "paranoia" unless the naive clone generated an error - in
# which case Block.clone() will switch over to the more
# "paranoid" mode.
#
paranoid = memo.get("__paranoid__", None)
ans = memo[id(self)] = self.__class__.__new__(self.__class__)
# We can't do the "obvious", since this is a (partially)
# slot-ized class and the __dict__ structure is
# nonauthoritative:
#
# for key, val in self.__dict__.iteritems():
# object.__setattr__(ans, key, deepcopy(val, memo))
#
# Further, __slots__ is also nonauthoritative (this may be a
# singleton component -- in which case it also has a __dict__).
# Plus, as this may be a derived class with several layers of
# slots. So, we will resort to partially "pickling" the object,
# deepcopying the state dict, and then restoring the copy into
# the new instance.
#
# [JDS 7/7/14] I worry about the efficiency of using both
# getstate/setstate *and* deepcopy, but we need deepcopy to
# update the _parent refs appropriately, and since this is a
# slot-ized class, we cannot overwrite the __deepcopy__
# attribute to prevent infinite recursion.
state = self.__getstate__()
try:
if paranoid:
saved_memo = dict(memo)
new_state = deepcopy(state, memo)
except:
if paranoid:
# Note: memo is intentionally pass-by-reference. We
# need to clear and reset the object we were handed (and
# not overwrite it)
memo.clear()
memo.update(saved_memo)
elif paranoid is not None:
raise PickleError()
new_state = {}
for k, v in iteritems(state):
try:
if paranoid:
saved_memo = dict(memo)
new_state[k] = deepcopy(v, memo)
except:
if paranoid:
memo.clear()
memo.update(saved_memo)
elif paranoid is None:
logger.warning("""
Uncopyable field encountered when deep
copying outside the scope of Block.clone().
There is a distinct possibility that the new
copy is not complete. To avoid this
situation, either use Block.clone() or set
'paranoid' mode by adding '__paranoid__' ==
True to the memo before calling
copy.deepcopy.""")
logger.error(
"Unable to clone Pyomo component attribute.\n"
"Component '%s' contains an uncopyable field '%s' (%s)"
% (self.name, k, type(v))
)
ans.__setstate__(new_state)
return ans
|
https://github.com/Pyomo/pyomo/issues/912
|
import pyomo.environ as pe
m = pe.ConcreteModel()
m.x = pe.Var()
m.x_ref = pe.Reference(m.x)
m2 = m.clone()
m.x_ref._data._slice
<pyomo.core.base.indexed_component_slice._IndexedComponent_slice object at 0x151fa63438>
m2.x_ref._data._slice
[<pyomo.core.base.var.SimpleVar object at 0x152002b048>]
for v in m.component_data_objects(pe.Var):
... print(v)
...
x
x
for v in m2.component_data_objects(pe.Var):
... print(v)
...
x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/.../pyomo/core/base/block.py", line 1342, in component_data_objects
sort=sort):
File "/Users/.../pyomo/core/base/block.py", line 1274, in _component_data_iter
for idx, compData in _items:
File "/Users/.../pyomo/core/base/indexed_component.py", line 309, in iteritems
yield key, self[key]
File "/Users/.../pyomo/core/base/indexed_component.py", line 319, in __getitem__
obj = self._data.get(index, _NotFound)
File "/Users/.../_collections_abc.py", line 660, in get
return self[key]
File "/Users/.../pyomo/core/base/reference.py", line 178, in __getitem__
self._get_iter(self._slice, key, get_if_not_present=True)
File "/Users/.../pyomo/core/base/reference.py", line 292, in _get_iter
get_if_not_present=get_if_not_present)
File "/Users/.../pyomo/core/base/indexed_component_slice.py", line 265, in __init__
call_stack = self._slice._call_stack
AttributeError: 'list' object has no attribute '_call_stack'
|
AttributeError
|
def setup_connection(self):
# on *NIX, the proxy can show up either upper or lowercase.
# Prefer lower case, and prefer HTTPS over HTTP if the
# NEOS.scheme is https.
proxy = os.environ.get("http_proxy", os.environ.get("HTTP_PROXY", ""))
if NEOS.scheme == "https":
proxy = os.environ.get("https_proxy", os.environ.get("HTTPS_PROXY", proxy))
transport = None
if proxy:
transport = ProxiedTransport()
transport.set_proxy(proxy)
self.neos = xmlrpclib.ServerProxy(
"%s://%s:%s" % (NEOS.scheme, NEOS.host, NEOS.port), transport=transport
)
logger.info("Connecting to the NEOS server ... ")
try:
result = self.neos.ping()
logger.info("OK.")
except socket.error:
e = sys.exc_info()[1]
self.neos = None
logger.info("Fail.")
logger.warning("NEOS is temporarily unavailable.\n")
|
def setup_connection(self):
# on *NIX, the proxy can show up either upper or lowercase.
# Prefer lower case, and prefer HTTPS over HTTP if the urlscheme
# is https.
proxy = os.environ.get("http_proxy", os.environ.get("HTTP_PROXY", ""))
if urlscheme == "https":
proxy = os.environ.get("https_proxy", os.environ.get("HTTPS_PROXY", proxy))
if proxy:
p = ProxiedTransport()
p.set_proxy(proxy)
self.neos = xmlrpclib.ServerProxy(
urlscheme + "://www.neos-server.org:" + port, transport=p
)
else:
self.neos = xmlrpclib.ServerProxy(urlscheme + "://www.neos-server.org:" + port)
logger.info("Connecting to the NEOS server ... ")
try:
result = self.neos.ping()
logger.info("OK.")
except socket.error:
e = sys.exc_info()[1]
self.neos = None
logger.info("Fail.")
logger.warning("NEOS is temporarily unavailable.\n")
|
https://github.com/Pyomo/pyomo/issues/560
|
Traceback (most recent call last):
File "/ascldap/users/gseastr/envs/python3.6/bin/pyomo", line 11, in <module>
load_entry_point('Pyomo', 'console_scripts', 'pyomo')()
File "/home/gseastr/envs/pyomo/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 456, in help_exec
help_solvers()
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 373, in help_solvers
kestrel = pyomo.neos.kestrel.kestrelAMPL()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 66, in __init__
self.setup_connection()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 87, in setup_connection
result = self.neos.ping()
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1452, in __request
verbose=self.__verbose
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1166, in single_request
http_conn = self.send_request(host, handler, request_body, verbose)
TypeError: send_request() takes 4 positional arguments but 5 were given
|
TypeError
|
def set_proxy(self, host):
self.proxy = urlparse(host)
if not self.proxy.hostname:
# User omitted scheme from the proxy; assume http
self.proxy = urlparse("http://" + proxy)
|
def set_proxy(self, proxy):
self.proxy = proxy
|
https://github.com/Pyomo/pyomo/issues/560
|
Traceback (most recent call last):
File "/ascldap/users/gseastr/envs/python3.6/bin/pyomo", line 11, in <module>
load_entry_point('Pyomo', 'console_scripts', 'pyomo')()
File "/home/gseastr/envs/pyomo/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 456, in help_exec
help_solvers()
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 373, in help_solvers
kestrel = pyomo.neos.kestrel.kestrelAMPL()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 66, in __init__
self.setup_connection()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 87, in setup_connection
result = self.neos.ping()
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1452, in __request
verbose=self.__verbose
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1166, in single_request
http_conn = self.send_request(host, handler, request_body, verbose)
TypeError: send_request() takes 4 positional arguments but 5 were given
|
TypeError
|
def make_connection(self, host):
scheme = urlparse(host).scheme
if not scheme:
scheme = NEOS.scheme
# Empirically, the connection class in Python 3.x needs to
# match the final endpoint connection scheme, NOT the proxy
# scheme. The set_tunnel host then should NOT have a scheme
# attached to it.
if scheme == "https":
connClass = httplib.HTTPSConnection
else:
connClass = httplib.HTTPConnection
connection = connClass(self.proxy.hostname, self.proxy.port)
connection.set_tunnel(host)
return connection
|
def make_connection(self, host):
self.realhost = host
h = six.moves.http_client.HTTP(self.proxy)
return h
|
https://github.com/Pyomo/pyomo/issues/560
|
Traceback (most recent call last):
File "/ascldap/users/gseastr/envs/python3.6/bin/pyomo", line 11, in <module>
load_entry_point('Pyomo', 'console_scripts', 'pyomo')()
File "/home/gseastr/envs/pyomo/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 456, in help_exec
help_solvers()
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 373, in help_solvers
kestrel = pyomo.neos.kestrel.kestrelAMPL()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 66, in __init__
self.setup_connection()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 87, in setup_connection
result = self.neos.ping()
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1452, in __request
verbose=self.__verbose
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1166, in single_request
http_conn = self.send_request(host, handler, request_body, verbose)
TypeError: send_request() takes 4 positional arguments but 5 were given
|
TypeError
|
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", "%s%s" % (self.realhost, handler))
|
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", "%s://%s%s" % (urlscheme, self.realhost, handler))
|
https://github.com/Pyomo/pyomo/issues/560
|
Traceback (most recent call last):
File "/ascldap/users/gseastr/envs/python3.6/bin/pyomo", line 11, in <module>
load_entry_point('Pyomo', 'console_scripts', 'pyomo')()
File "/home/gseastr/envs/pyomo/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 456, in help_exec
help_solvers()
File "/home/gseastr/envs/pyomo/pyomo/scripting/driver_help.py", line 373, in help_solvers
kestrel = pyomo.neos.kestrel.kestrelAMPL()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 66, in __init__
self.setup_connection()
File "/home/gseastr/envs/pyomo/pyomo/neos/kestrel.py", line 87, in setup_connection
result = self.neos.ping()
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1452, in __request
verbose=self.__verbose
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "/home/gseastr/envs/python3.6/lib/python3.6/xmlrpc/client.py", line 1166, in single_request
http_conn = self.send_request(host, handler, request_body, verbose)
TypeError: send_request() takes 4 positional arguments but 5 were given
|
TypeError
|
def __init__(self, m, package="scipy"):
self._intpackage = package
if self._intpackage not in ["scipy", "casadi"]:
raise DAE_Error(
"Unrecognized simulator package %s. Please select from "
"%s" % (self._intpackage, ["scipy", "casadi"])
)
if self._intpackage == "scipy":
if not scipy_available:
# Converting this to a warning so that Simulator initialization
# can be tested even when scipy is unavailable
logger.warning(
"The scipy module is not available. You may "
"build the Simulator object but you will not "
"be able to run the simulation."
)
else:
if not casadi_available:
# Initializing the simulator for use with casadi requires
# access to casadi objects. Therefore, we must throw an error
# here instead of a warning.
raise ValueError(
"The casadi module is not available. Cannot simulate model."
)
# Check for active Blocks and throw error if any are found
if len(list(m.component_data_objects(Block, active=True, descend_into=False))):
raise DAE_Error(
"The Simulator cannot handle hierarchical models at the moment."
)
temp = m.component_map(ContinuousSet)
if len(temp) != 1:
raise DAE_Error(
"Currently the simulator may only be applied to "
"Pyomo models with a single ContinuousSet"
)
# Get the ContinuousSet in the model
contset = list(temp.values())[0]
# Create a index template for the continuous set
cstemplate = IndexTemplate(contset)
# Ensure that there is at least one derivative in the model
derivs = m.component_map(DerivativeVar)
if len(derivs) == 0:
raise DAE_Error("Cannot simulate a model with no derivatives")
templatemap = {} # Map for template substituter
rhsdict = {} # Map of derivative to its RHS templated expr
derivlist = [] # Ordered list of derivatives
alglist = [] # list of templated algebraic equations
# Loop over constraints to find differential equations with separable
# RHS. Must find a RHS for every derivative var otherwise ERROR. Build
# dictionary of DerivativeVar:RHS equation.
for con in m.component_objects(Constraint, active=True):
# Skip the discretization equations if model is discretized
if "_disc_eq" in con.name:
continue
# Check dimension of the Constraint. Check if the
# Constraint is indexed by the continuous set and
# determine its order in the indexing sets
if con.dim() == 0:
continue
elif con._implicit_subsets is None:
# Check if the continuous set is the indexing set
if con._index is not contset:
continue
else:
csidx = 0
noncsidx = (None,)
else:
temp = con._implicit_subsets
dimsum = 0
csidx = -1
noncsidx = None
for s in temp:
if s is contset:
if csidx != -1:
raise DAE_Error(
"Cannot simulate the constraint %s because "
"it is indexed by duplicate ContinuousSets" % con.name
)
csidx = dimsum
elif noncsidx is None:
noncsidx = s
else:
noncsidx = noncsidx.cross(s)
dimsum += s.dimen
if csidx == -1:
continue
# Get the rule used to construct the constraint
conrule = con.rule
for i in noncsidx:
# Insert the index template and call the rule to
# create a templated expression
if i is None:
tempexp = conrule(m, cstemplate)
else:
if not isinstance(i, tuple):
i = (i,)
tempidx = i[0:csidx] + (cstemplate,) + i[csidx:]
tempexp = conrule(m, *tempidx)
# Check to make sure it's an EqualityExpression
if not type(tempexp) is EXPR.EqualityExpression:
continue
# Check to make sure it's a differential equation with
# separable RHS
args = None
# Case 1: m.dxdt[t] = RHS
if type(tempexp.arg(0)) is EXPR.GetItemExpression:
args = _check_getitemexpression(tempexp, 0)
# Case 2: RHS = m.dxdt[t]
if args is None:
if type(tempexp.arg(1)) is EXPR.GetItemExpression:
args = _check_getitemexpression(tempexp, 1)
# Case 3: m.p*m.dxdt[t] = RHS
if args is None:
if (
type(tempexp.arg(0)) is EXPR.ProductExpression
or type(tempexp.arg(0)) is EXPR.ReciprocalExpression
):
args = _check_productexpression(tempexp, 0)
# Case 4: RHS = m.p*m.dxdt[t]
if args is None:
if (
type(tempexp.arg(1)) is EXPR.ProductExpression
or type(tempexp.arg(1)) is EXPR.ReciprocalExpression
):
args = _check_productexpression(tempexp, 1)
# Case 5: m.dxdt[t] + sum(ELSE) = RHS
# or CONSTANT + m.dxdt[t] = RHS
if args is None:
if type(tempexp.arg(0)) is EXPR.SumExpression:
args = _check_viewsumexpression(tempexp, 0)
# Case 6: RHS = m.dxdt[t] + sum(ELSE)
if args is None:
if type(tempexp.arg(1)) is EXPR.SumExpression:
args = _check_viewsumexpression(tempexp, 1)
# Case 7: RHS = m.p*m.dxdt[t] + CONSTANT
# This case will be caught by Case 6 if p is immutable. If
# p is mutable then this case will not be detected as a
# separable differential equation
# Case 8: - dxdt[t] = RHS
if args is None:
if type(tempexp.arg(0)) is EXPR.NegationExpression:
args = _check_negationexpression(tempexp, 0)
# Case 9: RHS = - dxdt[t]
if args is None:
if type(tempexp.arg(1)) is EXPR.NegationExpression:
args = _check_negationexpression(tempexp, 1)
# At this point if args is not None then args[0] contains
# the _GetItemExpression for the DerivativeVar and args[1]
# contains the RHS expression. If args is None then the
# constraint is considered an algebraic equation
if args is None:
# Constraint is an algebraic equation or unsupported
# differential equation
if self._intpackage == "scipy":
raise DAE_Error(
"Model contains an algebraic equation or "
"unrecognized differential equation. Constraint "
"'%s' cannot be simulated using Scipy. If you are "
"trying to simulate a DAE model you must use "
"CasADi as the integration package." % str(con.name)
)
tempexp = tempexp.arg(0) - tempexp.arg(1)
algexp = substitute_pyomo2casadi(tempexp, templatemap)
alglist.append(algexp)
continue
# Add the differential equation to rhsdict and derivlist
dv = args[0]
RHS = args[1]
dvkey = _GetItemIndexer(dv)
if dvkey in rhsdict.keys():
raise DAE_Error(
"Found multiple RHS expressions for the "
"DerivativeVar %s" % str(dvkey)
)
derivlist.append(dvkey)
if self._intpackage is "casadi":
rhsdict[dvkey] = substitute_pyomo2casadi(RHS, templatemap)
else:
rhsdict[dvkey] = convert_pyomo2scipy(RHS, templatemap)
# Check to see if we found a RHS for every DerivativeVar in
# the model
# FIXME: Not sure how to rework this for multi-index case
# allderivs = derivs.keys()
# if set(allderivs) != set(derivlist):
# missing = list(set(allderivs)-set(derivlist))
# print("WARNING: Could not find a RHS expression for the "
# "following DerivativeVar components "+str(missing))
# Create ordered list of differential variables corresponding
# to the list of derivatives.
diffvars = []
for deriv in derivlist:
sv = deriv._base.get_state_var()
diffvars.append(_GetItemIndexer(sv[deriv._args]))
# Create ordered list of algebraic variables and time-varying
# parameters
algvars = []
for item in iterkeys(templatemap):
if item._base.name in derivs.keys():
# Make sure there are no DerivativeVars in the
# template map
raise DAE_Error(
"Cannot simulate a differential equation with multiple DerivativeVars"
)
if item not in diffvars:
# Finds time varying parameters and algebraic vars
algvars.append(item)
if self._intpackage == "scipy":
# Function sent to scipy integrator
def _rhsfun(t, x):
residual = []
cstemplate.set_value(t)
for idx, v in enumerate(diffvars):
if v in templatemap:
templatemap[v].set_value(x[idx])
for d in derivlist:
residual.append(rhsdict[d]())
return residual
self._rhsfun = _rhsfun
# Add any diffvars not added by expression walker to self._templatemap
if self._intpackage == "casadi":
for _id in diffvars:
if _id not in templatemap:
name = "%s[%s]" % (_id._base.name, ",".join(str(x) for x in _id._args))
templatemap[_id] = casadi.SX.sym(name)
self._contset = contset
self._cstemplate = cstemplate
self._diffvars = diffvars
self._derivlist = derivlist
self._templatemap = templatemap
self._rhsdict = rhsdict
self._alglist = alglist
self._algvars = algvars
self._model = m
self._tsim = None
self._simsolution = None
# The algebraic vars in the most recent simulation
self._simalgvars = None
# The time-varying inputs in the most recent simulation
self._siminputvars = None
|
def __init__(self, m, package="scipy"):
self._intpackage = package
if self._intpackage not in ["scipy", "casadi"]:
raise DAE_Error(
"Unrecognized simulator package %s. Please select from "
"%s" % (self._intpackage, ["scipy", "casadi"])
)
if self._intpackage == "scipy":
if not scipy_available:
# Converting this to a warning so that Simulator initialization
# can be tested even when scipy is unavailable
logger.warning(
"The scipy module is not available. You may "
"build the Simulator object but you will not "
"be able to run the simulation."
)
else:
if not casadi_available:
# Initializing the simulator for use with casadi requires
# access to casadi objects. Therefore, we must throw an error
# here instead of a warning.
raise ValueError(
"The casadi module is not available. Cannot simulate model."
)
# Check for active Blocks and throw error if any are found
if len(list(m.component_data_objects(Block, active=True, descend_into=False))):
raise DAE_Error(
"The Simulator cannot handle hierarchical models at the moment."
)
temp = m.component_map(ContinuousSet)
if len(temp) != 1:
raise DAE_Error(
"Currently the simulator may only be applied to "
"Pyomo models with a single ContinuousSet"
)
# Get the ContinuousSet in the model
contset = list(temp.values())[0]
# Create a index template for the continuous set
cstemplate = IndexTemplate(contset)
# Ensure that there is at least one derivative in the model
derivs = m.component_map(DerivativeVar)
if len(derivs) == 0:
raise DAE_Error("Cannot simulate a model with no derivatives")
templatemap = {} # Map for template substituter
rhsdict = {} # Map of derivative to its RHS templated expr
derivlist = [] # Ordered list of derivatives
alglist = [] # list of templated algebraic equations
# Loop over constraints to find differential equations with separable
# RHS. Must find a RHS for every derivative var otherwise ERROR. Build
# dictionary of DerivativeVar:RHS equation.
for con in m.component_objects(Constraint, active=True):
# Skip the discretization equations if model is discretized
if "_disc_eq" in con.name:
continue
# Check dimension of the Constraint. Check if the
# Constraint is indexed by the continuous set and
# determine its order in the indexing sets
if con.dim() == 0:
continue
elif con._implicit_subsets is None:
# Check if the continuous set is the indexing set
if con._index is not contset:
continue
else:
csidx = 0
noncsidx = (None,)
else:
temp = con._implicit_subsets
dimsum = 0
csidx = -1
noncsidx = None
for s in temp:
if s is contset:
if csidx != -1:
raise DAE_Error(
"Cannot simulate the constraint %s because "
"it is indexed by duplicate ContinuousSets" % con.name
)
csidx = dimsum
elif noncsidx is None:
noncsidx = s
else:
noncsidx = noncsidx.cross(s)
dimsum += s.dimen
if csidx == -1:
continue
# Get the rule used to construct the constraint
conrule = con.rule
for i in noncsidx:
# Insert the index template and call the rule to
# create a templated expression
if i is None:
tempexp = conrule(m, cstemplate)
else:
if not isinstance(i, tuple):
i = (i,)
tempidx = i[0:csidx] + (cstemplate,) + i[csidx:]
tempexp = conrule(m, *tempidx)
# Check to make sure it's an EqualityExpression
if not type(tempexp) is EXPR.EqualityExpression:
continue
# Check to make sure it's a differential equation with
# separable RHS
args = None
# Case 1: m.dxdt[t] = RHS
if type(tempexp.arg(0)) is EXPR.GetItemExpression:
args = _check_getitemexpression(tempexp, 0)
# Case 2: RHS = m.dxdt[t]
if args is None:
if type(tempexp.arg(1)) is EXPR.GetItemExpression:
args = _check_getitemexpression(tempexp, 1)
# Case 3: m.p*m.dxdt[t] = RHS
if args is None:
if (
type(tempexp.arg(0)) is EXPR.ProductExpression
or type(tempexp.arg(0)) is EXPR.ReciprocalExpression
):
args = _check_productexpression(tempexp, 0)
# Case 4: RHS = m.p*m.dxdt[t]
if args is None:
if (
type(tempexp.arg(1)) is EXPR.ProductExpression
or type(tempexp.arg(1)) is EXPR.ReciprocalExpression
):
args = _check_productexpression(tempexp, 1)
# Case 5: m.dxdt[t] + sum(ELSE) = RHS
# or CONSTANT + m.dxdt[t] = RHS
if args is None:
if type(tempexp.arg(0)) is EXPR.SumExpression:
args = _check_viewsumexpression(tempexp, 0)
# Case 6: RHS = m.dxdt[t] + sum(ELSE)
if args is None:
if type(tempexp.arg(1)) is EXPR.SumExpression:
args = _check_viewsumexpression(tempexp, 1)
# Case 7: RHS = m.p*m.dxdt[t] + CONSTANT
# This case will be caught by Case 6 if p is immutable. If
# p is mutable then this case will not be detected as a
# separable differential equation
# Case 8: - dxdt[t] = RHS
if args is None:
if type(tempexp.arg(0)) is EXPR.NegationExpression:
args = _check_negationexpression(tempexp, 0)
# Case 9: RHS = - dxdt[t]
if args is None:
if type(tempexp.arg(1)) is EXPR.NegationExpression:
args = _check_negationexpression(tempexp, 1)
# At this point if args is not None then args[0] contains
# the _GetItemExpression for the DerivativeVar and args[1]
# contains the RHS expression. If args is None then the
# constraint is considered an algebraic equation
if args is None:
# Constraint is an algebraic equation or unsupported
# differential equation
if self._intpackage == "scipy":
raise DAE_Error(
"Model contains an algebraic equation or "
"unrecognized differential equation. Constraint "
"'%s' cannot be simulated using Scipy. If you are "
"trying to simulate a DAE model you must use "
"CasADi as the integration package." % str(con.name)
)
tempexp = tempexp.arg(0) - tempexp.arg(1)
algexp = substitute_pyomo2casadi(tempexp, templatemap)
alglist.append(algexp)
continue
# Add the differential equation to rhsdict and derivlist
dv = args[0]
RHS = args[1]
dvkey = _GetItemIndexer(dv)
if dvkey in rhsdict.keys():
raise DAE_Error(
"Found multiple RHS expressions for the "
"DerivativeVar %s" % str(dvkey)
)
derivlist.append(dvkey)
if self._intpackage is "casadi":
rhsdict[dvkey] = substitute_pyomo2casadi(RHS, templatemap)
else:
rhsdict[dvkey] = convert_pyomo2scipy(RHS, templatemap)
# Check to see if we found a RHS for every DerivativeVar in
# the model
# FIXME: Not sure how to rework this for multi-index case
# allderivs = derivs.keys()
# if set(allderivs) != set(derivlist):
# missing = list(set(allderivs)-set(derivlist))
# print("WARNING: Could not find a RHS expression for the "
# "following DerivativeVar components "+str(missing))
# Create ordered list of differential variables corresponding
# to the list of derivatives.
diffvars = []
for deriv in derivlist:
sv = deriv._base.get_state_var()
diffvars.append(_GetItemIndexer(sv[deriv._args]))
# Create ordered list of algebraic variables and time-varying
# parameters
algvars = []
for item in iterkeys(templatemap):
if item._base.name in derivs.keys():
# Make sure there are no DerivativeVars in the
# template map
raise DAE_Error(
"Cannot simulate a differential equation with multiple DerivativeVars"
)
if item not in diffvars:
# Finds time varying parameters and algebraic vars
algvars.append(item)
if self._intpackage == "scipy":
# Function sent to scipy integrator
def _rhsfun(t, x):
residual = []
cstemplate.set_value(t)
for idx, v in enumerate(diffvars):
if v in templatemap:
templatemap[v].set_value(x[idx])
for d in derivlist:
residual.append(rhsdict[d]())
return residual
self._rhsfun = _rhsfun
self._contset = contset
self._cstemplate = cstemplate
self._diffvars = diffvars
self._derivlist = derivlist
self._templatemap = templatemap
self._rhsdict = rhsdict
self._alglist = alglist
self._algvars = algvars
self._model = m
self._tsim = None
self._simsolution = None
# The algebraic vars in the most recent simulation
self._simalgvars = None
# The time-varying inputs in the most recent simulation
self._siminputvars = None
|
https://github.com/Pyomo/pyomo/issues/345
|
Traceback (most recent call last):
File "temp.py", line 17, in <module>
tsim, profiles = mysim.simulate(numpoints=100)
File "/home/blnicho/Research/pyomo/pyomo/dae/simulator.py", line 773, in simulate
integrator_options)
File "/home/blnicho/Research/pyomo/pyomo/dae/simulator.py", line 813, in _simulate_with_casadi_no_inputs
xalltemp = [self._templatemap[i] for i in self._diffvars]
KeyError: <pyomo.core.base.template_expr._GetItemIndexer object at 0x7fbf3c852590>
|
KeyError
|
def help_solvers():
import pyomo.environ
wrapper = textwrap.TextWrapper(replace_whitespace=False)
print("")
print("Pyomo Solvers and Solver Managers")
print("---------------------------------")
print(
wrapper.fill(
"Pyomo uses 'solver managers' to execute 'solvers' that perform optimization and other forms of model analysis. A solver directly executes an optimizer, typically using an executable found on the user's PATH environment. Solver managers support a flexible mechanism for asyncronously executing solvers either locally or remotely. The following solver managers are available in Pyomo:"
)
)
print("")
solvermgr_list = pyomo.opt.SolverManagerFactory.services()
solvermgr_list = sorted(filter(lambda x: "_" != x[0], solvermgr_list))
n = max(map(len, solvermgr_list))
wrapper = textwrap.TextWrapper(subsequent_indent=" " * (n + 9))
for s in solvermgr_list:
format = " %-" + str(n) + "s %s"
print(wrapper.fill(format % (s, pyomo.opt.SolverManagerFactory.doc(s))))
print("")
wrapper = textwrap.TextWrapper(subsequent_indent="")
print(
wrapper.fill(
"If no solver manager is specified, Pyomo uses the serial solver manager to execute solvers locally. The pyro and phpyro solver managers require the installation and configuration of the pyro software. The neos solver manager is used to execute solvers on the NEOS optimization server."
)
)
print("")
print("")
print("Serial Solver Interfaces")
print("------------------------")
print(
wrapper.fill(
"The serial, pyro and phpyro solver managers support the following solver interfaces:"
)
)
print("")
solver_list = pyomo.opt.SolverFactory.services()
solver_list = sorted(filter(lambda x: "_" != x[0], solver_list))
n = max(map(len, solver_list))
wrapper = textwrap.TextWrapper(subsequent_indent=" " * (n + 9))
try:
# Disable warnings
logging.disable(logging.WARNING)
for s in solver_list:
# Create a solver, and see if it is available
with pyomo.opt.SolverFactory(s) as opt:
if s == "py" or (hasattr(opt, "_metasolver") and opt._metasolver):
# py is a metasolver, but since we don't specify a subsolver
# for this test, opt is actually an UnknownSolver, so we
# can't try to get the _metasolver attribute from it.
# Also, default to False if the attribute isn't implemented
msg = " %-" + str(n) + "s + %s"
elif opt.available(False):
msg = " %-" + str(n) + "s * %s"
else:
msg = " %-" + str(n) + "s %s"
print(wrapper.fill(msg % (s, pyomo.opt.SolverFactory.doc(s))))
finally:
# Reset logging level
logging.disable(logging.NOTSET)
print("")
wrapper = textwrap.TextWrapper(subsequent_indent="")
print(
wrapper.fill(
"An asterisk indicates solvers that are currently available to be run from Pyomo with the serial solver manager. A plus indicates meta-solvers, that are always available."
)
)
print("")
print(
wrapper.fill(
"Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed. For example, the following indicates that the ipopt solver will be used:"
)
)
print("")
print(" asl:ipopt")
print("")
print(
wrapper.fill(
"The asl interface provides a generic wrapper for all solvers that use the AMPL Solver Library."
)
)
print("")
print(
wrapper.fill(
"Note that subsolvers can not be enumerated automatically for these interfaces. However, if a solver is specified that is not found, Pyomo assumes that the asl solver interface is being used. Thus the following solver name will launch ipopt if the 'ipopt' executable is on the user's path:"
)
)
print("")
print(" ipopt")
print("")
try:
logging.disable(logging.WARNING)
import pyomo.neos.kestrel
kestrel = pyomo.neos.kestrel.kestrelAMPL()
# print "HERE", solver_list
solver_list = list(
set(
[
name[:-5].lower()
for name in kestrel.solvers()
if name.endswith("AMPL")
]
)
)
# print "HERE", solver_list
if len(solver_list) > 0:
print("")
print("NEOS Solver Interfaces")
print("----------------------")
print(
wrapper.fill(
"The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. The following solver interfaces are available with your current system configuration:"
)
)
print("")
solver_list = sorted(solver_list)
n = max(map(len, solver_list))
format = " %-" + str(n) + "s %s"
for name in solver_list:
print(
wrapper.fill(
format
% (name, pyomo.neos.doc.get(name, "Unexpected NEOS solver"))
)
)
print("")
else:
print("")
print("NEOS Solver Interfaces")
print("----------------------")
print(
wrapper.fill(
"The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. This server is not available with your current system configuration."
)
)
print("")
except ImportError:
pass
finally:
logging.disable(logging.NOTSET)
|
def help_solvers():
import pyomo.environ
wrapper = textwrap.TextWrapper(replace_whitespace=False)
print("")
print("Pyomo Solvers and Solver Managers")
print("---------------------------------")
print(
wrapper.fill(
"Pyomo uses 'solver managers' to execute 'solvers' that perform optimization and other forms of model analysis. A solver directly executes an optimizer, typically using an executable found on the user's PATH environment. Solver managers support a flexible mechanism for asyncronously executing solvers either locally or remotely. The following solver managers are available in Pyomo:"
)
)
print("")
solvermgr_list = pyomo.opt.SolverManagerFactory.services()
solvermgr_list = sorted(filter(lambda x: "_" != x[0], solvermgr_list))
n = max(map(len, solvermgr_list))
wrapper = textwrap.TextWrapper(subsequent_indent=" " * (n + 9))
for s in solvermgr_list:
format = " %-" + str(n) + "s %s"
print(wrapper.fill(format % (s, pyomo.opt.SolverManagerFactory.doc(s))))
print("")
wrapper = textwrap.TextWrapper(subsequent_indent="")
print(
wrapper.fill(
"If no solver manager is specified, Pyomo uses the serial solver manager to execute solvers locally. The pyro and phpyro solver managers require the installation and configuration of the pyro software. The neos solver manager is used to execute solvers on the NEOS optimization server."
)
)
print("")
print("")
print("Serial Solver Interfaces")
print("------------------------")
print(
wrapper.fill(
"The serial, pyro and phpyro solver managers support the following solver interfaces:"
)
)
print("")
solver_list = pyomo.opt.SolverFactory.services()
solver_list = sorted(filter(lambda x: "_" != x[0], solver_list))
n = max(map(len, solver_list))
wrapper = textwrap.TextWrapper(subsequent_indent=" " * (n + 9))
try:
# Disable warnings
logger.disable(logging.WARNING)
for s in solver_list:
# Create a solver, and see if it is available
with pyomo.opt.SolverFactory(s) as opt:
if s == "py" or opt._metasolver:
msg = " %-" + str(n) + "s + %s"
elif opt.available(False):
msg = " %-" + str(n) + "s * %s"
else:
msg = " %-" + str(n) + "s %s"
print(wrapper.fill(msg % (s, pyomo.opt.SolverFactory.doc(s))))
finally:
# Reset logging level
logger.disable(logging.NOTSET)
print("")
wrapper = textwrap.TextWrapper(subsequent_indent="")
print(
wrapper.fill(
"An asterisk indicates solvers that are currently available to be run from Pyomo with the serial solver manager. A plus indicates meta-solvers, that are always available."
)
)
print("")
print(
wrapper.fill(
"Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed. For example, the following indicates that the ipopt solver will be used:"
)
)
print("")
print(" asl:ipopt")
print("")
print(
wrapper.fill(
"The asl interface provides a generic wrapper for all solvers that use the AMPL Solver Library."
)
)
print("")
print(
wrapper.fill(
"Note that subsolvers can not be enumerated automatically for these interfaces. However, if a solver is specified that is not found, Pyomo assumes that the asl solver interface is being used. Thus the following solver name will launch ipopt if the 'ipopt' executable is on the user's path:"
)
)
print("")
print(" ipopt")
print("")
try:
logging.disable(logging.WARNING)
import pyomo.neos.kestrel
kestrel = pyomo.neos.kestrel.kestrelAMPL()
# print "HERE", solver_list
solver_list = list(
set(
[
name[:-5].lower()
for name in kestrel.solvers()
if name.endswith("AMPL")
]
)
)
# print "HERE", solver_list
if len(solver_list) > 0:
print("")
print("NEOS Solver Interfaces")
print("----------------------")
print(
wrapper.fill(
"The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. The following solver interfaces are available with your current system configuration:"
)
)
print("")
solver_list = sorted(solver_list)
n = max(map(len, solver_list))
format = " %-" + str(n) + "s %s"
for name in solver_list:
print(
wrapper.fill(
format
% (name, pyomo.neos.doc.get(name, "Unexpected NEOS solver"))
)
)
print("")
else:
print("")
print("NEOS Solver Interfaces")
print("----------------------")
print(
wrapper.fill(
"The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. This server is not available with your current system configuration."
)
)
print("")
except ImportError:
pass
finally:
logging.disable(logging.NOTSET)
|
https://github.com/Pyomo/pyomo/issues/266
|
Traceback (most recent call last):
File "/usr/bin/pyomo", line 11, in <module>
sys.exit(main())
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/driver_help.py", line 457, in help_exec
help_solvers()
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/driver_help.py", line 347, in help_solvers
if s == 'py' or opt._metasolver:
AttributeError: 'GAMSShell' object has no attribute '_metasolver'
|
AttributeError
|
def __init__(self, **kwds):
self._version = None
self._default_variable_value = None
self._metasolver = False
self._capabilities = Options()
self._capabilities.linear = True
self._capabilities.quadratic_objective = True
self._capabilities.quadratic_constraint = True
self._capabilities.integer = True
self._capabilities.sos1 = False
self._capabilities.sos2 = False
self.options = Options() # ignored
pyomo.common.plugin.Plugin.__init__(self, **kwds)
|
def __init__(self, **kwds):
self._version = None
self._default_variable_value = None
self._capabilities = Options()
self._capabilities.linear = True
self._capabilities.quadratic_objective = True
self._capabilities.quadratic_constraint = True
self._capabilities.integer = True
self._capabilities.sos1 = False
self._capabilities.sos2 = False
self.options = Options() # ignored
pyomo.common.plugin.Plugin.__init__(self, **kwds)
|
https://github.com/Pyomo/pyomo/issues/266
|
Traceback (most recent call last):
File "/usr/bin/pyomo", line 11, in <module>
sys.exit(main())
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/pyomo_main.py", line 82, in main
retval = _options.func(_options)
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/driver_help.py", line 457, in help_exec
help_solvers()
File "/usr/lib64/python3.4/site-packages/pyomo/scripting/driver_help.py", line 347, in help_solvers
if s == 'py' or opt._metasolver:
AttributeError: 'GAMSShell' object has no attribute '_metasolver'
|
AttributeError
|
def update_contset_indexed_component(comp, expansion_map):
"""
Update any model components which are indexed by a ContinuousSet that
has changed
"""
# This implemenation will *NOT* check for or update
# components which use a ContinuousSet implicitly. ex) an
# objective function which iterates through a ContinuousSet and
# sums the squared error. If you use a ContinuousSet implicitly
# you must initialize it with every index you would like to have
# access to!
if comp.type() is Suffix:
return
# Params indexed by a ContinuousSet should include an initialize
# and/or default rule which will be called automatically when the
# parameter value at a new point in the ContinuousSet is
# requested. Therefore, no special processing is required for
# Params.
if comp.type() is Param:
return
# Components indexed by a ContinuousSet must have a dimension of at
# least 1
if comp.dim() == 0:
return
# Extract the indexing sets. Must treat components with a single
# index separately from components with multiple indexing sets.
if comp._implicit_subsets is None:
indexset = [comp._index]
else:
indexset = comp._implicit_subsets
for s in indexset:
if s.type() == ContinuousSet and s.get_changed():
if isinstance(comp, Var): # Don't use the type() method here
# because we want to catch DerivativeVar components as well
# as Var components
expansion_map[comp] = _update_var
_update_var(comp)
elif comp.type() == Constraint:
expansion_map[comp] = _update_constraint
_update_constraint(comp)
elif comp.type() == Expression:
expansion_map[comp] = _update_expression
_update_expression(comp)
elif isinstance(comp, Piecewise):
expansion_map[comp] = _update_piecewise
_update_piecewise(comp)
elif comp.type() == Block:
expansion_map[comp] = _update_block
_update_block(comp)
else:
raise TypeError(
"Found component %s of type %s indexed "
"by a ContinuousSet. Components of this type are "
"not currently supported by the automatic "
"discretization transformation in pyomo.dae. "
"Try adding the component to the model "
"after discretizing. Alert the pyomo developers "
"for more assistance." % (str(comp), comp.type())
)
|
def update_contset_indexed_component(comp):
"""
Update any model components which are indexed by a ContinuousSet
that has changed
"""
# This implemenation will *NOT* check for or update
# components which use a ContinuousSet implicitly. ex) an
# objective function which iterates through a ContinuousSet and
# sums the squared error. If you use a ContinuousSet implicitly
# you must initialize it with every index you would like to have
# access to!
if comp.type() is Suffix:
return
# Params indexed by a ContinuousSet should include an initialize
# and/or default rule which will be called automatically when the
# parameter value at a new point in the ContinuousSet is
# requested. Therefore, no special processing is required for
# Params.
if comp.type() is Param:
return
# Components indexed by a ContinuousSet must have a dimension of at
# least 1
if comp.dim() == 0:
return
# Extract the indexing sets. Must treat components with a single
# index separately from components with multiple indexing sets.
if comp._implicit_subsets is None:
indexset = [comp._index]
else:
indexset = comp._implicit_subsets
for s in indexset:
if s.type() == ContinuousSet and s.get_changed():
if isinstance(comp, Var): # Don't use the type() method here
# because we want to catch DerivativeVar components as well
# as Var components
_update_var(comp)
elif comp.type() == Constraint:
_update_constraint(comp)
elif comp.type() == Expression:
_update_expression(comp)
elif isinstance(comp, Piecewise):
_update_piecewise(comp)
elif comp.type() == Block:
_update_block(comp)
else:
raise TypeError(
"Found component %s of type %s indexed "
"by a ContinuousSet. Components of this type are "
"not currently supported by the automatic "
"discretization transformation in pyomo.dae. "
"Try adding the component to the model "
"after discretizing. Alert the pyomo developers "
"for more assistance." % (str(comp), comp.type())
)
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.