after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def estimate_ate(
self,
X,
treatment,
y,
p=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
bootstrap_ci (bool): whether run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
te, dhat_cs, dhat_ts = self.fit_predict(X, treatment, y, p, return_components=True)
X, treatment, y = convert_pd_to_np(X, treatment, y)
if p is None:
p = self.propensity
else:
check_p_conditions(p, self.t_groups)
if isinstance(p, (np.ndarray, pd.Series)):
treatment_name = self.t_groups[0]
p = {treatment_name: convert_pd_to_np(p)}
elif isinstance(p, dict):
p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
w = (treatment_filt == group).astype(int)
prob_treatment = float(sum(w)) / w.shape[0]
dhat_c = dhat_cs[group][mask]
dhat_t = dhat_ts[group][mask]
p_filt = p[group][mask]
# SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
# "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature
se = np.sqrt(
(
self.vars_t[group] / prob_treatment
+ self.vars_c[group] / (1 - prob_treatment)
+ (p_filt * dhat_c + (1 - p_filt) * dhat_t).var()
)
/ w.shape[0]
)
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_c_global = deepcopy(self.models_tau_c)
models_tau_t_global = deepcopy(self.models_tau_t)
logger.info("Bootstrap Confidence Intervals for ATE")
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
ate_bootstraps[:, n] = cate_b.mean()
ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
ate_upper = np.percentile(
ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau_c = deepcopy(models_tau_c_global)
self.models_tau_t = deepcopy(models_tau_t_global)
return ate, ate_lower, ate_upper
|
def estimate_ate(
self,
X,
treatment,
y,
p=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
bootstrap_ci (bool): whether run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
te, dhat_cs, dhat_ts = self.fit_predict(X, treatment, y, p, return_components=True)
X, treatment, y = convert_pd_to_np(X, treatment, y)
if p is None:
p = self.propensity
else:
check_p_conditions(p, self.t_groups)
if isinstance(p, np.ndarray):
treatment_name = self.t_groups[0]
p = {treatment_name: convert_pd_to_np(p)}
elif isinstance(p, dict):
p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
w = (treatment_filt == group).astype(int)
prob_treatment = float(sum(w)) / w.shape[0]
dhat_c = dhat_cs[group][mask]
dhat_t = dhat_ts[group][mask]
p_filt = p[group][mask]
# SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
# "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature
se = np.sqrt(
(
self.vars_t[group] / prob_treatment
+ self.vars_c[group] / (1 - prob_treatment)
+ (p_filt * dhat_c + (1 - p_filt) * dhat_t).var()
)
/ w.shape[0]
)
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_c_global = deepcopy(self.models_tau_c)
models_tau_t_global = deepcopy(self.models_tau_t)
logger.info("Bootstrap Confidence Intervals for ATE")
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
ate_bootstraps[:, n] = cate_b.mean()
ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
ate_upper = np.percentile(
ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau_c = deepcopy(models_tau_c_global)
self.models_tau_t = deepcopy(models_tau_t_global)
return ate, ate_lower, ate_upper
|
https://github.com/uber/causalml/issues/241
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-59-76ffa2ace99a> in <module>
1 learner_x = BaseXRegressor(learner=XGBRegressor(random_state=42))
----> 2 te, lb, ub = learner_x.estimate_ate(X=X, p=df['p'], treatment=treatment, y=y)
3 print('Average Treatment Effect (BaseXRegressor using XGBoost): {:.2f} ({:.2f}, {:.2f})'.format(te[0], lb[0], ub[0]))
/dsw/snapshots/720ce1e8-1e96-45bf-a192-8bef9122fb73/python3/lib/python3.6/site-packages/causalml/inference/meta/xlearner.py in estimate_ate(self, X, treatment, y, p, bootstrap_ci, n_bootstraps, bootstrap_size)
326 dhat_c = dhat_cs[group][mask]
327 dhat_t = dhat_ts[group][mask]
--> 328 p_filt = p[group][mask]
329
330 # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
IndexError: invalid index to scalar variable.
|
IndexError
|
def rmdir(self, path=None):
path = normalize_storage_path(path)
if path:
with self.lock:
self.cursor.execute('DELETE FROM zarr WHERE k LIKE (? || "/%")', (path,))
else:
self.clear()
|
def rmdir(self, path=None):
path = normalize_storage_path(path)
if path:
with self.lock:
self.cursor.execute('DELETE FROM zarr WHERE k LIKE (? || "_%")', (path,))
else:
self.clear()
|
https://github.com/zarr-developers/zarr-python/issues/439
|
import zarr
import numpy as np
path = 'D:/test.db'
store = zarr.SQLiteStore(path)
group = zarr.open_group(store)
group.array('prefix', np.arange(1000))
group.array('prefix_suffix', np.arange(100))
group['prefix_suffix']
Traceback (most recent call last):
Python Shell, prompt 3, line 1
# Used internally for debug sandbox under external interpreter
File "d:\Program\Anaconda3\envs\dev\Lib\site-packages\zarr\hierarchy.py", line 333, in __getitem__
raise KeyError(item)
builtins.KeyError: 'prefix_suffix'
|
builtins.KeyError
|
def listdir(self, path=None):
path = normalize_storage_path(path)
sep = "_" if path == "" else "/"
keys = self.cursor.execute(
"""
SELECT DISTINCT SUBSTR(m, 0, INSTR(m, "/")) AS l FROM (
SELECT LTRIM(SUBSTR(k, LENGTH(?) + 1), "/") || "/" AS m
FROM zarr WHERE k LIKE (? || "{sep}%")
) ORDER BY l ASC
""".format(sep=sep),
(path, path),
)
keys = list(map(operator.itemgetter(0), keys))
return keys
|
def listdir(self, path=None):
path = normalize_storage_path(path)
keys = self.cursor.execute(
"""
SELECT DISTINCT SUBSTR(m, 0, INSTR(m, "/")) AS l FROM (
SELECT LTRIM(SUBSTR(k, LENGTH(?) + 1), "/") || "/" AS m
FROM zarr WHERE k LIKE (? || "_%")
) ORDER BY l ASC
""",
(path, path),
)
keys = list(map(operator.itemgetter(0), keys))
return keys
|
https://github.com/zarr-developers/zarr-python/issues/439
|
import zarr
import numpy as np
path = 'D:/test.db'
store = zarr.SQLiteStore(path)
group = zarr.open_group(store)
group.array('prefix', np.arange(1000))
group.array('prefix_suffix', np.arange(100))
group['prefix_suffix']
Traceback (most recent call last):
Python Shell, prompt 3, line 1
# Used internally for debug sandbox under external interpreter
File "d:\Program\Anaconda3\envs\dev\Lib\site-packages\zarr\hierarchy.py", line 333, in __getitem__
raise KeyError(item)
builtins.KeyError: 'prefix_suffix'
|
builtins.KeyError
|
def _chunk_setitem_nosync(self, chunk_coords, chunk_selection, value, fields=None):
# obtain key for chunk storage
ckey = self._chunk_key(chunk_coords)
if is_total_slice(chunk_selection, self._chunks) and not fields:
# totally replace chunk
# optimization: we are completely replacing the chunk, so no need
# to access the existing chunk data
if is_scalar(value, self._dtype):
# setup array filled with value
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
chunk.fill(value)
else:
# ensure array is contiguous
if self._order == "F":
chunk = np.asfortranarray(value, dtype=self._dtype)
else:
chunk = np.ascontiguousarray(value, dtype=self._dtype)
else:
# partially replace the contents of this chunk
try:
# obtain compressed data for chunk
cdata = self.chunk_store[ckey]
except KeyError:
# chunk not initialized
if self._fill_value is not None:
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
chunk.fill(self._fill_value)
elif self._dtype == object:
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
else:
# N.B., use zeros here so any region beyond the array has consistent
# and compressible data
chunk = np.zeros(self._chunks, dtype=self._dtype, order=self._order)
else:
# decode chunk
chunk = self._decode_chunk(cdata)
if not chunk.flags.writeable:
chunk = chunk.copy(order="K")
# modify
if fields:
# N.B., currently multi-field assignment is not supported in numpy, so
# this only works for a single field
chunk[fields][chunk_selection] = value
else:
chunk[chunk_selection] = value
# encode chunk
cdata = self._encode_chunk(chunk)
# store
self.chunk_store[ckey] = cdata
|
def _chunk_setitem_nosync(self, chunk_coords, chunk_selection, value, fields=None):
# obtain key for chunk storage
ckey = self._chunk_key(chunk_coords)
if is_total_slice(chunk_selection, self._chunks) and not fields:
# totally replace chunk
# optimization: we are completely replacing the chunk, so no need
# to access the existing chunk data
if is_scalar(value, self._dtype):
# setup array filled with value
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
chunk.fill(value)
else:
if not self._compressor and not self._filters:
# https://github.com/alimanfoo/zarr/issues/79
# Ensure a copy is taken so we don't end up storing
# a view into someone else's array.
# N.B., this assumes that filters or compressor always
# take a copy and never attempt to apply encoding in-place.
chunk = np.array(value, dtype=self._dtype, order=self._order)
else:
# ensure array is contiguous
if self._order == "F":
chunk = np.asfortranarray(value, dtype=self._dtype)
else:
chunk = np.ascontiguousarray(value, dtype=self._dtype)
else:
# partially replace the contents of this chunk
try:
# obtain compressed data for chunk
cdata = self.chunk_store[ckey]
except KeyError:
# chunk not initialized
if self._fill_value is not None:
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
chunk.fill(self._fill_value)
elif self._dtype == object:
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
else:
# N.B., use zeros here so any region beyond the array has consistent
# and compressible data
chunk = np.zeros(self._chunks, dtype=self._dtype, order=self._order)
else:
# decode chunk
chunk = self._decode_chunk(cdata)
if not chunk.flags.writeable:
chunk = chunk.copy(order="K")
# modify
if fields:
# N.B., currently multi-field assignment is not supported in numpy, so
# this only works for a single field
chunk[fields][chunk_selection] = value
else:
chunk[chunk_selection] = value
# encode chunk
cdata = self._encode_chunk(chunk)
# store
self.chunk_store[ckey] = cdata
|
https://github.com/zarr-developers/zarr-python/issues/348
|
In [1]: import zarr
In [2]: z1 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [3]: z2 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [4]: z1 == z2
Out[4]: True
In [5]: z1[0] = 5
In [6]: z2[0] = 5
In [7]: z1 == z2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-4176b4e1c607> in <module>
----> 1 z1 == z2
/zopt/conda3/envs/test_zarr/lib/python3.6/site-packages/zarr/core.py in __eq__(self, other)
410 return (
411 isinstance(other, Array) and
--> 412 self.store == other.store and
413 self.read_only == other.read_only and
414 self.path == other.path and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _encode_chunk(self, chunk):
# apply filters
if self._filters:
for f in self._filters:
chunk = f.encode(chunk)
# check object encoding
if isinstance(chunk, np.ndarray) and chunk.dtype == object:
raise RuntimeError("cannot write object array without object codec")
# compress
if self._compressor:
cdata = self._compressor.encode(chunk)
else:
cdata = chunk
# ensure in-memory data is immutable and easy to compare
if isinstance(self.chunk_store, dict):
cdata = ensure_bytes(cdata)
return cdata
|
def _encode_chunk(self, chunk):
# apply filters
if self._filters:
for f in self._filters:
chunk = f.encode(chunk)
# check object encoding
if isinstance(chunk, np.ndarray) and chunk.dtype == object:
raise RuntimeError("cannot write object array without object codec")
# compress
if self._compressor:
cdata = self._compressor.encode(chunk)
else:
cdata = chunk
return cdata
|
https://github.com/zarr-developers/zarr-python/issues/348
|
In [1]: import zarr
In [2]: z1 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [3]: z2 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [4]: z1 == z2
Out[4]: True
In [5]: z1[0] = 5
In [6]: z2[0] = 5
In [7]: z1 == z2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-4176b4e1c607> in <module>
----> 1 z1 == z2
/zopt/conda3/envs/test_zarr/lib/python3.6/site-packages/zarr/core.py in __eq__(self, other)
410 return (
411 isinstance(other, Array) and
--> 412 self.store == other.store and
413 self.read_only == other.read_only and
414 self.path == other.path and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def __setitem__(self, item, value):
with self.write_mutex:
parent, key = self._require_parent(item)
value = ensure_bytes(value)
parent[key] = value
|
def __setitem__(self, item, value):
with self.write_mutex:
parent, key = self._require_parent(item)
parent[key] = value
|
https://github.com/zarr-developers/zarr-python/issues/348
|
In [1]: import zarr
In [2]: z1 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [3]: z2 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [4]: z1 == z2
Out[4]: True
In [5]: z1[0] = 5
In [6]: z2[0] = 5
In [7]: z1 == z2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-4176b4e1c607> in <module>
----> 1 z1 == z2
/zopt/conda3/envs/test_zarr/lib/python3.6/site-packages/zarr/core.py in __eq__(self, other)
410 return (
411 isinstance(other, Array) and
--> 412 self.store == other.store and
413 self.read_only == other.read_only and
414 self.path == other.path and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def getsize(self, path=None):
path = normalize_storage_path(path)
# obtain value to return size of
value = None
if path:
try:
parent, key = self._get_parent(path)
value = parent[key]
except KeyError:
pass
else:
value = self.root
# obtain size of value
if value is None:
return 0
elif isinstance(value, self.cls):
# total size for directory
size = 0
for v in value.values():
if not isinstance(v, self.cls):
size += buffer_size(v)
return size
else:
return buffer_size(value)
|
def getsize(self, path=None):
path = normalize_storage_path(path)
# obtain value to return size of
value = None
if path:
try:
parent, key = self._get_parent(path)
value = parent[key]
except KeyError:
pass
else:
value = self.root
# obtain size of value
if value is None:
return 0
elif isinstance(value, self.cls):
# total size for directory
size = 0
for v in value.values():
if not isinstance(v, self.cls):
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
try:
return buffer_size(value)
except TypeError:
return -1
|
https://github.com/zarr-developers/zarr-python/issues/348
|
In [1]: import zarr
In [2]: z1 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [3]: z2 = zarr.zeros((100,), chunks=(10,), dtype='i4', compressor=None)
In [4]: z1 == z2
Out[4]: True
In [5]: z1[0] = 5
In [6]: z2[0] = 5
In [7]: z1 == z2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-4176b4e1c607> in <module>
----> 1 z1 == z2
/zopt/conda3/envs/test_zarr/lib/python3.6/site-packages/zarr/core.py in __eq__(self, other)
410 return (
411 isinstance(other, Array) and
--> 412 self.store == other.store and
413 self.read_only == other.read_only and
414 self.path == other.path and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def encode_fill_value(v, dtype):
# early out
if v is None:
return v
if dtype.kind == "f":
if np.isnan(v):
return "NaN"
elif np.isposinf(v):
return "Infinity"
elif np.isneginf(v):
return "-Infinity"
else:
return float(v)
elif dtype.kind in "ui":
return int(v)
elif dtype.kind == "b":
return bool(v)
elif dtype.kind in "SV":
v = base64.standard_b64encode(v)
if not PY2: # pragma: py2 no cover
v = str(v, "ascii")
return v
elif dtype.kind == "U":
return v
elif dtype.kind in "mM":
return int(v.view("i8"))
else:
return v
|
def encode_fill_value(v, dtype):
# early out
if v is None:
return v
if dtype.kind == "f":
if np.isnan(v):
return "NaN"
elif np.isposinf(v):
return "Infinity"
elif np.isneginf(v):
return "-Infinity"
else:
return float(v)
elif dtype.kind in "ui":
return int(v)
elif dtype.kind == "b":
return bool(v)
elif dtype.kind in "SV":
v = base64.standard_b64encode(v)
if not PY2: # pragma: py2 no cover
v = str(v, "ascii")
return v
elif dtype.kind == "U":
return v
elif dtype.kind in "mM":
return int(v.view("u8"))
else:
return v
|
https://github.com/zarr-developers/zarr-python/issues/342
|
OverflowError: int too big to convert
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/meta.py", line 38, in decode_array_metadata
fill_value = decode_fill_value(meta['fill_value'], dtype)
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/meta.py", line 159, in decode_fill_value
return np.array(v, dtype=dtype)[()]
SystemError: <built-in function array> returned a result with an error set
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/tom/anaconda3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-156-d1f331fc15fd>", line 6, in <module>
shape=10)
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/creation.py", line 274, in full
return create(shape=shape, fill_value=fill_value, **kwargs)
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/creation.py", line 123, in create
cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only)
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/core.py", line 123, in __init__
self._load_metadata()
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/core.py", line 140, in _load_metadata
self._load_metadata_nosync()
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/core.py", line 155, in _load_metadata_nosync
meta = decode_array_metadata(meta_bytes)
File "/home/tom/anaconda3/lib/python3.6/site-packages/zarr/meta.py", line 50, in decode_array_metadata
raise MetadataError('error decoding metadata: %s' % e)
zarr.errors.MetadataError: error decoding metadata: <built-in function array> returned a result with an error set
|
SystemError
|
def normalize_array_selection(item, shape):
"""Convenience function to normalize a selection within an array with
the given `shape`."""
# ensure tuple
if not isinstance(item, tuple):
item = (item,)
# handle ellipsis
n_ellipsis = sum(1 for i in item if i == Ellipsis)
if n_ellipsis > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
elif n_ellipsis == 1:
idx_ellipsis = item.index(Ellipsis)
n_items_l = idx_ellipsis # items to left of ellipsis
n_items_r = len(item) - (idx_ellipsis + 1) # items to right of ellipsis
n_items = len(item) - 1 # all non-ellipsis items
if n_items >= len(shape):
# ellipsis does nothing, just remove it
item = tuple(i for i in item if i != Ellipsis)
else:
# replace ellipsis with slices
new_item = item[:n_items_l] + ((slice(None),) * (len(shape) - n_items))
if n_items_r:
new_item += item[-n_items_r:]
item = new_item
# check dimensionality
if len(item) > len(shape):
raise IndexError("too many indices for array")
# determine start and stop indices for all axes
selection = tuple(normalize_axis_selection(i, l) for i, l in zip(item, shape))
# fill out selection if not completely specified
if len(selection) < len(shape):
selection += tuple(slice(0, l) for l in shape[len(selection) :])
return selection
|
def normalize_array_selection(item, shape):
"""Convenience function to normalize a selection within an array with
the given `shape`."""
# normalize item
if isinstance(item, numbers.Integral):
item = (int(item),)
elif isinstance(item, slice):
item = (item,)
elif item == Ellipsis:
item = (slice(None),)
# handle tuple of indices/slices
if isinstance(item, tuple):
# determine start and stop indices for all axes
selection = tuple(normalize_axis_selection(i, l) for i, l in zip(item, shape))
# fill out selection if not completely specified
if len(selection) < len(shape):
selection += tuple(slice(0, l) for l in shape[len(selection) :])
return selection
else:
raise TypeError("expected indices or slice, found: %r" % item)
|
https://github.com/zarr-developers/zarr-python/issues/93
|
In [1]: import zarr
In [2]: z = zarr.empty(shape=(100, 110), chunks=(10, 11), dtype=float)
In [3]: z[0]
Out[3]:
array([ 6.91088620e-310, 6.91088620e-310, 2.10918838e-316,
2.10918838e-316, 1.94607893e-316, 5.72938864e-313,
0.00000000e+000, 3.95252517e-323, 1.57027689e-312,
1.93101617e-312, 1.25197752e-312, 1.18831764e-312,
1.12465777e-312, 1.06099790e-312, 2.31297541e-312,
2.33419537e-312, 1.93101617e-312, 1.93101617e-312,
1.25197752e-312, 1.18831764e-312, 1.12465777e-312,
1.03977794e-312, 1.25197752e-312, 2.31297541e-312,
5.72938864e-313, 1.01855798e-312, 1.08221785e-312,
1.25197752e-312, 1.25197752e-312, 1.18831764e-312,
1.97345609e-312, 6.79038653e-313, 1.93101617e-312,
2.31297541e-312, 5.72938864e-313, 1.01855798e-312,
1.93101617e-312, 1.93101617e-312, 1.25197752e-312,
1.18831764e-312, 1.12465777e-312, 1.06099790e-312,
2.31297541e-312, 5.72938864e-313, 1.01855798e-312,
1.97345609e-312, 1.93101617e-312, 1.06099790e-312,
5.72938864e-313, 1.01855798e-312, 2.75859453e-313,
5.72938864e-313, 1.57027689e-312, 1.93101617e-312,
1.16709769e-312, 5.72938864e-313, 1.01855798e-312,
5.72938864e-313, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 4.94065646e-324, 6.91087535e-310,
0.00000000e+000, 4.94065646e-324, 2.45550626e-321,
6.91088620e-310, 1.98184750e-316, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
6.91087588e-310, 1.19069821e-321, 2.05561901e-316,
6.91088620e-310, 6.91070369e-310, 1.03259720e-321,
6.91088620e-310, 6.91088620e-310, 7.93037613e-120,
1.44506353e+214, 3.63859382e+185, 2.43896203e-154,
7.75110916e+228, 4.44743484e+252])
In [4]: z[0, :]
Out[4]:
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
In [5]: z[0, ...]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-27fbc5543985> in <module>()
----> 1 z[0, ...]
/opt/conda2/lib/python2.7/site-packages/zarr/core.pyc in __getitem__(self, item)
446
447 # normalize selection
--> 448 selection = normalize_array_selection(item, self._shape)
449
450 # determine output array shape
/opt/conda2/lib/python2.7/site-packages/zarr/util.pyc in normalize_array_selection(item, shape)
184 # determine start and stop indices for all axes
185 selection = tuple(normalize_axis_selection(i, l)
--> 186 for i, l in zip(item, shape))
187
188 # fill out selection if not completely specified
/opt/conda2/lib/python2.7/site-packages/zarr/util.pyc in <genexpr>((i, l))
184 # determine start and stop indices for all axes
185 selection = tuple(normalize_axis_selection(i, l)
--> 186 for i, l in zip(item, shape))
187
188 # fill out selection if not completely specified
/opt/conda2/lib/python2.7/site-packages/zarr/util.pyc in normalize_axis_selection(item, l)
163
164 else:
--> 165 raise TypeError('expected integer or slice, found: %r' % item)
166
167
TypeError: expected integer or slice, found: Ellipsis
In [6]: z[...]
Out[6]:
array([[ 6.91088620e-310, 6.91088620e-310, 2.12535499e-316, ...,
0.00000000e+000, 2.28439709e-032, 6.91088696e-310],
[ 0.00000000e+000, -5.25530781e-026, 6.91088696e-310, ...,
6.91087565e-310, 3.95252517e-323, 1.41861741e-316],
[ 6.91087582e-310, 4.44659081e-323, 1.41861622e-316, ...,
1.41867314e-316, 6.91087582e-310, 2.22329541e-322],
...,
[ 0.00000000e+000, 0.00000000e+000, 0.00000000e+000, ...,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000],
[ 0.00000000e+000, 0.00000000e+000, 0.00000000e+000, ...,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000],
[ 0.00000000e+000, 0.00000000e+000, 0.00000000e+000, ...,
1.41861267e-316, 6.91087582e-310, 6.42285340e-323]])
In [7]: z[:, 0]
Out[7]:
array([ 6.91088620e-310, 6.91088620e-310, 6.91087579e-310,
6.91087579e-310, 6.91087579e-310, 6.91087579e-310,
6.91087579e-310, 6.91087579e-310, 6.91087535e-310,
6.91087535e-310, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087535e-310, 3.40411230e-321,
2.03587931e-316, 6.91088620e-310, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087534e-310, 6.91087578e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087282e-310, 6.91087282e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087578e-310, 6.91087535e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087580e-310, 6.91087580e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087580e-310, 6.91087566e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
1.82894873e-060, 6.91087282e-310, 6.91087582e-310,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000])
|
TypeError
|
def shutdown_agents(opts):
if "rmq" == utils.get_messagebus() and not check_rabbit_status():
opts.aip.rmq_shutdown()
else:
opts.connection.call("shutdown")
_log.debug("Calling stop_platform")
if opts.platform:
opts.connection.notify("stop_platform")
|
def shutdown_agents(opts):
opts.connection.call("shutdown")
_log.debug("Calling stop_platform")
if opts.platform:
opts.connection.notify("stop_platform")
|
https://github.com/VOLTTRON/volttron/issues/1886
|
Shutting down VOLTTRON
Traceback (most recent call last):
File "/home/osboxes/repos/pr-test/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 534, in run
result = self._run(*self.args, **self.kwargs)
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 276, in run
looper.next()
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 888, in loop
param = self._build_connection_parameters()
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 883, in _build_connection_parameters
self.instance_name)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 876, in build_agent_connection
self.create_user_with_permissions(rmq_user, permissions, ssl_auth=self.is_ssl)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 847, in create_user_with_permissions
if user not in self.get_users(ssl_auth):
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 248, in get_users
response = self._http_get_request(url, ssl_auth)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 163, in _http_get_request
response = self._call_grequest('get', url, ssl_auth)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 116, in _call_grequest
raise e
AttributeError: 'NoneType' object has no attribute 'raise_for_status'
<Greenlet at 0x7ff71ca50eb0: <bound method RMQCore.run of <volttron.platform.vip.agent.core.RMQCore object at 0x7ff71c976f50>>(<gevent.event.Event object at 0x7ff71c9685d0>)> failed with AttributeError
|
AttributeError
|
def send_vip_object(self, message):
"""
Send the VIP message over RabbitMQ message bus.
Reformat the VIP message object into Pika message object and
publish it using Pika library
:param message: VIP message object
:return:
"""
platform = getattr(message, "platform", self._instance_name)
if message.peer == b"":
message.peer = "router"
if platform == b"":
platform = self._instance_name
destination_routing_key = "{0}.{1}".format(platform, message.peer)
# Fit VIP frames in the PIKA properties dict
# VIP format - [SENDER, RECIPIENT, PROTO, USER_ID, MSG_ID, SUBSYS, ARGS...]
dct = {
"user_id": self._rmq_userid,
"app_id": self.routing_key, # Routing key of SENDER
"headers": dict(
recipient=destination_routing_key, # RECEIVER
proto=b"VIP", # PROTO
user=getattr(message, "user", self._rmq_userid), # USER_ID
),
"message_id": getattr(message, "id", b""), # MSG_ID
"type": message.subsystem, # SUBSYS
"content_type": "application/json",
}
properties = pika.BasicProperties(**dct)
msg = getattr(message, "args", None) # ARGS
# _log.debug("PUBLISHING TO CHANNEL {0}, {1}, {2}, {3}".format(destination_routing_key,
# msg,
# properties,
# self.routing_key))
try:
self.channel.basic_publish(
self.exchange,
destination_routing_key,
json.dumps(msg, ensure_ascii=False),
properties,
)
except (
pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPChannelError,
) as exc:
raise Unreachable(
errno.EHOSTUNREACH,
"Connection to RabbitMQ is lost",
"rabbitmq broker",
"rmq_connection",
)
|
def send_vip_object(self, message):
"""
Send the VIP message over RabbitMQ message bus.
Reformat the VIP message object into Pika message object and
publish it using Pika library
:param message: VIP message object
:return:
"""
platform = getattr(message, "platform", self._instance_name)
if message.peer == b"":
message.peer = "router"
if platform == b"":
platform = self._instance_name
destination_routing_key = "{0}.{1}".format(platform, message.peer)
# Fit VIP frames in the PIKA properties dict
# VIP format - [SENDER, RECIPIENT, PROTO, USER_ID, MSG_ID, SUBSYS, ARGS...]
dct = {
"user_id": self._rmq_userid,
"app_id": self.routing_key, # Routing key of SENDER
"headers": dict(
recipient=destination_routing_key, # RECEIVER
proto=b"VIP", # PROTO
user=getattr(message, "user", self._rmq_userid), # USER_ID
),
"message_id": getattr(message, "id", b""), # MSG_ID
"type": message.subsystem, # SUBSYS
"content_type": "application/json",
}
properties = pika.BasicProperties(**dct)
msg = getattr(message, "args", None) # ARGS
# _log.debug("PUBLISHING TO CHANNEL {0}, {1}, {2}, {3}".format(destination_routing_key,
# msg,
# properties,
# self.routing_key))
try:
self.channel.basic_publish(
self.exchange,
destination_routing_key,
json.dumps(msg, ensure_ascii=False),
properties,
)
except (
pika.exceptions.AMQPConnectionErro,
pika.exceptions.AMQPChannelError,
) as exc:
raise Unreachable(
errno.EHOSTUNREACH,
"Connection to RabbitMQ is lost",
"rabbitmq broker",
"rmq_connection",
)
|
https://github.com/VOLTTRON/volttron/issues/1886
|
Shutting down VOLTTRON
Traceback (most recent call last):
File "/home/osboxes/repos/pr-test/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 534, in run
result = self._run(*self.args, **self.kwargs)
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 276, in run
looper.next()
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 888, in loop
param = self._build_connection_parameters()
File "/home/osboxes/repos/pr-test/volttron/platform/vip/agent/core.py", line 883, in _build_connection_parameters
self.instance_name)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 876, in build_agent_connection
self.create_user_with_permissions(rmq_user, permissions, ssl_auth=self.is_ssl)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 847, in create_user_with_permissions
if user not in self.get_users(ssl_auth):
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 248, in get_users
response = self._http_get_request(url, ssl_auth)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 163, in _http_get_request
response = self._call_grequest('get', url, ssl_auth)
File "/home/osboxes/repos/pr-test/volttron/utils/rmq_mgmt.py", line 116, in _call_grequest
raise e
AttributeError: 'NoneType' object has no attribute 'raise_for_status'
<Greenlet at 0x7ff71ca50eb0: <bound method RMQCore.run of <volttron.platform.vip.agent.core.RMQCore object at 0x7ff71c976f50>>(<gevent.event.Event object at 0x7ff71c9685d0>)> failed with AttributeError
|
AttributeError
|
def __init__(
self,
destination_vip,
destination_serverkey,
destination_historian_identity=PLATFORM_HISTORIAN,
**kwargs,
):
"""
:param destination_vip: vip address of the destination volttron
instance
:param destination_serverkey: public key of the destination server
:param services_topic_list: subset of topics that are inherently
supported by base historian. Default is device, analysis, logger,
and record topics
:param custom_topic_list: any additional topics this historian
should subscribe to.
:param destination_historian_identity: vip identity of the
destination historian. default is 'platform.historian'
:param kwargs: additional arguments to be passed along to parent class
"""
kwargs["process_loop_in_greenlet"] = True
super(DataMover, self).__init__(**kwargs)
self.destination_vip = destination_vip
self.destination_serverkey = destination_serverkey
self.destination_historian_identity = destination_historian_identity
config = {
"destination_vip": self.destination_vip,
"destination_serverkey": self.destination_serverkey,
"destination_historian_identity": self.destination_historian_identity,
}
self.update_default_config(config)
# will be available in both threads.
self._last_timeout = 0
|
def __init__(
self,
destination_vip,
destination_serverkey,
destination_historian_identity=PLATFORM_HISTORIAN,
**kwargs,
):
"""
:param destination_vip: vip address of the destination volttron
instance
:param destination_serverkey: public key of the destination server
:param services_topic_list: subset of topics that are inherently
supported by base historian. Default is device, analysis, logger,
and record topics
:param custom_topic_list: any additional topics this historian
should subscribe to.
:param destination_historian_identity: vip identity of the
destination historian. default is 'platform.historian'
:param kwargs: additional arguments to be passed along to parent class
"""
super(DataMover, self).__init__(**kwargs)
self.destination_vip = destination_vip
self.destination_serverkey = destination_serverkey
self.destination_historian_identity = destination_historian_identity
config = {
"destination_vip": self.destination_vip,
"destination_serverkey": self.destination_serverkey,
"destination_historian_identity": self.destination_historian_identity,
}
self.update_default_config(config)
# will be available in both threads.
self._last_timeout = 0
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def __init__(
self,
destination_vip,
destination_serverkey,
custom_topic_list=[],
topic_replace_list=[],
required_target_agents=[],
cache_only=False,
**kwargs,
):
kwargs["process_loop_in_greenlet"] = True
super(ForwardHistorian, self).__init__(**kwargs)
# will be available in both threads.
self._topic_replace_map = {}
self.topic_replace_list = topic_replace_list
self._num_failures = 0
self._last_timeout = 0
self._target_platform = None
self._current_custom_topics = set()
self.destination_vip = destination_vip
self.destination_serverkey = destination_serverkey
self.required_target_agents = required_target_agents
self.cache_only = cache_only
config = {
"custom_topic_list": custom_topic_list,
"topic_replace_list": self.topic_replace_list,
"required_target_agents": self.required_target_agents,
"destination_vip": self.destination_vip,
"destination_serverkey": self.destination_serverkey,
"cache_only": self.cache_only,
}
self.update_default_config(config)
# We do not support the insert RPC call.
self.no_insert = True
# We do not support the query RPC call.
self.no_query = True
|
def __init__(
self,
destination_vip,
destination_serverkey,
custom_topic_list=[],
topic_replace_list=[],
required_target_agents=[],
cache_only=False,
**kwargs,
):
super(ForwardHistorian, self).__init__(**kwargs)
# will be available in both threads.
self._topic_replace_map = {}
self.topic_replace_list = topic_replace_list
self._num_failures = 0
self._last_timeout = 0
self._target_platform = None
self._current_custom_topics = set()
self.destination_vip = destination_vip
self.destination_serverkey = destination_serverkey
self.required_target_agents = required_target_agents
self.cache_only = cache_only
config = {
"custom_topic_list": custom_topic_list,
"topic_replace_list": self.topic_replace_list,
"required_target_agents": self.required_target_agents,
"destination_vip": self.destination_vip,
"destination_serverkey": self.destination_serverkey,
"cache_only": self.cache_only,
}
self.update_default_config(config)
# We do not support the insert RPC call.
self.no_insert = True
# We do not support the query RPC call.
self.no_query = True
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def __init__(
self,
retry_period=300.0,
submit_size_limit=1000,
max_time_publishing=30.0,
backup_storage_limit_gb=None,
topic_replace_list=[],
gather_timing_data=False,
readonly=False,
process_loop_in_greenlet=False,
capture_device_data=True,
capture_log_data=True,
capture_analysis_data=True,
capture_record_data=True,
**kwargs,
):
super(BaseHistorianAgent, self).__init__(**kwargs)
# This should resemble a dictionary that has key's from and to which
# will be replaced within the topics before it's stored in the
# cache database
self._process_loop_in_greenlet = process_loop_in_greenlet
self._topic_replace_list = topic_replace_list
_log.info("Topic string replace list: {}".format(self._topic_replace_list))
self.gather_timing_data = bool(gather_timing_data)
self.volttron_table_defs = "volttron_table_definitions"
self._backup_storage_limit_gb = backup_storage_limit_gb
self._retry_period = float(retry_period)
self._submit_size_limit = int(submit_size_limit)
self._max_time_publishing = float(max_time_publishing)
self._successful_published = set()
# Remove the need to reset subscriptions to eliminate possible data
# loss at config change.
self._current_subscriptions = set()
self._topic_replace_map = {}
self._event_queue = (
gevent.queue.Queue() if self._process_loop_in_greenlet else Queue()
)
self._readonly = bool(readonly)
self._stop_process_loop = False
self._process_thread = None
self.no_insert = False
self.no_query = False
self.instance_name = None
self._default_config = {
"retry_period": self._retry_period,
"submit_size_limit": self._submit_size_limit,
"max_time_publishing": self._max_time_publishing,
"backup_storage_limit_gb": self._backup_storage_limit_gb,
"topic_replace_list": self._topic_replace_list,
"gather_timing_data": self.gather_timing_data,
"readonly": self._readonly,
"capture_device_data": capture_device_data,
"capture_log_data": capture_log_data,
"capture_analysis_data": capture_analysis_data,
"capture_record_data": capture_record_data,
}
self.vip.config.set_default("config", self._default_config)
self.vip.config.subscribe(
self._configure, actions=["NEW", "UPDATE"], pattern="config"
)
|
def __init__(
self,
retry_period=300.0,
submit_size_limit=1000,
max_time_publishing=30.0,
backup_storage_limit_gb=None,
topic_replace_list=[],
gather_timing_data=False,
readonly=False,
capture_device_data=True,
capture_log_data=True,
capture_analysis_data=True,
capture_record_data=True,
**kwargs,
):
super(BaseHistorianAgent, self).__init__(**kwargs)
# This should resemble a dictionary that has key's from and to which
# will be replaced within the topics before it's stored in the
# cache database
self._topic_replace_list = topic_replace_list
_log.info("Topic string replace list: {}".format(self._topic_replace_list))
self.gather_timing_data = bool(gather_timing_data)
self.volttron_table_defs = "volttron_table_definitions"
self._backup_storage_limit_gb = backup_storage_limit_gb
self._retry_period = float(retry_period)
self._submit_size_limit = int(submit_size_limit)
self._max_time_publishing = float(max_time_publishing)
self._successful_published = set()
# Remove the need to reset subscriptions to eliminate possible data
# loss at config change.
self._current_subscriptions = set()
self._topic_replace_map = {}
self._event_queue = Queue()
self._readonly = bool(readonly)
self._stop_process_loop = False
self._process_thread = None
self.no_insert = False
self.no_query = False
self.instance_name = None
self._default_config = {
"retry_period": self._retry_period,
"submit_size_limit": self._submit_size_limit,
"max_time_publishing": self._max_time_publishing,
"backup_storage_limit_gb": self._backup_storage_limit_gb,
"topic_replace_list": self._topic_replace_list,
"gather_timing_data": self.gather_timing_data,
"readonly": self._readonly,
"capture_device_data": capture_device_data,
"capture_log_data": capture_log_data,
"capture_analysis_data": capture_analysis_data,
"capture_record_data": capture_record_data,
}
self.vip.config.set_default("config", self._default_config)
self.vip.config.subscribe(
self._configure, actions=["NEW", "UPDATE"], pattern="config"
)
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def start_process_thread(self):
if self._process_loop_in_greenlet:
self._process_thread = self.core.spawn(self._process_loop)
self._process_thread.start()
_log.debug("Process greenlet started.")
else:
self._process_thread = Thread(target=self._process_loop)
self._process_thread.daemon = True # Don't wait on thread to exit.
self._process_thread.start()
_log.debug("Process thread started.")
|
def start_process_thread(self):
self._process_thread = Thread(target=self._process_loop)
self._process_thread.daemon = True # Don't wait on thread to exit.
self._process_thread.start()
_log.debug("Process thread started.")
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def stop_process_thread(self):
_log.debug("Stopping the process loop.")
if self._process_thread is None:
return
# Tell the loop it needs to die.
self._stop_process_loop = True
# Wake the loop.
self._event_queue.put(None)
# 9 seconds as configuration timeout is 10 seconds.
self._process_thread.join(9.0)
# Greenlets have slightly different API than threads in this case.
if self._process_loop_in_greenlet:
if not self._process_thread.ready():
_log.error("Failed to stop process greenlet during reconfiguration!")
elif self._process_thread.is_alive():
_log.error("Failed to stop process thread during reconfiguration!")
self._process_thread = None
_log.debug("Process loop stopped.")
|
def stop_process_thread(self):
_log.debug("Stopping the process thread.")
if self._process_thread is None:
return
# Tell the loop it needs to die.
self._stop_process_loop = True
# Wake the loop.
self._event_queue.put(None)
# 9 seconds as configuration timeout is 10 seconds.
self._process_thread.join(9.0)
if self._process_thread.is_alive():
_log.error("Failed to stop process thread during reconfiguration!")
self._process_thread = None
_log.debug("Process thread stopped.")
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shutdown or the Agent is reconfigured.
"""
_log.debug("Starting process loop.")
# Sets up the concrete historian
# call this method even in case of readonly mode in case historian
# is setting up connections that are shared for both query and write
# operations
self.historian_setup()
if self._readonly:
_log.info("Historian setup in readonly mode.")
return
# Record the names of data, topics, meta tables in a metadata table
self.record_table_definitions(self.volttron_table_defs)
backupdb = BackupDatabase(self, self._backup_storage_limit_gb)
# now that everything is setup we need to make sure that the topics
# are synchronized between
# Based on the state of the back log and whether or not successful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
wait_for_input = not bool(
backupdb.get_outstanding_to_publish(self._submit_size_limit)
)
while True:
try:
_log.debug("Reading from/waiting for queue.")
new_to_publish = [self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
_log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
# We wake the thread after a configuration change by passing a None to the queue.
# Backup anything new before checking for a stop.
backupdb.backup_new_data((x for x in new_to_publish if x is not None))
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
wait_for_input = True
start_time = datetime.utcnow()
while True:
to_publish_list = backupdb.get_outstanding_to_publish(
self._submit_size_limit
)
# Check for a stop for reconfiguration.
if not to_publish_list or self._stop_process_loop:
break
try:
self.publish_to_historian(to_publish_list)
except Exception as exp:
_log.exception("An unhandled exception occured while publishing.")
# if the successful queue is empty then we need not remove
# them from the database.
if not self._successful_published:
break
backupdb.remove_successfully_published(
self._successful_published, self._submit_size_limit
)
self._successful_published = set()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
backupdb.close()
self.historian_teardown()
_log.debug("Process loop stopped.")
|
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shutdown.
"""
_log.debug("Starting process loop.")
# Sets up the concrete historian
# call this method even in case of readonly mode in case historian
# is setting up connections that are shared for both query and write
# operations
self.historian_setup()
if self._readonly:
_log.info("Historian setup in readonly mode.")
return
# Record the names of data, topics, meta tables in a metadata table
self.record_table_definitions(self.volttron_table_defs)
backupdb = BackupDatabase(self, self._backup_storage_limit_gb)
# now that everything is setup we need to make sure that the topics
# are synchronized between
# Based on the state of the back log and whether or not successful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
wait_for_input = not bool(
backupdb.get_outstanding_to_publish(self._submit_size_limit)
)
while True:
try:
_log.debug("Reading from/waiting for queue.")
new_to_publish = [self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
_log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
# We wake the thread after a configuration change by passing a None to the queue.
# Backup anything new before checking for a stop.
backupdb.backup_new_data((x for x in new_to_publish if x is not None))
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
wait_for_input = True
start_time = datetime.utcnow()
while True:
to_publish_list = backupdb.get_outstanding_to_publish(
self._submit_size_limit
)
# Check for a stop for reconfiguration.
if not to_publish_list or self._stop_process_loop:
break
try:
self.publish_to_historian(to_publish_list)
except Exception as exp:
_log.exception("An unhandled exception occured while publishing.")
# if the successful queue is empty then we need not remove
# them from the database.
if not self._successful_published:
break
backupdb.remove_successfully_published(
self._successful_published, self._submit_size_limit
)
self._successful_published = set()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
backupdb.close()
self.historian_teardown()
_log.debug("Process loop stopped.")
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def __init__(self, owner, backup_storage_limit_gb, check_same_thread=True):
# The topic cache is only meant as a local lookup and should not be
# accessed via the implemented historians.
self._backup_cache = {}
self._meta_data = defaultdict(dict)
self._owner = weakref.ref(owner)
self._backup_storage_limit_gb = backup_storage_limit_gb
self._connection = None
self._setupdb(check_same_thread)
|
def __init__(self, owner, backup_storage_limit_gb):
# The topic cache is only meant as a local lookup and should not be
# accessed via the implemented historians.
self._backup_cache = {}
self._meta_data = defaultdict(dict)
self._owner = weakref.ref(owner)
self._backup_storage_limit_gb = backup_storage_limit_gb
self._setupdb()
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def close(self):
self._connection.close()
self._connection = None
|
def close(self):
self._connection.close()
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def _setupdb(self, check_same_thread):
"""Creates a backup database for the historian if doesn't exist."""
_log.debug("Setting up backup DB.")
self._connection = sqlite3.connect(
"backup.sqlite",
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
check_same_thread=check_same_thread,
)
c = self._connection.cursor()
if self._backup_storage_limit_gb is not None:
c.execute("""PRAGMA page_size""")
page_size = c.fetchone()[0]
max_storage_bytes = self._backup_storage_limit_gb * 1024**3
self.max_pages = max_storage_bytes / page_size
c.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='outstanding';"
)
if c.fetchone() is None:
_log.debug("Configuring backup DB for the first time.")
self._connection.execute("""PRAGMA auto_vacuum = FULL""")
self._connection.execute("""CREATE TABLE outstanding
(id INTEGER PRIMARY KEY,
ts timestamp NOT NULL,
source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
header_string TEXT)""")
else:
# Check to see if we have a header_string column.
c.execute("pragma table_info(outstanding);")
name_index = 0
for description in c.description:
if description[0] == "name":
break
name_index += 1
found_header_column = False
for row in c:
if row[name_index] == "header_string":
found_header_column = True
break
if not found_header_column:
_log.info("Updating cache database to support storing header data.")
c.execute("ALTER TABLE outstanding ADD COLUMN header_string text;")
c.execute("""CREATE INDEX IF NOT EXISTS outstanding_ts_index
ON outstanding (ts)""")
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='metadata';")
if c.fetchone() is None:
self._connection.execute("""CREATE TABLE metadata
(source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
name TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(topic_id, source, name))""")
else:
c.execute("SELECT * FROM metadata")
for row in c:
self._meta_data[(row[0], row[1])][row[2]] = row[3]
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='topics';")
if c.fetchone() is None:
self._connection.execute("""create table topics
(topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
UNIQUE(topic_name))""")
else:
c.execute("SELECT * FROM topics")
for row in c:
self._backup_cache[row[0]] = row[1]
self._backup_cache[row[1]] = row[0]
c.close()
self._connection.commit()
|
def _setupdb(self):
"""Creates a backup database for the historian if doesn't exist."""
_log.debug("Setting up backup DB.")
self._connection = sqlite3.connect(
"backup.sqlite", detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
)
c = self._connection.cursor()
if self._backup_storage_limit_gb is not None:
c.execute("""PRAGMA page_size""")
page_size = c.fetchone()[0]
max_storage_bytes = self._backup_storage_limit_gb * 1024**3
self.max_pages = max_storage_bytes / page_size
c.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='outstanding';"
)
if c.fetchone() is None:
_log.debug("Configuring backup DB for the first time.")
self._connection.execute("""PRAGMA auto_vacuum = FULL""")
self._connection.execute("""CREATE TABLE outstanding
(id INTEGER PRIMARY KEY,
ts timestamp NOT NULL,
source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
header_string TEXT)""")
else:
# Check to see if we have a header_string column.
c.execute("pragma table_info(outstanding);")
name_index = 0
for description in c.description:
if description[0] == "name":
break
name_index += 1
found_header_column = False
for row in c:
if row[name_index] == "header_string":
found_header_column = True
break
if not found_header_column:
_log.info("Updating cache database to support storing header data.")
c.execute("ALTER TABLE outstanding ADD COLUMN header_string text;")
c.execute("""CREATE INDEX IF NOT EXISTS outstanding_ts_index
ON outstanding (ts)""")
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='metadata';")
if c.fetchone() is None:
self._connection.execute("""CREATE TABLE metadata
(source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
name TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(topic_id, source, name))""")
else:
c.execute("SELECT * FROM metadata")
for row in c:
self._meta_data[(row[0], row[1])][row[2]] = row[3]
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='topics';")
if c.fetchone() is None:
self._connection.execute("""create table topics
(topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
UNIQUE(topic_name))""")
else:
c.execute("SELECT * FROM topics")
for row in c:
self._backup_cache[row[0]] = row[1]
self._backup_cache[row[1]] = row[0]
c.close()
self._connection.commit()
|
https://github.com/VOLTTRON/volttron/issues/1484
|
('RPC ERROR', 'Traceback (most recent call last):
File "/home/jer/git/volttron-craig/volttron/platform/vip/agent/subsystems/rpc.py", line 169, in method
return method(*args, **kwargs)
File "/home/jer/git/volttron-craig/volttron/platform/store.py", line 256, in manage_get_metadata
raise KeyError(\'No configuration file "{}" for VIP IDENTIY {}\'.format(config_name, identity))
KeyError: \'No configuration file "config" for VIP IDENTIY forwarder2\'\n')
|
KeyError
|
def __init__(self, config, **kwargs):
"""Initialise the historian.
The historian makes two connections to the data store. Both of
these connections are available across the main and processing
thread of the historian. topic_map and topic_meta are used as
cache for the meta data and topic maps.
:param config: dictionary object containing the configurations for
this historian
:param kwargs: additional keyword arguments. (optional identity and
topic_replace_list used by parent classes)
"""
self.config = config
self.topic_id_map = {}
self.topic_name_map = {}
self.topic_meta = {}
self.agg_topic_id_map = {}
self.tables_def = {}
self.reader = None
self.writer = None
super(SQLHistorian, self).__init__(**kwargs)
|
def __init__(self, config, **kwargs):
"""Initialise the historian.
The historian makes two connections to the data store. Both of
these connections are available across the main and processing
thread of the historian. topic_map and topic_meta are used as
cache for the meta data and topic maps.
:param config: dictionary object containing the configurations for
this historian
:param kwargs: additional keyword arguments. (optional identity and
topic_replace_list used by parent classes)
"""
super(SQLHistorian, self).__init__(**kwargs)
database_type = config["connection"]["type"]
self.tables_def, table_names = self.parse_table_def(config)
db_functs_class = sqlutils.get_dbfuncts_class(database_type)
self.reader = db_functs_class(config["connection"]["params"], table_names)
self.writer = db_functs_class(config["connection"]["params"], table_names)
self.reader.setup_historian_tables()
self.topic_id_map = {}
self.topic_name_map = {}
self.topic_meta = {}
self.agg_topic_id_map = {}
|
https://github.com/VOLTTRON/volttron/issues/988
|
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: Exception in thread Thread-2:
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: Traceback (most recent call last):
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.run()
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/usr/lib/python2.7/threading.py", line 754, in run
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.__target(*self.__args, **self.__kwargs)
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/home/building-testbed/volttron4-latest/volttron/platform/agent/base_historian.py", line 639, in _process_loop
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.record_table_definitions(self.volttron_table_defs)
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/home/building-testbed/.volttron/agents/26d7b095-9e5d-43df-bdfa-7c551584efd0/sqlhistorianagent-3.6.0/sqlhistorian/historian.py", line 192, in record_table_definitions
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.writer.record_table_definitions(self.tables_def,
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: AttributeError: 'SQLHistorian' object has no attribute 'writer'
|
AttributeError
|
def historian_setup(self):
thread_name = threading.currentThread().getName()
_log.debug("historian_setup on Thread: {}".format(thread_name))
database_type = self.config["connection"]["type"]
self.tables_def, table_names = self.parse_table_def(self.config)
db_functs_class = sqlutils.get_dbfuncts_class(database_type)
self.reader = db_functs_class(self.config["connection"]["params"], table_names)
self.writer = db_functs_class(self.config["connection"]["params"], table_names)
self.reader.setup_historian_tables()
topic_id_map, topic_name_map = self.reader.get_topic_map()
self.topic_id_map.update(topic_id_map)
self.topic_name_map.update(topic_name_map)
self.agg_topic_id_map = self.reader.get_agg_topic_map()
|
def historian_setup(self):
thread_name = threading.currentThread().getName()
_log.debug("historian_setup on Thread: {}".format(thread_name))
|
https://github.com/VOLTTRON/volttron/issues/988
|
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: Exception in thread Thread-2:
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: Traceback (most recent call last):
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.run()
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/usr/lib/python2.7/threading.py", line 754, in run
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.__target(*self.__args, **self.__kwargs)
2016-10-26 15:44:11,091 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/home/building-testbed/volttron4-latest/volttron/platform/agent/base_historian.py", line 639, in _process_loop
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.record_table_definitions(self.volttron_table_defs)
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: File "/home/building-testbed/.volttron/agents/26d7b095-9e5d-43df-bdfa-7c551584efd0/sqlhistorianagent-3.6.0/sqlhistorian/historian.py", line 192, in record_table_definitions
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: self.writer.record_table_definitions(self.tables_def,
2016-10-26 15:44:11,092 (sqlhistorianagent-3.6.0 1179) <stderr> ERROR: AttributeError: 'SQLHistorian' object has no attribute 'writer'
|
AttributeError
|
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shutdown.
"""
_log.debug("Starting process loop.")
self._setup_backup_db()
self.historian_setup()
# now that everything is setup we need to make sure that the topics
# are syncronized between
# Based on the state of the back log and whether or not sucessful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
# wait_for_input = not bool(self._get_outstanding_to_publish())
wait_for_input = not bool(self._get_outstanding_to_publish())
while True:
try:
_log.debug("Reading from/waiting for queue.")
new_to_publish = [self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
_log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
self._backup_new_to_publish(new_to_publish)
wait_for_input = True
start_time = datetime.utcnow()
_log.debug("Calling publish_to_historian.")
while True:
to_publish_list = self._get_outstanding_to_publish()
if not to_publish_list:
break
try:
self.publish_to_historian(to_publish_list)
except Exception:
_log.exception(
"An unhandled exception occured while publishing to the historian."
)
if not self._any_sucessfull_publishes():
break
self._cleanup_successful_publishes()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
_log.debug("Finished processing")
|
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shutdown.
"""
_log.debug("Starting process loop.")
self._setup_backup_db()
self.historian_setup()
# now that everything is setup we need to make sure that the topics
# are syncronized between
# Based on the state of the back log and whether or not sucessful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
# wait_for_input = not bool(self._get_outstanding_to_publish())
wait_for_input = not bool(self._get_outstanding_to_publish())
while True:
try:
_log.debug("Reading from/waiting for queue.")
new_to_publish = [self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
_log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
self._backup_new_to_publish(new_to_publish)
wait_for_input = True
start_time = datetime.utcnow()
_log.debug("Calling publish_to_historian.")
while True:
to_publish_list = self._get_outstanding_to_publish()
if not to_publish_list:
break
try:
self.publish_to_historian(to_publish_list)
except Exception as exp:
_log.error(
"An unhandled exception has occured while publishing to historian."
)
_log.exception(exp)
if not self._any_sucessfull_publishes():
break
self._cleanup_successful_publishes()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
_log.debug("Finished processing")
|
https://github.com/VOLTTRON/volttron/issues/168
|
2015-08-28 08:38:59,118 (forwarderagent-3.0 6934) volttron.platform.agent.base_historian ERROR: An unhandled exception has occured while publishing to historian.
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: Traceback (most recent call last):
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: msg = self.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: return fmt.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/volttron/platform/agent/utils.py", line 228, in format
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: return jsonapi.dumps(dct)
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/zmq/utils/jsonapi.py", line 40, in dumps
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: s = jsonmod.dumps(o, **kwargs)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/__init__.py", line 397, in dumps
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: **kw).encode(obj)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 275, in encode
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: chunks = self.iterencode(o, _one_shot=True)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 357, in iterencode
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: return _iterencode(o, 0)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 252, in default
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: raise TypeError(repr(o) + " is not JSON serializable")
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: TypeError: ProtocolError('previous send operation is not complete',) is not JSON serializable
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: Logged from file base_historian.py, line 370
|
TypeError
|
def periodic_read(self):
_log.debug("scraping device: " + self.device_name)
try:
results = self.interface.scrape_all()
except Exception:
_log.exception("unhandled exception")
return
# XXX: Does a warning need to be printed?
if not results:
return
now = datetime.datetime.utcnow().isoformat(" ") + "Z"
headers = {
headers_mod.DATE: now,
}
for point, value in results.iteritems():
topics = self.get_paths_for_point(point)
for topic in topics:
message = [value, self.meta_data[point]]
self.vip.pubsub.publish("pubsub", topic, headers=headers, message=message)
message = [results, self.meta_data]
self.vip.pubsub.publish(
"pubsub", self.all_path_depth, headers=headers, message=message
)
self.vip.pubsub.publish(
"pubsub", self.all_path_breadth, headers=headers, message=message
)
|
def periodic_read(self):
_log.debug("scraping device: " + self.device_name)
try:
results = self.interface.scrape_all()
except Exception as ex:
_log.exception(ex)
return
# XXX: Does a warning need to be printed?
if not results:
return
now = datetime.datetime.utcnow().isoformat(" ") + "Z"
headers = {
headers_mod.DATE: now,
}
for point, value in results.iteritems():
topics = self.get_paths_for_point(point)
for topic in topics:
message = [value, self.meta_data[point]]
self.vip.pubsub.publish("pubsub", topic, headers=headers, message=message)
message = [results, self.meta_data]
self.vip.pubsub.publish(
"pubsub", self.all_path_depth, headers=headers, message=message
)
self.vip.pubsub.publish(
"pubsub", self.all_path_breadth, headers=headers, message=message
)
|
https://github.com/VOLTTRON/volttron/issues/168
|
2015-08-28 08:38:59,118 (forwarderagent-3.0 6934) volttron.platform.agent.base_historian ERROR: An unhandled exception has occured while publishing to historian.
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: Traceback (most recent call last):
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: msg = self.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: return fmt.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/volttron/platform/agent/utils.py", line 228, in format
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: return jsonapi.dumps(dct)
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/zmq/utils/jsonapi.py", line 40, in dumps
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: s = jsonmod.dumps(o, **kwargs)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/__init__.py", line 397, in dumps
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: **kw).encode(obj)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 275, in encode
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: chunks = self.iterencode(o, _one_shot=True)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 357, in iterencode
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: return _iterencode(o, 0)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 252, in default
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: raise TypeError(repr(o) + " is not JSON serializable")
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: TypeError: ProtocolError('previous send operation is not complete',) is not JSON serializable
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: Logged from file base_historian.py, line 370
|
TypeError
|
def historian(config_path, **kwargs):
config = utils.load_config(config_path)
connection = config.get("connection", None)
assert connection is not None
databaseType = connection.get("type", None)
assert databaseType is not None
params = connection.get("params", None)
assert params is not None
identity = config.get("identity", kwargs.pop("identity", None))
if databaseType == "sqlite":
from .db.sqlitefuncts import SqlLiteFuncts as DbFuncts
elif databaseType == "mysql":
from .db.mysqlfuncts import MySqlFuncts as DbFuncts
else:
_log.error("Unknown database type specified!")
raise Exception("Unkown database type specified!")
class SQLHistorian(BaseHistorian):
"""This is a simple example of a historian agent that writes stuff
to a SQLite database. It is designed to test some of the functionality
of the BaseHistorianAgent.
"""
@Core.receiver("onstart")
def starting(self, sender, **kwargs):
print(
"Starting address: {} identity: {}".format(
self.core.address, self.core.identity
)
)
try:
self.reader = DbFuncts(**connection["params"])
except AttributeError:
_log.exception("bad connection parameters")
self.core.stop()
return
self.topic_map = self.reader.get_topic_map()
if self.core.identity == "platform.historian":
# Check to see if the platform agent is available, if it isn't then
# subscribe to the /platform topic to be notified when the platform
# agent becomes available.
try:
ping = self.vip.ping("platform.agent", "awake?").get(timeout=3)
_log.debug("Ping response was? " + str(ping))
self.vip.rpc.call(
"platform.agent", "register_service", self.core.identity
).get(timeout=3)
except Unreachable:
_log.debug("Could not register historian service")
finally:
self.vip.pubsub.subscribe("pubsub", "/platform", self.__platform)
_log.debug("Listening to /platform")
def __platform(self, peer, sender, bus, topic, headers, message):
_log.debug("Platform is now: {}".format(message))
if message == "available" and self.core.identity == "platform.historian":
gevent.spawn(
self.vip.rpc.call,
"platform.agent",
"register_service",
self.core.identity,
)
gevent.sleep(0)
def publish_to_historian(self, to_publish_list):
_log.debug(
"publish_to_historian number of items: {}".format(len(to_publish_list))
)
# load a topic map if there isn't one yet.
try:
self.topic_map.items()
except:
self.topic_map = self.reader.get_topic_map()
for x in to_publish_list:
ts = x["timestamp"]
topic = x["topic"]
value = x["value"]
# look at the topics that are stored in the database already
# to see if this topic has a value
topic_id = self.topic_map.get(topic)
if topic_id is None:
row = self.writer.insert_topic(topic)
topic_id = row[0]
self.topic_map[topic] = topic_id
self.writer.insert_data(ts, topic_id, value)
_log.debug("published {} data values:".format(len(to_publish_list)))
self.report_all_published()
def query_topic_list(self):
if len(self.topic_map) > 0:
return self.topic_map.keys()
else:
# No topics present.
return []
def query_historian(
self, topic, start=None, end=None, skip=0, count=None, order="FIRST_TO_LAST"
):
"""This function should return the results of a query in the form:
{"values": [(timestamp1, value1), (timestamp2, value2), ...],
"metadata": {"key1": value1, "key2": value2, ...}}
metadata is not required (The caller will normalize this to {} for you)
"""
return self.reader.query(
topic, start=start, end=end, skip=skip, count=count, order=order
)
def historian_setup(self):
try:
self.writer = DbFuncts(**connection["params"])
except AttributeError as exc:
print(exc)
self.core.stop()
SQLHistorian.__name__ = "SQLHistorian"
return SQLHistorian(identity=identity, **kwargs)
|
def historian(config_path, **kwargs):
config = utils.load_config(config_path)
connection = config.get("connection", None)
assert connection is not None
databaseType = connection.get("type", None)
assert databaseType is not None
params = connection.get("params", None)
assert params is not None
identity = config.get("identity", kwargs.pop("identity", None))
if databaseType == "sqlite":
from .db.sqlitefuncts import SqlLiteFuncts as DbFuncts
elif databaseType == "mysql":
from .db.mysqlfuncts import MySqlFuncts as DbFuncts
else:
_log.error("Unknown database type specified!")
raise Exception("Unkown database type specified!")
class SQLHistorian(BaseHistorian):
"""This is a simple example of a historian agent that writes stuff
to a SQLite database. It is designed to test some of the functionality
of the BaseHistorianAgent.
"""
@Core.receiver("onstart")
def starting(self, sender, **kwargs):
print(
"Starting address: {} identity: {}".format(
self.core.address, self.core.identity
)
)
try:
self.reader = DbFuncts(**connection["params"])
except AttributeError as exc:
_log.exception(exp)
self.core.stop()
return
self.topic_map = self.reader.get_topic_map()
if self.core.identity == "platform.historian":
# Check to see if the platform agent is available, if it isn't then
# subscribe to the /platform topic to be notified when the platform
# agent becomes available.
try:
ping = self.vip.ping("platform.agent", "awake?").get(timeout=3)
_log.debug("Ping response was? " + str(ping))
self.vip.rpc.call(
"platform.agent", "register_service", self.core.identity
).get(timeout=3)
except Unreachable:
_log.debug("Could not register historian service")
finally:
self.vip.pubsub.subscribe("pubsub", "/platform", self.__platform)
_log.debug("Listening to /platform")
def __platform(self, peer, sender, bus, topic, headers, message):
_log.debug("Platform is now: {}".format(message))
if message == "available" and self.core.identity == "platform.historian":
gevent.spawn(
self.vip.rpc.call,
"platform.agent",
"register_service",
self.core.identity,
)
gevent.sleep(0)
def publish_to_historian(self, to_publish_list):
_log.debug(
"publish_to_historian number of items: {}".format(len(to_publish_list))
)
# load a topic map if there isn't one yet.
try:
self.topic_map.items()
except:
self.topic_map = self.reader.get_topic_map()
for x in to_publish_list:
ts = x["timestamp"]
topic = x["topic"]
value = x["value"]
# look at the topics that are stored in the database already
# to see if this topic has a value
topic_id = self.topic_map.get(topic)
if topic_id is None:
row = self.writer.insert_topic(topic)
topic_id = row[0]
self.topic_map[topic] = topic_id
self.writer.insert_data(ts, topic_id, value)
_log.debug("published {} data values:".format(len(to_publish_list)))
self.report_all_published()
def query_topic_list(self):
if len(self.topic_map) > 0:
return self.topic_map.keys()
else:
# No topics present.
return []
def query_historian(
self, topic, start=None, end=None, skip=0, count=None, order="FIRST_TO_LAST"
):
"""This function should return the results of a query in the form:
{"values": [(timestamp1, value1), (timestamp2, value2), ...],
"metadata": {"key1": value1, "key2": value2, ...}}
metadata is not required (The caller will normalize this to {} for you)
"""
return self.reader.query(
topic, start=start, end=end, skip=skip, count=count, order=order
)
def historian_setup(self):
try:
self.writer = DbFuncts(**connection["params"])
except AttributeError as exc:
print(exc)
self.core.stop()
SQLHistorian.__name__ = "SQLHistorian"
return SQLHistorian(identity=identity, **kwargs)
|
https://github.com/VOLTTRON/volttron/issues/168
|
2015-08-28 08:38:59,118 (forwarderagent-3.0 6934) volttron.platform.agent.base_historian ERROR: An unhandled exception has occured while publishing to historian.
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: Traceback (most recent call last):
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: msg = self.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: return fmt.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/volttron/platform/agent/utils.py", line 228, in format
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: return jsonapi.dumps(dct)
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/zmq/utils/jsonapi.py", line 40, in dumps
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: s = jsonmod.dumps(o, **kwargs)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/__init__.py", line 397, in dumps
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: **kw).encode(obj)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 275, in encode
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: chunks = self.iterencode(o, _one_shot=True)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 357, in iterencode
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: return _iterencode(o, 0)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 252, in default
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: raise TypeError(repr(o) + " is not JSON serializable")
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: TypeError: ProtocolError('previous send operation is not complete',) is not JSON serializable
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: Logged from file base_historian.py, line 370
|
TypeError
|
def starting(self, sender, **kwargs):
print(
"Starting address: {} identity: {}".format(
self.core.address, self.core.identity
)
)
try:
self.reader = DbFuncts(**connection["params"])
except AttributeError:
_log.exception("bad connection parameters")
self.core.stop()
return
self.topic_map = self.reader.get_topic_map()
if self.core.identity == "platform.historian":
# Check to see if the platform agent is available, if it isn't then
# subscribe to the /platform topic to be notified when the platform
# agent becomes available.
try:
ping = self.vip.ping("platform.agent", "awake?").get(timeout=3)
_log.debug("Ping response was? " + str(ping))
self.vip.rpc.call(
"platform.agent", "register_service", self.core.identity
).get(timeout=3)
except Unreachable:
_log.debug("Could not register historian service")
finally:
self.vip.pubsub.subscribe("pubsub", "/platform", self.__platform)
_log.debug("Listening to /platform")
|
def starting(self, sender, **kwargs):
print(
"Starting address: {} identity: {}".format(
self.core.address, self.core.identity
)
)
try:
self.reader = DbFuncts(**connection["params"])
except AttributeError as exc:
_log.exception(exp)
self.core.stop()
return
self.topic_map = self.reader.get_topic_map()
if self.core.identity == "platform.historian":
# Check to see if the platform agent is available, if it isn't then
# subscribe to the /platform topic to be notified when the platform
# agent becomes available.
try:
ping = self.vip.ping("platform.agent", "awake?").get(timeout=3)
_log.debug("Ping response was? " + str(ping))
self.vip.rpc.call(
"platform.agent", "register_service", self.core.identity
).get(timeout=3)
except Unreachable:
_log.debug("Could not register historian service")
finally:
self.vip.pubsub.subscribe("pubsub", "/platform", self.__platform)
_log.debug("Listening to /platform")
|
https://github.com/VOLTTRON/volttron/issues/168
|
2015-08-28 08:38:59,118 (forwarderagent-3.0 6934) volttron.platform.agent.base_historian ERROR: An unhandled exception has occured while publishing to historian.
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: Traceback (most recent call last):
2015-08-28 08:38:59,119 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: msg = self.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: return fmt.format(record)
2015-08-28 08:38:59,120 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/volttron/platform/agent/utils.py", line 228, in format
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: return jsonapi.dumps(dct)
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/zmq/utils/jsonapi.py", line 40, in dumps
2015-08-28 08:38:59,121 (forwarderagent-3.0 6934) <stderr> ERROR: s = jsonmod.dumps(o, **kwargs)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/__init__.py", line 397, in dumps
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: **kw).encode(obj)
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 275, in encode
2015-08-28 08:38:59,122 (forwarderagent-3.0 6934) <stderr> ERROR: chunks = self.iterencode(o, _one_shot=True)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 357, in iterencode
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: return _iterencode(o, 0)
2015-08-28 08:38:59,123 (forwarderagent-3.0 6934) <stderr> ERROR: File "/home/volttron/volttron-3x/env/local/lib/python2.7/site-packages/simplejson/encoder.py", line 252, in default
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: raise TypeError(repr(o) + " is not JSON serializable")
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: TypeError: ProtocolError('previous send operation is not complete',) is not JSON serializable
2015-08-28 08:38:59,124 (forwarderagent-3.0 6934) <stderr> ERROR: Logged from file base_historian.py, line 370
|
TypeError
|
def send(self, frame, flags=0, copy=True, track=False):
"""Send a single frame while enforcing VIP protocol.
Expects frames to be sent in the following order:
PEER USER_ID MESSAGE_ID SUBSYSTEM [ARG]...
If the socket is a ROUTER, an INTERMEDIARY must be sent before
PEER. The VIP protocol signature, PROTO, is automatically sent
between PEER and USER_ID. Zero or more ARG frames may be sent
after SUBSYSTEM, which may not be empty. All frames up to
SUBSYSTEM must be sent with the SNDMORE flag.
"""
self.wait_send(flags)
state = self._send_state
if state == 4:
# Verify that subsystem has some non-space content
subsystem = bytes(frame)
if not subsystem.strip():
raise ProtocolError("invalid subsystem: {!r}".format(subsystem))
if not flags & SNDMORE:
# Must have SNDMORE flag until sending SUBSYSTEM frame.
if state < 4:
raise ProtocolError(
"expecting at least {} more frames".format(4 - state - 1)
)
# Reset the send state when the last frame is sent
self._send_state = -1 if self.type == ROUTER else 0
elif state < 5:
if state == 1:
# Automatically send PROTO frame
super(_Socket, self).send(b"VIP1", flags=flags | SNDMORE)
state += 1
self._send_state = state + 1
try:
super(_Socket, self).send(frame, flags=flags, copy=copy, track=track)
except Exception:
self._send_state = state
raise
|
def send(self, frame, flags=0, copy=True, track=False):
"""Send a single frame while enforcing VIP protocol.
Expects frames to be sent in the following order:
PEER USER_ID MESSAGE_ID SUBSYSTEM [ARG]...
If the socket is a ROUTER, an INTERMEDIARY must be sent before
PEER. The VIP protocol signature, PROTO, is automatically sent
between PEER and USER_ID. Zero or more ARG frames may be sent
after SUBSYSTEM, which may not be empty. All frames up to
SUBSYSTEM must be sent with the SNDMORE flag.
"""
state = self._send_state
if state == 4:
# Verify that subsystem has some non-space content
subsystem = bytes(frame)
if not subsystem.strip():
raise ProtocolError("invalid subsystem: {!r}".format(subsystem))
if not flags & SNDMORE:
# Must have SNDMORE flag until sending SUBSYSTEM frame.
if state < 4:
raise ProtocolError(
"expecting at least {} more frames".format(4 - state - 1)
)
# Reset the send state when the last frame is sent
self._send_state = -1 if self.type == ROUTER else 0
elif state < 5:
if state == 1:
# Automatically send PROTO frame
super(_Socket, self).send(b"VIP1", flags=flags | SNDMORE)
state += 1
self._send_state = state + 1
try:
super(_Socket, self).send(frame, flags=flags, copy=copy, track=track)
except Exception:
self._send_state = state
raise
|
https://github.com/VOLTTRON/volttron/issues/133
|
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc002cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_set of <__main_, prefix=Topic(u'devices/actuators/set'))> failed with ProtocolError
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc007cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_schedule_reques, prefix=Topic(u'devices/actuators/schedule/request'))> failed with ProtocolError
|
ProtocolError
|
def send_vip(
self,
peer,
subsystem,
args=None,
msg_id=b"",
user=b"",
via=None,
flags=0,
copy=True,
track=False,
):
"""Send an entire VIP message by individual parts.
This method will raise a ProtocolError exception if the previous
send was made with the SNDMORE flag or if other protocol
constraints are violated. If SNDMORE flag is used, additional
arguments may be sent. via is required for ROUTER sockets.
"""
self.wait_send(flags)
state = self._send_state
if state > 0:
raise ProtocolError("previous send operation is not complete")
elif state == -1:
if via is None:
raise ValueError("missing 'via' argument required by ROUTER sockets")
self.send(via, flags=flags | SNDMORE, copy=copy, track=track)
if msg_id is None:
msg_id = b""
if user is None:
user = b""
more = SNDMORE if args else 0
self.send_multipart(
[peer, user, msg_id, subsystem], flags=flags | more, copy=copy, track=track
)
if args:
send = self.send if isinstance(args, basestring) else self.send_multipart
send(args, flags=flags, copy=copy, track=track)
|
def send_vip(
self,
peer,
subsystem,
args=None,
msg_id=b"",
user=b"",
via=None,
flags=0,
copy=True,
track=False,
):
"""Send an entire VIP message by individual parts.
This method will raise a ProtocolError exception if the previous
send was made with the SNDMORE flag or if other protocol
constraints are violated. If SNDMORE flag is used, additional
arguments may be sent. via is required for ROUTER sockets.
"""
state = self._send_state
if state > 0:
raise ProtocolError("previous send operation is not complete")
elif state == -1:
if via is None:
raise ValueError("missing 'via' argument required by ROUTER sockets")
self.send(via, flags=flags | SNDMORE, copy=copy, track=track)
if msg_id is None:
msg_id = b""
if user is None:
user = b""
more = SNDMORE if args else 0
self.send_multipart(
[peer, user, msg_id, subsystem], flags=flags | more, copy=copy, track=track
)
if args:
send = self.send if isinstance(args, basestring) else self.send_multipart
send(args, flags=flags, copy=copy, track=track)
|
https://github.com/VOLTTRON/volttron/issues/133
|
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc002cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_set of <__main_, prefix=Topic(u'devices/actuators/set'))> failed with ProtocolError
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc007cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_schedule_reques, prefix=Topic(u'devices/actuators/schedule/request'))> failed with ProtocolError
|
ProtocolError
|
def __init__(self, context=None, socket_type=DEALER, shadow=None):
"""Initialize the object and the send and receive state."""
if context is None:
context = self._context_class.instance()
# There are multiple backends which handle shadow differently.
# It is best to send it as a positional to avoid problems.
base = super(_Socket, self)
if shadow is None:
base.__init__(context, socket_type)
else:
base.__init__(context, socket_type, shadow)
# Initialize send and receive states, which are mapped as:
# state: -1 0 [ 1 ] 2 3 4 5
# frame: VIA PEER [PROTO] USER_ID MSG_ID SUBSYS ...
state = -1 if self.type == ROUTER else 0
object.__setattr__(self, "_send_state", state)
object.__setattr__(self, "_recv_state", state)
object.__setattr__(self, "_Socket__local", self._local_class())
self.immediate = True
|
def __init__(self, context=None, socket_type=DEALER, shadow=None):
"""Initialize the object and the send and receive state."""
if context is None:
context = self._context_class.instance()
# There are multiple backends which handle shadow differently.
# It is best to send it as a positional to avoid problems.
base = super(_Socket, self)
if shadow is None:
base.__init__(context, socket_type)
else:
base.__init__(context, socket_type, shadow)
# Initialize send and receive states, which are mapped as:
# state: -1 0 [ 1 ] 2 3 4 5
# frame: VIA PEER [PROTO] USER_ID MSG_ID SUBSYS ...
state = -1 if self.type == ROUTER else 0
object.__setattr__(self, "_send_state", state)
object.__setattr__(self, "_recv_state", state)
self.immediate = True
|
https://github.com/VOLTTRON/volttron/issues/133
|
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc002cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_set of <__main_, prefix=Topic(u'devices/actuators/set'))> failed with ProtocolError
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc007cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_schedule_reques, prefix=Topic(u'devices/actuators/schedule/request'))> failed with ProtocolError
|
ProtocolError
|
def send(self, frame, flags=0, copy=True, track=False):
"""Send a single frame while enforcing VIP protocol.
Expects frames to be sent in the following order:
PEER USER_ID MESSAGE_ID SUBSYSTEM [ARG]...
If the socket is a ROUTER, an INTERMEDIARY must be sent before
PEER. The VIP protocol signature, PROTO, is automatically sent
between PEER and USER_ID. Zero or more ARG frames may be sent
after SUBSYSTEM, which may not be empty. All frames up to
SUBSYSTEM must be sent with the SNDMORE flag.
"""
with self._sending(flags) as flags:
state = self._send_state
if state == 4:
# Verify that subsystem has some non-space content
subsystem = bytes(frame)
if not subsystem.strip():
raise ProtocolError("invalid subsystem: %s" % subsystem)
if not flags & SNDMORE:
# Must have SNDMORE flag until sending SUBSYSTEM frame.
if state < 4:
raise ProtocolError(
"expecting at least %d more frames" % (4 - state - 1)
)
# Reset the send state when the last frame is sent
self._send_state = -1 if self.type == ROUTER else 0
elif state < 5:
if state == 1:
# Automatically send PROTO frame
super(_Socket, self).send(b"VIP1", flags=flags | SNDMORE)
state += 1
self._send_state = state + 1
try:
super(_Socket, self).send(frame, flags=flags, copy=copy, track=track)
except Exception:
self._send_state = state
raise
|
def send(self, frame, flags=0, copy=True, track=False):
"""Send a single frame while enforcing VIP protocol.
Expects frames to be sent in the following order:
PEER USER_ID MESSAGE_ID SUBSYSTEM [ARG]...
If the socket is a ROUTER, an INTERMEDIARY must be sent before
PEER. The VIP protocol signature, PROTO, is automatically sent
between PEER and USER_ID. Zero or more ARG frames may be sent
after SUBSYSTEM, which may not be empty. All frames up to
SUBSYSTEM must be sent with the SNDMORE flag.
"""
self.wait_send(flags)
state = self._send_state
if state == 4:
# Verify that subsystem has some non-space content
subsystem = bytes(frame)
if not subsystem.strip():
raise ProtocolError("invalid subsystem: {!r}".format(subsystem))
if not flags & SNDMORE:
# Must have SNDMORE flag until sending SUBSYSTEM frame.
if state < 4:
raise ProtocolError(
"expecting at least {} more frames".format(4 - state - 1)
)
# Reset the send state when the last frame is sent
self._send_state = -1 if self.type == ROUTER else 0
elif state < 5:
if state == 1:
# Automatically send PROTO frame
super(_Socket, self).send(b"VIP1", flags=flags | SNDMORE)
state += 1
self._send_state = state + 1
try:
super(_Socket, self).send(frame, flags=flags, copy=copy, track=track)
except Exception:
self._send_state = state
raise
|
https://github.com/VOLTTRON/volttron/issues/133
|
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc002cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_set of <__main_, prefix=Topic(u'devices/actuators/set'))> failed with ProtocolError
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc007cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_schedule_reques, prefix=Topic(u'devices/actuators/schedule/request'))> failed with ProtocolError
|
ProtocolError
|
def send_vip(
self,
peer,
subsystem,
args=None,
msg_id=b"",
user=b"",
via=None,
flags=0,
copy=True,
track=False,
):
"""Send an entire VIP message by individual parts.
This method will raise a ProtocolError exception if the previous
send was made with the SNDMORE flag or if other protocol
constraints are violated. If SNDMORE flag is used, additional
arguments may be sent. via is required for ROUTER sockets.
"""
with self._sending(flags) as flags:
state = self._send_state
if state > 0:
raise ProtocolError("previous send operation is not complete")
elif state == -1:
if via is None:
raise ValueError("missing 'via' argument required by ROUTER sockets")
self.send(via, flags=flags | SNDMORE, copy=copy, track=track)
if msg_id is None:
msg_id = b""
if user is None:
user = b""
more = SNDMORE if args else 0
self.send_multipart(
[peer, user, msg_id, subsystem], flags=flags | more, copy=copy, track=track
)
if args:
send = self.send if isinstance(args, basestring) else self.send_multipart
send(args, flags=flags, copy=copy, track=track)
|
def send_vip(
self,
peer,
subsystem,
args=None,
msg_id=b"",
user=b"",
via=None,
flags=0,
copy=True,
track=False,
):
"""Send an entire VIP message by individual parts.
This method will raise a ProtocolError exception if the previous
send was made with the SNDMORE flag or if other protocol
constraints are violated. If SNDMORE flag is used, additional
arguments may be sent. via is required for ROUTER sockets.
"""
self.wait_send(flags)
state = self._send_state
if state > 0:
raise ProtocolError("previous send operation is not complete")
elif state == -1:
if via is None:
raise ValueError("missing 'via' argument required by ROUTER sockets")
self.send(via, flags=flags | SNDMORE, copy=copy, track=track)
if msg_id is None:
msg_id = b""
if user is None:
user = b""
more = SNDMORE if args else 0
self.send_multipart(
[peer, user, msg_id, subsystem], flags=flags | more, copy=copy, track=track
)
if args:
send = self.send if isinstance(args, basestring) else self.send_multipart
send(args, flags=flags, copy=copy, track=track)
|
https://github.com/VOLTTRON/volttron/issues/133
|
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc002cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_set of <__main_, prefix=Topic(u'devices/actuators/set'))> failed with ProtocolError
Traceback (most recent call last):
File "/home/kyle/workspaces/volttron/env/local/lib/python2.7/site-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/pubsub.py", line 280, in subscribe
self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus).get()
File "/home/kyle/workspaces/volttron/volttron/platform/vip/agent/subsystems/rpc.py", line 282, in call
self.core().socket.send_vip(peer, 'RPC', [request], msg_id=ident)
File "/home/kyle/workspaces/volttron/volttron/platform/vip/socket.py", line 225, in send_vip
raise ProtocolError('previous send operation is not complete')
ProtocolError: previous send operation is not complete
<Greenlet at 0xb2fc007cL: subscribe(<volttron.platform.vip.agent.subsystems.pubsub.Pub, peer='pubsub', callback=<bound method ActuatorAgent.handle_schedule_reques, prefix=Topic(u'devices/actuators/schedule/request'))> failed with ProtocolError
|
ProtocolError
|
def main(database):
history = InMemoryHistory()
connection = sqlite3.connect(database)
while True:
try:
text = get_input(
"> ",
lexer=SqlLexer,
completer=sql_completer,
style=DocumentStyle,
history=history,
on_abort=AbortAction.RETRY,
)
except EOFError:
break # Control-D pressed.
with connection:
try:
messages = connection.execute(text)
except Exception as e:
print(repr(e))
else:
for message in messages:
print(message)
print("GoodBye!")
|
def main(database):
history = History()
connection = sqlite3.connect(database)
while True:
try:
text = get_input(
"> ",
lexer=SqlLexer,
completer=sql_completer,
style=DocumentStyle,
history=history,
on_abort=AbortAction.RETRY,
)
except EOFError:
break # Control-D pressed.
with connection:
try:
messages = connection.execute(text)
except Exception as e:
print(repr(e))
else:
for message in messages:
print(message)
print("GoodBye!")
|
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/150
|
Traceback (most recent call last):
File "sqlite-cli.py", line 55, in <module>
main(db)
File "sqlite-cli.py", line 29, in main
history = History()
TypeError: Can't instantiate abstract class History with abstract methods __getitem__, __len__, append
|
TypeError
|
def get_completions(self, document):
"""Ask jedi to complete."""
script = get_jedi_script_from_document(
document, self.get_locals(), self.get_globals()
)
if script:
try:
completions = script.completions()
except TypeError:
# Issue #9: bad syntax causes completions() to fail in jedi.
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
pass
except UnicodeDecodeError:
# Issue #43: UnicodeDecodeError on OpenBSD
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/43
pass
else:
for c in completions:
yield Completion(
c.name_with_symbols,
len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols,
)
|
def get_completions(self, document):
"""Ask jedi to complete."""
script = get_jedi_script_from_document(
document, self.get_locals(), self.get_globals()
)
if script:
try:
completions = script.completions()
except TypeError:
# Issue #9: bad syntax causes completions() to fail in jedi.
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
pass
else:
for c in completions:
yield Completion(
c.name_with_symbols,
len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols,
)
|
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/43
|
In [1]: import sException in thread Thread-8:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/home/edd/.local/lib/python2.7/site-packages/prompt_toolkit/__init__.pyn
callback()
File "/home/edd/.local/lib/python2.7/site-packages/prompt_toolkit/__init__.pyn
completions = list(line.completer.get_completions(document))
File "/home/edd/.local/lib/python2.7/site-packages/prompt_toolkit/contrib/ipys
for c in super(IPythonCompleter, self).get_completions(document):
File "/home/edd/.local/lib/python2.7/site-packages/prompt_toolkit/contrib/pyts
completions = script.completions()
File "/home/edd/.local/lib/python2.7/site-packages/jedi/api/__init__.py", lins
completions = get_completions(user_stmt, b)
File "/home/edd/.local/lib/python2.7/site-packages/jedi/api/__init__.py", lins
return self._simple_complete(path, like)
File "/home/edd/.local/lib/python2.7/site-packages/jedi/api/__init__.py", line
return super(Interpreter, self)._simple_complete(path, like)
File "/home/edd/.local/lib/python2.7/site-packages/jedi/api/__init__.py", line
names = s.get_defined_names(on_import_stmt=True)
File "/home/edd/.local/lib/python2.7/site-packages/jedi/evaluate/imports.py",s
names += self._get_module_names([path])
File "/home/edd/.local/lib/python2.7/site-packages/jedi/evaluate/imports.py",s
for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):
File "/usr/local/lib/python2.7/pkgutil.py", line 147, in iter_modules
for name, ispkg in iter_importer_modules(i, prefix):
File "/usr/local/lib/python2.7/pkgutil.py", line 202, in iter_modules
filenames.sort() # handle packages before same-named modules
UnicodeDecodeError: 'ascii' codec can't decode byte 0xb0 in position 25: ordina)
|
UnicodeDecodeError
|
def get_tokens(self, cli, width):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if cli.line.signatures:
sig = cli.line.signatures[0] # Always take the first one.
append((Token, " "))
try:
append((Signature, sig.full_name))
except IndexError:
# Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
# See also: https://github.com/davidhalter/jedi/issues/490
return []
append((Signature.Operator, "("))
for i, p in enumerate(sig.params):
if i == sig.index:
append((Signature.CurrentName, str(p.name)))
else:
append((Signature, str(p.name)))
append((Signature.Operator, ", "))
result.pop() # Pop last comma
append((Signature.Operator, ")"))
return result
|
def get_tokens(self, cli, width):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if cli.line.signatures:
sig = cli.line.signatures[0] # Always take the first one.
append((Token, " "))
append((Signature, sig.full_name))
append((Signature.Operator, "("))
for i, p in enumerate(sig.params):
if i == sig.index:
append((Signature.CurrentName, str(p.name)))
else:
append((Signature, str(p.name)))
append((Signature.Operator, ", "))
result.pop() # Pop last comma
append((Signature.Operator, ")"))
return result
|
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/37
|
➜ ~ ptpython --vi
In [1]: def bez(cps, t):
2. if len(cps) < 2:
3. return cps[0]
4. p1 = bez(Traceback (most recent call last):
File "/usr/local/bin/ptpython", line 80, in <module>
_run_repl() all
File "/usr/local/bin/ptpython", line 77, in _run_repl
startup_paths=startup_paths, always_multiline=always_multiline)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/contrib/repl.py",d
cli.start_repl(startup_paths=startup_paths)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/contrib/repl.py",l
on_exit=AbortAction.RAISE_EXCEPTION)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/__init__.py", lint
self._redraw()
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/__init__.py", linw
self.renderer.render(self)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/renderer.py", linr
output = self.render_to_str(cli)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/renderer.py", linr
self.layout.write_to_screen(cli, screen, height)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/layout/__init__.pn
t.write(cli, screen)z(cps, t)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/layout/toolbars.pe
tokens = self.get_tokens(cli, width)
File "/usr/local/lib/python2.7/dist-packages/prompt_toolkit/contrib/python_ins
append((Signature, sig.full_name))
File "/usr/local/lib/python2.7/dist-packages/jedi/api/classes.py", line 295, e
path = [unicode(p) for p in self._path()]
File "/usr/local/lib/python2.7/dist-packages/jedi/api/classes.py", line 170, h
path.insert(0, par.name)
File "/usr/local/lib/python2.7/dist-packages/jedi/api/interpreter.py", line 3_
return getattr(self.parser_module, name)
File "/usr/local/lib/python2.7/dist-packages/jedi/parser/fast.py", line 38, i_
return getattr(self.parsers[0].module, name)
IndexError: list index out of range
|
IndexError
|
def get_completions(self, document):
"""Ask jedi to complete."""
script = get_jedi_script_from_document(
document, self.get_locals(), self.get_globals()
)
if script:
try:
completions = script.completions()
except TypeError:
# Issue #9: bad syntax causes completions() to fail in jedi.
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
pass
else:
for c in completions:
yield Completion(
c.name_with_symbols,
len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols,
)
|
def get_completions(self, document):
"""Ask jedi to complete."""
script = get_jedi_script_from_document(
document, self.get_locals(), self.get_globals()
)
if script:
for c in script.completions():
yield Completion(
c.name_with_symbols,
len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols,
)
|
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/9
|
In [1]: for i in range)Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "build/bdist.linux-x86_64/egg/prompt_toolkit/__init__.py", line 200, in run
callback()
File "build/bdist.linux-x86_64/egg/prompt_toolkit/__init__.py", line 440, in run
completions = list(line.completer.get_completions(document))
File "build/bdist.linux-x86_64/egg/prompt_toolkit/contrib/python_input.py", line 449, in get_completions
for c in script.completions():
File "/home/cj/.local/lib/python2.7/site-packages/jedi-0.8.1_final0-py2.7.egg/jedi/api/__init__.py", line 187, in completions
new = classes.Completion(self._evaluator, c, needs_dot, len(like), s)
File "/home/cj/.local/lib/python2.7/site-packages/jedi-0.8.1_final0-py2.7.egg/jedi/api/classes.py", line 375, in __init__
super(Completion, self).__init__(evaluator, name.parent, name.start_pos)
File "/home/cj/.local/lib/python2.7/site-packages/jedi-0.8.1_final0-py2.7.egg/jedi/cache.py", line 139, in wrapper
result = func(self)
File "/home/cj/.local/lib/python2.7/site-packages/jedi-0.8.1_final0-py2.7.egg/jedi/api/interpreter.py", line 69, in parent
module = __import__(module_name)
TypeError: __import__() argument 1 must be string, not None
|
TypeError
|
def get_jedi_script_from_document(document, locals, globals):
try:
return jedi.Interpreter(
document.text,
column=document.cursor_position_col,
line=document.cursor_position_row + 1,
path="input-text",
namespaces=[locals, globals],
)
except jedi.common.MultiLevelStopIteration:
# This happens when the document is just a backslash.
return None
except ValueError:
# Invalid cursor position.
# ValueError('`column` parameter is not in a valid range.')
return None
except AttributeError:
# Workaround for #65: https://github.com/jonathanslenders/python-prompt-toolkit/issues/65
# See also: https://github.com/davidhalter/jedi/issues/508
return None
|
def get_jedi_script_from_document(document, locals, globals):
try:
return jedi.Interpreter(
document.text,
column=document.cursor_position_col,
line=document.cursor_position_row + 1,
path="input-text",
namespaces=[locals, globals],
)
except jedi.common.MultiLevelStopIteration:
# This happens when the document is just a backslash.
return None
except ValueError:
# Invalid cursor position.
# ValueError('`column` parameter is not in a valid range.')
return None
|
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/65
|
In [1]: yieldException in thread Thread-7:
Traceback (most recent call last):
File "/usr/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/usr/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/__init__.py", line 400, in run
completions = list(line.completer.get_completions(document))
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/contrib/ipython.py", line 69, in get_completions
for c in super(IPythonCompleter, self).get_completions(document):
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/contrib/python_input.py", line 477, in get_completions
script = get_jedi_script_from_document(document, self.get_locals(), self.get_globals())
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/contrib/python_input.py", line 457, in get_jedi_script_from_document
namespaces=[locals, globals])
File "/usr/lib/python3.4/site-packages/jedi-0.8.1_final0-py3.4.egg/jedi/api/__init__.py", line 642, in __init__
interpreter.create(self._evaluator, namespaces[0], self._parser.module())
File "/usr/lib/python3.4/site-packages/jedi-0.8.1_final0-py3.4.egg/jedi/api/interpreter.py", line 108, in create
something.parent = ns
AttributeError: 'NoneType' object has no attribute 'parent'
Exception in thread Thread-8:
Traceback (most recent call last):
File "/usr/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/usr/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/contrib/python_input.py", line 579, in run
script = get_jedi_script_from_document(document, self.get_locals(), self.get_globals())
File "/usr/lib/python3.4/site-packages/prompt_toolkit-0.22-py3.4.egg/prompt_toolkit/contrib/python_input.py", line 457, in get_jedi_script_from_document
namespaces=[locals, globals])
File "/usr/lib/python3.4/site-packages/jedi-0.8.1_final0-py3.4.egg/jedi/api/__init__.py", line 642, in __init__
interpreter.create(self._evaluator, namespaces[0], self._parser.module())
File "/usr/lib/python3.4/site-packages/jedi-0.8.1_final0-py3.4.egg/jedi/api/interpreter.py", line 108, in create
something.parent = ns
AttributeError: 'NoneType' object has no attribute 'parent'
|
AttributeError
|
def save(self, labels: SemanticSegmentationLabels) -> None:
"""Save labels to disk.
More info on rasterio IO:
- https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
- https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_root = get_local_path(self.root_uri, self.tmp_dir)
make_dir(local_root)
out_profile = {
"driver": "GTiff",
"height": self.extent.ymax,
"width": self.extent.xmax,
"transform": self.crs_transformer.get_affine_transform(),
"crs": self.crs_transformer.get_image_crs(),
"blockxsize": self.rasterio_block_size,
"blockysize": self.rasterio_block_size,
}
# if old scores exist, combine them with the new ones
if self.score_raster_source:
log.info("Old scores found. Merging with current scores.")
old_labels = self.get_scores()
labels += old_labels
self.write_discrete_raster_output(
out_profile, get_local_path(self.label_uri, self.tmp_dir), labels
)
if self.smooth_output:
self.write_smooth_raster_output(
out_profile,
get_local_path(self.score_uri, self.tmp_dir),
get_local_path(self.hits_uri, self.tmp_dir),
labels,
chip_sz=self.rasterio_block_size,
)
if self.vector_outputs:
self.write_vector_outputs(labels)
sync_to_dir(local_root, self.root_uri)
|
def save(self, labels: SemanticSegmentationLabels) -> None:
"""Save labels to disk.
More info on rasterio IO:
- https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
- https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_root = get_local_path(self.root_uri, self.tmp_dir)
make_dir(local_root)
out_smooth_profile = {
"driver": "GTiff",
"height": self.extent.ymax,
"width": self.extent.xmax,
"transform": self.crs_transformer.get_affine_transform(),
"crs": self.crs_transformer.get_image_crs(),
"blockxsize": self.rasterio_block_size,
"blockysize": self.rasterio_block_size,
}
# if old scores exist, combine them with the new ones
if self.score_raster_source:
log.info("Old scores found. Merging with current scores.")
old_labels = self.get_scores()
labels += old_labels
self.write_discrete_raster_output(
out_smooth_profile, get_local_path(self.label_uri, self.tmp_dir), labels
)
if self.smooth_output:
self.write_smooth_raster_output(
out_smooth_profile,
get_local_path(self.score_uri, self.tmp_dir),
get_local_path(self.hits_uri, self.tmp_dir),
labels,
chip_sz=self.rasterio_block_size,
)
if self.vector_outputs:
self.write_vector_outputs(labels)
sync_to_dir(local_root, self.root_uri)
|
https://github.com/azavea/raster-vision/issues/1073
|
Running predict command...
2021-01-18 09:29:36:rastervision.pytorch_learner.learner: INFO - Loading model weights from: /opt/data/tmp/tmpno2kuo4_/model-bundle/model.pth
2021-01-18 09:29:38:rastervision.core.rv_pipeline.rv_pipeline: INFO - Making predictions for scene
................................................................................................................................................................................................................................................................................................................................................................................................................
2021-01-18 09:29:48:rastervision.core.data.label_store.semantic_segmentation_label_store: INFO - Writing labels to disk.
[#-----------------------------------] 5% 0d 00:00:48
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 248, in <module>
main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 240, in run_command
runner=runner)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 217, in _run_command
command_fn()
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 165, in predict
for s in dataset.validation_scenes
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 161, in _predict
label_store.save(labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 194, in save
labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 256, in write_discrete_raster_output
label_arr = labels.get_label_arr(window)
File "/opt/src/rastervision_core/rastervision/core/data/label/semantic_segmentation_labels.py", line 170, in get_label_arr
return self.window_to_label_arr[window]
KeyError: Box(0, 4864, 256, 5000)
|
KeyError
|
def write_smooth_raster_output(
self,
out_profile: dict,
scores_path: str,
hits_path: str,
labels: SemanticSegmentationLabels,
chip_sz: Optional[int] = None,
) -> None:
dtype = np.uint8 if self.smooth_as_uint8 else np.float32
out_profile.update(
{
"count": labels.num_classes,
"dtype": dtype,
}
)
if chip_sz is None:
windows = [self.extent]
else:
windows = labels.get_windows(chip_sz=chip_sz)
log.info("Writing smooth labels to disk.")
with rio.open(scores_path, "w", **out_profile) as dataset:
with click.progressbar(windows) as bar:
for window in bar:
window, _ = self._clip_to_extent(self.extent, window)
score_arr = labels.get_score_arr(window)
if self.smooth_as_uint8:
score_arr = self._scores_to_uint8(score_arr)
self._write_array(dataset, window, score_arr)
# save pixel hits too
np.save(hits_path, labels.pixel_hits)
|
def write_smooth_raster_output(
self,
out_smooth_profile: dict,
scores_path: str,
hits_path: str,
labels: SemanticSegmentationLabels,
chip_sz: Optional[int] = None,
) -> None:
dtype = np.uint8 if self.smooth_as_uint8 else np.float32
out_smooth_profile.update(
{
"count": labels.num_classes,
"dtype": dtype,
}
)
if chip_sz is None:
windows = [self.extent]
else:
windows = labels.get_windows(chip_sz=chip_sz)
log.info("Writing smooth labels to disk.")
with rio.open(scores_path, "w", **out_smooth_profile) as dataset:
with click.progressbar(windows) as bar:
for window in bar:
window = window.intersection(self.extent)
score_arr = labels.get_score_arr(window)
if self.smooth_as_uint8:
score_arr *= 255
score_arr = np.around(score_arr, out=score_arr)
score_arr = score_arr.astype(dtype)
window = window.rasterio_format()
for i, class_scores in enumerate(score_arr, start=1):
dataset.write_band(i, class_scores, window=window)
# save pixel hits too
np.save(hits_path, labels.pixel_hits)
|
https://github.com/azavea/raster-vision/issues/1073
|
Running predict command...
2021-01-18 09:29:36:rastervision.pytorch_learner.learner: INFO - Loading model weights from: /opt/data/tmp/tmpno2kuo4_/model-bundle/model.pth
2021-01-18 09:29:38:rastervision.core.rv_pipeline.rv_pipeline: INFO - Making predictions for scene
................................................................................................................................................................................................................................................................................................................................................................................................................
2021-01-18 09:29:48:rastervision.core.data.label_store.semantic_segmentation_label_store: INFO - Writing labels to disk.
[#-----------------------------------] 5% 0d 00:00:48
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 248, in <module>
main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 240, in run_command
runner=runner)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 217, in _run_command
command_fn()
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 165, in predict
for s in dataset.validation_scenes
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 161, in _predict
label_store.save(labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 194, in save
labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 256, in write_discrete_raster_output
label_arr = labels.get_label_arr(window)
File "/opt/src/rastervision_core/rastervision/core/data/label/semantic_segmentation_labels.py", line 170, in get_label_arr
return self.window_to_label_arr[window]
KeyError: Box(0, 4864, 256, 5000)
|
KeyError
|
def write_discrete_raster_output(
self, out_profile: dict, path: str, labels: SemanticSegmentationLabels
) -> None:
num_bands = 1 if self.class_transformer is None else 3
out_profile.update({"count": num_bands, "dtype": np.uint8})
windows = labels.get_windows()
log.info("Writing labels to disk.")
with rio.open(path, "w", **out_profile) as dataset:
with click.progressbar(windows) as bar:
for window in bar:
label_arr = labels.get_label_arr(window)
window, label_arr = self._clip_to_extent(self.extent, window, label_arr)
if self.class_transformer is not None:
label_arr = self.class_transformer.class_to_rgb(label_arr)
label_arr = label_arr.transpose(2, 0, 1)
self._write_array(dataset, window, label_arr)
|
def write_discrete_raster_output(
self, out_smooth_profile: dict, path: str, labels: SemanticSegmentationLabels
) -> None:
num_bands = 1 if self.class_transformer is None else 3
out_smooth_profile.update({"count": num_bands, "dtype": np.uint8})
windows = labels.get_windows()
log.info("Writing labels to disk.")
with rio.open(path, "w", **out_smooth_profile) as dataset:
with click.progressbar(windows) as bar:
for window in bar:
window = window.intersection(self.extent)
label_arr = labels.get_label_arr(window)
window = window.rasterio_format()
if self.class_transformer is None:
dataset.write_band(1, label_arr, window=window)
else:
rgb_labels = self.class_transformer.class_to_rgb(label_arr)
rgb_labels = rgb_labels.transpose(2, 0, 1)
for i, band in enumerate(rgb_labels, start=1):
dataset.write_band(i, band, window=window)
|
https://github.com/azavea/raster-vision/issues/1073
|
Running predict command...
2021-01-18 09:29:36:rastervision.pytorch_learner.learner: INFO - Loading model weights from: /opt/data/tmp/tmpno2kuo4_/model-bundle/model.pth
2021-01-18 09:29:38:rastervision.core.rv_pipeline.rv_pipeline: INFO - Making predictions for scene
................................................................................................................................................................................................................................................................................................................................................................................................................
2021-01-18 09:29:48:rastervision.core.data.label_store.semantic_segmentation_label_store: INFO - Writing labels to disk.
[#-----------------------------------] 5% 0d 00:00:48
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 248, in <module>
main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 240, in run_command
runner=runner)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 217, in _run_command
command_fn()
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 165, in predict
for s in dataset.validation_scenes
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 161, in _predict
label_store.save(labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 194, in save
labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 256, in write_discrete_raster_output
label_arr = labels.get_label_arr(window)
File "/opt/src/rastervision_core/rastervision/core/data/label/semantic_segmentation_labels.py", line 170, in get_label_arr
return self.window_to_label_arr[window]
KeyError: Box(0, 4864, 256, 5000)
|
KeyError
|
def _labels_to_full_label_arr(self, labels: SemanticSegmentationLabels) -> np.ndarray:
"""Get an array of labels covering the full extent."""
try:
label_arr = labels.get_label_arr(self.extent)
return label_arr
except KeyError:
pass
# we will construct the array from individual windows
windows = labels.get_windows()
# value for pixels not convered by any windows
try:
default_class_id = self.class_config.get_null_class_id()
except ValueError:
# Set it to a high value so that it doesn't match any class's id.
# assumption: num_classes < 256
default_class_id = 255
label_arr = np.full(self.extent.size, fill_value=default_class_id, dtype=np.uint8)
for w in windows:
ymin, xmin, ymax, xmax = w
arr = labels.get_label_arr(w)
w, arr = self._clip_to_extent(self.extent, w, arr)
label_arr[ymin:ymax, xmin:xmax] = arr
return label_arr
|
def _labels_to_full_label_arr(self, labels: SemanticSegmentationLabels) -> np.ndarray:
"""Get an array of labels covering the full extent."""
try:
label_arr = labels.get_label_arr(self.extent)
return label_arr
except KeyError:
pass
# construct the array from individual windows
windows = labels.get_windows()
# value for pixels not convered by any windows
try:
default_class_id = self.class_config.get_null_class_id()
except ValueError:
# Set it to a high value so that it doesn't match any class's id.
# assumption: num_classes < 256
default_class_id = 255
label_arr = np.full(self.extent.size, fill_value=default_class_id, dtype=np.uint8)
for w in windows:
w = w.intersection(self.extent)
ymin, xmin, ymax, xmax = w
arr = labels.get_label_arr(w)
label_arr[ymin:ymax, xmin:xmax] = arr
return label_arr
|
https://github.com/azavea/raster-vision/issues/1073
|
Running predict command...
2021-01-18 09:29:36:rastervision.pytorch_learner.learner: INFO - Loading model weights from: /opt/data/tmp/tmpno2kuo4_/model-bundle/model.pth
2021-01-18 09:29:38:rastervision.core.rv_pipeline.rv_pipeline: INFO - Making predictions for scene
................................................................................................................................................................................................................................................................................................................................................................................................................
2021-01-18 09:29:48:rastervision.core.data.label_store.semantic_segmentation_label_store: INFO - Writing labels to disk.
[#-----------------------------------] 5% 0d 00:00:48
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 248, in <module>
main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 240, in run_command
runner=runner)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 217, in _run_command
command_fn()
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 165, in predict
for s in dataset.validation_scenes
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 161, in _predict
label_store.save(labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 194, in save
labels)
File "/opt/src/rastervision_core/rastervision/core/data/label_store/semantic_segmentation_label_store.py", line 256, in write_discrete_raster_output
label_arr = labels.get_label_arr(window)
File "/opt/src/rastervision_core/rastervision/core/data/label/semantic_segmentation_labels.py", line 170, in get_label_arr
return self.window_to_label_arr[window]
KeyError: Box(0, 4864, 256, 5000)
|
KeyError
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
if self.vector_output:
# We need to store the whole output mask to run feature extraction.
# If the raster is large, this will result in running out of memory, so
# more work will be needed to get this to work in a scalable way. But this
# is complicated because of the need to merge features that are split
# across windows.
mask = np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
else:
mask = None
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
class_labels = labels.get_label_arr(window, clip_extent=self.extent)
clipped_window = (
(window.ymin, window.ymin + class_labels.shape[0]),
(window.xmin, window.xmin + class_labels.shape[1]),
)
if mask is not None:
mask[
clipped_window[0][0] : clipped_window[0][1],
clipped_window[1][0] : clipped_window[1][1],
] = class_labels
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(class_labels)
for chan in range(3):
dataset.write_band(
chan + 1, rgb_labels[:, :, chan], window=clipped_window
)
else:
img = class_labels.astype(dtype)
dataset.write_band(1, img, window=clipped_window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo["denoise"]
uri = vo["uri"]
mode = vo["mode"]
class_id = vo["class_id"]
class_mask = np.array(mask == class_id, dtype=np.uint8)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
options = vo["building_options"]
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=options["min_aspect_ratio"],
min_area=options["min_area"],
width_factor=options["element_width_factor"],
thickness=options["element_thickness"],
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
str_to_file(geojson, uri)
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
if self.vector_output:
# We need to store the whole output mask to run feature extraction.
# If the raster is large, this will result in running out of memory, so
# more work will be needed to get this to work in a scalable way. But this
# is complicated because of the need to merge features that are split
# across windows.
mask = np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
else:
mask = None
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
class_labels = labels.get_label_arr(window, clip_extent=self.extent)
clipped_window = (
(window.ymin, window.ymin + class_labels.shape[0]),
(window.xmin, window.xmin + class_labels.shape[1]),
)
if mask is not None:
mask[
clipped_window[0][0] : clipped_window[0][1],
clipped_window[1][0] : clipped_window[1][1],
] = class_labels
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(class_labels)
for chan in range(3):
dataset.write_band(
chan + 1, rgb_labels[:, :, chan], window=clipped_window
)
else:
img = class_labels.astype(dtype)
dataset.write_band(1, img, window=clipped_window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo["denoise"]
uri = vo["uri"]
mode = vo["mode"]
class_id = vo["class_id"]
class_mask = np.array(mask == class_id, dtype=np.uint8)
local_geojson_path = get_local_path(uri, self.tmp_dir)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
options = vo["building_options"]
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=options["min_aspect_ratio"],
min_area=options["min_area"],
width_factor=options["element_width_factor"],
thickness=options["element_thickness"],
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
if local_geojson_path:
with open(local_geojson_path, "w") as file_out:
file_out.write(geojson)
upload_or_copy(local_geojson_path, uri)
|
https://github.com/azavea/raster-vision/issues/835
|
17:55:54
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentat
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif
17:55:56
Traceback (most recent call last):
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
17:55:56
"__main__", mod_spec)
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
17:55:56
exec(code, run_globals)
17:55:56
File "/opt/src/rastervision/__main__.py", line 17, in <module>
17:55:56
rv.main()
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
17:55:56
return self.main(*args, **kwargs)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
17:55:56
rv = self.invoke(ctx)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
17:55:56
return _process_result(sub_ctx.command.invoke(sub_ctx))
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
17:55:56
return ctx.invoke(self.callback, **ctx.params)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
17:55:56
return callback(*args, **kwargs)
17:55:56
File "/opt/src/rastervision/cli/main.py", line 294, in run_command
17:55:56
rv.runner.CommandRunner.run(command_config_uri)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
17:55:56
CommandRunner.run_from_proto(msg)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
17:55:56
command.run()
17:55:56
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
17:55:56
evaluator.process(scenes, tmp_dir)
17:55:56
File "/opt/src/rastervision/evaluation/semantic_segmentation_evaluator.py", line 75, in process
17:55:56
pred_geojson = pred_geojson_source.get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/vector_source.py", line 153, in get_geojson
17:55:56
self.geojson = self._get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/geojson_vector_source.py", line 25, in _get_geojson
17:55:56
geojson = json.loads(file_to_str(self.uri))
17:55:56
File "/opt/conda/lib/python3.6/json/__init__.py", line 354, in loads
17:55:56
return _default_decoder.decode(s)
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 339, in decode
17:55:56
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 357, in raw_decode
17:55:56
raise JSONDecodeError("Expecting value", s, err.value) from None
17:55:56
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
|
json.decoder.JSONDecodeError
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
mask = (
np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
if self.vector_output
else None
)
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
label_arr = labels.get_label_arr(window)
window = window.intersection(self.extent)
label_arr = label_arr[0 : window.get_height(), 0 : window.get_width()]
if mask is not None:
mask[window.ymin : window.ymax, window.xmin : window.xmax] = label_arr
window = window.rasterio_format()
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(label_arr)
for chan in range(3):
dataset.write_band(chan + 1, rgb_labels[:, :, chan], window=window)
else:
img = label_arr.astype(dtype)
dataset.write_band(1, img, window=window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo.denoise
uri = vo.uri
mode = vo.get_mode()
class_id = vo.class_id
class_mask = np.array(mask == class_id, dtype=np.uint8)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=vo.min_aspect_ratio,
min_area=vo.min_area,
width_factor=vo.element_width_factor,
thickness=vo.element_thickness,
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
str_to_file(geojson, uri)
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
mask = (
np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
if self.vector_output
else None
)
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
label_arr = labels.get_label_arr(window)
window = window.intersection(self.extent)
label_arr = label_arr[0 : window.get_height(), 0 : window.get_width()]
if mask is not None:
mask[window.ymin : window.ymax, window.xmin : window.xmax] = label_arr
window = window.rasterio_format()
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(label_arr)
for chan in range(3):
dataset.write_band(chan + 1, rgb_labels[:, :, chan], window=window)
else:
img = label_arr.astype(dtype)
dataset.write_band(1, img, window=window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo.denoise
uri = vo.uri
mode = vo.get_mode()
class_id = vo.class_id
class_mask = np.array(mask == class_id, dtype=np.uint8)
local_geojson_path = get_local_path(uri, self.tmp_dir)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=vo.min_aspect_ratio,
min_area=vo.min_area,
width_factor=vo.element_width_factor,
thickness=vo.element_thickness,
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
if local_geojson_path:
with open(local_geojson_path, "w") as file_out:
file_out.write(geojson)
upload_or_copy(local_geojson_path, uri)
|
https://github.com/azavea/raster-vision/issues/835
|
17:55:54
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentat
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif
17:55:56
Traceback (most recent call last):
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
17:55:56
"__main__", mod_spec)
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
17:55:56
exec(code, run_globals)
17:55:56
File "/opt/src/rastervision/__main__.py", line 17, in <module>
17:55:56
rv.main()
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
17:55:56
return self.main(*args, **kwargs)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
17:55:56
rv = self.invoke(ctx)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
17:55:56
return _process_result(sub_ctx.command.invoke(sub_ctx))
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
17:55:56
return ctx.invoke(self.callback, **ctx.params)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
17:55:56
return callback(*args, **kwargs)
17:55:56
File "/opt/src/rastervision/cli/main.py", line 294, in run_command
17:55:56
rv.runner.CommandRunner.run(command_config_uri)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
17:55:56
CommandRunner.run_from_proto(msg)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
17:55:56
command.run()
17:55:56
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
17:55:56
evaluator.process(scenes, tmp_dir)
17:55:56
File "/opt/src/rastervision/evaluation/semantic_segmentation_evaluator.py", line 75, in process
17:55:56
pred_geojson = pred_geojson_source.get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/vector_source.py", line 153, in get_geojson
17:55:56
self.geojson = self._get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/geojson_vector_source.py", line 25, in _get_geojson
17:55:56
geojson = json.loads(file_to_str(self.uri))
17:55:56
File "/opt/conda/lib/python3.6/json/__init__.py", line 354, in loads
17:55:56
return _default_decoder.decode(s)
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 339, in decode
17:55:56
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 357, in raw_decode
17:55:56
raise JSONDecodeError("Expecting value", s, err.value) from None
17:55:56
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
|
json.decoder.JSONDecodeError
|
def build(self, class_config, crs_transformer, extent, tmp_dir):
return SemanticSegmentationLabelStore(
self.uri,
extent,
crs_transformer,
tmp_dir,
vector_output=self.vector_output,
class_config=class_config if self.rgb else None,
)
|
def build(self, class_config, crs_transformer, extent, tmp_dir):
return SemanticSegmentationLabelStore(
self.uri,
extent,
crs_transformer,
tmp_dir,
vector_output=self.vector_output,
class_config=class_config,
)
|
https://github.com/azavea/raster-vision/issues/835
|
17:55:54
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentat
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif
17:55:56
Traceback (most recent call last):
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
17:55:56
"__main__", mod_spec)
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
17:55:56
exec(code, run_globals)
17:55:56
File "/opt/src/rastervision/__main__.py", line 17, in <module>
17:55:56
rv.main()
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
17:55:56
return self.main(*args, **kwargs)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
17:55:56
rv = self.invoke(ctx)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
17:55:56
return _process_result(sub_ctx.command.invoke(sub_ctx))
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
17:55:56
return ctx.invoke(self.callback, **ctx.params)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
17:55:56
return callback(*args, **kwargs)
17:55:56
File "/opt/src/rastervision/cli/main.py", line 294, in run_command
17:55:56
rv.runner.CommandRunner.run(command_config_uri)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
17:55:56
CommandRunner.run_from_proto(msg)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
17:55:56
command.run()
17:55:56
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
17:55:56
evaluator.process(scenes, tmp_dir)
17:55:56
File "/opt/src/rastervision/evaluation/semantic_segmentation_evaluator.py", line 75, in process
17:55:56
pred_geojson = pred_geojson_source.get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/vector_source.py", line 153, in get_geojson
17:55:56
self.geojson = self._get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/geojson_vector_source.py", line 25, in _get_geojson
17:55:56
geojson = json.loads(file_to_str(self.uri))
17:55:56
File "/opt/conda/lib/python3.6/json/__init__.py", line 354, in loads
17:55:56
return _default_decoder.decode(s)
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 339, in decode
17:55:56
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 357, in raw_decode
17:55:56
raise JSONDecodeError("Expecting value", s, err.value) from None
17:55:56
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
|
json.decoder.JSONDecodeError
|
def process(self, scenes, tmp_dir):
evaluation = self.create_evaluation()
vect_evaluation = self.create_evaluation()
null_class_id = self.class_config.get_null_class_id()
for scene in scenes:
log.info("Computing evaluation for scene {}...".format(scene.id))
label_source = scene.ground_truth_label_source
label_store = scene.prediction_label_store
with ActivateMixin.compose(label_source, label_store):
ground_truth = label_source.get_labels()
predictions = label_store.get_labels()
if scene.aoi_polygons:
# Filter labels based on AOI.
ground_truth = ground_truth.filter_by_aoi(
scene.aoi_polygons, null_class_id
)
predictions = predictions.filter_by_aoi(
scene.aoi_polygons, null_class_id
)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
evaluation.merge(scene_evaluation, scene_id=scene.id)
if (
hasattr(label_source, "raster_source")
and hasattr(label_source.raster_source, "vector_source")
and hasattr(label_store, "vector_output")
):
gt_geojson = label_source.raster_source.vector_source.get_geojson()
for vo in label_store.vector_output:
pred_geojson_uri = vo.uri
mode = vo.get_mode()
class_id = vo.class_id
pred_geojson_source = GeoJSONVectorSourceConfig(
uri=pred_geojson_uri, default_class_id=class_id
).build(self.class_config, scene.raster_source.get_crs_transformer())
pred_geojson = pred_geojson_source.get_geojson()
if scene.aoi_polygons:
gt_geojson = filter_geojson_by_aoi(gt_geojson, scene.aoi_polygons)
pred_geojson = filter_geojson_by_aoi(
pred_geojson, scene.aoi_polygons
)
vect_scene_evaluation = self.create_evaluation()
vect_scene_evaluation.compute_vector(
gt_geojson, pred_geojson, mode, class_id
)
vect_evaluation.merge(vect_scene_evaluation, scene_id=scene.id)
if not evaluation.is_empty():
evaluation.save(self.output_uri)
if not vect_evaluation.is_empty():
vect_evaluation.save(self.vector_output_uri)
|
def process(self, scenes, tmp_dir):
evaluation = self.create_evaluation()
vect_evaluation = self.create_evaluation()
null_class_id = self.class_config.get_null_class_id()
for scene in scenes:
log.info("Computing evaluation for scene {}...".format(scene.id))
label_source = scene.ground_truth_label_source
label_store = scene.prediction_label_store
with ActivateMixin.compose(label_source, label_store):
ground_truth = label_source.get_labels()
predictions = label_store.get_labels()
if scene.aoi_polygons:
# Filter labels based on AOI.
ground_truth = ground_truth.filter_by_aoi(
scene.aoi_polygons, null_class_id
)
predictions = predictions.filter_by_aoi(
scene.aoi_polygons, null_class_id
)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
evaluation.merge(scene_evaluation, scene_id=scene.id)
if (
hasattr(label_source, "raster_source")
and hasattr(label_source.raster_source, "vector_source")
and hasattr(label_store, "vector_output")
):
gt_geojson = label_source.raster_source.vector_source.get_geojson()
for vo in label_store.vector_output:
pred_geojson_uri = vo.uri
mode = vo.get_mode()
class_id = vo.class_id
pred_geojson_source = GeoJSONVectorSourceConfig(
uri=pred_geojson_uri, default_class_id=None
).build(self.class_config, scene.raster_source.get_crs_transformer())
pred_geojson = pred_geojson_source.get_geojson()
if scene.aoi_polygons:
gt_geojson = filter_geojson_by_aoi(gt_geojson, scene.aoi_polygons)
pred_geojson = filter_geojson_by_aoi(
pred_geojson, scene.aoi_polygons
)
vect_scene_evaluation = self.create_evaluation()
vect_scene_evaluation.compute_vector(
gt_geojson, pred_geojson, mode, class_id
)
vect_evaluation.merge(vect_scene_evaluation, scene_id=scene.id)
if not evaluation.is_empty():
evaluation.save(self.output_uri)
if not vect_evaluation.is_empty():
vect_evaluation.save(self.vector_output_uri)
|
https://github.com/azavea/raster-vision/issues/835
|
17:55:54
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentat
2019-10-01 17:55:54:rastervision.utils.files: INFO - Downloading s3://raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif to /opt/data/temp/tmp5903c076/tmpk2r7cdga/s3/raster-vision-lf-dev/examples/test-output/21/spacenet-vegas-buildings-semantic-segmentation-pytorch/predict/buildings-semantic_segmentation/5058.tif
17:55:56
Traceback (most recent call last):
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
17:55:56
"__main__", mod_spec)
17:55:56
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
17:55:56
exec(code, run_globals)
17:55:56
File "/opt/src/rastervision/__main__.py", line 17, in <module>
17:55:56
rv.main()
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
17:55:56
return self.main(*args, **kwargs)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
17:55:56
rv = self.invoke(ctx)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
17:55:56
return _process_result(sub_ctx.command.invoke(sub_ctx))
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
17:55:56
return ctx.invoke(self.callback, **ctx.params)
17:55:56
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
17:55:56
return callback(*args, **kwargs)
17:55:56
File "/opt/src/rastervision/cli/main.py", line 294, in run_command
17:55:56
rv.runner.CommandRunner.run(command_config_uri)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
17:55:56
CommandRunner.run_from_proto(msg)
17:55:56
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
17:55:56
command.run()
17:55:56
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
17:55:56
evaluator.process(scenes, tmp_dir)
17:55:56
File "/opt/src/rastervision/evaluation/semantic_segmentation_evaluator.py", line 75, in process
17:55:56
pred_geojson = pred_geojson_source.get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/vector_source.py", line 153, in get_geojson
17:55:56
self.geojson = self._get_geojson()
17:55:56
File "/opt/src/rastervision/data/vector_source/geojson_vector_source.py", line 25, in _get_geojson
17:55:56
geojson = json.loads(file_to_str(self.uri))
17:55:56
File "/opt/conda/lib/python3.6/json/__init__.py", line 354, in loads
17:55:56
return _default_decoder.decode(s)
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 339, in decode
17:55:56
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
17:55:56
File "/opt/conda/lib/python3.6/json/decoder.py", line 357, in raw_decode
17:55:56
raise JSONDecodeError("Expecting value", s, err.value) from None
17:55:56
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
|
json.decoder.JSONDecodeError
|
def filter_by_aoi(self, aoi_polygons):
boxes = self.get_boxes()
class_ids = self.get_class_ids()
scores = self.get_scores()
new_boxes = []
new_class_ids = []
new_scores = []
for box, class_id, score in zip(boxes, class_ids, scores):
box_poly = box.to_shapely()
for aoi in aoi_polygons:
if box_poly.within(aoi):
new_boxes.append(box.npbox_format())
new_class_ids.append(class_id)
new_scores.append(score)
break
if len(new_boxes) == 0:
return ObjectDetectionLabels.make_empty()
return ObjectDetectionLabels(
np.array(new_boxes), np.array(new_class_ids), np.array(new_scores)
)
|
def filter_by_aoi(self, aoi_polygons):
boxes = self.get_boxes()
class_ids = self.get_class_ids()
scores = self.get_scores()
new_boxes = []
new_class_ids = []
new_scores = []
for box, class_id, score in zip(boxes, class_ids, scores):
box_poly = box.to_shapely()
for aoi in aoi_polygons:
if box_poly.within(aoi):
new_boxes.append(box)
new_class_ids.append(class_id)
new_scores.append(score)
break
return ObjectDetectionLabels(
np.array(new_boxes), np.array(new_class_ids), np.array(new_scores)
)
|
https://github.com/azavea/raster-vision/issues/740
|
Running evaluator: ObjectDetectionEvaluator...
2019-03-28 16:47:07:rastervision.evaluation.classification_evaluator: INFO - Computing evaluation for scene 01986917-30ea-4f7f-8e01-985d73b8aa2a...
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 260, in run_command
rv.runner.CommandRunner.run(command_config_uri)
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
CommandRunner.run_from_proto(msg)
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
command.run()
File "/opt/src/rastervision/command/eval_command.py", line 24, in run
evaluator.process(scenes, tmp_dir)
File "/opt/src/rastervision/evaluation/classification_evaluator.py", line 36, in process
scene.aoi_polygons)
File "/opt/src/rastervision/data/label/object_detection_labels.py", line 70, in filter_by_aoi
np.array(new_boxes), np.array(new_class_ids), np.array(new_scores))
File "/opt/src/rastervision/data/label/object_detection_labels.py", line 27, in __init__
self.boxlist = BoxList(npboxes)
File "/opt/tf-models/object_detection/utils/np_box_list.py", line 46, in __init__
raise ValueError('Invalid dimensions for box data.')
ValueError: Invalid dimensions for box data.
/tmp/tmpwie3_vrf/tmp41bj3kgh/Makefile:9: recipe for target '3' failed
make: *** [3] Error 1
make: *** Waiting for unfinished jobs....
|
ValueError
|
def __init__(self, transform, image_crs, map_crs="epsg:4326"):
"""Construct transformer.
Args:
image_dataset: Rasterio DatasetReader
map_crs: CRS code
"""
self.map_proj = pyproj.Proj(init=map_crs)
self.image_proj = pyproj.Proj(image_crs)
super().__init__(image_crs, map_crs, transform)
|
def __init__(self, transform, image_crs, map_crs="epsg:4326"):
"""Construct transformer.
Args:
image_dataset: Rasterio DatasetReader
map_crs: CRS code
"""
self.map_proj = pyproj.Proj(init=map_crs)
self.image_proj = pyproj.Proj(init=image_crs)
super().__init__(image_crs, map_crs, transform)
|
https://github.com/azavea/raster-vision/issues/724
|
Checking for existing output [####################################] 100%
Saving command configuration to /opt/data/rv_root/chip/xview-object_detection/command-config.json...
Saving command configuration to /opt/data/rv_root/train/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/bundle/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/predict/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/eval/xview-object-detection-mobilenet/command-config.json...
python -m rastervision run_command /opt/data/rv_root/chip/xview-object_detection/command-config.json
Making training chips...
2019-03-20 16:00:47:rastervision.utils.files: INFO - Downloading s3://azavea-nyc-ml/cogs/000227.tif to /tmp/tmp98w0p7cl/tmpypmuuz6t/tmpqbscq03c/s3/azavea-nyc-ml/cogs/000227.tif
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 253, in run_command
rv.runner.CommandRunner.run(command_config_uri)
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
CommandRunner.run_from_proto(msg)
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
command.run()
File "/opt/src/rastervision/command/chip_command.py", line 22, in run
map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))
File "/opt/src/rastervision/command/chip_command.py", line 22, in <lambda>
map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))
File "/opt/src/rastervision/data/scene_config.py", line 36, in create_scene
raster_source = self.raster_source.create_source(tmp_dir)
File "/opt/src/rastervision/data/raster_source/geotiff_source_config.py", line 70, in create_source
y_shift_meters=y_shift_meters)
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 46, in __init__
super().__init__(raster_transformers, temp_dir, channel_order)
File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 47, in __init__
with self.activate():
File "/opt/src/rastervision/data/activate_mixin.py", line 21, in __enter__
self.activate()
File "/opt/src/rastervision/data/activate_mixin.py", line 54, in do_activate
self._activate()
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 104, in _activate
super()._activate()
File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 106, in _activate
self._set_crs_transformer()
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 56, in _set_crs_transformer
self.image_dataset)
File "/opt/src/rastervision/data/crs_transformer/rasterio_crs_transformer.py", line 59, in from_dataset
image_crs = dataset.crs['init']
File "/usr/lib/python3.5/collections/__init__.py", line 986, in __getitem__
raise KeyError(key)
KeyError: 'init'
/tmp/tmpkon0yhf2/tmpyysq6a82/Makefile:6: recipe for target '0' failed
make: *** [0] Error 1
|
KeyError
|
def from_dataset(cls, dataset, map_crs="epsg:4326"):
if dataset.crs is None:
return IdentityCRSTransformer()
transform = dataset.transform
image_crs = dataset.crs
return cls(transform, image_crs, map_crs)
|
def from_dataset(cls, dataset, map_crs="epsg:4326"):
if dataset.crs is None:
return IdentityCRSTransformer()
transform = dataset.transform
image_crs = dataset.crs["init"]
return cls(transform, image_crs, map_crs)
|
https://github.com/azavea/raster-vision/issues/724
|
Checking for existing output [####################################] 100%
Saving command configuration to /opt/data/rv_root/chip/xview-object_detection/command-config.json...
Saving command configuration to /opt/data/rv_root/train/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/bundle/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/predict/xview-object-detection-mobilenet/command-config.json...
Saving command configuration to /opt/data/rv_root/eval/xview-object-detection-mobilenet/command-config.json...
python -m rastervision run_command /opt/data/rv_root/chip/xview-object_detection/command-config.json
Making training chips...
2019-03-20 16:00:47:rastervision.utils.files: INFO - Downloading s3://azavea-nyc-ml/cogs/000227.tif to /tmp/tmp98w0p7cl/tmpypmuuz6t/tmpqbscq03c/s3/azavea-nyc-ml/cogs/000227.tif
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 253, in run_command
rv.runner.CommandRunner.run(command_config_uri)
File "/opt/src/rastervision/runner/command_runner.py", line 11, in run
CommandRunner.run_from_proto(msg)
File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto
command.run()
File "/opt/src/rastervision/command/chip_command.py", line 22, in run
map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))
File "/opt/src/rastervision/command/chip_command.py", line 22, in <lambda>
map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))
File "/opt/src/rastervision/data/scene_config.py", line 36, in create_scene
raster_source = self.raster_source.create_source(tmp_dir)
File "/opt/src/rastervision/data/raster_source/geotiff_source_config.py", line 70, in create_source
y_shift_meters=y_shift_meters)
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 46, in __init__
super().__init__(raster_transformers, temp_dir, channel_order)
File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 47, in __init__
with self.activate():
File "/opt/src/rastervision/data/activate_mixin.py", line 21, in __enter__
self.activate()
File "/opt/src/rastervision/data/activate_mixin.py", line 54, in do_activate
self._activate()
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 104, in _activate
super()._activate()
File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 106, in _activate
self._set_crs_transformer()
File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 56, in _set_crs_transformer
self.image_dataset)
File "/opt/src/rastervision/data/crs_transformer/rasterio_crs_transformer.py", line 59, in from_dataset
image_crs = dataset.crs['init']
File "/usr/lib/python3.5/collections/__init__.py", line 986, in __getitem__
raise KeyError(key)
KeyError: 'init'
/tmp/tmpkon0yhf2/tmpyysq6a82/Makefile:6: recipe for target '0' failed
make: *** [0] Error 1
|
KeyError
|
def __init__(self, image_crs=None, map_crs=None, transform=None):
self.image_crs = image_crs
self.map_crs = map_crs
self.transform = transform
|
def __init__(self, image_crs=None, map_crs=None):
self.image_crs = image_crs
self.map_crs = map_crs
|
https://github.com/azavea/raster-vision/issues/707
|
root@122d4f0150f4:/opt/data/mar5# rastervision predict spacenet.zip example.jpg out.tif
/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
fromlist, level)
2019-03-06 16:59:56.502396: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
s = DatasetReader(path, driver=driver, **kwargs)
[0, 1, 2]
2019-03-06 16:59:56:rastervision.task.semantic_segmentation: INFO - Making predictions for scene
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 241, in predict
predictor.predict(image_uri, output_uri, export_config)
File "/opt/src/rastervision/predictor.py", line 144, in predict
scene.prediction_label_store.save(labels)
File "/opt/src/rastervision/data/label_store/semantic_segmentation_raster_store.py", line 90, in save
transform = self.crs_transformer.transform
AttributeError: 'IdentityCRSTransformer' object has no attribute 'transform'
|
AttributeError
|
def get_affine_transform(self):
return self.transform
|
def get_affine_transform(self):
raise NotImplementedError()
|
https://github.com/azavea/raster-vision/issues/707
|
root@122d4f0150f4:/opt/data/mar5# rastervision predict spacenet.zip example.jpg out.tif
/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
fromlist, level)
2019-03-06 16:59:56.502396: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
s = DatasetReader(path, driver=driver, **kwargs)
[0, 1, 2]
2019-03-06 16:59:56:rastervision.task.semantic_segmentation: INFO - Making predictions for scene
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 241, in predict
predictor.predict(image_uri, output_uri, export_config)
File "/opt/src/rastervision/predictor.py", line 144, in predict
scene.prediction_label_store.save(labels)
File "/opt/src/rastervision/data/label_store/semantic_segmentation_raster_store.py", line 90, in save
transform = self.crs_transformer.transform
AttributeError: 'IdentityCRSTransformer' object has no attribute 'transform'
|
AttributeError
|
def __init__(self, transform, image_crs, map_crs="epsg:4326"):
"""Construct transformer.
Args:
image_dataset: Rasterio DatasetReader
map_crs: CRS code
"""
self.map_proj = pyproj.Proj(init=map_crs)
self.image_proj = pyproj.Proj(init=image_crs)
super().__init__(image_crs, map_crs, transform)
|
def __init__(self, transform, image_crs, map_crs="epsg:4326"):
"""Construct transformer.
Args:
image_dataset: Rasterio DatasetReader
map_crs: CRS code
"""
self.transform = transform
self.map_proj = pyproj.Proj(init=map_crs)
self.image_proj = pyproj.Proj(init=image_crs)
super().__init__(image_crs, map_crs)
|
https://github.com/azavea/raster-vision/issues/707
|
root@122d4f0150f4:/opt/data/mar5# rastervision predict spacenet.zip example.jpg out.tif
/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
fromlist, level)
2019-03-06 16:59:56.502396: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
s = DatasetReader(path, driver=driver, **kwargs)
[0, 1, 2]
2019-03-06 16:59:56:rastervision.task.semantic_segmentation: INFO - Making predictions for scene
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 241, in predict
predictor.predict(image_uri, output_uri, export_config)
File "/opt/src/rastervision/predictor.py", line 144, in predict
scene.prediction_label_store.save(labels)
File "/opt/src/rastervision/data/label_store/semantic_segmentation_raster_store.py", line 90, in save
transform = self.crs_transformer.transform
AttributeError: 'IdentityCRSTransformer' object has no attribute 'transform'
|
AttributeError
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
if self.vector_output:
# We need to store the whole output mask to run feature extraction.
# If the raster is large, this will result in running out of memory, so
# more work will be needed to get this to work in a scalable way. But this
# is complicated because of the need to merge features that are split
# across windows.
mask = np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
else:
mask = None
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
class_labels = labels.get_label_arr(window, clip_extent=self.extent)
clipped_window = (
(window.ymin, window.ymin + class_labels.shape[0]),
(window.xmin, window.xmin + class_labels.shape[1]),
)
if mask is not None:
mask[
clipped_window[0][0] : clipped_window[0][1],
clipped_window[1][0] : clipped_window[1][1],
] = class_labels
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(class_labels)
for chan in range(3):
dataset.write_band(
chan + 1, rgb_labels[:, :, chan], window=clipped_window
)
else:
img = class_labels.astype(dtype)
dataset.write_band(1, img, window=clipped_window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo["denoise"]
uri = vo["uri"]
mode = vo["mode"]
class_id = vo["class_id"]
class_mask = np.array(mask == class_id, dtype=np.uint8)
local_geojson_path = get_local_path(uri, self.tmp_dir)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
options = vo["building_options"]
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=options["min_aspect_ratio"],
min_area=options["min_area"],
width_factor=options["element_width_factor"],
thickness=options["element_thickness"],
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
if local_geojson_path:
with open(local_geojson_path, "w") as file_out:
file_out.write(geojson)
upload_or_copy(local_geojson_path, uri)
|
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
# TODO: this only works if crs_transformer is RasterioCRSTransformer.
# Need more general way of computing transform for the more general case.
transform = self.crs_transformer.transform
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
if self.vector_output:
# We need to store the whole output mask to run feature extraction.
# If the raster is large, this will result in running out of memory, so
# more work will be needed to get this to work in a scalable way. But this
# is complicated because of the need to merge features that are split
# across windows.
mask = np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
else:
mask = None
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
"w",
driver="GTiff",
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs,
) as dataset:
for window in labels.get_windows():
class_labels = labels.get_label_arr(window, clip_extent=self.extent)
clipped_window = (
(window.ymin, window.ymin + class_labels.shape[0]),
(window.xmin, window.xmin + class_labels.shape[1]),
)
if mask is not None:
mask[
clipped_window[0][0] : clipped_window[0][1],
clipped_window[1][0] : clipped_window[1][1],
] = class_labels
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(class_labels)
for chan in range(3):
dataset.write_band(
chan + 1, rgb_labels[:, :, chan], window=clipped_window
)
else:
img = class_labels.astype(dtype)
dataset.write_band(1, img, window=clipped_window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo["denoise"]
uri = vo["uri"]
mode = vo["mode"]
class_id = vo["class_id"]
class_mask = np.array(mask == class_id, dtype=np.uint8)
local_geojson_path = get_local_path(uri, self.tmp_dir)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == "buildings":
options = vo["building_options"]
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=options["min_aspect_ratio"],
min_area=options["min_area"],
width_factor=options["element_width_factor"],
thickness=options["element_thickness"],
)
elif uri and mode == "polygons":
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode
)
if local_geojson_path:
with open(local_geojson_path, "w") as file_out:
file_out.write(geojson)
upload_or_copy(local_geojson_path, uri)
|
https://github.com/azavea/raster-vision/issues/707
|
root@122d4f0150f4:/opt/data/mar5# rastervision predict spacenet.zip example.jpg out.tif
/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
fromlist, level)
2019-03-06 16:59:56.502396: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
s = DatasetReader(path, driver=driver, **kwargs)
[0, 1, 2]
2019-03-06 16:59:56:rastervision.task.semantic_segmentation: INFO - Making predictions for scene
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision/__main__.py", line 17, in <module>
rv.main()
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision/cli/main.py", line 241, in predict
predictor.predict(image_uri, output_uri, export_config)
File "/opt/src/rastervision/predictor.py", line 144, in predict
scene.prediction_label_store.save(labels)
File "/opt/src/rastervision/data/label_store/semantic_segmentation_raster_store.py", line 90, in save
transform = self.crs_transformer.transform
AttributeError: 'IdentityCRSTransformer' object has no attribute 'transform'
|
AttributeError
|
def get_service_scale(self, service_dict):
# service.scale for v2 and deploy.replicas for v3
scale = service_dict.get("scale", None)
deploy_dict = service_dict.get("deploy", None)
if not deploy_dict:
return 1 if scale is None else scale
if deploy_dict.get("mode", "replicated") != "replicated":
return 1 if scale is None else scale
replicas = deploy_dict.get("replicas", None)
if scale and replicas:
raise ConfigurationError(
"Both service.scale and service.deploy.replicas are set."
" Only one of them must be set."
)
if replicas:
scale = replicas
if scale is None:
return 1
# deploy may contain placement constraints introduced in v3.8
max_replicas = deploy_dict.get("placement", {}).get("max_replicas_per_node", scale)
scale = min(scale, max_replicas)
if max_replicas < scale:
log.warning(
"Scale is limited to {} ('max_replicas_per_node' field).".format(
max_replicas
)
)
return scale
|
def get_service_scale(self, service_dict):
# service.scale for v2 and deploy.replicas for v3
scale = service_dict.get("scale", None)
deploy_dict = service_dict.get("deploy", None)
if not deploy_dict:
return 1 if scale is None else scale
if deploy_dict.get("mode", "replicated") != "replicated":
return 1 if scale is None else scale
replicas = deploy_dict.get("replicas", None)
if scale and replicas:
raise ConfigurationError(
"Both service.scale and service.deploy.replicas are set."
" Only one of them must be set."
)
if replicas:
scale = replicas
# deploy may contain placement constraints introduced in v3.8
max_replicas = deploy_dict.get("placement", {}).get("max_replicas_per_node", scale)
scale = min(scale, max_replicas)
if max_replicas < scale:
log.warning(
"Scale is limited to {} ('max_replicas_per_node' field).".format(
max_replicas
)
)
return scale
|
https://github.com/docker/compose/issues/7671
|
WARNING: Some services (minecraft) use the 'deploy' key, which will be ignored. Compose does not support 'deploy' configuration - use `docker stack deploy` to deploy to a swarm.
Traceback (most recent call last):
File "docker-compose", line 3, in <module>
File "compose/cli/main.py", line 67, in main
File "compose/cli/main.py", line 120, in perform_command
File "compose/cli/command.py", line 70, in project_from_options
File "compose/cli/command.py", line 144, in get_project
File "compose/project.py", line 130, in from_config
File "compose/project.py", line 326, in get_service_scale
TypeError: '<' not supported between instances of 'NoneType' and 'NoneType'
[11029] Failed to execute script docker-compose
|
TypeError
|
def call_docker(args, dockeropts, environment):
executable_path = find_executable("docker")
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
tls = dockeropts.get("--tls", False)
ca_cert = dockeropts.get("--tlscacert")
cert = dockeropts.get("--tlscert")
key = dockeropts.get("--tlskey")
verify = dockeropts.get("--tlsverify")
host = dockeropts.get("--host")
tls_options = []
if tls:
tls_options.append("--tls")
if ca_cert:
tls_options.extend(["--tlscacert", ca_cert])
if cert:
tls_options.extend(["--tlscert", cert])
if key:
tls_options.extend(["--tlskey", key])
if verify:
tls_options.append("--tlsverify")
if host:
tls_options.extend(
["--host", re.sub(r"^https?://", "tcp://", host.lstrip("="))]
)
args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
filtered_env = {}
for k, v in environment.items():
if v is not None:
filtered_env[k] = environment[k]
return subprocess.call(args, env=filtered_env)
|
def call_docker(args, dockeropts, environment):
executable_path = find_executable("docker")
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
tls = dockeropts.get("--tls", False)
ca_cert = dockeropts.get("--tlscacert")
cert = dockeropts.get("--tlscert")
key = dockeropts.get("--tlskey")
verify = dockeropts.get("--tlsverify")
host = dockeropts.get("--host")
tls_options = []
if tls:
tls_options.append("--tls")
if ca_cert:
tls_options.extend(["--tlscacert", ca_cert])
if cert:
tls_options.extend(["--tlscert", cert])
if key:
tls_options.extend(["--tlskey", key])
if verify:
tls_options.append("--tlsverify")
if host:
tls_options.extend(
["--host", re.sub(r"^https?://", "tcp://", host.lstrip("="))]
)
args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
return subprocess.call(args, env=environment)
|
https://github.com/docker/compose/issues/7180
|
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 72, in main
File "compose/cli/main.py", line 128, in perform_command
File "compose/cli/main.py", line 491, in exec_command
File "compose/cli/main.py", line 1469, in call_docker
File "subprocess.py", line 339, in call
File "subprocess.py", line 800, in __init__
File "subprocess.py", line 1462, in _execute_child
File "os.py", line 810, in fsencode
TypeError: expected str, bytes or os.PathLike object, not NoneType
[24719] Failed to execute script docker-compose
|
TypeError
|
def version(self):
if "version" not in self.config:
return V1
version = self.config["version"]
if isinstance(version, dict):
log.warning(
'Unexpected type for "version" key in "{}". Assuming '
'"version" is the name of a service, and defaulting to '
"Compose file version 1.".format(self.filename)
)
return V1
if not isinstance(version, six.string_types):
raise ConfigurationError(
'Version in "{}" is invalid - it should be a string.'.format(self.filename)
)
if version == "1":
raise ConfigurationError(
'Version in "{}" is invalid. {}'.format(self.filename, VERSION_EXPLANATION)
)
version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")
if not version_pattern.match(version):
raise ConfigurationError(
'Version "{}" in "{}" is invalid.'.format(version, self.filename)
)
if version == "2":
return const.COMPOSEFILE_V2_0
if version == "3":
return const.COMPOSEFILE_V3_0
return ComposeVersion(version)
|
def version(self):
if "version" not in self.config:
return V1
version = self.config["version"]
if isinstance(version, dict):
log.warning(
'Unexpected type for "version" key in "{}". Assuming '
'"version" is the name of a service, and defaulting to '
"Compose file version 1.".format(self.filename)
)
return V1
if not isinstance(version, six.string_types):
raise ConfigurationError(
'Version in "{}" is invalid - it should be a string.'.format(self.filename)
)
if version == "1":
raise ConfigurationError(
'Version in "{}" is invalid. {}'.format(self.filename, VERSION_EXPLANATION)
)
if version == "2":
return const.COMPOSEFILE_V2_0
if version == "3":
return const.COMPOSEFILE_V3_0
return ComposeVersion(version)
|
https://github.com/docker/compose/issues/6036
|
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 121, in perform_command
File "compose/cli/main.py", line 332, in config
File "compose/cli/command.py", line 68, in get_config_from_options
File "compose/config/config.py", line 385, in load
File "compose/config/config.py", line 385, in <listcomp>
File "compose/config/config.py", line 535, in process_config_file
File "distutils/version.py", line 70, in __ge__
File "distutils/version.py", line 337, in _cmp
TypeError: '<' not supported between instances of 'str' and 'int'
[4431] Failed to execute script docker-compose
|
TypeError
|
def create_container(
self,
one_off=False,
previous_container=None,
number=None,
quiet=False,
**override_options,
):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
# This is only necessary for `scale` and `volumes_from`
# auto-creating containers to satisfy the dependency.
self.ensure_image_exists()
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if "name" in container_options and not quiet:
log.info("Creating %s" % container_options["name"])
try:
return Container.create(self.client, **container_options)
except APIError as ex:
raise OperationFailedError(
"Cannot create container for service %s: %s"
% (self.name, binarystr_to_unicode(ex.explanation))
)
|
def create_container(
self,
one_off=False,
previous_container=None,
number=None,
quiet=False,
**override_options,
):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
# This is only necessary for `scale` and `volumes_from`
# auto-creating containers to satisfy the dependency.
self.ensure_image_exists()
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if "name" in container_options and not quiet:
log.info("Creating %s" % container_options["name"])
try:
return Container.create(self.client, **container_options)
except APIError as ex:
raise OperationFailedError(
"Cannot create container for service %s: %s" % (self.name, ex.explanation)
)
|
https://github.com/docker/compose/issues/6998
|
$ docker-compose -f badcompose.yaml up
Creating ddev-d8composer-db ... done
Creating ddev-d8composer-dba ...
Creating ddev-d8composer-web ...
Creating ddev-d8composer-dba ... done
ERROR: for ddev-d8composer-web a bytes-like object is required, not 'str'
ERROR: for web a bytes-like object is required, not 'str'
Traceback (most recent call last):
File "site-packages/docker/api/client.py", line 261, in _raise_for_status
File "site-packages/requests/models.py", line 940, in raise_for_status
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.36/containers/5154c7d2e17e152a31a3513dad4e4354a4a57ba514b83382faff45eec9bb85d1/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "compose/service.py", line 625, in start_container
File "compose/container.py", line 241, in start
File "site-packages/docker/utils/decorators.py", line 19, in wrapped
File "site-packages/docker/api/container.py", line 1095, in start
File "site-packages/docker/api/client.py", line 263, in _raise_for_status
File "site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'Ports are not available: /forwards/expose/port returned unexpected status: 500'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 1106, in up
File "compose/cli/main.py", line 1102, in up
File "compose/project.py", line 570, in up
File "compose/parallel.py", line 112, in parallel_execute
File "compose/parallel.py", line 210, in producer
File "compose/project.py", line 556, in do
File "compose/service.py", line 546, in execute_convergence_plan
File "compose/service.py", line 467, in _execute_convergence_create
File "compose/parallel.py", line 112, in parallel_execute
File "compose/parallel.py", line 210, in producer
File "compose/service.py", line 465, in <lambda>
File "compose/service.py", line 457, in create_and_start
File "compose/service.py", line 627, in start_container
TypeError: a bytes-like object is required, not 'str'
[87400] Failed to execute script docker-compose
|
requests.exceptions.HTTPError
|
def start_container(self, container, use_network_aliases=True):
self.connect_container_to_networks(container, use_network_aliases)
try:
container.start()
except APIError as ex:
expl = binarystr_to_unicode(ex.explanation)
if "driver failed programming external connectivity" in expl:
log.warn("Host is already in use by another container")
raise OperationFailedError("Cannot start service %s: %s" % (self.name, expl))
return container
|
def start_container(self, container, use_network_aliases=True):
self.connect_container_to_networks(container, use_network_aliases)
try:
container.start()
except APIError as ex:
if "driver failed programming external connectivity" in ex.explanation:
log.warn("Host is already in use by another container")
raise OperationFailedError(
"Cannot start service %s: %s" % (self.name, ex.explanation)
)
return container
|
https://github.com/docker/compose/issues/6998
|
$ docker-compose -f badcompose.yaml up
Creating ddev-d8composer-db ... done
Creating ddev-d8composer-dba ...
Creating ddev-d8composer-web ...
Creating ddev-d8composer-dba ... done
ERROR: for ddev-d8composer-web a bytes-like object is required, not 'str'
ERROR: for web a bytes-like object is required, not 'str'
Traceback (most recent call last):
File "site-packages/docker/api/client.py", line 261, in _raise_for_status
File "site-packages/requests/models.py", line 940, in raise_for_status
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.36/containers/5154c7d2e17e152a31a3513dad4e4354a4a57ba514b83382faff45eec9bb85d1/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "compose/service.py", line 625, in start_container
File "compose/container.py", line 241, in start
File "site-packages/docker/utils/decorators.py", line 19, in wrapped
File "site-packages/docker/api/container.py", line 1095, in start
File "site-packages/docker/api/client.py", line 263, in _raise_for_status
File "site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'Ports are not available: /forwards/expose/port returned unexpected status: 500'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 1106, in up
File "compose/cli/main.py", line 1102, in up
File "compose/project.py", line 570, in up
File "compose/parallel.py", line 112, in parallel_execute
File "compose/parallel.py", line 210, in producer
File "compose/project.py", line 556, in do
File "compose/service.py", line 546, in execute_convergence_plan
File "compose/service.py", line 467, in _execute_convergence_create
File "compose/parallel.py", line 112, in parallel_execute
File "compose/parallel.py", line 210, in producer
File "compose/service.py", line 465, in <lambda>
File "compose/service.py", line 457, in create_and_start
File "compose/service.py", line 627, in start_container
TypeError: a bytes-like object is required, not 'str'
[87400] Failed to execute script docker-compose
|
requests.exceptions.HTTPError
|
def execution_context_labels(config_details, environment_file):
extra_labels = [
"{0}={1}".format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir))
]
if not use_config_from_stdin(config_details):
extra_labels.append(
"{0}={1}".format(LABEL_CONFIG_FILES, config_files_label(config_details))
)
if environment_file is not None:
extra_labels.append(
"{0}={1}".format(LABEL_ENVIRONMENT_FILE, os.path.normpath(environment_file))
)
return extra_labels
|
def execution_context_labels(config_details, environment_file):
extra_labels = [
"{0}={1}".format(
LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir)
),
"{0}={1}".format(LABEL_CONFIG_FILES, config_files_label(config_details)),
]
if environment_file is not None:
extra_labels.append(
"{0}={1}".format(LABEL_ENVIRONMENT_FILE, os.path.normpath(environment_file))
)
return extra_labels
|
https://github.com/docker/compose/issues/7032
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3.8/site-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3.8/site-packages/compose/cli/main.py", line 125, in perform_command
project = project_from_options('.', options)
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 53, in project_from_options
return get_project(
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 156, in get_project
execution_context_labels(config_details, environment_file),
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 163, in execution_context_labels
'{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)),
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 172, in config_files_label
return ",".join(
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 173, in <genexpr>
map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
File "/usr/lib/python3.8/posixpath.py", line 336, in normpath
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not NoneType
|
TypeError
|
def config_files_label(config_details):
return ",".join(
map(str, (config_file_path(c.filename) for c in config_details.config_files))
)
|
def config_files_label(config_details):
return ",".join(
map(str, (os.path.normpath(c.filename) for c in config_details.config_files))
)
|
https://github.com/docker/compose/issues/7032
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3.8/site-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3.8/site-packages/compose/cli/main.py", line 125, in perform_command
project = project_from_options('.', options)
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 53, in project_from_options
return get_project(
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 156, in get_project
execution_context_labels(config_details, environment_file),
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 163, in execution_context_labels
'{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)),
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 172, in config_files_label
return ",".join(
File "/usr/lib/python3.8/site-packages/compose/cli/command.py", line 173, in <genexpr>
map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
File "/usr/lib/python3.8/posixpath.py", line 336, in normpath
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not NoneType
|
TypeError
|
def check_remote_network_config(remote, local):
if local.driver and remote.get("Driver") != local.driver:
raise NetworkConfigChangedError(local.true_name, "driver")
local_opts = local.driver_opts or {}
remote_opts = remote.get("Options") or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get("Internal", False):
raise NetworkConfigChangedError(local.true_name, "internal")
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get(
"EnableIPv6", False
):
raise NetworkConfigChangedError(local.true_name, "enable_ipv6")
local_labels = local.labels or {}
remote_labels = remote.get("Labels") or {}
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith(
"com.docker."
): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warning(
'Network {}: label "{}" has changed. It may need to be'
" recreated.".format(local.true_name, k)
)
|
def check_remote_network_config(remote, local):
if local.driver and remote.get("Driver") != local.driver:
raise NetworkConfigChangedError(local.true_name, "driver")
local_opts = local.driver_opts or {}
remote_opts = remote.get("Options") or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get("Internal", False):
raise NetworkConfigChangedError(local.true_name, "internal")
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get(
"EnableIPv6", False
):
raise NetworkConfigChangedError(local.true_name, "enable_ipv6")
local_labels = local.labels or {}
remote_labels = remote.get("Labels", {})
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith(
"com.docker."
): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warning(
'Network {}: label "{}" has changed. It may need to be'
" recreated.".format(local.true_name, k)
)
|
https://github.com/docker/compose/issues/6854
|
$ sudo -E docker-compose up --detach
[16436] Failed to execute script docker-compose
Traceback (most recent call last):
File "bin/docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 1096, in up
File "compose/cli/main.py", line 1092, in up
File "compose/project.py", line 514, in up
File "compose/project.py", line 569, in initialize
File "compose/network.py", line 298, in initialize
File "compose/network.py", line 74, in ensure
File "compose/network.py", line 230, in check_remote_network_config
AttributeError: 'NoneType' object has no attribute 'keys'
|
AttributeError
|
def watch_events(thread_map, event_stream, presenters, thread_args):
crashed_containers = set()
for event in event_stream:
if event["action"] == "stop":
thread_map.pop(event["id"], None)
if event["action"] == "die":
thread_map.pop(event["id"], None)
crashed_containers.add(event["id"])
if event["action"] != "start":
continue
if event["id"] in thread_map:
if thread_map[event["id"]].is_alive():
continue
# Container was stopped and started, we need a new thread
thread_map.pop(event["id"], None)
# Container crashed so we should reattach to it
if event["id"] in crashed_containers:
container = event["container"]
if not container.is_restarting:
try:
container.attach_log_stream()
except APIError:
# Just ignore errors when reattaching to already crashed containers
pass
crashed_containers.remove(event["id"])
thread_map[event["id"]] = build_thread(
event["container"], next(presenters), *thread_args
)
|
def watch_events(thread_map, event_stream, presenters, thread_args):
crashed_containers = set()
for event in event_stream:
if event["action"] == "stop":
thread_map.pop(event["id"], None)
if event["action"] == "die":
thread_map.pop(event["id"], None)
crashed_containers.add(event["id"])
if event["action"] != "start":
continue
if event["id"] in thread_map:
if thread_map[event["id"]].is_alive():
continue
# Container was stopped and started, we need a new thread
thread_map.pop(event["id"], None)
# Container crashed so we should reattach to it
if event["id"] in crashed_containers:
event["container"].attach_log_stream()
crashed_containers.remove(event["id"])
thread_map[event["id"]] = build_thread(
event["container"], next(presenters), *thread_args
)
|
https://github.com/docker/compose/issues/6745
|
╰─$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
╰─$ docker-compose up
Creating readme_db_account_1 ... done
Creating readme_account15_1 ...
Creating readme_account13_1 ...
Creating readme_account15_1 ... done
Creating readme_account2_1 ... done
Creating readme_account9_1 ... done
Creating readme_account11_1 ... done
Creating readme_account8_1 ... done
Creating readme_account_1 ... done
Creating readme_account3_1 ... done
Creating readme_account5_1 ... done
Creating readme_account6_1 ... done
Creating readme_account10_1 ... done
Creating readme_account7_1 ... done
Creating readme_account4_1 ... done
Creating readme_account12_1 ... done
Creating readme_account14_1 ... done
Attaching to readme_db_account_1, readme_account1_1, readme_account13_1, readme_account10_1, readme_account9_1, readme_account12_1, readme_account8_1, readme_account6_1, readme_account5_1, readme_account2_1, readme_account14_1, readme_account7_1, readme_account11_1, readme_account15_1, readme_account3_1, readme_account_1, readme_account4_1
db_account_1 | Initializing database
db_account_1 | 2019-06-10T15:11:07.568516Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
db_account_1 | 2019-06-10T15:11:07.575008Z 0 [System] [MY-013169] [Server] /usr/sbin/mysqld (mysqld 8.0.15) initializing of server in progress as process 28
db_account_1 | 2019-06-10T15:11:13.366075Z 5 [Warning] [MY-010453] [Server] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
db_account_1 | 2019-06-10T15:11:17.264497Z 0 [System] [MY-013170] [Server] /usr/sbin/mysqld (mysqld 8.0.15) initializing of server has completed
db_account_1 | Database initialized
db_account_1 | MySQL init process in progress...
db_account_1 | MySQL init process in progress...
db_account_1 | mbind: Operation not permitted
db_account_1 | MySQL init process in progress...
db_account_1 | 2019-06-10T15:11:20.542013Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
db_account_1 | 2019-06-10T15:11:20.546077Z 0 [System] [MY-010116] [Server] /usr/sbin/mysqld (mysqld 8.0.15) starting as process 79
db_account_1 | 2019-06-10T15:11:21.905873Z 0 [Warning] [MY-010068] [Server] CA certificate ca.pem is self signed.
db_account_1 | 2019-06-10T15:11:21.910027Z 0 [Warning] [MY-011810] [Server] Insecure configuration for --pid-file: Location '/var/run/mysqld' in the path is accessible to all OS users. Consider choosing a different directory.
db_account_1 | 2019-06-10T15:11:21.967720Z 0 [System] [MY-010931] [Server] /usr/sbin/mysqld: ready for connections. Version: '8.0.15' socket: '/var/run/mysqld/mysqld.sock' port: 0 MySQL Community Server - GPL.
db_account_1 | 2019-06-10T15:11:22.038811Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Socket: '/var/run/mysqld/mysqlx.sock'
readme_account1_1 exited with code 0
readme_account13_1 exited with code 0
readme_account10_1 exited with code 0
readme_account9_1 exited with code 0
readme_account12_1 exited with code 0
db_account_1 | Warning: Unable to load '/usr/share/zoneinfo/iso3166.tab' as time zone. Skipping it.
db_account_1 | Warning: Unable to load '/usr/share/zoneinfo/leap-seconds.list' as time zone. Skipping it.
readme_account8_1 exited with code 0
db_account_1 | Warning: Unable to load '/usr/share/zoneinfo/zone.tab' as time zone. Skipping it.
db_account_1 | Warning: Unable to load '/usr/share/zoneinfo/zone1970.tab' as time zone. Skipping it.
db_account_1 | mysql: [Warning] Using a password on the command line interface can be insecure.
db_account_1 | mysql: [Warning] Using a password on the command line interface can be insecure.
db_account_1 | mysql: [Warning] Using a password on the command line interface can be insecure.
db_account_1 | mysql: [Warning] Using a password on the command line interface can be insecure.
db_account_1 |
readme_account3_1 exited with code 0
readme_account_1 exited with code 0
db_account_1 | 2019-06-10T15:11:41.974509Z 0 [System] [MY-010910] [Server] /usr/sbin/mysqld: Shutdown complete (mysqld 8.0.15) MySQL Community Server - GPL.
readme_account7_1 exited with code 0
db_account_1 |
db_account_1 | MySQL init process done. Ready for start up.
db_account_1 |
readme_account2_1 exited with code 0
readme_account11_1 exited with code 0
db_account_1 | 2019-06-10T15:11:42.698089Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
db_account_1 | 2019-06-10T15:11:42.698915Z 0 [System] [MY-010116] [Server] /usr/sbin/mysqld (mysqld 8.0.15) starting as process 1
db_account_1 | 2019-06-10T15:11:43.733127Z 0 [Warning] [MY-010068] [Server] CA certificate ca.pem is self signed.
db_account_1 | 2019-06-10T15:11:43.738792Z 0 [Warning] [MY-011810] [Server] Insecure configuration for --pid-file: Location '/var/run/mysqld' in the path is accessible to all OS users. Consider choosing a different directory.
db_account_1 | 2019-06-10T15:11:43.786543Z 0 [System] [MY-010931] [Server] /usr/sbin/mysqld: ready for connections. Version: '8.0.15' socket: '/var/run/mysqld/mysqld.sock' port: 3306 MySQL Community Server - GPL.
db_account_1 | 2019-06-10T15:11:43.797101Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Socket: '/var/run/mysqld/mysqlx.sock' bind-address: '::' port: 33060
readme_account14_1 exited with code 0
readme_account6_1 exited with code 0
readme_account15_1 exited with code 0
readme_account4_1 exited with code 0
readme_account5_1 exited with code 0
readme_account7_1 exited with code 0
readme_account_1 exited with code 0
readme_account3_1 exited with code 0
readme_account8_1 exited with code 0
readme_account2_1 exited with code 0
readme_account11_1 exited with code 0
readme_account12_1 exited with code 0
readme_account5_1 exited with code 0
readme_account1_1 exited with code 0
readme_account10_1 exited with code 0
readme_account9_1 exited with code 0
Exception in thread Thread-52:
Traceback (most recent call last):
File "site-packages/docker/api/client.py", line 246, in _raise_for_status
File "site-packages/requests/models.py", line 940, in raise_for_status
requests.exceptions.HTTPError: 409 Client Error: Conflict for url: http+docker://localhost/v1.25/containers/0e141dadaf757e383a6fcd674d8d67ff2c3e2f3474adba0e2f877524f35d7e8a/attach?logs=0&stdout=1&stderr=1&stream=1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "threading.py", line 916, in _bootstrap_inner
File "threading.py", line 864, in run
File "compose/cli/log_printer.py", line 233, in watch_events
File "compose/container.py", line 215, in attach_log_stream
File "compose/container.py", line 307, in attach
File "site-packages/docker/utils/decorators.py", line 19, in wrapped
File "site-packages/docker/api/container.py", line 57, in attach
File "site-packages/docker/api/client.py", line 385, in _read_from_socket
File "site-packages/docker/api/client.py", line 296, in _get_raw_response_socket
File "site-packages/docker/api/client.py", line 248, in _raise_for_status
File "site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
docker.errors.APIError: 409 Client Error: Conflict ("b'container 0e141dadaf757e383a6fcd674d8d67ff2c3e2f3474adba0e2f877524f35d7e8a is restarting, wait until the container is running'")
readme_account6_1 exited with code 0
readme_account13_1 exited with code 0
readme_account15_1 exited with code 0
|
requests.exceptions.HTTPError
|
def merge_networks(base, override):
merged_networks = {}
all_network_names = set(base) | set(override)
base = {k: {} for k in base} if isinstance(base, list) else base
override = {k: {} for k in override} if isinstance(override, list) else override
for network_name in all_network_names:
md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
md.merge_field("aliases", merge_unique_items_lists, [])
md.merge_field("link_local_ips", merge_unique_items_lists, [])
md.merge_scalar("priority")
md.merge_scalar("ipv4_address")
md.merge_scalar("ipv6_address")
merged_networks[network_name] = dict(md)
return merged_networks
|
def merge_networks(base, override):
merged_networks = {}
all_network_names = set(base) | set(override)
base = {k: {} for k in base} if isinstance(base, list) else base
override = {k: {} for k in override} if isinstance(override, list) else override
for network_name in all_network_names:
md = MergeDict(base.get(network_name, {}), override.get(network_name, {}))
md.merge_field("aliases", merge_unique_items_lists, [])
md.merge_field("link_local_ips", merge_unique_items_lists, [])
md.merge_scalar("priority")
md.merge_scalar("ipv4_address")
md.merge_scalar("ipv6_address")
merged_networks[network_name] = dict(md)
return merged_networks
|
https://github.com/docker/compose/issues/6525
|
docker-compose config
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 121, in perform_command
File "compose/cli/main.py", line 339, in config
File "compose/cli/command.py", line 70, in get_config_from_options
File "compose/config/config.py", line 404, in load
File "compose/config/config.py", line 502, in load_services
File "compose/config/config.py", line 493, in merge_services
File "compose/config/config.py", line 493, in <dictcomp>
File "compose/config/config.py", line 996, in merge_service_dicts_from_files
File "compose/config/config.py", line 1064, in merge_service_dicts
File "compose/config/config.py", line 1020, in merge_field
File "compose/config/config.py", line 1176, in merge_networks
File "compose/config/config.py", line 1015, in merge_field
File "compose/config/config.py", line 1012, in needs_merge
TypeError: argument of type 'NoneType' is not iterable
[1981] Failed to execute script docker-compose
|
TypeError
|
def slug(self):
if not self.full_slug:
return None
return truncate_id(self.full_slug)
|
def slug(self):
return truncate_id(self.full_slug)
|
https://github.com/docker/compose/issues/6311
|
$ pip install --no-cache-dir docker-compose
Collecting docker-compose
Downloading https://files.pythonhosted.org/packages/23/e7/3702078bb674d36e607c48177f4e7d93d6fecb13c32a8889d1172236848d/docker_compose-1.23.0-py2.py3-none-any.whl (131kB)
Collecting websocket-client<1.0,>=0.32.0 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/14/d4/6a8cd4e7f67da465108c7cc0a307a1c5da7e2cdf497330b682069b1d4758/websocket_client-0.53.0-py2.py3-none-any.whl (198kB)
Collecting PyYAML<4,>=3.10 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/9e/a3/1d13970c3f36777c583f136c136f804d70f500168edc1edea6daa7200769/PyYAML-3.13.tar.gz (270kB)
Collecting dockerpty<0.5,>=0.4.1 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/8d/ee/e9ecce4c32204a6738e0a5d5883d3413794d7498fe8b06f44becc028d3ba/dockerpty-0.4.1.tar.gz
Collecting backports.ssl-match-hostname>=3.5; python_version < "3.5" (from docker-compose)
Downloading https://files.pythonhosted.org/packages/76/21/2dc61178a2038a5cb35d14b61467c6ac632791ed05131dda72c20e7b9e23/backports.ssl_match_hostname-3.5.0.1.tar.gz
Collecting docopt<0.7,>=0.6.1 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz
Collecting ipaddress>=1.0.16; python_version < "3.3" (from docker-compose)
Downloading https://files.pythonhosted.org/packages/fc/d0/7fc3a811e011d4b388be48a0e381db8d990042df54aa4ef4599a31d39853/ipaddress-1.0.22-py2.py3-none-any.whl
Collecting enum34<2,>=1.0.4; python_version < "3.4" (from docker-compose)
Downloading https://files.pythonhosted.org/packages/c5/db/e56e6b4bbac7c4a06de1c50de6fe1ef3810018ae11732a50f15f62c7d050/enum34-1.1.6-py2-none-any.whl
Collecting requests!=2.11.0,!=2.12.2,!=2.18.0,<2.21,>=2.6.1 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/f1/ca/10332a30cb25b627192b4ea272c351bce3ca1091e541245cccbace6051d8/requests-2.20.0-py2.py3-none-any.whl (60kB)
Collecting texttable<0.10,>=0.9.0 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/02/e1/2565e6b842de7945af0555167d33acfc8a615584ef7abd30d1eae00a4d80/texttable-0.9.1.tar.gz
Collecting docker<4.0,>=3.5.0 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/c2/76/b8091dc6d9db038af62ae88f228da656a84632cf5d7a84dcf54c613d3fd0/docker-3.5.1-py2.py3-none-any.whl (126kB)
Collecting jsonschema<3,>=2.5.1 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/77/de/47e35a97b2b05c2fadbec67d44cfcdcd09b8086951b331d82de90d2912da/jsonschema-2.6.0-py2.py3-none-any.whl
Collecting cached-property<2,>=1.2.0 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/3b/86/85c1be2e8db9e13ef9a350aecd6dea292bd612fa288c2f40d035bb750ded/cached_property-1.5.1-py2.py3-none-any.whl
Collecting six<2,>=1.3.0 (from docker-compose)
Downloading https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl
Collecting chardet<3.1.0,>=3.0.2 (from requests!=2.11.0,!=2.12.2,!=2.18.0,<2.21,>=2.6.1->docker-compose)
Downloading https://files.pythonhosted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8/chardet-3.0.4-py2.py3-none-any.whl (133kB)
Collecting certifi>=2017.4.17 (from requests!=2.11.0,!=2.12.2,!=2.18.0,<2.21,>=2.6.1->docker-compose)
Downloading https://files.pythonhosted.org/packages/56/9d/1d02dd80bc4cd955f98980f28c5ee2200e1209292d5f9e9cc8d030d18655/certifi-2018.10.15-py2.py3-none-any.whl (146kB)
Collecting urllib3<1.25,>=1.21.1 (from requests!=2.11.0,!=2.12.2,!=2.18.0,<2.21,>=2.6.1->docker-compose)
Downloading https://files.pythonhosted.org/packages/8c/4b/5cbc4cb46095f369117dcb751821e1bef9dd86a07c968d8757e9204c324c/urllib3-1.24-py2.py3-none-any.whl (117kB)
Collecting idna<2.8,>=2.5 (from requests!=2.11.0,!=2.12.2,!=2.18.0,<2.21,>=2.6.1->docker-compose)
Downloading https://files.pythonhosted.org/packages/4b/2a/0276479a4b3caeb8a8c1af2f8e4355746a97fab05a372e4a2c6a6b876165/idna-2.7-py2.py3-none-any.whl (58kB)
Collecting docker-pycreds>=0.3.0 (from docker<4.0,>=3.5.0->docker-compose)
Downloading https://files.pythonhosted.org/packages/ea/bf/7e70aeebc40407fbdb96fa9f79fc8e4722ea889a99378303e3bcc73f4ab5/docker_pycreds-0.3.0-py2.py3-none-any.whl
Collecting functools32; python_version == "2.7" (from jsonschema<3,>=2.5.1->docker-compose)
Downloading https://files.pythonhosted.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz
Installing collected packages: six, websocket-client, PyYAML, dockerpty, backports.ssl-match-hostname, docopt, ipaddress, enum34, chardet, certifi, urllib3, idna, requests, texttable, docker-pycreds, docker, functools32, jsonschema, cached-property, docker-compose
Running setup.py install for PyYAML: started
Running setup.py install for PyYAML: finished with status 'done'
Running setup.py install for dockerpty: started
Running setup.py install for dockerpty: finished with status 'done'
Running setup.py install for backports.ssl-match-hostname: started
Running setup.py install for backports.ssl-match-hostname: finished with status 'done'
Running setup.py install for docopt: started
Running setup.py install for docopt: finished with status 'done'
Running setup.py install for texttable: started
Running setup.py install for texttable: finished with status 'done'
Running setup.py install for functools32: started
Running setup.py install for functools32: finished with status 'done'
Successfully installed PyYAML-3.13 backports.ssl-match-hostname-3.5.0.1 cached-property-1.5.1 certifi-2018.10.15 chardet-3.0.4 docker-3.5.1 docker-compose-1.23.0 docker-pycreds-0.3.0 dockerpty-0.4.1 docopt-0.6.2 enum34-1.1.6 functools32-3.2.3.post2 idna-2.7 ipaddress-1.0.22 jsonschema-2.6.0 requests-2.20.0 six-1.11.0 texttable-0.9.1 urllib3-1.24 websocket-client-0.53.0
You are using pip version 10.0.1, however version 18.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
$ docker-compose -f docker/docker-compose.yml -f docker/docker-compose.dev.yml run --no-deps php bin/console doctrine:migrations:migrate --allow-no-migration --no-interaction
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 127, in perform_command
handler(command, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 873, in run
self.toplevel_options, self.project_dir
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 1328, in run_one_off_container
**container_options)
File "/usr/lib/python2.7/site-packages/compose/service.py", line 326, in create_container
previous_container=previous_container,
File "/usr/lib/python2.7/site-packages/compose/service.py", line 895, in _get_container_create_options
one_off=one_off)
File "/usr/lib/python2.7/site-packages/compose/service.py", line 969, in _get_container_host_config
links=self._get_links(link_to_self=one_off),
File "/usr/lib/python2.7/site-packages/compose/service.py", line 800, in _get_links
links[container.name_without_project] = container.name
File "/usr/lib/python2.7/site-packages/compose/container.py", line 85, in name_without_project
return '{0}_{1}{2}'.format(self.service, self.number, '_' + self.slug if self.slug else '')
File "/usr/lib/python2.7/site-packages/compose/container.py", line 99, in slug
return truncate_id(self.full_slug)
File "/usr/lib/python2.7/site-packages/compose/utils.py", line 168, in truncate_id
if ':' in value:
TypeError: argument of type 'NoneType' is not iterable
ERROR: Job failed: exit code 1
|
TypeError
|
def write_initial(self, msg, obj_index):
if msg is None:
return
return self._write_noansi(msg, obj_index, "")
|
def write_initial(self, msg, obj_index):
if msg is None:
return
self.stream.write(
"{:<{width}} ... \r\n".format(msg + " " + obj_index, width=self.width)
)
self.stream.flush()
|
https://github.com/docker/compose/issues/5855
|
Traceback (most recent call last):
File "bin/docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 716, in pull
File "compose/project.py", line 558, in pull
TypeError: sequence item 0: expected a bytes-like object, str found
|
TypeError
|
def pull(
self,
service_names=None,
ignore_pull_failures=False,
parallel_pull=False,
silent=False,
include_deps=False,
):
services = self.get_services(service_names, include_deps)
if parallel_pull:
def pull_service(service):
service.pull(ignore_pull_failures, True)
_, errors = parallel.parallel_execute(
services,
pull_service,
operator.attrgetter("name"),
not silent and "Pulling" or None,
limit=5,
)
if len(errors):
combined_errors = "\n".join(
[
e.decode("utf-8") if isinstance(e, six.binary_type) else e
for e in errors.values()
]
)
raise ProjectError(combined_errors)
else:
for service in services:
service.pull(ignore_pull_failures, silent=silent)
|
def pull(
self,
service_names=None,
ignore_pull_failures=False,
parallel_pull=False,
silent=False,
include_deps=False,
):
services = self.get_services(service_names, include_deps)
if parallel_pull:
def pull_service(service):
service.pull(ignore_pull_failures, True)
_, errors = parallel.parallel_execute(
services,
pull_service,
operator.attrgetter("name"),
not silent and "Pulling" or None,
limit=5,
)
if len(errors):
raise ProjectError(b"\n".join(errors.values()))
else:
for service in services:
service.pull(ignore_pull_failures, silent=silent)
|
https://github.com/docker/compose/issues/5855
|
Traceback (most recent call last):
File "bin/docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 716, in pull
File "compose/project.py", line 558, in pull
TypeError: sequence item 0: expected a bytes-like object, str found
|
TypeError
|
def load_yaml(filename, encoding=None):
try:
with io.open(filename, "r", encoding=encoding) as fh:
return yaml.safe_load(fh)
except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
if encoding is None:
# Sometimes the user's locale sets an encoding that doesn't match
# the YAML files. Im such cases, retry once with the "default"
# UTF-8 encoding
return load_yaml(filename, encoding="utf-8")
error_name = getattr(e, "__module__", "") + "." + e.__class__.__name__
raise ConfigurationError("{}: {}".format(error_name, e))
|
def load_yaml(filename):
try:
with open(filename, "r") as fh:
return yaml.safe_load(fh)
except (IOError, yaml.YAMLError) as e:
error_name = getattr(e, "__module__", "") + "." + e.__class__.__name__
raise ConfigurationError("{}: {}".format(error_name, e))
|
https://github.com/docker/compose/issues/5826
|
Output of "docker-compose config"
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose\cli\main.py", line 71, in main
File "compose\cli\main.py", line 121, in perform_command
File "compose\cli\main.py", line 329, in config
File "compose\cli\command.py", line 67, in get_config_from_options
File "compose\config\config.py", line 283, in find
File "compose\config\config.py", line 283, in <listcomp>
File "compose\config\config.py", line 183, in from_filename
File "compose\config\config.py", line 1434, in load_yaml
File "site-packages\yaml\__init__.py", line 94, in safe_load
File "site-packages\yaml\__init__.py", line 70, in load
File "site-packages\yaml\loader.py", line 24, in __init__
File "site-packages\yaml\reader.py", line 85, in __init__
File "site-packages\yaml\reader.py", line 124, in determine_encoding
File "site-packages\yaml\reader.py", line 178, in update_raw
UnicodeDecodeError: 'cp932' codec can't decode byte 0x83 in position 19: illegal multibyte sequence
[11180] Failed to execute script docker-compose
|
UnicodeDecodeError
|
def stream_output(output, stream):
is_terminal = hasattr(stream, "isatty") and stream.isatty()
stream = utils.get_output_stream(stream)
all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = "progress" in event or "progressDetail" in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get("id")
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
write_to_stream("\n", stream)
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
write_to_stream("%c[%dA" % (27, diff), stream)
print_output_event(event, stream, is_terminal)
if "id" in event:
# move cursor back down
write_to_stream("%c[%dB" % (27, diff), stream)
stream.flush()
return all_events
|
def stream_output(output, stream):
is_terminal = hasattr(stream, "isatty") and stream.isatty()
stream = utils.get_output_stream(stream)
all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = "progress" in event or "progressDetail" in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get("id")
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
stream.write("\n")
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
print_output_event(event, stream, is_terminal)
if "id" in event:
# move cursor back down
stream.write("%c[%dB" % (27, diff))
stream.flush()
return all_events
|
https://github.com/docker/compose/issues/5784
|
Traceback (most recent call last):
File "bin/docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 280, in build
File "compose/project.py", line 372, in build
File "compose/service.py", line 1003, in build
File "compose/progress_stream.py", line 23, in stream_output
File "compose/progress_stream.py", line 90, in print_output_event
UnicodeEncodeError: 'ascii' codec can't encode character '\u2013' in position 151: ordinal not in range(128)
[96] Failed to execute script docker-compose
Exited with code 255
|
UnicodeEncodeError
|
def print_output_event(event, stream, is_terminal):
if "errorDetail" in event:
raise StreamOutputError(event["errorDetail"]["message"])
terminator = ""
if is_terminal and "stream" not in event:
# erase current line
write_to_stream("%c[2K\r" % 27, stream)
terminator = "\r"
elif "progressDetail" in event:
return
if "time" in event:
write_to_stream("[%s] " % event["time"], stream)
if "id" in event:
write_to_stream("%s: " % event["id"], stream)
if "from" in event:
write_to_stream("(from %s) " % event["from"], stream)
status = event.get("status", "")
if "progress" in event:
write_to_stream("%s %s%s" % (status, event["progress"], terminator), stream)
elif "progressDetail" in event:
detail = event["progressDetail"]
total = detail.get("total")
if "current" in detail and total:
percentage = float(detail["current"]) / float(total) * 100
write_to_stream("%s (%.1f%%)%s" % (status, percentage, terminator), stream)
else:
write_to_stream("%s%s" % (status, terminator), stream)
elif "stream" in event:
write_to_stream("%s%s" % (event["stream"], terminator), stream)
else:
write_to_stream("%s%s\n" % (status, terminator), stream)
|
def print_output_event(event, stream, is_terminal):
if "errorDetail" in event:
raise StreamOutputError(event["errorDetail"]["message"])
terminator = ""
if is_terminal and "stream" not in event:
# erase current line
stream.write("%c[2K\r" % 27)
terminator = "\r"
elif "progressDetail" in event:
return
if "time" in event:
stream.write("[%s] " % event["time"])
if "id" in event:
stream.write("%s: " % event["id"])
if "from" in event:
stream.write("(from %s) " % event["from"])
status = event.get("status", "")
if "progress" in event:
stream.write("%s %s%s" % (status, event["progress"], terminator))
elif "progressDetail" in event:
detail = event["progressDetail"]
total = detail.get("total")
if "current" in detail and total:
percentage = float(detail["current"]) / float(total) * 100
stream.write("%s (%.1f%%)%s" % (status, percentage, terminator))
else:
stream.write("%s%s" % (status, terminator))
elif "stream" in event:
stream.write("%s%s" % (event["stream"], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
|
https://github.com/docker/compose/issues/5784
|
Traceback (most recent call last):
File "bin/docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 127, in perform_command
File "compose/cli/main.py", line 280, in build
File "compose/project.py", line 372, in build
File "compose/service.py", line 1003, in build
File "compose/progress_stream.py", line 23, in stream_output
File "compose/progress_stream.py", line 90, in print_output_event
UnicodeEncodeError: 'ascii' codec can't encode character '\u2013' in position 151: ordinal not in range(128)
[96] Failed to execute script docker-compose
Exited with code 255
|
UnicodeEncodeError
|
def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
"""
Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
Anonymous volume mounts are updated in place instead.
"""
volumes = []
volumes_option = volumes_option or []
container_mounts = dict(
(mount["Destination"], mount) for mount in container.get("Mounts") or {}
)
image_volumes = [
VolumeSpec.parse(volume)
for volume in container.image_config["ContainerConfig"].get("Volumes") or {}
]
for volume in set(volumes_option + image_volumes):
# No need to preserve host volumes
if volume.external:
continue
# Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
continue
mount = container_mounts.get(volume.internal)
# New volume, doesn't exist in the old container
if not mount:
continue
# Volume was previously a host volume, now it's a container volume
if not mount.get("Name"):
continue
# Copy existing volume from old container
volume = volume._replace(external=mount["Name"])
volumes.append(volume)
updated_mounts = False
for mount in mounts_option:
if mount.type != "volume":
continue
ctnr_mount = container_mounts.get(mount.target)
if not ctnr_mount or not ctnr_mount.get("Name"):
continue
mount.source = ctnr_mount["Name"]
updated_mounts = True
return volumes, updated_mounts
|
def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
"""
Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
Anonymous volume mounts are updated in place instead.
"""
volumes = []
volumes_option = volumes_option or []
container_mounts = dict(
(mount["Destination"], mount) for mount in container.get("Mounts") or {}
)
image_volumes = [
VolumeSpec.parse(volume)
for volume in container.image_config["ContainerConfig"].get("Volumes") or {}
]
for volume in set(volumes_option + image_volumes):
# No need to preserve host volumes
if volume.external:
continue
# Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
continue
mount = container_mounts.get(volume.internal)
# New volume, doesn't exist in the old container
if not mount:
continue
# Volume was previously a host volume, now it's a container volume
if not mount.get("Name"):
continue
# Copy existing volume from old container
volume = volume._replace(external=mount["Name"])
volumes.append(volume)
updated_mounts = False
for mount in mounts_option:
if mount.type != "volume":
continue
ctnr_mount = container_mounts.get(mount.target)
if not ctnr_mount.get("Name"):
continue
mount.source = ctnr_mount["Name"]
updated_mounts = True
return volumes, updated_mounts
|
https://github.com/docker/compose/issues/5591
|
➜ projectname docker-compose -f docker-compose.local.yml up
Recreating 12396073747d_projectname_nginx_1 ...
projectname_projectname_1 is up-to-date
ERROR: for 12396073747d_projectname_nginx_1 'NoneType' object has no attribute 'get'
ERROR: for nginx 'NoneType' object has no attribute 'get'
Traceback (most recent call last):
File "docker-compose", line 6, in <module>
File "compose/cli/main.py", line 71, in main
File "compose/cli/main.py", line 124, in perform_command
File "compose/cli/main.py", line 959, in up
File "compose/project.py", line 479, in up
File "compose/parallel.py", line 80, in parallel_execute
AttributeError: 'NoneType' object has no attribute 'get'
Failed to execute script docker-compose
|
AttributeError
|
def substitute(self, mapping):
# Helper function for .sub()
def convert(mo):
named = mo.group("named") or mo.group("braced")
braced = mo.group("braced")
if braced is not None:
sep = mo.group("sep")
result = self.process_braced_group(braced, sep, mapping)
if result:
return result
if named is not None:
val = mapping[named]
if isinstance(val, six.binary_type):
val = val.decode("utf-8")
return "%s" % (val,)
if mo.group("escaped") is not None:
return self.delimiter
if mo.group("invalid") is not None:
self._invalid(mo)
raise ValueError("Unrecognized named group in pattern", self.pattern)
return self.pattern.sub(convert, self.template)
|
def substitute(self, mapping):
# Helper function for .sub()
def convert(mo):
named = mo.group("named") or mo.group("braced")
braced = mo.group("braced")
if braced is not None:
sep = mo.group("sep")
result = self.process_braced_group(braced, sep, mapping)
if result:
return result
if named is not None:
val = mapping[named]
return "%s" % (val,)
if mo.group("escaped") is not None:
return self.delimiter
if mo.group("invalid") is not None:
self._invalid(mo)
raise ValueError("Unrecognized named group in pattern", self.pattern)
return self.pattern.sub(convert, self.template)
|
https://github.com/docker/compose/issues/5549
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 118, in perform_command
handler(command, options, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 322, in config
print(serialize_config(compose_config, image_digests))
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 93, in serialize_config
width=80
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 218, in safe_dump
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 190, in dump_all
dumper.represent(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 28, in represent
node = self.represent_data(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 30, in serialize_string
data = data.replace('$', '$$')
UnicodeDecodeError: ‘ascii’ codec can't decode byte 0xe2 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def convert(mo):
named = mo.group("named") or mo.group("braced")
braced = mo.group("braced")
if braced is not None:
sep = mo.group("sep")
result = self.process_braced_group(braced, sep, mapping)
if result:
return result
if named is not None:
val = mapping[named]
if isinstance(val, six.binary_type):
val = val.decode("utf-8")
return "%s" % (val,)
if mo.group("escaped") is not None:
return self.delimiter
if mo.group("invalid") is not None:
self._invalid(mo)
raise ValueError("Unrecognized named group in pattern", self.pattern)
|
def convert(mo):
named = mo.group("named") or mo.group("braced")
braced = mo.group("braced")
if braced is not None:
sep = mo.group("sep")
result = self.process_braced_group(braced, sep, mapping)
if result:
return result
if named is not None:
val = mapping[named]
return "%s" % (val,)
if mo.group("escaped") is not None:
return self.delimiter
if mo.group("invalid") is not None:
self._invalid(mo)
raise ValueError("Unrecognized named group in pattern", self.pattern)
|
https://github.com/docker/compose/issues/5549
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 118, in perform_command
handler(command, options, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 322, in config
print(serialize_config(compose_config, image_digests))
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 93, in serialize_config
width=80
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 218, in safe_dump
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 190, in dump_all
dumper.represent(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 28, in represent
node = self.represent_data(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 30, in serialize_string
data = data.replace('$', '$$')
UnicodeDecodeError: ‘ascii’ codec can't decode byte 0xe2 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def serialize_string(dumper, data):
"""Ensure boolean-like strings are quoted in the output and escape $ characters"""
representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
if isinstance(data, six.binary_type):
data = data.decode("utf-8")
data = data.replace("$", "$$")
if data.lower() in ("y", "n", "yes", "no", "on", "off", "true", "false"):
# Empirically only y/n appears to be an issue, but this might change
# depending on which PyYaml version is being used. Err on safe side.
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"')
return representer(data)
|
def serialize_string(dumper, data):
"""Ensure boolean-like strings are quoted in the output and escape $ characters"""
representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
data = data.replace("$", "$$")
if data.lower() in ("y", "n", "yes", "no", "on", "off", "true", "false"):
# Empirically only y/n appears to be an issue, but this might change
# depending on which PyYaml version is being used. Err on safe side.
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"')
return representer(data)
|
https://github.com/docker/compose/issues/5549
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 118, in perform_command
handler(command, options, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 322, in config
print(serialize_config(compose_config, image_digests))
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 93, in serialize_config
width=80
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 218, in safe_dump
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 190, in dump_all
dumper.represent(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 28, in represent
node = self.represent_data(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 30, in serialize_string
data = data.replace('$', '$$')
UnicodeDecodeError: ‘ascii’ codec can't decode byte 0xe2 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def serialize_config(config, image_digests=None):
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
indent=2,
width=80,
allow_unicode=True,
)
|
def serialize_config(config, image_digests=None):
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
indent=2,
width=80,
)
|
https://github.com/docker/compose/issues/5549
|
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 118, in perform_command
handler(command, options, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 322, in config
print(serialize_config(compose_config, image_digests))
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 93, in serialize_config
width=80
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 218, in safe_dump
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
File "/usr/lib/python2.7/site-packages/yaml/__init__.py", line 190, in dump_all
dumper.represent(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 28, in represent
node = self.represent_data(data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 225, in represent_dict
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 123, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib/python2.7/site-packages/yaml/representer.py", line 57, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib/python2.7/site-packages/compose/config/serialize.py", line 30, in serialize_string
data = data.replace('$', '$$')
UnicodeDecodeError: ‘ascii’ codec can't decode byte 0xe2 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def project_from_options(project_dir, options):
environment = Environment.from_env_file(project_dir)
set_parallel_limit(environment)
host = options.get("--host")
if host is not None:
host = host.lstrip("=")
return get_project(
project_dir,
get_config_path_from_options(project_dir, options, environment),
project_name=options.get("--project-name"),
verbose=options.get("--verbose"),
host=host,
tls_config=tls_config_from_options(options),
environment=environment,
override_dir=options.get("--project-directory"),
)
|
def project_from_options(project_dir, options):
environment = Environment.from_env_file(project_dir)
host = options.get("--host")
if host is not None:
host = host.lstrip("=")
return get_project(
project_dir,
get_config_path_from_options(project_dir, options, environment),
project_name=options.get("--project-name"),
verbose=options.get("--verbose"),
host=host,
tls_config=tls_config_from_options(options),
environment=environment,
override_dir=options.get("--project-directory"),
)
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
|
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def get_project(
project_dir,
config_path=None,
project_name=None,
verbose=False,
host=None,
tls_config=None,
environment=None,
override_dir=None,
):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
config_data = config.load(config_details)
api_version = environment.get(
"COMPOSE_API_VERSION", API_VERSIONS[config_data.version]
)
client = get_client(
verbose=verbose,
version=api_version,
tls_config=tls_config,
host=host,
environment=environment,
)
global_parallel_limit = environment.get("COMPOSE_PARALLEL_LIMIT")
if global_parallel_limit:
global_parallel_limit = int(global_parallel_limit)
with errors.handle_connection_errors(client):
return Project.from_config(
project_name,
config_data,
client,
global_parallel_limit=global_parallel_limit,
)
|
def get_project(
project_dir,
config_path=None,
project_name=None,
verbose=False,
host=None,
tls_config=None,
environment=None,
override_dir=None,
):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
config_data = config.load(config_details)
api_version = environment.get(
"COMPOSE_API_VERSION", API_VERSIONS[config_data.version]
)
client = get_client(
verbose=verbose,
version=api_version,
tls_config=tls_config,
host=host,
environment=environment,
)
with errors.handle_connection_errors(client):
return Project.from_config(project_name, config_data, client)
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
|
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def __init__(
self,
name,
services,
client,
networks=None,
volumes=None,
config_version=None,
parallel_limit=None,
):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
parallel.GlobalLimit.set_global_limit(value=parallel_limit)
|
def __init__(
self, name, services, client, networks=None, volumes=None, config_version=None
):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def from_config(cls, name, config_data, client, global_parallel_limit=None):
"""
Construct a Project from a config.Config object.
"""
use_networking = config_data.version and config_data.version != V1
networks = build_networks(name, config_data, client)
project_networks = ProjectNetworks.from_services(
config_data.services, networks, use_networking
)
volumes = ProjectVolumes.from_config(name, config_data, client)
project = cls(
name,
[],
client,
project_networks,
volumes,
config_data.version,
parallel_limit=global_parallel_limit,
)
for service_dict in config_data.services:
service_dict = dict(service_dict)
if use_networking:
service_networks = get_networks(service_dict, networks)
else:
service_networks = {}
service_dict.pop("networks", None)
links = project.get_links(service_dict)
network_mode = project.get_network_mode(
service_dict, list(service_networks.keys())
)
pid_mode = project.get_pid_mode(service_dict)
volumes_from = get_volumes_from(project, service_dict)
if config_data.version != V1:
service_dict["volumes"] = [
volumes.namespace_spec(volume_spec)
for volume_spec in service_dict.get("volumes", [])
]
secrets = get_secrets(
service_dict["name"],
service_dict.pop("secrets", None) or [],
config_data.secrets,
)
project.services.append(
Service(
service_dict.pop("name"),
client=client,
project=name,
use_networking=use_networking,
networks=service_networks,
links=links,
network_mode=network_mode,
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
**service_dict,
)
)
return project
|
def from_config(cls, name, config_data, client):
"""
Construct a Project from a config.Config object.
"""
use_networking = config_data.version and config_data.version != V1
networks = build_networks(name, config_data, client)
project_networks = ProjectNetworks.from_services(
config_data.services, networks, use_networking
)
volumes = ProjectVolumes.from_config(name, config_data, client)
project = cls(name, [], client, project_networks, volumes, config_data.version)
for service_dict in config_data.services:
service_dict = dict(service_dict)
if use_networking:
service_networks = get_networks(service_dict, networks)
else:
service_networks = {}
service_dict.pop("networks", None)
links = project.get_links(service_dict)
network_mode = project.get_network_mode(
service_dict, list(service_networks.keys())
)
pid_mode = project.get_pid_mode(service_dict)
volumes_from = get_volumes_from(project, service_dict)
if config_data.version != V1:
service_dict["volumes"] = [
volumes.namespace_spec(volume_spec)
for volume_spec in service_dict.get("volumes", [])
]
secrets = get_secrets(
service_dict["name"],
service_dict.pop("secrets", None) or [],
config_data.secrets,
)
project.services.append(
Service(
service_dict.pop("name"),
client=client,
project=name,
use_networking=use_networking,
networks=service_networks,
links=links,
network_mode=network_mode,
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
**service_dict,
)
)
return project
|
https://github.com/docker/compose/issues/1828
|
Exception in thread Thread-5:
Traceback (most recent call last):
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 810, in __bootstrap_inner
File "/compose/build/docker-compose/out00-PYZ.pyz/threading", line 763, in run
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.utils", line 31, in inner_execute_function
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 220, in <lambda>
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 173, in create_and_start
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 268, in create_container
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 579, in _get_container_create_options
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 464, in config_hash
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 469, in config_dict
File "/compose/build/docker-compose/out00-PYZ.pyz/compose.service", line 295, in image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.utils.decorators", line 20, in wrapped
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.client", line 489, in inspect_image
File "/compose/build/docker-compose/out00-PYZ.pyz/docker.clientbase", line 86, in _get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 477, in get
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 465, in request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.sessions", line 573, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.adapters", line 370, in send
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 544, in urlopen
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 341, in _make_request
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connectionpool", line 761, in _validate_conn
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.connection", line 238, in connect
File "/compose/build/docker-compose/out00-PYZ.pyz/requests.packages.urllib3.util.ssl_", line 277, in ssl_wrap_socket
IOError: [Errno 24] Too many open files
|
IOError
|
def to_int(s):
# We must be able to handle octal representation for `mode` values notably
if six.PY3 and re.match("^0[0-9]+$", s.strip()):
s = "0o" + s[1:]
try:
return int(s, base=0)
except ValueError:
raise ValueError('"{}" is not a valid integer'.format(s))
|
def to_int(s):
# We must be able to handle octal representation for `mode` values notably
if six.PY3 and re.match("^0[0-9]+$", s.strip()):
s = "0o" + s[1:]
return int(s, base=0)
|
https://github.com/docker/compose/issues/5527
|
$ docker-compose pull
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.18.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3.6/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python3.6/site-packages/compose/cli/main.py", line 121, in perform_command
project = project_from_options('.', options)
File "/usr/lib/python3.6/site-packages/compose/cli/command.py", line 37, in project_from_options
override_dir=options.get('--project-directory'),
File "/usr/lib/python3.6/site-packages/compose/cli/command.py", line 91, in get_project
config_data = config.load(config_details)
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 375, in load
for config_file in config_details.config_files
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 375, in <listcomp>
for config_file in config_details.config_files
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 506, in process_config_file
environment)
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 497, in interpolate_config_section
environment
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 44, in interpolate_environment_variables
for name, config_dict in config.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 44, in <genexpr>
for name, config_dict in config.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 39, in process_item
for key, val in (config_dict or {}).items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 39, in <genexpr>
for key, val in (config_dict or {}).items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 54, in interpolate_value
return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name))
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 74, in recursive_interpolate
for (key, val) in obj.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 74, in <genexpr>
for (key, val) in obj.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 70, in recursive_interpolate
return converter.convert(config_path, interpolator.interpolate(obj))
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 184, in convert
return self.map[rexp](value)
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 141, in to_int
return int(s, base=0)
ValueError: invalid literal for int() with base 0: '1g'
|
ValueError
|
def convert(self, path, value):
for rexp in self.map.keys():
if rexp.match(path):
try:
return self.map[rexp](value)
except ValueError as e:
raise ConfigurationError(
"Error while attempting to convert {} to appropriate type: {}".format(
path, e
)
)
return value
|
def convert(self, path, value):
for rexp in self.map.keys():
if rexp.match(path):
return self.map[rexp](value)
return value
|
https://github.com/docker/compose/issues/5527
|
$ docker-compose pull
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.18.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3.6/site-packages/compose/cli/main.py", line 71, in main
command()
File "/usr/lib/python3.6/site-packages/compose/cli/main.py", line 121, in perform_command
project = project_from_options('.', options)
File "/usr/lib/python3.6/site-packages/compose/cli/command.py", line 37, in project_from_options
override_dir=options.get('--project-directory'),
File "/usr/lib/python3.6/site-packages/compose/cli/command.py", line 91, in get_project
config_data = config.load(config_details)
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 375, in load
for config_file in config_details.config_files
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 375, in <listcomp>
for config_file in config_details.config_files
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 506, in process_config_file
environment)
File "/usr/lib/python3.6/site-packages/compose/config/config.py", line 497, in interpolate_config_section
environment
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 44, in interpolate_environment_variables
for name, config_dict in config.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 44, in <genexpr>
for name, config_dict in config.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 39, in process_item
for key, val in (config_dict or {}).items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 39, in <genexpr>
for key, val in (config_dict or {}).items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 54, in interpolate_value
return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name))
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 74, in recursive_interpolate
for (key, val) in obj.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 74, in <genexpr>
for (key, val) in obj.items()
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 70, in recursive_interpolate
return converter.convert(config_path, interpolator.interpolate(obj))
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 184, in convert
return self.map[rexp](value)
File "/usr/lib/python3.6/site-packages/compose/config/interpolation.py", line 141, in to_int
return int(s, base=0)
ValueError: invalid literal for int() with base 0: '1g'
|
ValueError
|
def _get_container_create_options(
self, override_options, number, one_off=False, previous_container=None
):
add_config_hash = not one_off and not override_options
container_options = dict(
(k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options
)
override_volumes = override_options.pop("volumes", [])
container_options.update(override_options)
if not container_options.get("name"):
container_options["name"] = self.get_container_name(self.name, number, one_off)
container_options.setdefault("detach", True)
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches behavior
# until Docker Engine 1.11.0 - Docker API 1.23.
if (
version_lt(self.client.api_version, "1.23")
and "hostname" in container_options
and "domainname" not in container_options
and "." in container_options["hostname"]
):
parts = container_options["hostname"].partition(".")
container_options["hostname"] = parts[0]
container_options["domainname"] = parts[2]
if (
version_gte(self.client.api_version, "1.25")
and "stop_grace_period" in self.options
):
container_options["stop_timeout"] = self.stop_timeout(None)
if "ports" in container_options or "expose" in self.options:
container_options["ports"] = build_container_ports(
formatted_ports(container_options.get("ports", [])), self.options
)
if "volumes" in container_options or override_volumes:
container_options["volumes"] = list(
set(container_options.get("volumes", []) + override_volumes)
)
container_options["environment"] = merge_environment(
self.options.get("environment"), override_options.get("environment")
)
container_options["labels"] = merge_labels(
self.options.get("labels"), override_options.get("labels")
)
container_options, override_options = self._build_container_volume_options(
previous_container, container_options, override_options
)
container_options["image"] = self.image_name
container_options["labels"] = build_container_labels(
container_options.get("labels", {}),
self.labels(one_off=one_off),
number,
self.config_hash if add_config_hash else None,
)
# Delete options which are only used in HostConfig
for key in HOST_CONFIG_KEYS:
container_options.pop(key, None)
container_options["host_config"] = self._get_container_host_config(
override_options, one_off=one_off
)
networking_config = self.build_default_networking_config()
if networking_config:
container_options["networking_config"] = networking_config
container_options["environment"] = format_environment(
container_options["environment"]
)
return container_options
|
def _get_container_create_options(
self, override_options, number, one_off=False, previous_container=None
):
add_config_hash = not one_off and not override_options
container_options = dict(
(k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options
)
override_volumes = override_options.pop("volumes", [])
container_options.update(override_options)
if not container_options.get("name"):
container_options["name"] = self.get_container_name(self.name, number, one_off)
container_options.setdefault("detach", True)
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches behavior
# until Docker Engine 1.11.0 - Docker API 1.23.
if (
version_lt(self.client.api_version, "1.23")
and "hostname" in container_options
and "domainname" not in container_options
and "." in container_options["hostname"]
):
parts = container_options["hostname"].partition(".")
container_options["hostname"] = parts[0]
container_options["domainname"] = parts[2]
if (
version_gte(self.client.api_version, "1.25")
and "stop_grace_period" in self.options
):
container_options["stop_timeout"] = self.stop_timeout(None)
if "ports" in container_options or "expose" in self.options:
container_options["ports"] = build_container_ports(
formatted_ports(container_options.get("ports", [])), self.options
)
if "volumes" in container_options or override_volumes:
container_options["volumes"] = list(
set(container_options.get("volumes", []) + override_volumes)
)
container_options["environment"] = merge_environment(
self.options.get("environment"), override_options.get("environment")
)
container_options["labels"] = merge_labels(
self.options.get("labels"), override_options.get("labels")
)
container_volumes = []
container_mounts = []
if "volumes" in container_options:
container_volumes = [
v for v in container_options.get("volumes") if isinstance(v, VolumeSpec)
]
container_mounts = [
v for v in container_options.get("volumes") if isinstance(v, MountSpec)
]
binds, affinity = merge_volume_bindings(
container_volumes,
self.options.get("tmpfs") or [],
previous_container,
container_mounts,
)
override_options["binds"] = binds
container_options["environment"].update(affinity)
container_options["volumes"] = dict(
(v.internal, {}) for v in container_volumes or {}
)
override_options["mounts"] = [build_mount(v) for v in container_mounts] or None
secret_volumes = self.get_secret_volumes()
if secret_volumes:
if version_lt(self.client.api_version, "1.30"):
override_options["binds"].extend(v.legacy_repr() for v in secret_volumes)
container_options["volumes"].update((v.target, {}) for v in secret_volumes)
else:
override_options["mounts"] = override_options.get("mounts") or []
override_options["mounts"].extend([build_mount(v) for v in secret_volumes])
container_options["image"] = self.image_name
container_options["labels"] = build_container_labels(
container_options.get("labels", {}),
self.labels(one_off=one_off),
number,
self.config_hash if add_config_hash else None,
)
# Delete options which are only used in HostConfig
for key in HOST_CONFIG_KEYS:
container_options.pop(key, None)
container_options["host_config"] = self._get_container_host_config(
override_options, one_off=one_off
)
networking_config = self.build_default_networking_config()
if networking_config:
container_options["networking_config"] = networking_config
container_options["environment"] = format_environment(
container_options["environment"]
)
return container_options
|
https://github.com/docker/compose/issues/5489
|
compose.cli.verbose_proxy.proxy_callable: docker create_host_config <- (device_read_iops=None, mem_swappiness=None, links=[], oom_score_adj=None, blkio_weight=None, cpu_count=None, cpuset_cpus=None, dns_search=None, pid_mode=None, init_path=None, log_config={'Type': u'', 'Config': {}}, cpu_quota=None, read_only=None, cpu_percent=None, device_read_bps=None, storage_opt=None, init=None, dns=None, volumes_from=[], ipc_mode=None, mem_reservation=None, security_opt=None, shm_size=None, device_write_iops=None, dns_opt=None, cgroup_parent=None, group_add=None, network_mode=u'cerebrom_default', volume_driver=None, oom_kill_disable=None, userns_mode=None, tmpfs=None, nano_cpus=None, port_bindings={'9600/tcp': [None]}, isolation=None, memswap_limit=None, restart_policy=None, blkio_weight_device=None, devices=None, extra_hosts=None, binds=[], sysctls=None, pids_limit=None, device_write_bps=None, cap_add=None, mounts=[{'Source': u'/home/wayne/projects/ormuco/cerebrom/docker/ormuco-vpn/credentials/vpnc', 'ReadOnly': None, 'Type': 'bind', 'Target': '/etc/vpnc'}, {'Source': u'/home/wayne/projects/ormuco/cerebrom/docker/ormuco-vpn/credentials/ssh', 'ReadOnly': None, 'Type': 'bind', 'Target': '/ssh'}], mem_limit=None, cap_drop=None, privileged=False, ulimits=None, cpu_shares=None)
compose.parallel.parallel_execute_iter: Failed: <Container: 21d4214f35f5_cerebrom_ormuco-vpn_1 (21d421)>
compose.parallel.feed_queue: Pending: set([])
ERROR: for 21d4214f35f5_cerebrom_ormuco-vpn_1 mounts param is not supported in API versions < 1.30
compose.parallel.parallel_execute_iter: Failed: <Service: ormuco-vpn>
compose.parallel.feed_queue: Pending: set([<Service: proctor>])
compose.parallel.feed_queue: <Service: proctor> has upstream errors - not processing
compose.parallel.parallel_execute_iter: Failed: <Service: proctor>
compose.parallel.feed_queue: Pending: set([])
ERROR: for ormuco-vpn mounts param is not supported in API versions < 1.30
Traceback (most recent call last):
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/local/lib/python2.7/site-packages/compose/cli/main.py", line 71, in main
command()
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/local/lib/python2.7/site-packages/compose/cli/main.py", line 124, in perform_command
handler(command, command_options)
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/local/lib/python2.7/site-packages/compose/cli/main.py", line 956, in up
start=not no_start
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/local/lib/python2.7/site-packages/compose/project.py", line 479, in up
get_deps,
File "/home/wayne/.virtualenvs/cerebrom-5qiep0Vx/local/lib/python2.7/site-packages/compose/parallel.py", line 80, in parallel_execute
raise error_to_reraise
docker.errors.InvalidVersion: mounts param is not supported in API versions < 1.30
|
docker.errors.InvalidVersion
|
def process_service(service_config):
working_dir = service_config.working_dir
service_dict = dict(service_config.config)
if "env_file" in service_dict:
service_dict["env_file"] = [
expand_path(working_dir, path) for path in to_list(service_dict["env_file"])
]
if "build" in service_dict:
process_build_section(service_dict, working_dir)
if "volumes" in service_dict and service_dict.get("volume_driver") is None:
service_dict["volumes"] = resolve_volume_paths(working_dir, service_dict)
if "sysctls" in service_dict:
service_dict["sysctls"] = build_string_dict(
parse_sysctls(service_dict["sysctls"])
)
if "labels" in service_dict:
service_dict["labels"] = parse_labels(service_dict["labels"])
service_dict = process_depends_on(service_dict)
for field in ["dns", "dns_search", "tmpfs"]:
if field in service_dict:
service_dict[field] = to_list(service_dict[field])
service_dict = process_blkio_config(
process_ports(process_healthcheck(service_dict))
)
return service_dict
|
def process_service(service_config):
working_dir = service_config.working_dir
service_dict = dict(service_config.config)
if "env_file" in service_dict:
service_dict["env_file"] = [
expand_path(working_dir, path) for path in to_list(service_dict["env_file"])
]
if "build" in service_dict:
if isinstance(service_dict["build"], six.string_types):
service_dict["build"] = resolve_build_path(
working_dir, service_dict["build"]
)
elif isinstance(service_dict["build"], dict):
if "context" in service_dict["build"]:
path = service_dict["build"]["context"]
service_dict["build"]["context"] = resolve_build_path(working_dir, path)
if "labels" in service_dict["build"]:
service_dict["build"]["labels"] = parse_labels(
service_dict["build"]["labels"]
)
if "volumes" in service_dict and service_dict.get("volume_driver") is None:
service_dict["volumes"] = resolve_volume_paths(working_dir, service_dict)
if "sysctls" in service_dict:
service_dict["sysctls"] = build_string_dict(
parse_sysctls(service_dict["sysctls"])
)
service_dict = process_depends_on(service_dict)
for field in ["dns", "dns_search", "tmpfs"]:
if field in service_dict:
service_dict[field] = to_list(service_dict[field])
service_dict = process_blkio_config(
process_ports(process_healthcheck(service_dict))
)
return service_dict
|
https://github.com/docker/compose/issues/5336
|
Traceback (most recent call last):
File "/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 121, in perform_command
handler(command, command_options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 768, in run
run_one_off_container(container_options, self.project, service, options)
File "/usr/lib/python2.7/site-packages/compose/cli/main.py", line 1178, in run_one_off_container
**container_options)
File "/usr/lib/python2.7/site-packages/compose/service.py", line 288, in create_container
previous_container=previous_container,
File "/usr/lib/python2.7/site-packages/compose/service.py", line 795, in _get_container_create_options
self.config_hash if add_config_hash else None)
File "/usr/lib/python2.7/site-packages/compose/service.py", line 1338, in build_container_labels
labels = dict(label_options or {})
ValueError: dictionary update sequence element #0 has length 35; 2 is required
|
ValueError
|
def _parse_oneof_validator(error):
"""oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
types = []
for context in error.context:
if context.validator == "oneOf":
_, error_msg = _parse_oneof_validator(context)
return path_string(context.path), error_msg
if context.validator == "required":
return (None, context.message)
if context.validator == "additionalProperties":
invalid_config_key = parse_key_from_error_msg(context)
return (
None,
"contains unsupported option: '{}'".format(invalid_config_key),
)
if context.validator == "uniqueItems":
return (
path_string(context.path) if context.path else None,
"contains non-unique items, please remove duplicates from {}".format(
context.instance
),
)
if context.path:
return (
path_string(context.path),
"contains {}, which is an invalid type, it should be {}".format(
json.dumps(context.instance),
_parse_valid_types_from_validator(context.validator_value),
),
)
if context.validator == "type":
types.append(context.validator_value)
valid_types = _parse_valid_types_from_validator(types)
return (None, "contains an invalid type, it should be {}".format(valid_types))
|
def _parse_oneof_validator(error):
"""oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
types = []
for context in error.context:
if context.validator == "oneOf":
_, error_msg = _parse_oneof_validator(context)
return path_string(context.path), error_msg
if context.validator == "required":
return (None, context.message)
if context.validator == "additionalProperties":
invalid_config_key = parse_key_from_error_msg(context)
return (
None,
"contains unsupported option: '{}'".format(invalid_config_key),
)
if context.path:
return (
path_string(context.path),
"contains {}, which is an invalid type, it should be {}".format(
json.dumps(context.instance),
_parse_valid_types_from_validator(context.validator_value),
),
)
if context.validator == "uniqueItems":
return (
None,
"contains non unique items, please remove duplicates from {}".format(
context.instance
),
)
if context.validator == "type":
types.append(context.validator_value)
valid_types = _parse_valid_types_from_validator(types)
return (None, "contains an invalid type, it should be {}".format(valid_types))
|
https://github.com/docker/compose/issues/5211
|
$ docker-compose -f example.yml build
Traceback (most recent call last):
File "/home/pupssman/venv/py2/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/cli/main.py", line 68, in main
command()
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/cli/main.py", line 118, in perform_command
project = project_from_options('.', options)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/cli/command.py", line 37, in project_from_options
override_dir=options.get('--project-directory'),
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/cli/command.py", line 91, in get_project
config_data = config.load(config_details)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/config.py", line 368, in load
for config_file in config_details.config_files
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/config.py", line 534, in process_config_file
validate_against_config_schema(config_file)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 393, in validate_against_config_schema
config_file.filename)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 454, in handle_errors
error_msg = '\n'.join(format_error_func(error) for error in errors)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 454, in <genexpr>
error_msg = '\n'.join(format_error_func(error) for error in errors)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 380, in process_config_schema_errors
return handle_generic_error(error, path)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 252, in handle_generic_error
config_key, error_msg = _parse_oneof_validator(error)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 335, in _parse_oneof_validator
_parse_valid_types_from_validator(context.validator_value)),
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 300, in _parse_valid_types_from_validator
return anglicize_json_type(validator)
File "/home/pupssman/venv/py2/local/lib/python2.7/site-packages/compose/config/validation.py", line 216, in anglicize_json_type
if json_type.startswith(('a', 'e', 'i', 'o', 'u')):
AttributeError: 'bool' object has no attribute 'startswith'
|
AttributeError
|
def merge_ports(md, base, override):
def parse_sequence_func(seq):
acc = []
for item in seq:
acc.extend(ServicePort.parse(item))
return to_mapping(acc, "merge_field")
field = "ports"
if not md.needs_merge(field):
return
merged = parse_sequence_func(md.base.get(field, []))
merged.update(parse_sequence_func(md.override.get(field, [])))
md[field] = [item for item in sorted(merged.values(), key=lambda x: x.target)]
|
def merge_ports(md, base, override):
def parse_sequence_func(seq):
acc = []
for item in seq:
acc.extend(ServicePort.parse(item))
return to_mapping(acc, "merge_field")
field = "ports"
if not md.needs_merge(field):
return
merged = parse_sequence_func(md.base.get(field, []))
merged.update(parse_sequence_func(md.override.get(field, [])))
md[field] = [item for item in sorted(merged.values())]
|
https://github.com/docker/compose/issues/4943
|
(master) hexlet$ make compose-bash
docker-compose run web bash
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/local/lib/python3.5/dist-packages/compose/cli/main.py", line 115, in perform_command
project = project_from_options('.', options)
File "/usr/local/lib/python3.5/dist-packages/compose/cli/command.py", line 37, in project_from_options
override_dir=options.get('--project-directory'),
File "/usr/local/lib/python3.5/dist-packages/compose/cli/command.py", line 91, in get_project
config_data = config.load(config_details)
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 379, in load
service_dicts = load_services(config_details, main_file)
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 475, in load_services
service_config = merge_services(service_config, next_config)
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 466, in merge_services
for name in all_service_names
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 466, in <dictcomp>
for name in all_service_names
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 853, in merge_service_dicts_from_files
new_service = merge_service_dicts(base, override, version)
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 929, in merge_service_dicts
merge_ports(md, base, override)
File "/usr/local/lib/python3.5/dist-packages/compose/config/config.py", line 962, in merge_ports
md[field] = [item for item in sorted(merged.values())]
TypeError: unorderable types: NoneType() < str()
Makefile:72: recipe for target 'compose-bash' failed
make: *** [compose-bash] Error 1
(master) hexlet$ cat ^C
(master) hexlet$ docker-compose --version
docker-compose version 1.14.0, build c7bdf9e
|
TypeError
|
def get_config_path_from_options(base_dir, options, environment):
def unicode_paths(paths):
return [
p.decode("utf-8") if isinstance(p, six.binary_type) else p for p in paths
]
file_option = options.get("--file")
if file_option:
return unicode_paths(file_option)
config_files = environment.get("COMPOSE_FILE")
if config_files:
pathsep = environment.get("COMPOSE_PATH_SEPARATOR", os.pathsep)
return unicode_paths(config_files.split(pathsep))
return None
|
def get_config_path_from_options(base_dir, options, environment):
file_option = options.get("--file")
if file_option:
return file_option
config_files = environment.get("COMPOSE_FILE")
if config_files:
pathsep = environment.get("COMPOSE_PATH_SEPARATOR", os.pathsep)
return config_files.split(pathsep)
return None
|
https://github.com/docker/compose/issues/4376
|
$ docker-compose -f 就吃饭/docker-compose.yml config/home/joffrey/work/compose/compose/config/config.py:234: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
if filenames == ['-']:
Traceback (most recent call last):
File "/home/joffrey/.envs/compose/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.11.0.dev0', 'console_scripts', 'docker-compose')()
File "/home/joffrey/work/compose/compose/cli/main.py", line 64, in main
command()
File "/home/joffrey/work/compose/compose/cli/main.py", line 110, in perform_command
handler(command, options, command_options)
File "/home/joffrey/work/compose/compose/cli/main.py", line 305, in config
compose_config = get_config_from_options(self.project_dir, config_options)
File "/home/joffrey/work/compose/compose/cli/command.py", line 46, in get_config_from_options
config.find(base_dir, config_path, environment)
File "/home/joffrey/work/compose/compose/config/config.py", line 242, in find
filenames = [os.path.join(base_dir, f) for f in filenames]
File "/home/joffrey/.envs/compose/lib/python2.7/posixpath.py", line 73, in join
path += '/' + b
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def log_api_error(e, client_version):
explanation = e.explanation
if isinstance(explanation, six.binary_type):
explanation = explanation.decode("utf-8")
if "client is newer than server" not in explanation:
log.error(explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
log.error(explanation)
return
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
"version {version} or greater.".format(version=version)
)
|
def log_api_error(e, client_version):
if b"client is newer than server" not in e.explanation:
log.error(e.explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
log.error(e.explanation)
return
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
"version {version} or greater.".format(version=version)
)
|
https://github.com/docker/compose/issues/4580
|
$ docker-compose -f prod.yaml exec proxy sh
Traceback (most recent call last):
File "/home/yajo/.local/lib/python3.5/site-packages/docker/api/client.py", line 214, in _raise_for_status
response.raise_for_status()
File "/home/yajo/.local/lib/python3.5/site-packages/requests/models.py", line 862, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 409 Client Error: Conflict for url: http+docker://localunixsocket/v1.24/containers/6e4476fbc03e711b187ef37046d89125449f69a2fa7f741db06a0742ddfc9f51/exec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/errors.py", line 44, in handle_connection_errors
yield
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/main.py", line 116, in perform_command
handler(command, command_options)
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/main.py", line 448, in exec_command
exec_id = container.create_exec(command, **create_exec_options)
File "/home/yajo/.local/lib/python3.5/site-packages/compose/container.py", line 220, in create_exec
return self.client.exec_create(self.id, command, **options)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/utils/decorators.py", line 35, in wrapper
return f(self, *args, **kwargs)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/utils/decorators.py", line 21, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/api/exec_api.py", line 58, in exec_create
return self._result(res, True)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/api/client.py", line 220, in _result
self._raise_for_status(response)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/api/client.py", line 216, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/home/yajo/.local/lib/python3.5/site-packages/docker/errors.py", line 30, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 409 Client Error: Conflict for url: http+docker://localunixsocket/v1.24/containers/6e4476fbc03e711b187ef37046d89125449f69a2fa7f741db06a0742ddfc9f51/exec ("Container 6e4476fbc03e711b187ef37046d89125449f69a2fa7f741db06a0742ddfc9f51 is restarting, wait until the container is running")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/yajo/.local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/main.py", line 64, in main
command()
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/main.py", line 116, in perform_command
handler(command, command_options)
File "/usr/lib64/python3.5/contextlib.py", line 77, in __exit__
self.gen.throw(type, value, traceback)
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/errors.py", line 54, in handle_connection_errors
log_api_error(e, client.api_version)
File "/home/yajo/.local/lib/python3.5/site-packages/compose/cli/errors.py", line 71, in log_api_error
if b'client is newer than server' not in e.explanation:
TypeError: 'in <string>' requires string as left operand, not bytes
|
requests.exceptions.HTTPError
|
def denormalize_service_dict(service_dict, version):
service_dict = service_dict.copy()
if "restart" in service_dict:
service_dict["restart"] = types.serialize_restart_spec(service_dict["restart"])
if version == V1 and "network_mode" not in service_dict:
service_dict["network_mode"] = "bridge"
if "depends_on" in service_dict and version != V2_1:
service_dict["depends_on"] = sorted(
[svc for svc in service_dict["depends_on"].keys()]
)
if "healthcheck" in service_dict:
if "interval" in service_dict["healthcheck"]:
service_dict["healthcheck"]["interval"] = serialize_ns_time_value(
service_dict["healthcheck"]["interval"]
)
if "timeout" in service_dict["healthcheck"]:
service_dict["healthcheck"]["timeout"] = serialize_ns_time_value(
service_dict["healthcheck"]["timeout"]
)
if "secrets" in service_dict:
service_dict["secrets"] = map(lambda s: s.repr(), service_dict["secrets"])
return service_dict
|
def denormalize_service_dict(service_dict, version):
service_dict = service_dict.copy()
if "restart" in service_dict:
service_dict["restart"] = types.serialize_restart_spec(service_dict["restart"])
if version == V1 and "network_mode" not in service_dict:
service_dict["network_mode"] = "bridge"
if "depends_on" in service_dict and version != V2_1:
service_dict["depends_on"] = sorted(
[svc for svc in service_dict["depends_on"].keys()]
)
if "healthcheck" in service_dict:
if "interval" in service_dict["healthcheck"]:
service_dict["healthcheck"]["interval"] = serialize_ns_time_value(
service_dict["healthcheck"]["interval"]
)
if "timeout" in service_dict["healthcheck"]:
service_dict["healthcheck"]["timeout"] = serialize_ns_time_value(
service_dict["healthcheck"]["timeout"]
)
return service_dict
|
https://github.com/docker/compose/issues/4479
|
$ docker-compose config
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 88, in main
command()
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 134, in perform_command
handler(command, options, command_options)
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 330, in config
compose_config = get_config_from_options(self.project_dir, config_options)
File "/usr/local/lib/python2.7/site-packages/compose/cli/command.py", line 46, in get_config_from_options
config.find(base_dir, config_path, environment)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 333, in load
service_dicts = load_services(config_details, main_file)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 455, in load_services
service_config = merge_services(service_config, next_config)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 446, in merge_services
for name in all_service_names
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 446, in <dictcomp>
for name in all_service_names
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 797, in merge_service_dicts_from_files
new_service = merge_service_dicts(base, override, version)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 856, in merge_service_dicts
md.merge_sequence('secrets', types.ServiceSecret.parse)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 839, in merge_sequence
self[field] = [item.repr() for item in sorted(merged.values())]
AttributeError: 'ServiceSecret' object has no attribute 'repr'
$
|
AttributeError
|
def repr(self):
return dict([(k, v) for k, v in self._asdict().items() if v is not None])
|
def repr(self):
return dict(
source=self.source,
target=self.target,
uid=self.uid,
gid=self.gid,
mode=self.mode,
)
|
https://github.com/docker/compose/issues/4479
|
$ docker-compose config
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 88, in main
command()
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 134, in perform_command
handler(command, options, command_options)
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 330, in config
compose_config = get_config_from_options(self.project_dir, config_options)
File "/usr/local/lib/python2.7/site-packages/compose/cli/command.py", line 46, in get_config_from_options
config.find(base_dir, config_path, environment)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 333, in load
service_dicts = load_services(config_details, main_file)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 455, in load_services
service_config = merge_services(service_config, next_config)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 446, in merge_services
for name in all_service_names
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 446, in <dictcomp>
for name in all_service_names
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 797, in merge_service_dicts_from_files
new_service = merge_service_dicts(base, override, version)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 856, in merge_service_dicts
md.merge_sequence('secrets', types.ServiceSecret.parse)
File "/usr/local/lib/python2.7/site-packages/compose/config/config.py", line 839, in merge_sequence
self[field] = [item.repr() for item in sorted(merged.values())]
AttributeError: 'ServiceSecret' object has no attribute 'repr'
$
|
AttributeError
|
def finalize_service(service_config, service_names, version, environment):
service_dict = dict(service_config.config)
if "environment" in service_dict or "env_file" in service_dict:
service_dict["environment"] = resolve_environment(service_dict, environment)
service_dict.pop("env_file", None)
if "volumes_from" in service_dict:
service_dict["volumes_from"] = [
VolumeFromSpec.parse(vf, service_names, version)
for vf in service_dict["volumes_from"]
]
if "volumes" in service_dict:
service_dict["volumes"] = [
VolumeSpec.parse(
v, environment.get_boolean("COMPOSE_CONVERT_WINDOWS_PATHS")
)
for v in service_dict["volumes"]
]
if "net" in service_dict:
network_mode = service_dict.pop("net")
container_name = get_container_name_from_network_mode(network_mode)
if container_name and container_name in service_names:
service_dict["network_mode"] = "service:{}".format(container_name)
else:
service_dict["network_mode"] = network_mode
if "networks" in service_dict:
service_dict["networks"] = parse_networks(service_dict["networks"])
if "restart" in service_dict:
service_dict["restart"] = parse_restart_spec(service_dict["restart"])
if "secrets" in service_dict:
service_dict["secrets"] = [
types.ServiceSecret.parse(s) for s in service_dict["secrets"]
]
normalize_build(service_dict, service_config.working_dir, environment)
service_dict["name"] = service_config.name
return normalize_v1_service_format(service_dict)
|
def finalize_service(service_config, service_names, version, environment):
service_dict = dict(service_config.config)
if "environment" in service_dict or "env_file" in service_dict:
service_dict["environment"] = resolve_environment(service_dict, environment)
service_dict.pop("env_file", None)
if "volumes_from" in service_dict:
service_dict["volumes_from"] = [
VolumeFromSpec.parse(vf, service_names, version)
for vf in service_dict["volumes_from"]
]
if "volumes" in service_dict:
service_dict["volumes"] = [
VolumeSpec.parse(
v, environment.get_boolean("COMPOSE_CONVERT_WINDOWS_PATHS")
)
for v in service_dict["volumes"]
]
if "net" in service_dict:
network_mode = service_dict.pop("net")
container_name = get_container_name_from_network_mode(network_mode)
if container_name and container_name in service_names:
service_dict["network_mode"] = "service:{}".format(container_name)
else:
service_dict["network_mode"] = network_mode
if "networks" in service_dict:
service_dict["networks"] = parse_networks(service_dict["networks"])
if "restart" in service_dict:
service_dict["restart"] = parse_restart_spec(service_dict["restart"])
normalize_build(service_dict, service_config.working_dir, environment)
service_dict["name"] = service_config.name
return normalize_v1_service_format(service_dict)
|
https://github.com/docker/compose/issues/4463
|
$ docker-compose up
Traceback (most recent call last):
File "docker-compose", line 3, in <module>
File "compose/cli/main.py", line 88, in main
File "compose/cli/main.py", line 137, in perform_command
File "compose/cli/command.py", line 36, in project_from_options
File "compose/cli/command.py", line 115, in get_project
File "compose/project.py", line 110, in from_config
File "compose/project.py", line 566, in get_secrets
AttributeError: 'str' object has no attribute 'source'
Failed to execute script docker-compose
|
AttributeError
|
def repr(self):
return dict(
source=self.source,
target=self.target,
uid=self.uid,
gid=self.gid,
mode=self.mode,
)
|
def repr(self):
if self.target == self.alias:
return self.target
return "{s.target}:{s.alias}".format(s=self)
|
https://github.com/docker/compose/issues/4463
|
$ docker-compose up
Traceback (most recent call last):
File "docker-compose", line 3, in <module>
File "compose/cli/main.py", line 88, in main
File "compose/cli/main.py", line 137, in perform_command
File "compose/cli/command.py", line 36, in project_from_options
File "compose/cli/command.py", line 115, in get_project
File "compose/project.py", line 110, in from_config
File "compose/project.py", line 566, in get_secrets
AttributeError: 'str' object has no attribute 'source'
Failed to execute script docker-compose
|
AttributeError
|
def merge_service_dicts(base, override, version):
md = MergeDict(base, override)
md.merge_mapping("environment", parse_environment)
md.merge_mapping("labels", parse_labels)
md.merge_mapping("ulimits", parse_ulimits)
md.merge_mapping("networks", parse_networks)
md.merge_sequence("links", ServiceLink.parse)
for field in ["volumes", "devices"]:
md.merge_field(field, merge_path_mappings)
for field in [
"ports",
"cap_add",
"cap_drop",
"expose",
"external_links",
"security_opt",
"volumes_from",
"depends_on",
]:
md.merge_field(field, merge_unique_items_lists, default=[])
for field in ["dns", "dns_search", "env_file", "tmpfs"]:
md.merge_field(field, merge_list_or_string)
md.merge_field("logging", merge_logging, default={})
for field in set(ALLOWED_KEYS) - set(md):
md.merge_scalar(field)
if version == V1:
legacy_v1_merge_image_or_build(md, base, override)
elif md.needs_merge("build"):
md["build"] = merge_build(md, base, override)
return dict(md)
|
def merge_service_dicts(base, override, version):
md = MergeDict(base, override)
md.merge_mapping("environment", parse_environment)
md.merge_mapping("labels", parse_labels)
md.merge_mapping("ulimits", parse_ulimits)
md.merge_mapping("networks", parse_networks)
md.merge_sequence("links", ServiceLink.parse)
for field in ["volumes", "devices"]:
md.merge_field(field, merge_path_mappings)
for field in [
"ports",
"cap_add",
"cap_drop",
"expose",
"external_links",
"security_opt",
"volumes_from",
"depends_on",
]:
md.merge_field(field, merge_unique_items_lists, default=[])
for field in ["dns", "dns_search", "env_file", "tmpfs"]:
md.merge_field(field, merge_list_or_string)
md.merge_field("logging", merge_logging)
for field in set(ALLOWED_KEYS) - set(md):
md.merge_scalar(field)
if version == V1:
legacy_v1_merge_image_or_build(md, base, override)
elif md.needs_merge("build"):
md["build"] = merge_build(md, base, override)
return dict(md)
|
https://github.com/docker/compose/issues/4103
|
docker-compose up
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "compose/cli/main.py", line 65, in main
File "compose/cli/main.py", line 114, in perform_command
File "compose/cli/command.py", line 36, in project_from_options
File "compose/cli/command.py", line 103, in get_project
File "compose/config/config.py", line 323, in load
File "compose/config/config.py", line 416, in load_services
File "compose/config/config.py", line 395, in build_services
File "compose/config/config.py", line 380, in build_service
File "compose/config/config.py", line 484, in run
File "compose/config/config.py", line 528, in resolve_extends
File "compose/config/config.py", line 774, in merge_service_dicts
File "compose/config/config.py", line 728, in merge_field
File "compose/config/config.py", line 807, in merge_logging
File "compose/config/config.py", line 750, in merge_scalar
AttributeError: 'NoneType' object has no attribute 'get'
docker-compose returned -1
|
AttributeError
|
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
return "{key}={value}".format(key=key, value=value)
return [format_env(*item) for item in environment.items()]
|
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
return "{key}={value}".format(key=key, value=value)
return [format_env(*item) for item in environment.items()]
|
https://github.com/docker/compose/issues/3963
|
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "compose/cli/main.py", line 56, in main
File "compose/cli/docopt_command.py", line 23, in sys_dispatch
File "compose/cli/docopt_command.py", line 26, in dispatch
File "compose/cli/main.py", line 191, in perform_command
File "compose/cli/main.py", line 657, in up
File "compose/project.py", line 318, in up
File "compose/service.py", line 370, in execute_convergence_plan
File "compose/service.py", line 410, in recreate_container
File "compose/service.py", line 258, in create_container
File "compose/service.py", line 625, in _get_container_create_options
File "compose/service.py", line 1031, in format_environment
File "compose/service.py", line 1030, in format_env
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 7: ordinal not in range(128)
docker-compose returned -1
make: *** [run_compose_up] Error 255
|
UnicodeDecodeError
|
def format_env(key, value):
if value is None:
return key
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
return "{key}={value}".format(key=key, value=value)
|
def format_env(key, value):
if value is None:
return key
return "{key}={value}".format(key=key, value=value)
|
https://github.com/docker/compose/issues/3963
|
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "compose/cli/main.py", line 56, in main
File "compose/cli/docopt_command.py", line 23, in sys_dispatch
File "compose/cli/docopt_command.py", line 26, in dispatch
File "compose/cli/main.py", line 191, in perform_command
File "compose/cli/main.py", line 657, in up
File "compose/project.py", line 318, in up
File "compose/service.py", line 370, in execute_convergence_plan
File "compose/service.py", line 410, in recreate_container
File "compose/service.py", line 258, in create_container
File "compose/service.py", line 625, in _get_container_create_options
File "compose/service.py", line 1031, in format_environment
File "compose/service.py", line 1030, in format_env
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 7: ordinal not in range(128)
docker-compose returned -1
make: *** [run_compose_up] Error 255
|
UnicodeDecodeError
|
def wait_on_exit(container):
try:
exit_code = container.wait()
return "%s exited with code %s\n" % (container.name, exit_code)
except APIError as e:
return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
container.name,
e.response.status_code,
e.response.text or "[empty]",
)
|
def wait_on_exit(container):
exit_code = container.wait()
return "%s exited with code %s\n" % (container.name, exit_code)
|
https://github.com/docker/compose/issues/3888
|
Exception in thread Thread-9:
Traceback (most recent call last):
File "threading.py", line 810, in __bootstrap_inner
File "threading.py", line 763, in run
File "compose/cli/log_printer.py", line 149, in tail_container_logs
File "compose/cli/log_printer.py", line 179, in wait_on_exit
File "compose/container.py", line 239, in wait
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 441, in wait
File "site-packages/docker/client.py", line 174, in _raise_for_status
APIError: 502 Server Error: Bad Gateway ("Bad response from Docker engine")
Exception in thread Thread-6:
Traceback (most recent call last):
File "threading.py", line 810, in __bootstrap_inner
File "threading.py", line 763, in run
File "compose/cli/log_printer.py", line 149, in tail_container_logs
File "compose/cli/log_printer.py", line 179, in wait_on_exit
File "compose/container.py", line 239, in wait
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 441, in wait
File "site-packages/docker/client.py", line 174, in _raise_for_status
APIError: 502 Server Error: Bad Gateway ("Bad response from Docker engine")
Exception in thread Thread-10:
Traceback (most recent call last):
File "threading.py", line 810, in __bootstrap_inner
File "threading.py", line 763, in run
File "compose/cli/log_printer.py", line 149, in tail_container_logs
File "compose/cli/log_printer.py", line 179, in wait_on_exit
File "compose/container.py", line 239, in wait
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 441, in wait
File "site-packages/docker/client.py", line 174, in _raise_for_status
APIError: 502 Server Error: Bad Gateway ("Bad response from Docker engine")
Exception in thread Thread-8:
Traceback (most recent call last):
File "threading.py", line 810, in __bootstrap_inner
File "threading.py", line 763, in run
File "compose/cli/log_printer.py", line 149, in tail_container_logs
File "compose/cli/log_printer.py", line 179, in wait_on_exit
File "compose/container.py", line 239, in wait
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 441, in wait
File "site-packages/docker/client.py", line 174, in _raise_for_status
APIError: 502 Server Error: Bad Gateway ("Bad response from Docker engine")
Exception in thread Thread-11:
Traceback (most recent call last):
File "threading.py", line 810, in __bootstrap_inner
File "threading.py", line 763, in run
File "compose/cli/log_printer.py", line 190, in watch_events
File "compose/project.py", line 356, in events
File "compose/container.py", line 42, in from_id
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 182, in inspect_container
File "site-packages/docker/utils/decorators.py", line 47, in inner
File "site-packages/docker/client.py", line 140, in _get
File "site-packages/requests/sessions.py", line 477, in get
File "site-packages/requests/sessions.py", line 465, in request
File "site-packages/requests/sessions.py", line 573, in send
File "site-packages/requests/adapters.py", line 415, in send
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
|
APIError
|
def get_project(
project_dir,
config_path=None,
project_name=None,
verbose=False,
host=None,
tls_config=None,
environment=None,
):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
config_data = config.load(config_details)
api_version = environment.get(
"COMPOSE_API_VERSION", API_VERSIONS[config_data.version]
)
client = get_client(
verbose=verbose,
version=api_version,
tls_config=tls_config,
host=host,
environment=environment,
)
with errors.handle_connection_errors(client):
return Project.from_config(project_name, config_data, client)
|
def get_project(
project_dir,
config_path=None,
project_name=None,
verbose=False,
host=None,
tls_config=None,
environment=None,
):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
config_data = config.load(config_details)
api_version = environment.get(
"COMPOSE_API_VERSION", API_VERSIONS[config_data.version]
)
client = get_client(
verbose=verbose,
version=api_version,
tls_config=tls_config,
host=host,
environment=environment,
)
return Project.from_config(project_name, config_data, client)
|
https://github.com/docker/compose/issues/3779
|
➜ ehnv git:(dev) docker-compose up
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "compose/cli/main.py", line 60, in main
File "compose/cli/main.py", line 108, in perform_command
File "compose/cli/command.py", line 35, in project_from_options
File "compose/cli/command.py", line 112, in get_project
File "compose/project.py", line 100, in from_config
File "compose/project.py", line 533, in get_volumes_from
File "compose/project.py", line 522, in build_volume_from
File "compose/container.py", line 42, in from_id
File "site-packages/docker/utils/decorators.py", line 21, in wrapped
File "site-packages/docker/api/container.py", line 182, in inspect_container
File "site-packages/docker/utils/decorators.py", line 47, in inner
File "site-packages/docker/client.py", line 138, in _get
File "site-packages/requests/sessions.py", line 477, in get
File "site-packages/requests/sessions.py", line 465, in request
File "site-packages/requests/sessions.py", line 573, in send
File "site-packages/requests/adapters.py", line 415, in send
requests.exceptions.ConnectionError: ('Connection aborted.', error(2, 'No such file or directory'))
|
requests.exceptions.ConnectionError
|
def input(prompt):
"""
Version of input (raw_input in Python 2) which forces a flush of sys.stdout
to avoid problems where the prompt fails to appear due to line buffering
"""
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline().rstrip("\n")
|
def input(prompt):
"""
Version of input (raw_input in Python 2) which forces a flush of sys.stdout
to avoid problems where the prompt fails to appear due to line buffering
"""
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline().rstrip(b"\n")
|
https://github.com/docker/compose/issues/3576
|
Traceback (most recent call last):
File "/opt/venvs/docker-compose/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.8.0.dev0', 'console_scripts', 'docker-compose')()
File "/opt/venvs/docker-compose/lib64/python3.5/site-packages/compose/cli/main.py", line 58, in main
command()
File "/opt/venvs/docker-compose/lib64/python3.5/site-packages/compose/cli/main.py", line 109, in perform_command
handler(command, command_options)
File "/opt/venvs/docker-compose/lib64/python3.5/site-packages/compose/cli/main.py", line 553, in rm
or yesno("Are you sure? [yN] ", default=False):
File "/opt/venvs/docker-compose/lib64/python3.5/site-packages/compose/cli/utils.py", line 33, in yesno
answer = input(prompt).strip().lower()
File "/opt/venvs/docker-compose/lib64/python3.5/site-packages/compose/cli/utils.py", line 52, in input
return sys.stdin.readline().rstrip(b'\n')
TypeError: rstrip arg must be None or str
|
TypeError
|
def line_splitter(buffer, separator="\n"):
index = buffer.find(six.text_type(separator))
if index == -1:
return None
return buffer[: index + 1], buffer[index + 1 :]
|
def line_splitter(buffer, separator="\n"):
index = buffer.find(six.text_type(separator))
if index == -1:
return None, None
return buffer[: index + 1], buffer[index + 1 :]
|
https://github.com/docker/compose/issues/2398
|
$ docker-compose -f pep-wilma.yml build pepwilma
Building pepwilma
Step 0 : FROM node:0.10-slim
---> 04e511e59c2e
[...]
Step 16 : ADD https://raw.githubusercontent.com/Bitergia/docker/master/utils/entrypoint-common.sh /
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.5.1', 'console_scripts', 'docker-compose')()
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 54, in main
command.sys_dispatch()
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 23, in sys_dispatch
self.dispatch(sys.argv[1:], None)
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 26, in dispatch
self.perform_command(*self.parse(argv, global_options))
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 171, in perform_command
handler(project, command_options)
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 192, in build
force_rm=bool(options.get('--force-rm', False)))
File "/usr/lib/python2.7/dist-packages/compose/project.py", line 284, in build
service.build(no_cache, pull, force_rm)
File "/usr/lib/python2.7/dist-packages/compose/service.py", line 727, in build
all_events = stream_output(build_output, sys.stdout)
File "/usr/lib/python2.7/dist-packages/compose/progress_stream.py", line 15, in stream_output
for event in utils.json_stream(output):
File "/usr/lib/python2.7/dist-packages/compose/utils.py", line 131, in split_buffer
yield decoder(buffered)
File "/usr/lib/python2.7/json/decoder.py", line 367, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 2 column 1 - line 15 column 1 (char 4 - 544)
|
ValueError
|
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type("")
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
yield decoder(buffered)
|
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type("")
for data in stream_as_text(stream):
buffered += data
while True:
item, rest = splitter(buffered)
if not item:
break
buffered = rest
yield item
if buffered:
yield decoder(buffered)
|
https://github.com/docker/compose/issues/2398
|
$ docker-compose -f pep-wilma.yml build pepwilma
Building pepwilma
Step 0 : FROM node:0.10-slim
---> 04e511e59c2e
[...]
Step 16 : ADD https://raw.githubusercontent.com/Bitergia/docker/master/utils/entrypoint-common.sh /
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.5.1', 'console_scripts', 'docker-compose')()
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 54, in main
command.sys_dispatch()
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 23, in sys_dispatch
self.dispatch(sys.argv[1:], None)
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 26, in dispatch
self.perform_command(*self.parse(argv, global_options))
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 171, in perform_command
handler(project, command_options)
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 192, in build
force_rm=bool(options.get('--force-rm', False)))
File "/usr/lib/python2.7/dist-packages/compose/project.py", line 284, in build
service.build(no_cache, pull, force_rm)
File "/usr/lib/python2.7/dist-packages/compose/service.py", line 727, in build
all_events = stream_output(build_output, sys.stdout)
File "/usr/lib/python2.7/dist-packages/compose/progress_stream.py", line 15, in stream_output
for event in utils.json_stream(output):
File "/usr/lib/python2.7/dist-packages/compose/utils.py", line 131, in split_buffer
yield decoder(buffered)
File "/usr/lib/python2.7/json/decoder.py", line 367, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 2 column 1 - line 15 column 1 (char 4 - 544)
|
ValueError
|
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end() :]
return obj, rest
except ValueError:
return None
|
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end() :]
return obj, rest
except ValueError:
return None, None
|
https://github.com/docker/compose/issues/2398
|
$ docker-compose -f pep-wilma.yml build pepwilma
Building pepwilma
Step 0 : FROM node:0.10-slim
---> 04e511e59c2e
[...]
Step 16 : ADD https://raw.githubusercontent.com/Bitergia/docker/master/utils/entrypoint-common.sh /
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.5.1', 'console_scripts', 'docker-compose')()
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 54, in main
command.sys_dispatch()
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 23, in sys_dispatch
self.dispatch(sys.argv[1:], None)
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 26, in dispatch
self.perform_command(*self.parse(argv, global_options))
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 171, in perform_command
handler(project, command_options)
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 192, in build
force_rm=bool(options.get('--force-rm', False)))
File "/usr/lib/python2.7/dist-packages/compose/project.py", line 284, in build
service.build(no_cache, pull, force_rm)
File "/usr/lib/python2.7/dist-packages/compose/service.py", line 727, in build
all_events = stream_output(build_output, sys.stdout)
File "/usr/lib/python2.7/dist-packages/compose/progress_stream.py", line 15, in stream_output
for event in utils.json_stream(output):
File "/usr/lib/python2.7/dist-packages/compose/utils.py", line 131, in split_buffer
yield decoder(buffered)
File "/usr/lib/python2.7/json/decoder.py", line 367, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 2 column 1 - line 15 column 1 (char 4 - 544)
|
ValueError
|
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
|
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream_as_text(stream), json_splitter, json_decoder.decode)
|
https://github.com/docker/compose/issues/2398
|
$ docker-compose -f pep-wilma.yml build pepwilma
Building pepwilma
Step 0 : FROM node:0.10-slim
---> 04e511e59c2e
[...]
Step 16 : ADD https://raw.githubusercontent.com/Bitergia/docker/master/utils/entrypoint-common.sh /
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 9, in <module>
load_entry_point('docker-compose==1.5.1', 'console_scripts', 'docker-compose')()
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 54, in main
command.sys_dispatch()
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 23, in sys_dispatch
self.dispatch(sys.argv[1:], None)
File "/usr/lib/python2.7/dist-packages/compose/cli/docopt_command.py", line 26, in dispatch
self.perform_command(*self.parse(argv, global_options))
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 171, in perform_command
handler(project, command_options)
File "/usr/lib/python2.7/dist-packages/compose/cli/main.py", line 192, in build
force_rm=bool(options.get('--force-rm', False)))
File "/usr/lib/python2.7/dist-packages/compose/project.py", line 284, in build
service.build(no_cache, pull, force_rm)
File "/usr/lib/python2.7/dist-packages/compose/service.py", line 727, in build
all_events = stream_output(build_output, sys.stdout)
File "/usr/lib/python2.7/dist-packages/compose/progress_stream.py", line 15, in stream_output
for event in utils.json_stream(output):
File "/usr/lib/python2.7/dist-packages/compose/utils.py", line 131, in split_buffer
yield decoder(buffered)
File "/usr/lib/python2.7/json/decoder.py", line 367, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 2 column 1 - line 15 column 1 (char 4 - 544)
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.