after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def explode(self):
"""
Explode muti-part geometries into multiple single geometries.
Each row containing a multi-part geometry will be split into
multiple rows with single geometries, thereby increasing the vertical
size of the GeoDataFrame.
The index of the input geodataframe is no longer unique and is
replaced with a multi-index (original index with additional level
indicating the multiple geometries: a new zero-based index for each
single part geometry per multi-part geometry).
Returns
-------
GeoDataFrame
Exploded geodataframe with each single geometry
as a separate entry in the geodataframe.
"""
df_copy = self.copy()
if "level_1" in df_copy.columns: # GH1393
df_copy = df_copy.rename(columns={"level_1": "__level_1"})
exploded_geom = df_copy.geometry.explode().reset_index(level=-1)
exploded_index = exploded_geom.columns[0]
df = pd.concat(
[df_copy.drop(df_copy._geometry_column_name, axis=1), exploded_geom], axis=1
)
# reset to MultiIndex, otherwise df index is only first level of
# exploded GeoSeries index.
df.set_index(exploded_index, append=True, inplace=True)
df.index.names = list(self.index.names) + [None]
if "__level_1" in df.columns:
df = df.rename(columns={"__level_1": "level_1"})
geo_df = df.set_geometry(self._geometry_column_name)
return geo_df
|
def explode(self):
"""
Explode muti-part geometries into multiple single geometries.
Each row containing a multi-part geometry will be split into
multiple rows with single geometries, thereby increasing the vertical
size of the GeoDataFrame.
The index of the input geodataframe is no longer unique and is
replaced with a multi-index (original index with additional level
indicating the multiple geometries: a new zero-based index for each
single part geometry per multi-part geometry).
Returns
-------
GeoDataFrame
Exploded geodataframe with each single geometry
as a separate entry in the geodataframe.
"""
df_copy = self.copy()
exploded_geom = df_copy.geometry.explode().reset_index(level=-1)
exploded_index = exploded_geom.columns[0]
df = pd.concat(
[df_copy.drop(df_copy._geometry_column_name, axis=1), exploded_geom], axis=1
)
# reset to MultiIndex, otherwise df index is only first level of
# exploded GeoSeries index.
df.set_index(exploded_index, append=True, inplace=True)
df.index.names = list(self.index.names) + [None]
geo_df = df.set_geometry(self._geometry_column_name)
return geo_df
|
https://github.com/geopandas/geopandas/issues/1393
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/arrays/categorical.py in __init__(self, values, categories, ordered, dtype, fastpath)
354 try:
--> 355 codes, categories = factorize(values, sort=True)
356 except TypeError:
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/algorithms.py in factorize(values, sort, na_sentinel, size_hint)
634
--> 635 codes, uniques = _factorize_array(
636 values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/algorithms.py in _factorize_array(values, na_sentinel, size_hint, na_value)
483 table = hash_klass(size_hint or len(values))
--> 484 uniques, codes = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value)
485
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.factorize()
ValueError: Buffer has wrong number of dimensions (expected 1, got 2)
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
<ipython-input-66-973d8edd5e42> in <module>
----> 1 gdf.explode()
~/Dropbox/Python/geopandas/geopandas/geopandas/geodataframe.py in explode(self)
844 # reset to MultiIndex, otherwise df index is only first level of
845 # exploded GeoSeries index.
--> 846 df.set_index(exploded_index, append=True, inplace=True)
847 df.index.names = list(self.index.names) + [None]
848 geo_df = df.set_geometry(self._geometry_column_name)
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/frame.py in set_index(self, keys, drop, append, inplace, verify_integrity)
4349 )
4350
-> 4351 index = ensure_index_from_sequences(arrays, names)
4352
4353 if verify_integrity and not index.is_unique:
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/indexes/base.py in ensure_index_from_sequences(sequences, names)
5288 return Index(sequences[0], name=names)
5289 else:
-> 5290 return MultiIndex.from_arrays(sequences, names=names)
5291
5292
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/indexes/multi.py in from_arrays(cls, arrays, sortorder, names)
425 raise ValueError("all arrays must be same length")
426
--> 427 codes, levels = factorize_from_iterables(arrays)
428 if names is lib.no_default:
429 names = [getattr(arr, "name", None) for arr in arrays]
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/arrays/categorical.py in factorize_from_iterables(iterables)
2706 # For consistency, it should return a list of 2 lists.
2707 return [[], []]
-> 2708 return map(list, zip(*(factorize_from_iterable(it) for it in iterables)))
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/arrays/categorical.py in <genexpr>(.0)
2706 # For consistency, it should return a list of 2 lists.
2707 return [[], []]
-> 2708 return map(list, zip(*(factorize_from_iterable(it) for it in iterables)))
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/arrays/categorical.py in factorize_from_iterable(values)
2678 # but only the resulting categories, the order of which is independent
2679 # from ordered. Set ordered to False as default. See GH #15457
-> 2680 cat = Categorical(values, ordered=False)
2681 categories = cat.categories
2682 codes = cat.codes
~/anaconda3/envs/uap/lib/python3.8/site-packages/pandas/core/arrays/categorical.py in __init__(self, values, categories, ordered, dtype, fastpath)
367
368 # FIXME
--> 369 raise NotImplementedError(
370 "> 1 ndim Categorical are not supported at this time"
371 )
NotImplementedError: > 1 ndim Categorical are not supported at this time
|
ValueError
|
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersection'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if left_df.crs != right_df.crs:
warn(
(
"CRS of frames being joined does not match!"
"(%s != %s)" % (left_df.crs, right_df.crs)
)
)
index_left = "index_%s" % lsuffix
index_right = "index_%s" % rsuffix
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being joined".format(
index_left, index_right
)
)
# Attempt to re-use spatial indexes, otherwise generate the spatial index
# for the longer dataframe. If we are joining to an empty dataframe,
# don't bother generating the index.
if right_df._sindex_generated or (
not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
):
tree_idx = right_df.sindex if len(left_df) > 0 else None
tree_idx_right = True
else:
tree_idx = left_df.sindex if len(right_df) > 0 else None
tree_idx_right = False
# the rtree spatial index only allows limited (numeric) index types, but an
# index in geopandas may be any arbitrary dtype. so reset both indices now
# and store references to the original indices, to be reaffixed later.
# GH 352
left_df = left_df.copy(deep=True)
try:
left_index_name = left_df.index.name
left_df.index = left_df.index.rename(index_left)
except TypeError:
index_left = [
"index_%s" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)
]
left_index_name = left_df.index.names
left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
right_df = right_df.copy(deep=True)
try:
right_index_name = right_df.index.name
right_df.index = right_df.index.rename(index_right)
except TypeError:
index_right = [
"index_%s" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)
]
right_index_name = right_df.index.names
right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
tree_idx_right = not tree_idx_right
r_idx = np.empty((0, 0))
l_idx = np.empty((0, 0))
# get rtree spatial index. If tree_idx does not exist, it is due to either a
# failure to generate the index (e.g., if the column is empty), or the
# other dataframe is empty so it wasn't necessary to generate it.
if tree_idx_right and tree_idx:
idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
# indexes of overlapping boundaries
if idxmatch.shape[0] > 0:
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
elif not tree_idx_right and tree_idx:
# tree_idx_df == 'left'
idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
# indexes of overlapping boundaries
l_idx = np.concatenate(idxmatch.values)
r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
if len(r_idx) > 0 and len(l_idx) > 0:
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
right_df[right_df.geometry.name][r_idx],
),
]
)
)
result.columns = ["_key_left", "_key_right", "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
"match_bool", axis=1
)
else:
# when output from the join has no overlapping geometries
result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={"_key_left": "_key_right", "_key_right": "_key_left"}
)
if how == "inner":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True)
.merge(
right_df.drop(right_df.geometry.name, axis=1),
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
elif how == "left":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True, how="left")
.merge(
right_df.drop(right_df.geometry.name, axis=1),
how="left",
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
else: # how == 'right':
joined = (
left_df.drop(left_df.geometry.name, axis=1)
.merge(
result.merge(
right_df, left_on="_key_right", right_index=True, how="right"
),
left_index=True,
right_on="_key_left",
how="right",
)
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
if isinstance(index_right, list):
joined.index.names = right_index_name
else:
joined.index.name = right_index_name
return joined
|
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersection'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if left_df.crs != right_df.crs:
warn(
(
"CRS of frames being joined does not match!"
"(%s != %s)" % (left_df.crs, right_df.crs)
)
)
index_left = "index_%s" % lsuffix
index_right = "index_%s" % rsuffix
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being joined".format(
index_left, index_right
)
)
# Attempt to re-use spatial indexes, otherwise generate the spatial index
# for the longer dataframe
if right_df._sindex_generated or (
not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
):
tree_idx = right_df.sindex
tree_idx_right = True
else:
tree_idx = left_df.sindex
tree_idx_right = False
# the rtree spatial index only allows limited (numeric) index types, but an
# index in geopandas may be any arbitrary dtype. so reset both indices now
# and store references to the original indices, to be reaffixed later.
# GH 352
left_df = left_df.copy(deep=True)
try:
left_index_name = left_df.index.name
left_df.index = left_df.index.rename(index_left)
except TypeError:
index_left = [
"index_%s" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)
]
left_index_name = left_df.index.names
left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
right_df = right_df.copy(deep=True)
try:
right_index_name = right_df.index.name
right_df.index = right_df.index.rename(index_right)
except TypeError:
index_right = [
"index_%s" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)
]
right_index_name = right_df.index.names
right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
tree_idx_right = not tree_idx_right
r_idx = np.empty((0, 0))
l_idx = np.empty((0, 0))
# get rtree spatial index
if tree_idx_right:
idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
# indexes of overlapping boundaries
if idxmatch.shape[0] > 0:
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
else:
# tree_idx_df == 'left'
idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
# indexes of overlapping boundaries
l_idx = np.concatenate(idxmatch.values)
r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
if len(r_idx) > 0 and len(l_idx) > 0:
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
right_df[right_df.geometry.name][r_idx],
),
]
)
)
result.columns = ["_key_left", "_key_right", "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
"match_bool", axis=1
)
else:
# when output from the join has no overlapping geometries
result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={"_key_left": "_key_right", "_key_right": "_key_left"}
)
if how == "inner":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True)
.merge(
right_df.drop(right_df.geometry.name, axis=1),
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
elif how == "left":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True, how="left")
.merge(
right_df.drop(right_df.geometry.name, axis=1),
how="left",
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
else: # how == 'right':
joined = (
left_df.drop(left_df.geometry.name, axis=1)
.merge(
result.merge(
right_df, left_on="_key_right", right_index=True, how="right"
),
left_index=True,
right_on="_key_left",
how="right",
)
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
if isinstance(index_right, list):
joined.index.names = right_index_name
else:
joined.index.name = right_index_name
return joined
|
https://github.com/geopandas/geopandas/issues/1307
|
import geopandas as gpd
gdf = gpd.read_file(gpd.datasets.get_path('nybb'))
gpd.sjoin(gdf, gdf.head(0))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-a647daaec265> in <module>
----> 1 gpd.sjoin(gdf.head(0), gdf)
~/model/.venv/lib/python3.7/site-packages/geopandas/tools/sjoin.py in sjoin(left_df, right_df, how, op, lsuffix, rsuffix)
126 lambda x: list(tree_idx.intersection(x)) if not x == () else []
127 )
--> 128 idxmatch = idxmatch[idxmatch.apply(len) > 0]
129 # indexes of overlapping boundaries
130 if idxmatch.shape[0] > 0:
~/model/.venv/lib/python3.7/site-packages/pandas/core/ops/common.py in new_method(self, other)
62 other = item_from_zerodim(other)
63
---> 64 return method(self, other)
65
66 return new_method
~/model/.venv/lib/python3.7/site-packages/pandas/core/ops/__init__.py in wrapper(self, other)
524 rvalues = extract_array(other, extract_numpy=True)
525
--> 526 res_values = comparison_op(lvalues, rvalues, op)
527
528 return _construct_result(self, res_values, index=self.index, name=res_name)
~/model/.venv/lib/python3.7/site-packages/pandas/core/ops/array_ops.py in comparison_op(left, right, op)
235
236 if should_extension_dispatch(lvalues, rvalues):
--> 237 res_values = dispatch_to_extension_op(op, lvalues, rvalues)
238
239 elif is_scalar(rvalues) and isna(rvalues):
~/model/.venv/lib/python3.7/site-packages/pandas/core/ops/dispatch.py in dispatch_to_extension_op(op, left, right)
123 # The op calls will raise TypeError if the op is not defined
124 # on the ExtensionArray
--> 125 res_values = op(left, right)
126 return res_values
TypeError: '>' not supported between instances of 'GeometryArray' and 'int'
|
TypeError
|
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
if isinstance(dtype, GeometryDtype):
if copy:
return self.copy()
else:
return self
elif pd.api.types.is_string_dtype(dtype) and not pd.api.types.is_object_dtype(
dtype
):
return to_wkt(self).astype(dtype, copy=False)
else:
return np.array(self, dtype=dtype, copy=copy)
|
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
if isinstance(dtype, GeometryDtype):
if copy:
return self.copy()
else:
return self
return np.array(self, dtype=dtype, copy=copy)
|
https://github.com/geopandas/geopandas/issues/1145
|
$ conda create -n gpd060 python=3.6 geopandas=0.6.0
<conda output>
$ conda activate gpd060
(gpd060) $ python
Python 3.6.7 | packaged by conda-forge | (default, Jul 2 2019, 02:07:37)
[GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
from shapely.wkt import loads
import geopandas as gpd
geom_str = 'MULTIPOLYGON (((1 0, 1 1, 0 1, 0 0, 1 0)), ((3 0, 3 1, 2 1, 2 0, 3 0)))'
geom = loads(geom_str)
gdf = gpd.GeoDataFrame(geometry=[geom])
gdf.to_csv()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/john.flavin/anaconda3/envs/gpd060/lib/python3.6/site-packages/pandas/core/generic.py", line 3228, in to_csv
formatter.save()
File "/Users/john.flavin/anaconda3/envs/gpd060/lib/python3.6/site-packages/pandas/io/formats/csvs.py", line 202, in save
self._save()
File "/Users/john.flavin/anaconda3/envs/gpd060/lib/python3.6/site-packages/pandas/io/formats/csvs.py", line 324, in _save
self._save_chunk(start_i, end_i)
File "/Users/john.flavin/anaconda3/envs/gpd060/lib/python3.6/site-packages/pandas/io/formats/csvs.py", line 340, in _save_chunk
quoting=self.quoting,
File "/Users/john.flavin/anaconda3/envs/gpd060/lib/python3.6/site-packages/pandas/core/internals/blocks.py", line 760, in to_native_types
values = values.astype(str)
ValueError: setting an array element with a sequence
|
ValueError
|
def from_wkb(data):
"""
Convert a list or array of WKB objects to a GeometryArray.
"""
import shapely.wkb
n = len(data)
out = []
for idx in range(n):
geom = data[idx]
if geom is not None and len(geom):
geom = shapely.wkb.loads(geom)
else:
geom = None
out.append(geom)
aout = np.empty(n, dtype=object)
aout[:] = out
return GeometryArray(aout)
|
def from_wkb(data):
"""
Convert a list or array of WKB objects to a GeometryArray.
"""
import shapely.wkb
n = len(data)
out = []
for idx in range(n):
geom = data[idx]
if geom is not None and len(geom):
geom = shapely.wkb.loads(geom)
else:
geom = None
out.append(geom)
out = np.array(out, dtype=object)
return GeometryArray(out)
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def from_wkt(data):
"""
Convert a list or array of WKT objects to a GeometryArray.
"""
import shapely.wkt
n = len(data)
out = []
for idx in range(n):
geom = data[idx]
if geom is not None and len(geom):
if isinstance(geom, bytes):
geom = geom.decode("utf-8")
geom = shapely.wkt.loads(geom)
else:
geom = None
out.append(geom)
aout = np.empty(n, dtype=object)
aout[:] = out
return GeometryArray(aout)
|
def from_wkt(data):
"""
Convert a list or array of WKT objects to a GeometryArray.
"""
import shapely.wkt
n = len(data)
out = []
for idx in range(n):
geom = data[idx]
if geom is not None and len(geom):
if isinstance(geom, bytes):
geom = geom.decode("utf-8")
geom = shapely.wkt.loads(geom)
else:
geom = None
out.append(geom)
out = np.array(out, dtype=object)
return GeometryArray(out)
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def points_from_xy(x, y, z=None):
"""Convert arrays of x and y values to a GeometryArray of points."""
x = np.asarray(x, dtype="float64")
y = np.asarray(y, dtype="float64")
if z is not None:
z = np.asarray(z, dtype="float64")
out = _points_from_xy(x, y, z)
aout = np.empty(len(x), dtype=object)
aout[:] = out
return GeometryArray(aout)
|
def points_from_xy(x, y, z=None):
"""Convert arrays of x and y values to a GeometryArray of points."""
x = np.asarray(x, dtype="float64")
y = np.asarray(y, dtype="float64")
if z is not None:
z = np.asarray(z, dtype="float64")
out = _points_from_xy(x, y, z)
out = np.array(out, dtype=object)
return GeometryArray(out)
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def _affinity_method(op, left, *args, **kwargs):
# type: (str, GeometryArray, ...) -> GeometryArray
# not all shapely.affinity methods can handle empty geometries:
# affine_transform itself works (as well as translate), but rotate, scale
# and skew fail (they try to unpack the bounds).
# Here: consistently returning empty geom for input empty geom
out = []
for geom in left.data:
if geom is None or geom.is_empty:
res = geom
else:
res = getattr(shapely.affinity, op)(geom, *args, **kwargs)
out.append(res)
data = np.empty(len(left), dtype=object)
data[:] = out
return GeometryArray(data)
|
def _affinity_method(op, left, *args, **kwargs):
# type: (str, GeometryArray, ...) -> GeometryArray
# not all shapely.affinity methods can handle empty geometries:
# affine_transform itself works (as well as translate), but rotate, scale
# and skew fail (they try to unpack the bounds).
# Here: consistently returning empty geom for input empty geom
data = []
for geom in left.data:
if geom is None or geom.is_empty:
res = geom
else:
res = getattr(shapely.affinity, op)(geom, *args, **kwargs)
data.append(res)
return GeometryArray(np.array(data, dtype=object))
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def interiors(self):
has_non_poly = False
inner_rings = []
for geom in self.data:
interior_ring_seq = getattr(geom, "interiors", None)
# polygon case
if interior_ring_seq is not None:
inner_rings.append(list(interior_ring_seq))
# non-polygon case
else:
has_non_poly = True
inner_rings.append(None)
if has_non_poly:
warnings.warn(
"Only Polygon objects have interior rings. For other "
"geometry types, None is returned."
)
data = np.empty(len(self), dtype=object)
data[:] = inner_rings
return data
|
def interiors(self):
has_non_poly = False
inner_rings = []
for geom in self.data:
interior_ring_seq = getattr(geom, "interiors", None)
# polygon case
if interior_ring_seq is not None:
inner_rings.append(list(interior_ring_seq))
# non-polygon case
else:
has_non_poly = True
inner_rings.append(None)
if has_non_poly:
warnings.warn(
"Only Polygon objects have interior rings. For other "
"geometry types, None is returned."
)
return np.array(inner_rings, dtype=object)
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def buffer(self, distance, resolution=16, **kwargs):
data = data = np.empty(len(self), dtype=object)
if isinstance(distance, np.ndarray):
if len(distance) != len(self):
raise ValueError(
"Length of distance sequence does not match length of the GeoSeries"
)
data[:] = [
geom.buffer(dist, resolution, **kwargs) if geom is not None else None
for geom, dist in zip(self.data, distance)
]
return GeometryArray(data)
data[:] = [
geom.buffer(distance, resolution, **kwargs) if geom is not None else None
for geom in self.data
]
return GeometryArray(data)
|
def buffer(self, distance, resolution=16, **kwargs):
if isinstance(distance, np.ndarray):
if len(distance) != len(self):
raise ValueError(
"Length of distance sequence does not match length of the GeoSeries"
)
data = [
geom.buffer(dist, resolution, **kwargs) if geom is not None else None
for geom, dist in zip(self.data, distance)
]
return GeometryArray(np.array(data, dtype=object))
data = [
geom.buffer(distance, resolution, **kwargs) if geom is not None else None
for geom in self.data
]
return GeometryArray(np.array(data, dtype=object))
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def interpolate(self, distance, normalized=False):
data = data = np.empty(len(self), dtype=object)
if isinstance(distance, np.ndarray):
if len(distance) != len(self):
raise ValueError(
"Length of distance sequence does not match length of the GeoSeries"
)
data[:] = [
geom.interpolate(dist, normalized=normalized)
for geom, dist in zip(self.data, distance)
]
return GeometryArray(data)
data[:] = [geom.interpolate(distance, normalized=normalized) for geom in self.data]
return GeometryArray(data)
|
def interpolate(self, distance, normalized=False):
if isinstance(distance, np.ndarray):
if len(distance) != len(self):
raise ValueError(
"Length of distance sequence does not match length of the GeoSeries"
)
data = [
geom.interpolate(dist, normalized=normalized)
for geom, dist in zip(self.data, distance)
]
return GeometryArray(np.array(data, dtype=object))
data = [geom.interpolate(distance, normalized=normalized) for geom in self.data]
return GeometryArray(np.array(data, dtype=object))
|
https://github.com/geopandas/geopandas/issues/1130
|
Traceback (most recent call last):
File "intersection_crash.py", line 17, in <module>
intersection_big_gdf = gpd.overlay(concave_polygon_gdf, big_rect_polygon_gdf, how='intersection')
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 362, in overlay
result = _overlay_intersection(df1, df2)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/tools/overlay.py", line 217, in _overlay_intersection
intersections = left.intersection(right).buffer(0)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 530, in buffer
resolution=resolution, **kwargs)
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/base.py", line 92, in _delegate_geo_method
data = getattr(a_this, op)(*args, **kwargs).data
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 616, in buffer
return GeometryArray(np.array(data, dtype=object))
File "/usr/local/lib/python3.6/site-packages/geopandas-0.6.0rc1-py3.6.egg/geopandas/array.py", line 407, in __init__
"'data' should be a 1-dimensional array of geometry objects.")
ValueError: 'data' should be a 1-dimensional array of geometry objects.
|
ValueError
|
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersection'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://toblerity.org/shapely/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
"""
import rtree
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
if left_df.crs != right_df.crs:
print("Warning: CRS does not match!")
tree_idx = rtree.index.Index()
right_df_bounds = right_df["geometry"].apply(lambda x: x.bounds)
for i in right_df_bounds.index:
tree_idx.insert(i, right_df_bounds[i])
idxmatch = (
left_df["geometry"]
.apply(lambda x: x.bounds)
.apply(lambda x: list(tree_idx.intersection(x)))
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
# if output from join has overlapping geometries
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df["geometry"].apply(lambda x: prepared.prep(x))[l_idx],
right_df["geometry"][r_idx],
),
]
)
)
result.columns = ["index_%s" % lsuffix, "index_%s" % rsuffix, "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
"match_bool", axis=1
)
else:
# when output from the join has no overlapping geometries
result = pd.DataFrame(columns=["index_%s" % lsuffix, "index_%s" % rsuffix])
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={
"index_%s" % (lsuffix): "index_%s" % (rsuffix),
"index_%s" % (rsuffix): "index_%s" % (lsuffix),
}
)
if how == "inner":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(result, left_index=True, right_index=True).merge(
right_df.drop("geometry", axis=1),
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "left":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(
result, left_index=True, right_index=True, how="left"
).merge(
right_df.drop("geometry", axis=1),
how="left",
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "right":
return (
left_df.drop("geometry", axis=1)
.merge(
result.merge(
right_df,
left_on="index_%s" % rsuffix,
right_index=True,
how="right",
),
left_index=True,
right_on="index_%s" % lsuffix,
how="right",
)
.set_index("index_%s" % rsuffix)
)
|
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersection'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://toblerity.org/shapely/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
"""
import rtree
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
if left_df.crs != right_df.crs:
print("Warning: CRS does not match!")
tree_idx = rtree.index.Index()
right_df_bounds = right_df["geometry"].apply(lambda x: x.bounds)
for i in right_df_bounds.index:
tree_idx.insert(i, right_df_bounds[i])
idxmatch = (
left_df["geometry"]
.apply(lambda x: x.bounds)
.apply(lambda x: list(tree_idx.intersection(x)))
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df["geometry"].apply(lambda x: prepared.prep(x))[l_idx],
right_df["geometry"][r_idx],
),
]
)
)
result.columns = ["index_%s" % lsuffix, "index_%s" % rsuffix, "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop("match_bool", axis=1)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={
"index_%s" % (lsuffix): "index_%s" % (rsuffix),
"index_%s" % (rsuffix): "index_%s" % (lsuffix),
}
)
if how == "inner":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(result, left_index=True, right_index=True).merge(
right_df.drop("geometry", axis=1),
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "left":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(
result, left_index=True, right_index=True, how="left"
).merge(
right_df.drop("geometry", axis=1),
how="left",
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "right":
return (
left_df.drop("geometry", axis=1)
.merge(
result.merge(
right_df,
left_on="index_%s" % rsuffix,
right_index=True,
how="right",
),
left_index=True,
right_on="index_%s" % lsuffix,
how="right",
)
.set_index("index_%s" % rsuffix)
)
|
https://github.com/geopandas/geopandas/issues/251
|
Traceback (most recent call last):
File "_test.py", line 21, in <module>
merged = sjoin(polygons, amenities, how='left', op='contains')
File "/Users/mperry/projects/geopandas/geopandas/tools/sjoin.py", line 50, in sjoin
r_idx = np.concatenate(idxmatch.values)
ValueError: need at least one array to concatenate
|
ValueError
|
def sjoin(
left_df,
right_df,
how="inner",
op="intersects",
lsuffix="left",
rsuffix="right",
**kwargs,
):
"""Spatial join of two GeoDataFrames.
left_df, right_df are GeoDataFrames
how: type of join
left -> use keys from left_df; retain only left_df geometry column
right -> use keys from right_df; retain only right_df geometry column
inner -> use intersection of keys from both dfs;
retain only left_df geometry column
op: binary predicate {'intersects', 'contains', 'within'}
see http://toblerity.org/shapely/manual.html#binary-predicates
lsuffix: suffix to apply to overlapping column names (left GeoDataFrame)
rsuffix: suffix to apply to overlapping column names (right GeoDataFrame)
"""
# CHECK VALIDITY OF JOIN TYPE
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
# CHECK VALIDITY OF PREDICATE OPERATION
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
# IF WITHIN, SWAP NAMES
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
# CONVERT CRS IF NOT EQUAL
if left_df.crs != right_df.crs:
print("Warning: CRS does not match!")
# CONSTRUCT SPATIAL INDEX FOR RIGHT DATAFRAME
tree_idx = rtree.index.Index()
right_df_bounds = right_df["geometry"].apply(lambda x: x.bounds)
for i in right_df_bounds.index:
tree_idx.insert(i, right_df_bounds[i])
# FIND INTERSECTION OF SPATIAL INDEX
idxmatch = (
left_df["geometry"]
.apply(lambda x: x.bounds)
.apply(lambda x: list(tree_idx.intersection(x)))
)
idxmatch = idxmatch[idxmatch.str.len() > 0]
r_idx = np.concatenate(idxmatch.values)
l_idx = idxmatch.index.values.repeat(idxmatch.str.len().values)
# VECTORIZE PREDICATE OPERATIONS
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
# CHECK PREDICATES
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df["geometry"].apply(lambda x: prepared.prep(x)).values[l_idx],
right_df["geometry"][r_idx],
),
]
)
)
result.columns = ["index_%s" % lsuffix, "index_%s" % rsuffix, "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop("match_bool", axis=1)
# IF 'WITHIN', SWAP NAMES AGAIN
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={
"index_%s" % (lsuffix): "index_%s" % (rsuffix),
"index_%s" % (rsuffix): "index_%s" % (lsuffix),
}
)
# APPLY JOIN
if how == "inner":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(result, left_index=True, right_index=True).merge(
right_df.drop("geometry", axis=1),
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "left":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(
result, left_index=True, right_index=True, how="left"
).merge(
right_df.drop("geometry", axis=1),
how="left",
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "right":
return (
left_df.drop("geometry", axis=1)
.merge(
result.merge(
right_df,
left_on="index_%s" % rsuffix,
right_index=True,
how="right",
),
left_index=True,
right_on="index_%s" % lsuffix,
how="right",
)
.set_index("index_%s" % rsuffix)
)
|
def sjoin(
left_df,
right_df,
how="inner",
op="intersects",
lsuffix="left",
rsuffix="right",
**kwargs,
):
"""Spatial join of two GeoDataFrames.
left_df, right_df are GeoDataFrames
how: type of join
left -> use keys from left_df; retain only left_df geometry column
right -> use keys from right_df; retain only right_df geometry column
inner -> use intersection of keys from both dfs;
retain only left_df geometry column
op: binary predicate {'intersects', 'contains', 'within'}
see http://toblerity.org/shapely/manual.html#binary-predicates
lsuffix: suffix to apply to overlapping column names (left GeoDataFrame)
rsuffix: suffix to apply to overlapping column names (right GeoDataFrame)
"""
# CHECK VALIDITY OF JOIN TYPE
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
# CHECK VALIDITY OF PREDICATE OPERATION
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
# IF WITHIN, SWAP NAMES
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
# CONVERT CRS IF NOT EQUAL
if left_df.crs != right_df.crs:
print("Warning: CRS does not match!")
# CONSTRUCT SPATIAL INDEX FOR RIGHT DATAFRAME
tree_idx = rtree.index.Index()
right_df_bounds = right_df["geometry"].apply(lambda x: x.bounds)
for i in right_df_bounds.index:
tree_idx.insert(i, right_df_bounds[i])
# FIND INTERSECTION OF SPATIAL INDEX
idxmatch = (
left_df["geometry"]
.apply(lambda x: x.bounds)
.apply(lambda x: list(tree_idx.intersection(x)))
)
idxmatch = idxmatch[idxmatch.str.len() > 0]
r_idx = np.concatenate(idxmatch.values)
l_idx = idxmatch.index.values.repeat(idxmatch.str.len().values)
# VECTORIZE PREDICATE OPERATIONS
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
# CHECK PREDICATES
result = pd.DataFrame(
np.column_stack(
[
l_idx,
r_idx,
check_predicates(
left_df["geometry"].apply(lambda x: prepared.prep(x)).values[l_idx],
right_df["geometry"].values[r_idx],
),
]
)
)
result.columns = ["index_%s" % lsuffix, "index_%s" % rsuffix, "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop("match_bool", axis=1)
# IF 'WITHIN', SWAP NAMES AGAIN
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={
"index_%s" % (lsuffix): "index_%s" % (rsuffix),
"index_%s" % (rsuffix): "index_%s" % (lsuffix),
}
)
# APPLY JOIN
if how == "inner":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(result, left_index=True, right_index=True).merge(
right_df.drop("geometry", axis=1),
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "left":
result = result.set_index("index_%s" % lsuffix)
return left_df.merge(
result, left_index=True, right_index=True, how="left"
).merge(
right_df.drop("geometry", axis=1),
how="left",
left_on="index_%s" % rsuffix,
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
elif how == "right":
return (
left_df.drop("geometry", axis=1)
.merge(
result.merge(
right_df,
left_on="index_%s" % rsuffix,
right_index=True,
how="right",
),
left_index=True,
right_on="index_%s" % lsuffix,
how="right",
)
.set_index("index_%s" % rsuffix)
)
|
https://github.com/geopandas/geopandas/issues/190
|
Traceback (most recent call last):
File "C:/Users/JKosir/PyCharmProjects/nyctaxi/main.py", line 50, in <module>
for group in sjoin(chunk, nbh, how='inner').groupby('REGIONID'):
File "C:\Python27\lib\site-packages\geopandas\tools\sjoin.py", line 58, in sjoin
l_idx = idxmatch.index.values.repeat(idxmatch.str.len().values)
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
|
TypeError
|
def _setup_logging(args):
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
config = None
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
for arg in args:
if config_utils.is_nested_config_arg(arg):
config = arg["config"]
break
elif config_utils.is_std_config_arg(arg):
config = arg
break
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(
arg[0]
):
config = arg[0]["config"]
break
if config is None:
raise NotImplementedError("No config found in arguments: %s" % args[0])
handler = setup_local_logging(config, config.get("parallel", {}))
try:
yield config
except:
logger.exception("Unexpected error")
raise
finally:
if hasattr(handler, "close"):
handler.close()
|
def _setup_logging(args):
# Set environment to standard to use periods for decimals and avoid localization
os.environ["LC_ALL"] = "C"
os.environ["LC"] = "C"
os.environ["LANG"] = "C"
config = None
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
for arg in args:
if config_utils.is_nested_config_arg(arg):
config = arg["config"]
break
elif config_utils.is_std_config_arg(arg):
config = arg
break
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(
arg[0]
):
config = arg[0]["config"]
break
if config is None:
raise NotImplementedError("No config found in arguments: %s" % args[0])
handler = setup_local_logging(config, config.get("parallel", {}))
try:
yield config
except:
logger.exception("Unexpected error")
raise
finally:
if hasattr(handler, "close"):
handler.close()
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def process(args):
"""Run the function in args.name given arguments in args.argfile."""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
setpath.prepend_bcbiopath()
try:
fn = getattr(multitasks, args.name)
except AttributeError:
raise AttributeError(
"Did not find exposed function in bcbio.distributed.multitasks named '%s'"
% args.name
)
if args.moreargs or args.raw:
fnargs = [args.argfile] + args.moreargs
work_dir = None
argfile = None
else:
with open(args.argfile) as in_handle:
fnargs = yaml.safe_load(in_handle)
work_dir = os.path.dirname(args.argfile)
fnargs = config_utils.merge_resources(fnargs)
argfile = (
args.outfile
if args.outfile
else "%s-out%s" % os.path.splitext(args.argfile)
)
if not work_dir:
work_dir = os.getcwd()
if len(fnargs) > 0 and fnargs[0] == "cwl":
fnargs, parallel, out_keys, input_files = _world_from_cwl(
args.name, fnargs[1:], work_dir
)
# Can remove this awkward Docker merge when we do not need custom GATK3 installs
fnargs = config_utils.merge_resources(fnargs)
argfile = os.path.join(work_dir, "cwl.output.json")
else:
parallel, out_keys, input_files = None, {}, []
with utils.chdir(work_dir):
with contextlib.closing(log.setup_local_logging(parallel={"wrapper": "runfn"})):
try:
out = fn(*fnargs)
except:
logger.exception()
raise
finally:
# Clean up any copied and unpacked workflow inputs, avoiding extra disk usage
wf_input_dir = os.path.join(work_dir, "wf-inputs")
if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir):
shutil.rmtree(wf_input_dir)
if argfile:
try:
_write_out_argfile(
argfile, out, fnargs, parallel, out_keys, input_files, work_dir
)
except:
logger.exception()
raise
|
def process(args):
"""Run the function in args.name given arguments in args.argfile."""
# Set environment to standard to use periods for decimals and avoid localization
os.environ["LC_ALL"] = "C"
os.environ["LC"] = "C"
os.environ["LANG"] = "C"
setpath.prepend_bcbiopath()
try:
fn = getattr(multitasks, args.name)
except AttributeError:
raise AttributeError(
"Did not find exposed function in bcbio.distributed.multitasks named '%s'"
% args.name
)
if args.moreargs or args.raw:
fnargs = [args.argfile] + args.moreargs
work_dir = None
argfile = None
else:
with open(args.argfile) as in_handle:
fnargs = yaml.safe_load(in_handle)
work_dir = os.path.dirname(args.argfile)
fnargs = config_utils.merge_resources(fnargs)
argfile = (
args.outfile
if args.outfile
else "%s-out%s" % os.path.splitext(args.argfile)
)
if not work_dir:
work_dir = os.getcwd()
if len(fnargs) > 0 and fnargs[0] == "cwl":
fnargs, parallel, out_keys, input_files = _world_from_cwl(
args.name, fnargs[1:], work_dir
)
# Can remove this awkward Docker merge when we do not need custom GATK3 installs
fnargs = config_utils.merge_resources(fnargs)
argfile = os.path.join(work_dir, "cwl.output.json")
else:
parallel, out_keys, input_files = None, {}, []
with utils.chdir(work_dir):
with contextlib.closing(log.setup_local_logging(parallel={"wrapper": "runfn"})):
try:
out = fn(*fnargs)
except:
logger.exception()
raise
finally:
# Clean up any copied and unpacked workflow inputs, avoiding extra disk usage
wf_input_dir = os.path.join(work_dir, "wf-inputs")
if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir):
shutil.rmtree(wf_input_dir)
if argfile:
try:
_write_out_argfile(
argfile, out, fnargs, parallel, out_keys, input_files, work_dir
)
except:
logger.exception()
raise
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def run_main(
workdir,
config_file=None,
fc_dir=None,
run_info_yaml=None,
parallel=None,
workflow=None,
):
"""Run variant analysis, handling command line options."""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
log.setup_local_logging(config, parallel)
logger.info(f"System YAML configuration: {os.path.abspath(config_file)}.")
logger.info(f"Locale set to {locale_to_use}.")
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, (
"IPython parallel requires a specified scheduler (-s)"
)
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, (
"IPython parallel requires a specified queue (-q)"
)
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
|
def run_main(
workdir,
config_file=None,
fc_dir=None,
run_info_yaml=None,
parallel=None,
workflow=None,
):
"""Run variant analysis, handling command line options."""
# Set environment to standard to use periods for decimals and avoid localization
os.environ["LC_ALL"] = "C"
os.environ["LC"] = "C"
os.environ["LANG"] = "C"
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, (
"IPython parallel requires a specified scheduler (-s)"
)
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, (
"IPython parallel requires a specified queue (-q)"
)
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def _run_toplevel(
config, config_file, work_dir, parallel, fc_dir=None, run_info_yaml=None
):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
parallel = log.create_base_logger(config, parallel)
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
|
def _run_toplevel(
config, config_file, work_dir, parallel, fc_dir=None, run_info_yaml=None
):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
logger.info("System YAML configuration: %s" % os.path.abspath(config_file))
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def _run_delly(bam_files, chrom, ref_file, work_dir, items):
"""Run delly, calling structural variations for the specified type."""
batch = sshared.get_cur_batch(items)
ext = "-%s-svs" % batch if batch else "-svs"
out_file = os.path.join(
work_dir,
"%s%s-%s.bcf"
% (os.path.splitext(os.path.basename(bam_files[0]))[0], ext, chrom),
)
final_file = "%s.vcf.gz" % (utils.splitext_plus(out_file)[0])
cores = min(
utils.get_in(items[0], ("config", "algorithm", "num_cores"), 1), len(bam_files)
)
if not utils.file_exists(out_file) and not utils.file_exists(final_file):
with file_transaction(items[0], out_file) as tx_out_file:
if sshared.has_variant_regions(items, out_file, chrom):
exclude = ["-x", _delly_exclude_file(items, out_file, chrom)]
cmd = (
["delly", "call", "-g", ref_file, "-o", tx_out_file]
+ exclude
+ bam_files
)
locale_to_use = utils.get_locale()
multi_cmd = "export OMP_NUM_THREADS=%s && export LC_ALL=%s && " % (
cores,
locale_to_use,
)
try:
do.run(multi_cmd + " ".join(cmd), "delly structural variant")
except subprocess.CalledProcessError as msg:
# Small input samples, write an empty vcf
if (
"Sample has not enough data to estimate library parameters"
in str(msg)
):
pass
# delly returns an error exit code if there are no variants
elif "No structural variants found" not in str(msg):
raise
return [_bgzip_and_clean(out_file, items)]
|
def _run_delly(bam_files, chrom, ref_file, work_dir, items):
"""Run delly, calling structural variations for the specified type."""
batch = sshared.get_cur_batch(items)
ext = "-%s-svs" % batch if batch else "-svs"
out_file = os.path.join(
work_dir,
"%s%s-%s.bcf"
% (os.path.splitext(os.path.basename(bam_files[0]))[0], ext, chrom),
)
final_file = "%s.vcf.gz" % (utils.splitext_plus(out_file)[0])
cores = min(
utils.get_in(items[0], ("config", "algorithm", "num_cores"), 1), len(bam_files)
)
if not utils.file_exists(out_file) and not utils.file_exists(final_file):
with file_transaction(items[0], out_file) as tx_out_file:
if sshared.has_variant_regions(items, out_file, chrom):
exclude = ["-x", _delly_exclude_file(items, out_file, chrom)]
cmd = (
["delly", "call", "-g", ref_file, "-o", tx_out_file]
+ exclude
+ bam_files
)
multi_cmd = "export OMP_NUM_THREADS=%s && export LC_ALL=C && " % cores
try:
do.run(multi_cmd + " ".join(cmd), "delly structural variant")
except subprocess.CalledProcessError as msg:
# Small input samples, write an empty vcf
if (
"Sample has not enough data to estimate library parameters"
in str(msg)
):
pass
# delly returns an error exit code if there are no variants
elif "No structural variants found" not in str(msg):
raise
return [_bgzip_and_clean(out_file, items)]
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def locale_export():
"""Exports for dealing with Click-based programs and ASCII/Unicode errors.
RuntimeError: Click will abort further execution because Python 3 was
configured to use ASCII as encoding for the environment.
Consult https://click.palletsprojects.com/en/7.x/python3/ for mitigation steps.
"""
locale_to_use = get_locale()
return "export LC_ALL=%s && export LANG=%s && " % (locale_to_use, locale_to_use)
|
def locale_export():
"""Exports for dealing with Click-based programs and ASCII/Unicode errors.
RuntimeError: Click will abort further execution because Python 3 was
configured to use ASCII as encoding for the environment.
Consult https://click.palletsprojects.com/en/7.x/python3/ for mitigation steps.
Looks up available locales on the system to find an appropriate one to pick,
defaulting to C.UTF-8 which is globally available on newer systems.
"""
locale_to_use = "C.UTF-8"
try:
locales = (
subprocess.check_output(["locale", "-a"])
.decode(errors="ignore")
.split("\n")
)
except subprocess.CalledProcessError:
locales = []
for locale in locales:
if locale.lower().endswith(("utf-8", "utf8")):
locale_to_use = locale
break
return "export LC_ALL=%s && export LANG=%s && " % (locale_to_use, locale_to_use)
|
https://github.com/bcbio/bcbio-nextgen/issues/2890
|
Traceback (most recent call last):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 52, in _setup_logging
yield config
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 447, in detect_sv
return ipython.zip_args(apply(structural.detect_sv, *args))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 80, in apply
return object(*args, **kwargs)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/__init__.py", line 205, in detect_sv
for svdata in caller_fn(items):
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 57, in run
return _cnvkit_by_type(items, background)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 67, in _cnvkit_by_type
return _run_cnvkit_single(items[0])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 110, in _run_cnvkit_single
return _associate_cnvkit_out(ckouts, [data])
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 90, in _associate_cnvkit_out
ckout = _add_plots_to_output(ckout, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 640, in _add_plots_to_output
scatter = _add_scatter_plot(out, data)
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/cnvkit.py", line 710, in _add_scatter_plot
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
File "/opt/bcbio/anaconda/lib/python3.6/site-packages/bcbio/structural/plot.py", line 91, in _prioritize_plot_regions
for r in region_bt:
File "pybedtools/cbedtools.pyx", line 754, in pybedtools.cbedtools.IntervalIterator.__next__
File "/opt/bcbio/anaconda/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 749: ordinal not in range(128)
|
UnicodeDecodeError
|
def _sge_info(queue):
"""Returns machine information for an sge job scheduler."""
qhost_out = subprocess.check_output(["qhost", "-q", "-xml"]).decode()
qstat_queue = ["-q", queue] if queue and "," not in queue else []
qstat_out = subprocess.check_output(["qstat", "-f", "-xml"] + qstat_queue).decode()
slot_info = _sge_get_slots(qstat_out)
mem_info = _sge_get_mem(qhost_out, queue)
machine_keys = list(slot_info.keys())
# num_cpus_vec = [slot_info[x]["slots_total"] for x in machine_keys]
# mem_vec = [mem_info[x]["mem_total"] for x in machine_keys]
mem_per_slot = [
mem_info[x]["mem_total"] / float(slot_info[x]["slots_total"])
for x in machine_keys
]
min_ratio_index = mem_per_slot.index(median_left(mem_per_slot))
mem_info[machine_keys[min_ratio_index]]["mem_total"]
return [
{
"cores": slot_info[machine_keys[min_ratio_index]]["slots_total"],
"memory": mem_info[machine_keys[min_ratio_index]]["mem_total"],
"name": "sge_machine",
}
]
|
def _sge_info(queue):
"""Returns machine information for an sge job scheduler."""
qhost_out = subprocess.check_output(["qhost", "-q", "-xml"]).decode()
qstat_queue = ["-q", queue] if queue and "," not in queue else []
qstat_out = subprocess.check_output(["qstat", "-f", "-xml"] + qstat_queue).decode()
slot_info = _sge_get_slots(qstat_out)
mem_info = _sge_get_mem(qhost_out, queue)
machine_keys = slot_info.keys()
# num_cpus_vec = [slot_info[x]["slots_total"] for x in machine_keys]
# mem_vec = [mem_info[x]["mem_total"] for x in machine_keys]
mem_per_slot = [
mem_info[x]["mem_total"] / float(slot_info[x]["slots_total"])
for x in machine_keys
]
min_ratio_index = mem_per_slot.index(median_left(mem_per_slot))
mem_info[machine_keys[min_ratio_index]]["mem_total"]
return [
{
"cores": slot_info[machine_keys[min_ratio_index]]["slots_total"],
"memory": mem_info[machine_keys[min_ratio_index]]["mem_total"],
"name": "sge_machine",
}
]
|
https://github.com/bcbio/bcbio-nextgen/issues/3063
|
Using GATK jar /export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/share/gatk4-4.1.4.1-1/gatk-package-4.1.4.1-local.jar
Running:
java -Dsamjdk.use_async_io_read_samtools=false -Dsamjdk.use_async_io_write_samtools=true -Dsamjdk.use_async_io_write_tribble=false -Dsamjdk.compression_level=2 -Xms681m -Xmx1g -XX:+UseSerialGC -Djava.io.tmpdir=/export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/bcbiotx/tmp6esj9s4r -jar /export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/share/gatk4-4.1.4.1-1/gatk-package-4.1.4.1-local.jar HaplotypeCaller -R /export/home/ncit/external/a.mizeranschi/bcbio_nextgen/genomes/Hsapiens/hg38/seq/hg38.fa --annotation MappingQualityRankSumTest --annotation MappingQualityZero --annotation QualByDepth --annotation ReadPosRankSumTest --annotation RMSMappingQuality --annotation BaseQualityRankSumTest --annotation FisherStrand --annotation MappingQuality --annotation DepthPerAlleleBySample --annotation Coverage -I /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/align/HG01518/HG01518-sort.bam -L /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/gatk-haplotype/chr1/HG01518-chr1_0_50142832-regions.bed --interval-set-rule INTERSECTION --annotation ClippingRankSumTest --annotation DepthPerSampleHC --native-pair-hmm-threads 1 --verbosity ERROR --emit-ref-confidence GVCF -GQB 10 -GQB 20 -GQB 30 -GQB 40 -GQB 60 -GQB 80 -ploidy 2 --output /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/bcbiotx/tmp6esj9s4r/HG01518-chr1_0_50142832.vcf.gz
' returned non-zero exit status 1.
[2020-01-20T08:09Z] haswell-wn35.grid.pub.ro: Unexpected error
Traceback (most recent call last):
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 54, in _setup_logging
yield config
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 439, in variantcall_sample
return ipython.zip_args(apply(genotype.variantcall_sample, *args))
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/distributed/ipythontasks.py", line 82, in apply
return object(*args, **kwargs)
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/variation/genotype.py", line 377, in variantcall_sample
out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file)
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/variation/gatk.py", line 180, in haplotype_caller
parallel_gc=_use_spark(num_cores, gatk_type, items, spark_opts))
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/broad/__init__.py", line 365, in run_gatk
log_error=log_error)
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 26, in run
_do_run(cmd, checks, log_stdout, env=env)
File "/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 106, in _do_run
raise subprocess.CalledProcessError(exitcode, error_msg)
subprocess.CalledProcessError: Command 'set -o pipefail; unset JAVA_HOME && export PATH=/export/home/ncit/external/a.mizeranschi/bcbio_nextgen/anaconda/bin:"$PATH" && gatk --java-options '-Xms681m -Xmx1g -XX:+UseSerialGC -Djava.io.tmpdir=/export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/bcbiotx/tmp6esj9s4r' HaplotypeCaller -R /export/home/ncit/external/a.mizeranschi/bcbio_nextgen/genomes/Hsapiens/hg38/seq/hg38.fa --annotation MappingQualityRankSumTest --annotation MappingQualityZero --annotation QualByDepth --annotation ReadPosRankSumTest --annotation RMSMappingQuality --annotation BaseQualityRankSumTest --annotation FisherStrand --annotation MappingQuality --annotation DepthPerAlleleBySample --annotation Coverage -I /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/align/HG01518/HG01518-sort.bam -L /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/gatk-haplotype/chr1/HG01518-chr1_0_50142832-regions.bed --interval-set-rule INTERSECTION --annotation ClippingRankSumTest --annotation DepthPerSampleHC --native-pair-hmm-threads 1 --verbosity ERROR --emit-ref-confidence GVCF -GQB 10 -GQB 20 -GQB 30 -GQB 40 -GQB 60 -GQB 80 -ploidy 2 --output /export/home/ncit/external/a.mizeranschi/automated-VC-test/testingVC/work/bcbiotx/tmp6esj9s4r/HG01518-chr1_0_50142832.vcf.gz
[January 20, 2020 10:09:19 AM EET] org.broadinstitute.hellbender.tools.walkers.haplotypecaller.HaplotypeCaller done. Elapsed time: 1.44 minutes.
Runtime.totalMemory()=1037959168
Exception in thread "main" java.lang.OutOfMemoryError: Java heap space
at org.apache.commons.math3.util.MathArrays.copyOfRange(MathArrays.java:827)
at org.apache.commons.math3.stat.descriptive.rank.Percentile.copyOf(Percentile.java:479)
at org.apache.commons.math3.stat.descriptive.rank.Percentile.removeAndSlice(Percentile.java:527)
at org.apache.commons.math3.stat.descriptive.rank.Percentile.getWorkArray(Percentile.java:455)
at org.apache.commons.math3.stat.descriptive.rank.Percentile.evaluate(Percentile.java:351)
at org.apache.commons.math3.stat.descriptive.rank.Percentile.evaluate(Percentile.java:302)
at org.apache.commons.math3.stat.descriptive.AbstractUnivariateStatistic.evaluate(AbstractUnivariateStatistic.java:121)
at org.broadinstitute.hellbender.utils.MathUtils.median(MathUtils.java:839)
at org.broadinstitute.hellbender.utils.variant.writers.GVCFBlock.getMedianDP(GVCFBlock.java:75)
at org.broadinstitute.hellbender.utils.variant.writers.HomRefBlock.createHomRefGenotype(HomRefBlock.java:73)
at org.broadinstitute.hellbender.utils.variant.writers.GVCFBlock.toVariantContext(GVCFBlock.java:49)
at org.broadinstitute.hellbender.utils.variant.writers.GVCFBlockCombiner.emitCurrentBlock(GVCFBlockCombiner.java:177)
at org.broadinstitute.hellbender.utils.variant.writers.GVCFBlockCombiner.signalEndOfInput(GVCFBlockCombiner.java:227)
at org.broadinstitute.hellbender.utils.variant.writers.GVCFWriter.close(GVCFWriter.java:70)
at org.broadinstitute.hellbender.tools.walkers.haplotypecaller.HaplotypeCaller.closeTool(HaplotypeCaller.java:246)
at org.broadinstitute.hellbender.engine.GATKTool.doWork(GATKTool.java:1052)
at org.broadinstitute.hellbender.cmdline.CommandLineProgram.runTool(CommandLineProgram.java:139)
at org.broadinstitute.hellbender.cmdline.CommandLineProgram.instanceMainPostParseArgs(CommandLineProgram.java:191)
at org.broadinstitute.hellbender.cmdline.CommandLineProgram.instanceMain(CommandLineProgram.java:210)
at org.broadinstitute.hellbender.Main.runCommandLineProgram(Main.java:163)
at org.broadinstitute.hellbender.Main.mainEntry(Main.java:206)
at org.broadinstitute.hellbender.Main.main(Main.java:292)
|
subprocess.CalledProcessError
|
def picard_reorder(picard, in_bam, ref_file, out_file):
"""Reorder BAM file to match reference file ordering."""
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
opts = [
("INPUT", in_bam),
("OUTPUT", tx_out_file),
("SEQUENCE_DICTIONARY", dict_file),
("ALLOW_INCOMPLETE_DICT_CONCORDANCE", "true"),
("TMP_DIR", tmp_dir),
]
picard.run("ReorderSam", opts)
return out_file
|
def picard_reorder(picard, in_bam, ref_file, out_file):
"""Reorder BAM file to match reference file ordering."""
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [
("INPUT", in_bam),
("OUTPUT", tx_out_file),
("REFERENCE", ref_file),
("ALLOW_INCOMPLETE_DICT_CONCORDANCE", "true"),
("TMP_DIR", tmp_dir),
]
picard.run("ReorderSam", opts)
return out_file
|
https://github.com/bcbio/bcbio-nextgen/issues/3000
|
Uncaught exception occurred
Traceback (most recent call last):
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 26, in run
_do_run(cmd, checks, log_stdout, env=env)
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 106, in _do_run
raise subprocess.CalledProcessError(exitcode, error_msg)
subprocess.CalledProcessError: Command 'set -o pipefail; unset JAVA_HOME && export PATH=/path/anaconda/bin:"$PAT
INFO 2019-10-18 21:14:01 ReorderSam
********** NOTE: Picard's command line syntax is changing.
**********
********** For more information, please see:
********** https://github.com/broadinstitute/picard/wiki/Command-Line-Syntax-Transition-For-Users-(Pre-Transition)
**********
********** The command line looks like this in the new syntax:
**********
********** ReorderSam -INPUT /path/bam/sample-ready.bam -OUTPUT /path
********** The command line looks like this in the new syntax:
**********
********** ReorderSam -INPUT /path/bam/sample.bam -OUTPUT /path
**********
ERROR: Unrecognized option: REFERENCE
USAGE: ReorderSam [options]
Documentation: http://broadinstitute.github.io/picard/command-line-overview.html#ReorderSam
Not to be confused with SortSam which sorts a SAM or BAM file with a valid sequence dictionary, ReorderSam reorders
reads in a SAM/BAM file to match the contig ordering in a provided reference file, as determined by exact name matching
of contigs. Reads mapped to contigs absent in the new reference are unmapped. Runs substantially faster if the input is
an indexed BAM file.
Example
java -jar picard.jar ReorderSam \
INPUT=sample.bam \
OUTPUT=reordered.bam \
SEQUENCE_DICTIONARY=reference_with_different_order.dict
Version: 2.21.1-SNAPSHOT
Options:
--help
-h Displays options specific to this tool.
--stdhelp
-H Displays options specific to this tool AND options common to all Picard command line
tools.
--version Displays program version.
INPUT=File
I=File Input file (SAM or BAM) to extract reads from. Required.
OUTPUT=File
O=File Output file (SAM or BAM) to write extracted reads to. Required.
SEQUENCE_DICTIONARY=File
SD=File A Sequence Dictionary for the OUTPUT file (can be read from one of the following file
types (SAM, BAM, VCF, BCF, Interval List, Fasta, or Dict) Required.
ALLOW_INCOMPLETE_DICT_CONCORDANCE=Boolean
S=Boolean If true, allows only a partial overlap of the original contigs with the new reference
sequence contigs. By default, this tool requires a corresponding contig in the new
reference for each read contig Default value: false. This option can be set to 'null' to
clear the default value. Possible values: {true, false}
ALLOW_CONTIG_LENGTH_DISCORDANCE=Boolean
U=Boolean If true, then permits mapping from a read contig to a new reference contig with the same
name but a different length. Highly dangerous, only use if you know what you are doing.
Default value: false. This option can be set to 'null' to clear the default value.
Possible values: {true, false}
TMP_DIR=File One or more directories with space available to be used by this program for temporary
storage of working files Default value: null. This option may be specified 0 or more
times.
VERBOSITY=LogLevel Control verbosity of logging. Default value: INFO. This option can be set to 'null' to
clear the default value. Possible values: {ERROR, WARNING, INFO, DEBUG}
QUIET=Boolean Whether to suppress job-summary info on System.err. Default value: false. This option can
be set to 'null' to clear the default value. Possible values: {true, false}
VALIDATION_STRINGENCY=ValidationStringency
Validation stringency for all SAM files read by this program. Setting stringency to
SILENT can improve performance when processing a BAM file in which variable-length data
(read, qualities, tags) do not otherwise need to be decoded. Default value: STRICT. This
option can be set to 'null' to clear the default value. Possible values: {STRICT, LENIENT,
SILENT}
COMPRESSION_LEVEL=Integer Compression level for all compressed files created (e.g. BAM and VCF). Default value: 5.
This option can be set to 'null' to clear the default value.
MAX_RECORDS_IN_RAM=Integer When writing files that need to be sorted, this will specify the number of records stored
in RAM before spilling to disk. Increasing this number reduces the number of file handles
needed to sort the file, and increases the amount of RAM needed. Default value: 500000.
This option can be set to 'null' to clear the default value.
CREATE_INDEX=Boolean Whether to create a BAM index when writing a coordinate-sorted BAM file. Default value:
false. This option can be set to 'null' to clear the default value. Possible values:
{true, false}
CREATE_MD5_FILE=Boolean Whether to create an MD5 digest for any BAM or FASTQ files created. Default value:
false. This option can be set to 'null' to clear the default value. Possible values:
{true, false}
REFERENCE_SEQUENCE=File
R=File Reference sequence file. Default value: null.
GA4GH_CLIENT_SECRETS=String Google Genomics API client_secrets.json file path. Default value: client_secrets.json.
This option can be set to 'null' to clear the default value.
USE_JDK_DEFLATER=Boolean
USE_JDK_DEFLATER=Boolean Use the JDK Deflater instead of the Intel Deflater for writing compressed output Default
value: false. This option can be set to 'null' to clear the default value. Possible
values: {true, false}
USE_JDK_INFLATER=Boolean
USE_JDK_INFLATER=Boolean Use the JDK Inflater instead of the Intel Inflater for reading compressed input Default
value: false. This option can be set to 'null' to clear the default value. Possible
values: {true, false}
OPTIONS_FILE=File File of OPTION_NAME=value pairs. No positional parameters allowed. Unlike command-line
options, unrecognized options are ignored. A single-valued option set in an options file
may be overridden by a subsequent command-line option. A line starting with '#' is
considered a comment. Required.
' returned non-zero exit status 1.
[2019-10-18T21:14Z] seskscpn129.prim.scp: Uncaught exception occurred
Traceback (most recent call last):
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 26, in run
_do_run(cmd, checks, log_stdout, env=env)
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 106, in _do_run
raise subprocess.CalledProcessError(exitcode, error_msg)
subprocess.CalledProcessError: Command 'set -o pipefail; unset JAVA_HOME && export PATH=/path/anaconda/bin:"$PAT
INFO 2019-10-18 21:14:01 ReorderSam
|
subprocess.CalledProcessError
|
def process_alignment(data, alt_input=None):
"""Do an alignment of fastq files, preparing a sorted BAM output file."""
data = cwlutils.normalize_missing(utils.to_single_data(data))
data = cwlutils.unpack_tarballs(data, data)
fastq1, fastq2 = dd.get_input_sequence_files(data)
if alt_input:
fastq1, fastq2 = alt_input
config = data["config"]
aligner = config["algorithm"].get("aligner", None)
if fastq1 and objectstore.file_exists_or_remote(fastq1) and aligner:
logger.info(
"Aligning lane %s with %s aligner" % (data["rgnames"]["lane"], aligner)
)
data = align_to_sort_bam(fastq1, fastq2, aligner, data)
if dd.get_correct_umis(data):
data["work_bam"] = postalign.correct_umis(data)
if dd.get_umi_consensus(data):
data["umi_bam"] = dd.get_work_bam(data)
if fastq2:
f1, f2, avg_cov = postalign.umi_consensus(data)
data["config"]["algorithm"]["rawumi_avg_cov"] = avg_cov
del data["config"]["algorithm"]["umi_type"]
data["config"]["algorithm"]["mark_duplicates"] = False
data = align_to_sort_bam(f1, f2, aligner, data)
else:
raise ValueError(
"Single fastq input for UMI processing; fgbio needs paired reads: %s"
% dd.get_sample_name(data)
)
data = _add_supplemental_bams(data)
elif (
fastq1 and objectstore.file_exists_or_remote(fastq1) and fastq1.endswith(".bam")
):
sort_method = config["algorithm"].get("bam_sort")
bamclean = config["algorithm"].get("bam_clean")
if bamclean is True or bamclean == "picard":
if sort_method and sort_method != "coordinate":
raise ValueError(
"Cannot specify `bam_clean: picard` with `bam_sort` other than coordinate: %s"
% sort_method
)
ref_file = dd.get_ref_file(data)
out_bam = cleanbam.picard_prep(
fastq1, data["rgnames"], ref_file, data["dirs"], data
)
elif bamclean == "fixrg":
out_bam = cleanbam.fixrg(
fastq1, data["rgnames"], dd.get_ref_file(data), data["dirs"], data
)
elif bamclean == "remove_extracontigs":
out_bam = cleanbam.remove_extracontigs(fastq1, data)
elif sort_method:
runner = broad.runner_from_path("picard", config)
out_file = os.path.join(
data["dirs"]["work"],
"{}-sort.bam".format(os.path.splitext(os.path.basename(fastq1))[0]),
)
if not utils.file_exists(out_file):
work_dir = utils.safe_makedir(
os.path.join(
dd.get_work_dir(data), "bamclean", dd.get_sample_name(data)
)
)
out_file = os.path.join(
work_dir, "{}-sort.bam".format(dd.get_sample_name(data))
)
out_bam = runner.run_fn("picard_sort", fastq1, sort_method, out_file)
else:
out_bam = _link_bam_file(
fastq1,
os.path.join(
dd.get_work_dir(data), "prealign", dd.get_sample_name(data)
),
data,
)
bam.index(out_bam, data["config"])
bam.check_header(
out_bam, data["rgnames"], dd.get_ref_file(data), data["config"]
)
dedup_bam = postalign.dedup_bam(out_bam, data)
bam.index(dedup_bam, data["config"])
data["work_bam"] = dedup_bam
elif (
fastq1
and objectstore.file_exists_or_remote(fastq1)
and fastq1.endswith(".cram")
):
data["work_bam"] = fastq1
elif fastq1 is None and not dd.get_aligner(data):
data["config"]["algorithm"]["variantcaller"] = False
data["work_bam"] = None
elif not fastq1:
raise ValueError(
"No 'files' specified for input sample: %s" % dd.get_sample_name(data)
)
elif "kraken" in config["algorithm"]: # kraken doesn's need bam
pass
else:
raise ValueError(
"Could not process input file from sample configuration. \n"
+ fastq1
+ "\nIs the path to the file correct or is empty?\n"
+ "If it is a fastq file (not pre-aligned BAM or CRAM), "
"is an aligner specified in the input configuration?"
)
if data.get("work_bam"):
# Add stable 'align_bam' target to use for retrieving raw alignment
data["align_bam"] = data["work_bam"]
data = _add_hla_files(data)
return [[data]]
|
def process_alignment(data, alt_input=None):
"""Do an alignment of fastq files, preparing a sorted BAM output file."""
data = cwlutils.normalize_missing(utils.to_single_data(data))
data = cwlutils.unpack_tarballs(data, data)
fastq1, fastq2 = dd.get_input_sequence_files(data)
if alt_input:
fastq1, fastq2 = alt_input
config = data["config"]
aligner = config["algorithm"].get("aligner", None)
if fastq1 and objectstore.file_exists_or_remote(fastq1) and aligner:
logger.info(
"Aligning lane %s with %s aligner" % (data["rgnames"]["lane"], aligner)
)
data = align_to_sort_bam(fastq1, fastq2, aligner, data)
if dd.get_correct_umis(data):
data["work_bam"] = postalign.correct_umis(data)
if dd.get_umi_consensus(data):
data["umi_bam"] = dd.get_work_bam(data)
if fastq2:
f1, f2, avg_cov = postalign.umi_consensus(data)
data["config"]["algorithm"]["rawumi_avg_cov"] = avg_cov
del data["config"]["algorithm"]["umi_type"]
data["config"]["algorithm"]["mark_duplicates"] = False
data = align_to_sort_bam(f1, f2, aligner, data)
else:
raise ValueError(
"Single fastq input for UMI processing; fgbio needs paired reads: %s"
% dd.get_sample_name(data)
)
data = _add_supplemental_bams(data)
elif (
fastq1 and objectstore.file_exists_or_remote(fastq1) and fastq1.endswith(".bam")
):
sort_method = config["algorithm"].get("bam_sort")
bamclean = config["algorithm"].get("bam_clean")
if bamclean is True or bamclean == "picard":
if sort_method and sort_method != "coordinate":
raise ValueError(
"Cannot specify `bam_clean: picard` with `bam_sort` other than coordinate: %s"
% sort_method
)
out_bam = cleanbam.picard_prep(
fastq1, data["rgnames"], dd.get_ref_file(data), data["dirs"], data
)
elif bamclean == "fixrg":
out_bam = cleanbam.fixrg(
fastq1, data["rgnames"], dd.get_ref_file(data), data["dirs"], data
)
elif bamclean == "remove_extracontigs":
out_bam = cleanbam.remove_extracontigs(fastq1, data)
elif sort_method:
runner = broad.runner_from_path("picard", config)
out_file = os.path.join(
data["dirs"]["work"],
"{}-sort.bam".format(os.path.splitext(os.path.basename(fastq1))[0]),
)
if not utils.file_exists(out_file):
work_dir = utils.safe_makedir(
os.path.join(
dd.get_work_dir(data), "bamclean", dd.get_sample_name(data)
)
)
out_file = os.path.join(
work_dir, "{}-sort.bam".format(dd.get_sample_name(data))
)
out_bam = runner.run_fn("picard_sort", fastq1, sort_method, out_file)
else:
out_bam = _link_bam_file(
fastq1,
os.path.join(
dd.get_work_dir(data), "prealign", dd.get_sample_name(data)
),
data,
)
bam.index(out_bam, data["config"])
bam.check_header(
out_bam, data["rgnames"], dd.get_ref_file(data), data["config"]
)
dedup_bam = postalign.dedup_bam(out_bam, data)
bam.index(dedup_bam, data["config"])
data["work_bam"] = dedup_bam
elif (
fastq1
and objectstore.file_exists_or_remote(fastq1)
and fastq1.endswith(".cram")
):
data["work_bam"] = fastq1
elif fastq1 is None and not dd.get_aligner(data):
data["config"]["algorithm"]["variantcaller"] = False
data["work_bam"] = None
elif not fastq1:
raise ValueError(
"No 'files' specified for input sample: %s" % dd.get_sample_name(data)
)
elif "kraken" in config["algorithm"]: # kraken doesn's need bam
pass
else:
raise ValueError(
"Could not process input file from sample configuration. \n"
+ fastq1
+ "\nIs the path to the file correct or is empty?\n"
+ "If it is a fastq file (not pre-aligned BAM or CRAM), "
"is an aligner specified in the input configuration?"
)
if data.get("work_bam"):
# Add stable 'align_bam' target to use for retrieving raw alignment
data["align_bam"] = data["work_bam"]
data = _add_hla_files(data)
return [[data]]
|
https://github.com/bcbio/bcbio-nextgen/issues/3000
|
Uncaught exception occurred
Traceback (most recent call last):
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 26, in run
_do_run(cmd, checks, log_stdout, env=env)
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 106, in _do_run
raise subprocess.CalledProcessError(exitcode, error_msg)
subprocess.CalledProcessError: Command 'set -o pipefail; unset JAVA_HOME && export PATH=/path/anaconda/bin:"$PAT
INFO 2019-10-18 21:14:01 ReorderSam
********** NOTE: Picard's command line syntax is changing.
**********
********** For more information, please see:
********** https://github.com/broadinstitute/picard/wiki/Command-Line-Syntax-Transition-For-Users-(Pre-Transition)
**********
********** The command line looks like this in the new syntax:
**********
********** ReorderSam -INPUT /path/bam/sample-ready.bam -OUTPUT /path
********** The command line looks like this in the new syntax:
**********
********** ReorderSam -INPUT /path/bam/sample.bam -OUTPUT /path
**********
ERROR: Unrecognized option: REFERENCE
USAGE: ReorderSam [options]
Documentation: http://broadinstitute.github.io/picard/command-line-overview.html#ReorderSam
Not to be confused with SortSam which sorts a SAM or BAM file with a valid sequence dictionary, ReorderSam reorders
reads in a SAM/BAM file to match the contig ordering in a provided reference file, as determined by exact name matching
of contigs. Reads mapped to contigs absent in the new reference are unmapped. Runs substantially faster if the input is
an indexed BAM file.
Example
java -jar picard.jar ReorderSam \
INPUT=sample.bam \
OUTPUT=reordered.bam \
SEQUENCE_DICTIONARY=reference_with_different_order.dict
Version: 2.21.1-SNAPSHOT
Options:
--help
-h Displays options specific to this tool.
--stdhelp
-H Displays options specific to this tool AND options common to all Picard command line
tools.
--version Displays program version.
INPUT=File
I=File Input file (SAM or BAM) to extract reads from. Required.
OUTPUT=File
O=File Output file (SAM or BAM) to write extracted reads to. Required.
SEQUENCE_DICTIONARY=File
SD=File A Sequence Dictionary for the OUTPUT file (can be read from one of the following file
types (SAM, BAM, VCF, BCF, Interval List, Fasta, or Dict) Required.
ALLOW_INCOMPLETE_DICT_CONCORDANCE=Boolean
S=Boolean If true, allows only a partial overlap of the original contigs with the new reference
sequence contigs. By default, this tool requires a corresponding contig in the new
reference for each read contig Default value: false. This option can be set to 'null' to
clear the default value. Possible values: {true, false}
ALLOW_CONTIG_LENGTH_DISCORDANCE=Boolean
U=Boolean If true, then permits mapping from a read contig to a new reference contig with the same
name but a different length. Highly dangerous, only use if you know what you are doing.
Default value: false. This option can be set to 'null' to clear the default value.
Possible values: {true, false}
TMP_DIR=File One or more directories with space available to be used by this program for temporary
storage of working files Default value: null. This option may be specified 0 or more
times.
VERBOSITY=LogLevel Control verbosity of logging. Default value: INFO. This option can be set to 'null' to
clear the default value. Possible values: {ERROR, WARNING, INFO, DEBUG}
QUIET=Boolean Whether to suppress job-summary info on System.err. Default value: false. This option can
be set to 'null' to clear the default value. Possible values: {true, false}
VALIDATION_STRINGENCY=ValidationStringency
Validation stringency for all SAM files read by this program. Setting stringency to
SILENT can improve performance when processing a BAM file in which variable-length data
(read, qualities, tags) do not otherwise need to be decoded. Default value: STRICT. This
option can be set to 'null' to clear the default value. Possible values: {STRICT, LENIENT,
SILENT}
COMPRESSION_LEVEL=Integer Compression level for all compressed files created (e.g. BAM and VCF). Default value: 5.
This option can be set to 'null' to clear the default value.
MAX_RECORDS_IN_RAM=Integer When writing files that need to be sorted, this will specify the number of records stored
in RAM before spilling to disk. Increasing this number reduces the number of file handles
needed to sort the file, and increases the amount of RAM needed. Default value: 500000.
This option can be set to 'null' to clear the default value.
CREATE_INDEX=Boolean Whether to create a BAM index when writing a coordinate-sorted BAM file. Default value:
false. This option can be set to 'null' to clear the default value. Possible values:
{true, false}
CREATE_MD5_FILE=Boolean Whether to create an MD5 digest for any BAM or FASTQ files created. Default value:
false. This option can be set to 'null' to clear the default value. Possible values:
{true, false}
REFERENCE_SEQUENCE=File
R=File Reference sequence file. Default value: null.
GA4GH_CLIENT_SECRETS=String Google Genomics API client_secrets.json file path. Default value: client_secrets.json.
This option can be set to 'null' to clear the default value.
USE_JDK_DEFLATER=Boolean
USE_JDK_DEFLATER=Boolean Use the JDK Deflater instead of the Intel Deflater for writing compressed output Default
value: false. This option can be set to 'null' to clear the default value. Possible
values: {true, false}
USE_JDK_INFLATER=Boolean
USE_JDK_INFLATER=Boolean Use the JDK Inflater instead of the Intel Inflater for reading compressed input Default
value: false. This option can be set to 'null' to clear the default value. Possible
values: {true, false}
OPTIONS_FILE=File File of OPTION_NAME=value pairs. No positional parameters allowed. Unlike command-line
options, unrecognized options are ignored. A single-valued option set in an options file
may be overridden by a subsequent command-line option. A line starting with '#' is
considered a comment. Required.
' returned non-zero exit status 1.
[2019-10-18T21:14Z] seskscpn129.prim.scp: Uncaught exception occurred
Traceback (most recent call last):
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 26, in run
_do_run(cmd, checks, log_stdout, env=env)
File "/path/anaconda/lib/python3.6/site-packages/bcbio/provenance/do.py", line 106, in _do_run
raise subprocess.CalledProcessError(exitcode, error_msg)
subprocess.CalledProcessError: Command 'set -o pipefail; unset JAVA_HOME && export PATH=/path/anaconda/bin:"$PAT
INFO 2019-10-18 21:14:01 ReorderSam
|
subprocess.CalledProcessError
|
def locate(
visible_markers,
camera_model,
registered_markers_undist,
registered_markers_dist,
):
"""Computes a Surface_Location based on a list of visible markers."""
visible_registered_marker_ids = set(visible_markers.keys()) & set(
registered_markers_undist.keys()
)
# If the surface is defined by 2+ markers, we require 2+ markers to be detected.
# If the surface is defined by 1 marker, we require 1 marker to be detected.
if not visible_registered_marker_ids or len(visible_registered_marker_ids) < min(
2, len(registered_markers_undist)
):
return Surface_Location(detected=False)
visible_verts_dist = np.array(
[visible_markers[id].verts_px for id in visible_registered_marker_ids]
)
registered_verts_undist = np.array(
[
registered_markers_undist[uid].verts_uv
for uid in visible_registered_marker_ids
]
)
registered_verts_dist = np.array(
[registered_markers_dist[uid].verts_uv for uid in visible_registered_marker_ids]
)
visible_verts_dist.shape = (-1, 2)
registered_verts_undist.shape = (-1, 2)
registered_verts_dist.shape = (-1, 2)
dist_img_to_surf_trans, surf_to_dist_img_trans = Surface._find_homographies(
registered_verts_dist, visible_verts_dist
)
if None in (dist_img_to_surf_trans, surf_to_dist_img_trans):
return Surface_Location(detected=False)
visible_verts_undist = camera_model.undistort_points_on_image_plane(
visible_verts_dist
)
img_to_surf_trans, surf_to_img_trans = Surface._find_homographies(
registered_verts_undist, visible_verts_undist
)
if None in (img_to_surf_trans, surf_to_img_trans):
return Surface_Location(detected=False)
return Surface_Location(
True,
dist_img_to_surf_trans,
surf_to_dist_img_trans,
img_to_surf_trans,
surf_to_img_trans,
len(visible_registered_marker_ids),
)
|
def locate(
visible_markers,
camera_model,
registered_markers_undist,
registered_markers_dist,
):
"""Computes a Surface_Location based on a list of visible markers."""
visible_registered_marker_ids = set(visible_markers.keys()) & set(
registered_markers_undist.keys()
)
# If the surface is defined by 2+ markers, we require 2+ markers to be detected.
# If the surface is defined by 1 marker, we require 1 marker to be detected.
if not visible_registered_marker_ids or len(visible_registered_marker_ids) < min(
2, len(registered_markers_undist)
):
return Surface_Location(detected=False)
visible_verts_dist = np.array(
[visible_markers[id].verts_px for id in visible_registered_marker_ids]
)
registered_verts_undist = np.array(
[
registered_markers_undist[uid].verts_uv
for uid in visible_registered_marker_ids
]
)
registered_verts_dist = np.array(
[registered_markers_dist[uid].verts_uv for uid in visible_registered_marker_ids]
)
visible_verts_dist.shape = (-1, 2)
registered_verts_undist.shape = (-1, 2)
registered_verts_dist.shape = (-1, 2)
dist_img_to_surf_trans, surf_to_dist_img_trans = Surface._find_homographies(
registered_verts_dist, visible_verts_dist
)
visible_verts_undist = camera_model.undistort_points_on_image_plane(
visible_verts_dist
)
img_to_surf_trans, surf_to_img_trans = Surface._find_homographies(
registered_verts_undist, visible_verts_undist
)
return Surface_Location(
True,
dist_img_to_surf_trans,
surf_to_dist_img_trans,
img_to_surf_trans,
surf_to_img_trans,
len(visible_registered_marker_ids),
)
|
https://github.com/pupil-labs/pupil/issues/1876
|
2020-05-13 15:27:43,610 - player - [DEBUG] root: Recalculate Surface Cache!
2020-05-13 15:27:43,656 - Background Data Processor - [DEBUG] background_helper: Entering _wrapper
2020-05-13 15:27:43,661 - Background Data Processor - [DEBUG] surface_tracker.surface: Failed to calculate inverse homography with np.inv()! Trying with np.pinv() instead.
2020-05-13 15:27:43,663 - Background Data Processor - [INFO] background_helper: AttributeError: 'NoneType' object has no attribute 'conjugate'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "shared_modules\background_helper.py", line 73, in _wrapper
File "shared_modules\surface_tracker\background_tasks.py", line 189, in data_processing_generator
File "shared_modules\surface_tracker\background_tasks.py", line 176, in handle_sample
File "shared_modules\surface_tracker\offline_utils.py", line 35, in __call__
File "shared_modules\surface_tracker\surface.py", line 323, in locate
File "shared_modules\surface_tracker\surface.py", line 363, in _find_homographies
File "<__array_function__ internals>", line 6, in pinv
File "site-packages\numpy\linalg\linalg.py", line 1969, in pinv
TypeError: loop of ufunc does not support argument 0 of type NoneType which has no callable conjugate method
|
TypeError
|
def _find_homographies(points_A, points_B):
points_A = points_A.reshape((-1, 1, 2))
points_B = points_B.reshape((-1, 1, 2))
B_to_A, mask = cv2.findHomography(points_A, points_B)
# NOTE: cv2.findHomography(A, B) will not produce the inverse of
# cv2.findHomography(B, A)! The errors can actually be quite large, resulting in
# on-screen discrepancies of up to 50 pixels. We try to find the inverse
# analytically instead with fallbacks.
try:
A_to_B = np.linalg.inv(B_to_A)
return A_to_B, B_to_A
except np.linalg.LinAlgError as e:
pass
except Exception as e:
import traceback
exception_msg = traceback.format_exc()
logger.error(exception_msg)
logger.debug(
"Failed to calculate inverse homography with np.inv()! "
"Trying with np.pinv() instead."
)
try:
A_to_B = np.linalg.pinv(B_to_A)
return A_to_B, B_to_A
except np.linalg.LinAlgError as e:
pass
except Exception as e:
import traceback
exception_msg = traceback.format_exc()
logger.error(exception_msg)
logger.warning(
"Failed to calculate inverse homography with np.pinv()! "
"Falling back to inaccurate manual computation!"
)
A_to_B, mask = cv2.findHomography(points_B, points_A)
return A_to_B, B_to_A
|
def _find_homographies(points_A, points_B):
points_A = points_A.reshape((-1, 1, 2))
points_B = points_B.reshape((-1, 1, 2))
B_to_A, mask = cv2.findHomography(points_A, points_B)
# NOTE: cv2.findHomography(A, B) will not produce the inverse of
# cv2.findHomography(B, A)! The errors can actually be quite large, resulting in
# on-screen discrepancies of up to 50 pixels. We try to find the inverse
# analytically instead with fallbacks.
try:
A_to_B = np.linalg.inv(B_to_A)
return A_to_B, B_to_A
except np.linalg.LinAlgError as e:
logger.debug(
"Failed to calculate inverse homography with np.inv()! "
"Trying with np.pinv() instead."
)
try:
A_to_B = np.linalg.pinv(B_to_A)
return A_to_B, B_to_A
except np.linalg.LinAlgError as e:
logger.warning(
"Failed to calculate inverse homography with np.pinv()! "
"Falling back to inaccurate manual computation!"
)
A_to_B, mask = cv2.findHomography(points_B, points_A)
return A_to_B, B_to_A
|
https://github.com/pupil-labs/pupil/issues/1876
|
2020-05-13 15:27:43,610 - player - [DEBUG] root: Recalculate Surface Cache!
2020-05-13 15:27:43,656 - Background Data Processor - [DEBUG] background_helper: Entering _wrapper
2020-05-13 15:27:43,661 - Background Data Processor - [DEBUG] surface_tracker.surface: Failed to calculate inverse homography with np.inv()! Trying with np.pinv() instead.
2020-05-13 15:27:43,663 - Background Data Processor - [INFO] background_helper: AttributeError: 'NoneType' object has no attribute 'conjugate'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "shared_modules\background_helper.py", line 73, in _wrapper
File "shared_modules\surface_tracker\background_tasks.py", line 189, in data_processing_generator
File "shared_modules\surface_tracker\background_tasks.py", line 176, in handle_sample
File "shared_modules\surface_tracker\offline_utils.py", line 35, in __call__
File "shared_modules\surface_tracker\surface.py", line 323, in locate
File "shared_modules\surface_tracker\surface.py", line 363, in _find_homographies
File "<__array_function__ internals>", line 6, in pinv
File "site-packages\numpy\linalg\linalg.py", line 1969, in pinv
TypeError: loop of ufunc does not support argument 0 of type NoneType which has no callable conjugate method
|
TypeError
|
def _wrapper(self, pipe, _should_terminate_flag, generator, *args, **kwargs):
"""Executed in background, pipes generator results to foreground
All exceptions are caught, forwarded to the foreground, and raised in
`Task_Proxy.fetch()`. This allows users to handle failure gracefully
as well as raising their own exceptions in the background task.
"""
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible
# for shutting down the background process properly
signal.signal(signal.SIGINT, interrupt_handler)
try:
self._change_logging_behavior()
logger.debug("Entering _wrapper")
for datum in generator(*args, **kwargs):
if _should_terminate_flag.value:
raise EarlyCancellationError("Task was cancelled")
pipe.send(datum)
pipe.send(StopIteration())
except BrokenPipeError:
# process canceled from outside
pass
except Exception as e:
try:
pipe.send(e)
except BrokenPipeError:
# process canceled from outside
pass
if not isinstance(e, EarlyCancellationError):
import traceback
logger.info(traceback.format_exc())
finally:
pipe.close()
logger.debug("Exiting _wrapper")
|
def _wrapper(self, pipe, _should_terminate_flag, generator, *args, **kwargs):
"""Executed in background, pipes generator results to foreground
All exceptions are caught, forwarded to the foreground, and raised in
`Task_Proxy.fetch()`. This allows users to handle failure gracefully
as well as raising their own exceptions in the background task.
"""
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible
# for shutting down the background process properly
signal.signal(signal.SIGINT, interrupt_handler)
try:
self._change_logging_behavior()
logger.debug("Entering _wrapper")
for datum in generator(*args, **kwargs):
if _should_terminate_flag.value:
raise EarlyCancellationError("Task was cancelled")
pipe.send(datum)
except Exception as e:
pipe.send(e)
if not isinstance(e, EarlyCancellationError):
import traceback
logger.info(traceback.format_exc())
else:
pipe.send(StopIteration())
finally:
pipe.close()
logger.debug("Exiting _wrapper")
|
https://github.com/pupil-labs/pupil/issues/1847
|
Traceback (most recent call last):
File "/Users/papr/work/pupil/pupil_src/launchables/player.py", line 679, in player
g_pool.plugins.clean()
File "/Users/papr/work/pupil/pupil_src/shared_modules/plugin.py", line 396, in clean
p.cleanup()
File "/Users/papr/work/pupil/pupil_src/shared_modules/surface_tracker/surface_tracker_offline.py", line 560, in cleanup
for proxy in self.export_proxies:
RuntimeError: Set changed size during iteration
|
RuntimeError
|
def cleanup(self):
super().cleanup()
self._save_marker_cache()
for proxy in self.export_proxies.copy():
proxy.cancel()
self.export_proxies.remove(proxy)
|
def cleanup(self):
super().cleanup()
self._save_marker_cache()
for proxy in self.export_proxies:
proxy.cancel()
self.export_proxies.remove(proxy)
|
https://github.com/pupil-labs/pupil/issues/1847
|
Traceback (most recent call last):
File "/Users/papr/work/pupil/pupil_src/launchables/player.py", line 679, in player
g_pool.plugins.clean()
File "/Users/papr/work/pupil/pupil_src/shared_modules/plugin.py", line 396, in clean
p.cleanup()
File "/Users/papr/work/pupil/pupil_src/shared_modules/surface_tracker/surface_tracker_offline.py", line 560, in cleanup
for proxy in self.export_proxies:
RuntimeError: Set changed size during iteration
|
RuntimeError
|
def scan_path_gaze_for_frame(self, frame):
if self.timeframe == 0.0:
return None
if not self._gaze_data_store.is_valid or not self._gaze_data_store.is_complete:
if not self.is_active and self.g_pool.app == "player":
self._trigger_immediate_scan_path_calculation()
return None
timestamp_cutoff = frame.timestamp - self.timeframe
gaze_data = self._gaze_data_store.gaze_data
gaze_data = gaze_data[gaze_data.frame_index == frame.index]
gaze_data = gaze_data[gaze_data.timestamp > timestamp_cutoff]
return gaze_data
|
def scan_path_gaze_for_frame(self, frame):
if self.timeframe == 0.0:
return None
if not self._gaze_data_store.is_valid or not self._gaze_data_store.is_complete:
if not self.is_active:
self._trigger_immediate_scan_path_calculation()
return None
timestamp_cutoff = frame.timestamp - self.timeframe
gaze_data = self._gaze_data_store.gaze_data
gaze_data = gaze_data[gaze_data.frame_index == frame.index]
gaze_data = gaze_data[gaze_data.timestamp > timestamp_cutoff]
return gaze_data
|
https://github.com/pupil-labs/pupil/issues/1839
|
2020-03-26 19:12:22,171 - Export World Video - [DEBUG] scan_path.controller: ScanPathController._on_preproc_started
2020-03-26 19:12:22,172 - Export World Video - [INFO] background_helper: Traceback (most recent call last):
File "shared_modules/observable.py", line 256, in call_all_observers
File "shared_modules/observable.py", line 304, in __call__
File "shared_modules/vis_polyline.py", line 153, in _update_scan_path_ui
AttributeError: 'Vis_Polyline' object has no attribute 'menu_icon'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "shared_modules/background_helper.py", line 73, in _wrapper
File "shared_modules/video_export/plugins/world_video_exporter.py", line 275, in _export_world_video
File "shared_modules/vis_polyline.py", line 146, in recent_events
File "shared_modules/vis_polyline.py", line 180, in _draw_polyline_path
File "shared_modules/scan_path/controller.py", line 109, in scan_path_gaze_for_frame
File "shared_modules/scan_path/controller.py", line 140, in _trigger_immediate_scan_path_calculation
File "shared_modules/scan_path/tasks/preprocessing.py", line 76, in start
File "shared_modules/observable.py", line 251, in __call__
File "shared_modules/observable.py", line 265, in call_all_observers
File "shared_modules/observable.py", line 256, in call_all_observers
File "shared_modules/observable.py", line 304, in __call__
File "shared_modules/scan_path/controller.py", line 147, in _on_preproc_started
File "shared_modules/observable.py", line 251, in __call__
File "shared_modules/observable.py", line 267, in call_all_observers
observable.ObserverError: An observer raised an exception.
2020-03-26 19:12:22,172 - Export World Video - [DEBUG] background_helper: Exiting _wrapper
2020-03-26 19:12:22,182 - player - [ERROR] launchables.player: Process Player crashed with trace:
Traceback (most recent call last):
File "launchables/player.py", line 606, in player
File "shared_modules/task_manager.py", line 84, in recent_events
File "shared_modules/task_manager.py", line 88, in _manage_current_tasks
File "shared_modules/task_manager.py", line 112, in _update_running_tasks
File "shared_modules/task_manager.py", line 258, in most_recent_result_or_none
File "shared_modules/background_helper.py", line 115, in fetch
observable.ObserverError: An observer raised an exception.
2020-03-26 19:12:22,182 - player - [INFO] launchables.player: Process shutting down.
|
AttributeError
|
def on_click(self, pos: Vec2, button: int, action: int) -> bool:
if not self.has_frame or self.model.is_invalid():
return False
if action == glfw.GLFW_PRESS:
clicked_handle = self.get_handle_at(pos)
if clicked_handle != self.active_handle:
self.active_handle = clicked_handle
return True
elif action == glfw.GLFW_RELEASE:
if self.active_handle != Handle.NONE:
self.active_handle = Handle.NONE
return True
return False
|
def on_click(self, pos: Vec2, button: int, action: int) -> bool:
if action == glfw.GLFW_PRESS:
clicked_handle = self.get_handle_at(pos)
if clicked_handle != self.active_handle:
self.active_handle = clicked_handle
return True
elif action == glfw.GLFW_RELEASE:
if self.active_handle != Handle.NONE:
self.active_handle = Handle.NONE
return True
return False
|
https://github.com/pupil-labs/pupil/issues/1825
|
Traceback (most recent call last):
File "/Users/papr/work/pupil/pupil_src/launchables/eye.py", line 633, in eye
if plugin.on_click(pos, button, action):
File "/Users/papr/work/pupil/pupil_src/shared_modules/roi.py", line 287, in on_click
clicked_handle = self.get_handle_at(pos)
File "/Users/papr/work/pupil/pupil_src/shared_modules/roi.py", line 250, in get_handle_at
for handle in self._all_points.keys():
AttributeError: 'Roi' object has no attribute '_all_points'
|
AttributeError
|
def _load_recorded_calibrations(self):
notifications = fm.load_pldata_file(self._rec_dir, "notify")
for topic, data in zip(notifications.topics, notifications.data):
if topic == "notify.calibration.calibration_data":
try:
calib_result = model.CalibrationResult(
mapping_plugin_name=data["mapper_name"],
# data["mapper_args"] is a fm.Frozen_Dict and causes
# https://github.com/pupil-labs/pupil/issues/1498
# if not converted to a normal dict
mapper_args=dict(data["mapper_args"]),
)
except KeyError:
# notifications from old recordings will not have these fields!
continue
mapping_method = "2d" if "2d" in data["calibration_method"] else "3d"
# the unique id needs to be the same at every start or otherwise the
# same calibrations would be added again and again. The timestamp is
# the easiest datum that differs between calibrations but is the same
# for every start
unique_id = model.Calibration.create_unique_id_from_string(
str(data["timestamp"])
)
calibration = model.Calibration(
unique_id=unique_id,
name=make_unique.by_number_at_end(
"Recorded Calibration", self.item_names
),
recording_uuid=self._recording_uuid,
mapping_method=mapping_method,
frame_index_range=self._get_recording_index_range(),
minimum_confidence=0.8,
is_offline_calibration=False,
result=calib_result,
)
self.add(calibration)
|
def _load_recorded_calibrations(self):
notifications = fm.load_pldata_file(self._rec_dir, "notify")
for topic, data in zip(notifications.topics, notifications.data):
if topic == "notify.calibration.calibration_data":
try:
calib_result = model.CalibrationResult(
mapping_plugin_name=data["mapper_name"],
mapper_args=data["mapper_args"],
)
except KeyError:
# notifications from old recordings will not have these fields!
continue
mapping_method = "2d" if "2d" in data["calibration_method"] else "3d"
# the unique id needs to be the same at every start or otherwise the
# same calibrations would be added again and again. The timestamp is
# the easiest datum that differs between calibrations but is the same
# for every start
unique_id = model.Calibration.create_unique_id_from_string(
str(data["timestamp"])
)
calibration = model.Calibration(
unique_id=unique_id,
name=make_unique.by_number_at_end(
"Recorded Calibration", self.item_names
),
recording_uuid=self._recording_uuid,
mapping_method=mapping_method,
frame_index_range=self._get_recording_index_range(),
minimum_confidence=0.8,
is_offline_calibration=False,
result=calib_result,
)
self.add(calibration)
|
https://github.com/pupil-labs/pupil/issues/1498
|
player - [INFO] gaze_producer.controller.gaze_mapper_controller: Start gaze mapping for 'Default Gaze Mapper'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
File "/Users/papr/work/pupil/pupil_src/shared_modules/file_methods.py", line 217, in __setitem__
raise NotImplementedError("Invalid operation")
NotImplementedError: Invalid operation
|
NotImplementedError
|
def player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version):
# general imports
from time import sleep
import logging
import errno
from glob import glob
from copy import deepcopy
from time import time
# networking
import zmq
import zmq_tools
import numpy as np
# zmq ipc setup
zmq_ctx = zmq.Context()
ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# log setup
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.INFO)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
try:
# imports
from file_methods import Persistent_Dict, load_object
# display
import glfw
# check versions for our own depedencies as they are fast-changing
from pyglui import __version__ as pyglui_version
from pyglui import ui, cygl
from pyglui.cygl.utils import Named_Texture, RGBA
import gl_utils
# capture
from video_capture import File_Source, EndofVideoFileError
# helpers/utils
from version_utils import VersionFormat
from methods import normalize, denormalize, delta_t, get_system_info
from player_methods import correlate_data, is_pupil_rec_dir, load_meta_info
# Plug-ins
from plugin import Plugin, Plugin_List, import_runtime_plugins
from plugin_manager import Plugin_Manager
from vis_circle import Vis_Circle
from vis_cross import Vis_Cross
from vis_polyline import Vis_Polyline
from vis_light_points import Vis_Light_Points
from vis_watermark import Vis_Watermark
from vis_fixation import Vis_Fixation
from vis_scan_path import Vis_Scan_Path
from vis_eye_video_overlay import Vis_Eye_Video_Overlay
from seek_control import Seek_Control
from video_export_launcher import Video_Export_Launcher
from offline_surface_tracker import Offline_Surface_Tracker
# from marker_auto_trim_marks import Marker_Auto_Trim_Marks
from fixation_detector import Offline_Fixation_Detector
from batch_exporter import Batch_Exporter, Batch_Export
from log_display import Log_Display
from annotations import Annotation_Player
from raw_data_exporter import Raw_Data_Exporter
from log_history import Log_History
from pupil_producers import Pupil_From_Recording, Offline_Pupil_Detection
from gaze_producers import Gaze_From_Recording, Offline_Calibration
from system_graphs import System_Graphs
assert VersionFormat(pyglui_version) >= VersionFormat("1.11"), (
"pyglui out of date, please upgrade to newest version"
)
runtime_plugins = import_runtime_plugins(os.path.join(user_dir, "plugins"))
system_plugins = [
Log_Display,
Seek_Control,
Plugin_Manager,
System_Graphs,
Batch_Export,
]
user_plugins = [
Vis_Circle,
Vis_Fixation,
Vis_Polyline,
Vis_Light_Points,
Vis_Cross,
Vis_Watermark,
Vis_Eye_Video_Overlay,
Vis_Scan_Path,
Offline_Fixation_Detector,
Batch_Exporter,
Video_Export_Launcher,
Offline_Surface_Tracker,
Raw_Data_Exporter,
Annotation_Player,
Log_History,
Pupil_From_Recording,
Offline_Pupil_Detection,
Gaze_From_Recording,
Offline_Calibration,
] + runtime_plugins
plugins = system_plugins + user_plugins
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
if gl_utils.is_window_visible(window):
hdpi_factor = float(
glfw.glfwGetFramebufferSize(window)[0]
/ glfw.glfwGetWindowSize(window)[0]
)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = (
w - int(icon_bar_width * g_pool.gui.scale),
h,
)
g_pool.gui.update_window(*window_size)
g_pool.gui.collect_menus()
for p in g_pool.plugins:
p.on_window_resize(window, *g_pool.camera_render_size)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
hdpi_factor = float(
glfw.glfwGetFramebufferSize(window)[0]
/ glfw.glfwGetWindowSize(window)[0]
)
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
for x in range(count):
new_rec_dir = paths[x].decode("utf-8")
if is_pupil_rec_dir(new_rec_dir):
logger.debug("Starting new session with '{}'".format(new_rec_dir))
ipc_pub.notify(
{
"subject": "player_drop_process.should_start",
"rec_dir": new_rec_dir,
}
)
glfw.glfwSetWindowShouldClose(window, True)
else:
logger.error(
"'{}' is not a valid pupil recording".format(new_rec_dir)
)
tick = delta_t()
def get_dt():
return next(tick)
video_path = [
f
for f in glob(os.path.join(rec_dir, "world.*"))
if os.path.splitext(f)[1] in (".mp4", ".mkv", ".avi", ".h264", ".mjpeg")
][0]
pupil_data_path = os.path.join(rec_dir, "pupil_data")
meta_info = load_meta_info(rec_dir)
# log info about Pupil Platform and Platform in player.log
logger.info("Application Version: {}".format(app_version))
logger.info("System Info: {}".format(get_system_info()))
icon_bar_width = 50
window_size = None
# create container for globally scoped vars
g_pool = Global_Container()
g_pool.app = "player"
g_pool.zmq_ctx = zmq_ctx
g_pool.ipc_pub = ipc_pub
g_pool.ipc_pub_url = ipc_pub_url
g_pool.ipc_sub_url = ipc_sub_url
g_pool.ipc_push_url = ipc_push_url
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
g_pool.camera_render_size = None
# sets itself to g_pool.capture
File_Source(g_pool, video_path)
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(user_dir, "user_settings_player")
)
if VersionFormat(session_settings.get("version", "0.0")) != app_version:
logger.info(
"Session setting are a different version of this app. I will not use those."
)
session_settings.clear()
width, height = session_settings.get("window_size", g_pool.capture.frame_size)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwInit()
main_window = glfw.glfwCreateWindow(
width,
height,
"Pupil Player: "
+ meta_info["Recording Name"]
+ " - "
+ rec_dir.split(os.path.sep)[-1],
None,
None,
)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
g_pool.main_window = main_window
def set_scale(new_scale):
hdpi_factor = (
float(glfw.glfwGetFramebufferSize(main_window)[0])
/ glfw.glfwGetWindowSize(main_window)[0]
)
g_pool.gui_user_scale = new_scale
window_size = (
g_pool.camera_render_size[0]
+ int(icon_bar_width * g_pool.gui_user_scale * hdpi_factor),
glfw.glfwGetFramebufferSize(main_window)[1],
)
logger.warning(icon_bar_width * g_pool.gui_user_scale * hdpi_factor)
glfw.glfwSetWindowSize(main_window, *window_size)
# load pupil_positions, gaze_positions
g_pool.pupil_data = load_object(pupil_data_path)
g_pool.binocular = meta_info.get("Eye Mode", "monocular") == "binocular"
g_pool.version = app_version
g_pool.timestamps = g_pool.capture.timestamps
g_pool.get_timestamp = lambda: 0.0
g_pool.new_seek = True
g_pool.user_dir = user_dir
g_pool.rec_dir = rec_dir
g_pool.meta_info = meta_info
g_pool.min_data_confidence = session_settings.get("min_data_confidence", 0.6)
g_pool.pupil_positions = []
g_pool.gaze_positions = []
g_pool.fixations = []
g_pool.notifications_by_frame = correlate_data(
g_pool.pupil_data["notifications"], g_pool.timestamps
)
g_pool.pupil_positions_by_frame = [
[] for x in g_pool.timestamps
] # populated by producer`
g_pool.gaze_positions_by_frame = [
[] for x in g_pool.timestamps
] # populated by producer
g_pool.fixations_by_frame = [
[] for x in g_pool.timestamps
] # populated by the fixation detector plugin
# def next_frame(_):
# try:
# g_pool.capture.seek_to_frame(g_pool.capture.get_frame_index() + 1)
# except(FileSeekError):
# logger.warning("Could not seek to next frame.")
# else:
# g_pool.new_seek = True
# def prev_frame(_):
# try:
# g_pool.capture.seek_to_frame(g_pool.capture.get_frame_index() - 1)
# except(FileSeekError):
# logger.warning("Could not seek to previous frame.")
# else:
# g_pool.new_seek = True
# def toggle_play(new_state):
# if g_pool.capture.get_frame_index() >= g_pool.capture.get_frame_count()-5:
# g_pool.capture.seek_to_frame(1) # avoid pause set by hitting trimmark pause.
# logger.warning("End of video - restart at beginning.")
# g_pool.capture.play = new_state
def set_data_confidence(new_confidence):
g_pool.min_data_confidence = new_confidence
notification = {"subject": "min_data_confidence_changed"}
notification["_notify_time_"] = time() + 0.8
g_pool.ipc_pub.notify(notification)
def open_plugin(plugin):
if plugin == "Select to load":
return
g_pool.plugins.add(plugin)
def purge_plugins():
for p in g_pool.plugins:
if p.__class__ in user_plugins:
p.alive = False
g_pool.plugins.clean()
def do_export(_):
export_range = g_pool.seek_control.trim_left, g_pool.seek_control.trim_right
export_dir = os.path.join(
g_pool.rec_dir, "exports", "{}-{}".format(*export_range)
)
try:
os.makedirs(export_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.error("Could not create export dir")
raise e
else:
overwrite_warning = "Previous export for range [{}-{}] already exsits - overwriting."
logger.warning(overwrite_warning.format(*export_range))
else:
logger.info('Created export dir at "{}"'.format(export_dir))
notification = {
"subject": "should_export",
"range": export_range,
"export_dir": export_dir,
}
g_pool.ipc_pub.notify(notification)
def reset_restart():
logger.warning("Resetting all settings and restarting Player.")
glfw.glfwSetWindowShouldClose(main_window, True)
ipc_pub.notify({"subject": "clear_settings_process.should_start"})
ipc_pub.notify(
{
"subject": "player_process.should_start",
"rec_dir": rec_dir,
"delay": 2.0,
}
)
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0))
g_pool.timelines.horizontal_constraint = g_pool.menubar
g_pool.user_timelines = ui.Timeline_Menu(
"User Timelines", pos=(0.0, -150.0), size=(0.0, 0.0), header_pos="headline"
)
g_pool.user_timelines.color = RGBA(a=0.0)
g_pool.user_timelines.collapsed = True
# add container that constaints itself to the seekbar height
vert_constr = ui.Container((0, 0), (0, -50.0), (0, 0))
vert_constr.append(g_pool.user_timelines)
g_pool.timelines.append(vert_constr)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Button(
"Reset window size",
lambda: glfw.glfwSetWindowSize(
main_window,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
),
)
)
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2] + list(np.arange(1.5, 5.1, 0.5)),
label="Interface Size",
)
)
general_settings.append(
ui.Info_Text("Player Version: {}".format(g_pool.version))
)
general_settings.append(
ui.Info_Text(
"Capture Version: {}".format(meta_info["Capture Software Version"])
)
)
general_settings.append(
ui.Info_Text(
"Data Format Version: {}".format(meta_info["Data Format Version"])
)
)
general_settings.append(
ui.Slider(
"min_data_confidence",
g_pool,
setter=set_data_confidence,
step=0.05,
min=0.0,
max=1.0,
label="Confidence threshold",
)
)
general_settings.append(
ui.Button("Restart with default settings", reset_restart)
)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
user_plugin_separator = ui.Separator()
user_plugin_separator.order = 0.35
g_pool.iconbar.append(user_plugin_separator)
g_pool.quickbar = ui.Stretching_Menu("Quick Bar", (0, 100), (100, -100))
g_pool.export_button = ui.Thumb(
"export",
label=chr(0xE2C4),
getter=lambda: False,
setter=do_export,
hotkey="e",
label_font="pupil_icons",
)
g_pool.quickbar.extend([g_pool.export_button])
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.timelines)
g_pool.gui.append(g_pool.iconbar)
g_pool.gui.append(g_pool.quickbar)
# we always load these plugins
default_plugins = [
("Plugin_Manager", {}),
("Seek_Control", {}),
("Log_Display", {}),
("Vis_Scan_Path", {}),
("Vis_Polyline", {}),
("Vis_Circle", {}),
("System_Graphs", {}),
("Video_Export_Launcher", {}),
("Pupil_From_Recording", {}),
("Gaze_From_Recording", {}),
]
g_pool.plugins = Plugin_List(
g_pool, session_settings.get("loaded_plugins", default_plugins)
)
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
g_pool.gui.configuration = session_settings.get("ui_config", {})
# gl_state settings
gl_utils.basic_gl_setup()
g_pool.image_tex = Named_Texture()
# trigger on_resize
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
def handle_notifications(n):
subject = n["subject"]
if subject == "start_plugin":
g_pool.plugins.add(
g_pool.plugin_by_name[n["name"]], args=n.get("args", {})
)
elif subject.startswith("meta.should_doc"):
ipc_pub.notify(
{"subject": "meta.doc", "actor": g_pool.app, "doc": player.__doc__}
)
for p in g_pool.plugins:
if (
p.on_notify.__doc__
and p.__class__.on_notify != Plugin.on_notify
):
ipc_pub.notify(
{
"subject": "meta.doc",
"actor": p.class_name,
"doc": p.on_notify.__doc__,
}
)
while not glfw.glfwWindowShouldClose(main_window):
# fetch newest notifications
new_notifications = []
while notify_sub.new_data:
t, n = notify_sub.recv()
new_notifications.append(n)
# notify each plugin if there are new notifications:
for n in new_notifications:
handle_notifications(n)
for p in g_pool.plugins:
p.on_notify(n)
# grab new frame
if g_pool.capture.play or g_pool.new_seek:
g_pool.new_seek = False
try:
new_frame = g_pool.capture.get_frame()
except EndofVideoFileError:
# end of video logic: pause at last frame.
g_pool.capture.play = False
logger.warning("end of video")
frame = new_frame.copy()
events = {}
events["frame"] = frame
# report time between now and the last loop interation
events["dt"] = get_dt()
# pupil and gaze positions are added by their respective producer plugins
events["pupil_positions"] = []
events["gaze_positions"] = []
# allow each Plugin to do its work.
for p in g_pool.plugins:
p.recent_events(events)
# check if a plugin need to be destroyed
g_pool.plugins.clean()
glfw.glfwMakeContextCurrent(main_window)
# render visual feedback from loaded plugins
if gl_utils.is_window_visible(main_window):
gl_utils.glViewport(0, 0, *g_pool.camera_render_size)
g_pool.capture._recent_frame = frame
g_pool.capture.gl_display()
for p in g_pool.plugins:
p.gl_display()
gl_utils.glViewport(0, 0, *window_size)
unused_elements = g_pool.gui.update()
for b in unused_elements.buttons:
button, action, mods = b
pos = glfw.glfwGetCursorPos(main_window)
pos = normalize(pos, g_pool.camera_render_size)
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_click(pos, button, action)
for key, scancode, action, mods in unused_elements.keys:
for p in g_pool.plugins:
p.on_key(key, scancode, action, mods)
for char_ in unused_elements.chars:
for p in g_pool.plugins:
p.on_char(char_)
glfw.glfwSwapBuffers(main_window)
# present frames at appropriate speed
g_pool.capture.wait(frame)
glfw.glfwPollEvents()
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
session_settings["min_data_confidence"] = g_pool.min_data_confidence
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["window_size"] = glfw.glfwGetWindowSize(main_window)
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_settings["version"] = str(g_pool.version)
session_settings.close()
# de-init all running plugins
for p in g_pool.plugins:
p.alive = False
g_pool.plugins.clean()
g_pool.capture.cleanup()
g_pool.gui.terminate()
glfw.glfwDestroyWindow(main_window)
except:
import traceback
trace = traceback.format_exc()
logger.error("Process Player crashed with trace:\n{}".format(trace))
finally:
logger.info("Process shutting down.")
ipc_pub.notify({"subject": "player_process.stopped"})
sleep(1.0)
|
def player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version):
# general imports
from time import sleep
import logging
import errno
from glob import glob
from copy import deepcopy
from time import time
# networking
import zmq
import zmq_tools
import numpy as np
# zmq ipc setup
zmq_ctx = zmq.Context()
ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# log setup
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.INFO)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
try:
# imports
from file_methods import Persistent_Dict, load_object
# display
import glfw
# check versions for our own depedencies as they are fast-changing
from pyglui import __version__ as pyglui_version
from pyglui import ui, cygl
from pyglui.cygl.utils import Named_Texture, RGBA
import gl_utils
# capture
from video_capture import File_Source, EndofVideoFileError
# helpers/utils
from version_utils import VersionFormat
from methods import normalize, denormalize, delta_t, get_system_info
from player_methods import correlate_data, is_pupil_rec_dir, load_meta_info
# Plug-ins
from plugin import Plugin, Plugin_List, import_runtime_plugins
from plugin_manager import Plugin_Manager
from vis_circle import Vis_Circle
from vis_cross import Vis_Cross
from vis_polyline import Vis_Polyline
from vis_light_points import Vis_Light_Points
from vis_watermark import Vis_Watermark
from vis_fixation import Vis_Fixation
from vis_scan_path import Vis_Scan_Path
from vis_eye_video_overlay import Vis_Eye_Video_Overlay
from seek_control import Seek_Control
from video_export_launcher import Video_Export_Launcher
from offline_surface_tracker import Offline_Surface_Tracker
# from marker_auto_trim_marks import Marker_Auto_Trim_Marks
from fixation_detector import Offline_Fixation_Detector
# from batch_exporter import Batch_Exporter
from log_display import Log_Display
from annotations import Annotation_Player
from raw_data_exporter import Raw_Data_Exporter
from log_history import Log_History
from pupil_producers import Pupil_From_Recording, Offline_Pupil_Detection
from gaze_producers import Gaze_From_Recording, Offline_Calibration
from system_graphs import System_Graphs
assert VersionFormat(pyglui_version) >= VersionFormat("1.10"), (
"pyglui out of date, please upgrade to newest version"
)
runtime_plugins = import_runtime_plugins(os.path.join(user_dir, "plugins"))
system_plugins = [Log_Display, Seek_Control, Plugin_Manager, System_Graphs]
user_plugins = [
Vis_Circle,
Vis_Fixation,
Vis_Polyline,
Vis_Light_Points,
Vis_Cross,
Vis_Watermark,
Vis_Eye_Video_Overlay,
Vis_Scan_Path,
Offline_Fixation_Detector,
Video_Export_Launcher,
Offline_Surface_Tracker,
Raw_Data_Exporter,
Annotation_Player,
Log_History,
Pupil_From_Recording,
Offline_Pupil_Detection,
Gaze_From_Recording,
Offline_Calibration,
] + runtime_plugins
plugins = system_plugins + user_plugins
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
if gl_utils.is_window_visible(window):
hdpi_factor = float(
glfw.glfwGetFramebufferSize(window)[0]
/ glfw.glfwGetWindowSize(window)[0]
)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = (
w - int(icon_bar_width * g_pool.gui.scale),
h,
)
g_pool.gui.update_window(*window_size)
g_pool.gui.collect_menus()
for p in g_pool.plugins:
p.on_window_resize(window, *g_pool.camera_render_size)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
hdpi_factor = float(
glfw.glfwGetFramebufferSize(window)[0]
/ glfw.glfwGetWindowSize(window)[0]
)
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
for x in range(count):
new_rec_dir = paths[x].decode("utf-8")
if is_pupil_rec_dir(new_rec_dir):
logger.debug("Starting new session with '{}'".format(new_rec_dir))
ipc_pub.notify(
{
"subject": "player_drop_process.should_start",
"rec_dir": new_rec_dir,
}
)
glfw.glfwSetWindowShouldClose(window, True)
else:
logger.error(
"'{}' is not a valid pupil recording".format(new_rec_dir)
)
tick = delta_t()
def get_dt():
return next(tick)
video_path = [
f
for f in glob(os.path.join(rec_dir, "world.*"))
if os.path.splitext(f)[1] in (".mp4", ".mkv", ".avi", ".h264", ".mjpeg")
][0]
pupil_data_path = os.path.join(rec_dir, "pupil_data")
meta_info = load_meta_info(rec_dir)
# log info about Pupil Platform and Platform in player.log
logger.info("Application Version: {}".format(app_version))
logger.info("System Info: {}".format(get_system_info()))
icon_bar_width = 50
window_size = None
# create container for globally scoped vars
g_pool = Global_Container()
g_pool.app = "player"
g_pool.zmq_ctx = zmq_ctx
g_pool.ipc_pub = ipc_pub
g_pool.ipc_pub_url = ipc_pub_url
g_pool.ipc_sub_url = ipc_sub_url
g_pool.ipc_push_url = ipc_push_url
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
g_pool.camera_render_size = None
# sets itself to g_pool.capture
File_Source(g_pool, video_path)
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(user_dir, "user_settings_player")
)
if VersionFormat(session_settings.get("version", "0.0")) != app_version:
logger.info(
"Session setting are a different version of this app. I will not use those."
)
session_settings.clear()
width, height = session_settings.get("window_size", g_pool.capture.frame_size)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwInit()
main_window = glfw.glfwCreateWindow(
width,
height,
"Pupil Player: "
+ meta_info["Recording Name"]
+ " - "
+ rec_dir.split(os.path.sep)[-1],
None,
None,
)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
g_pool.main_window = main_window
def set_scale(new_scale):
hdpi_factor = (
float(glfw.glfwGetFramebufferSize(main_window)[0])
/ glfw.glfwGetWindowSize(main_window)[0]
)
g_pool.gui_user_scale = new_scale
window_size = (
g_pool.camera_render_size[0]
+ int(icon_bar_width * g_pool.gui_user_scale * hdpi_factor),
glfw.glfwGetFramebufferSize(main_window)[1],
)
logger.warning(icon_bar_width * g_pool.gui_user_scale * hdpi_factor)
glfw.glfwSetWindowSize(main_window, *window_size)
# load pupil_positions, gaze_positions
g_pool.pupil_data = load_object(pupil_data_path)
g_pool.binocular = meta_info.get("Eye Mode", "monocular") == "binocular"
g_pool.version = app_version
g_pool.timestamps = g_pool.capture.timestamps
g_pool.get_timestamp = lambda: 0.0
g_pool.new_seek = True
g_pool.user_dir = user_dir
g_pool.rec_dir = rec_dir
g_pool.meta_info = meta_info
g_pool.min_data_confidence = session_settings.get("min_data_confidence", 0.6)
g_pool.pupil_positions = []
g_pool.gaze_positions = []
g_pool.fixations = []
g_pool.notifications_by_frame = correlate_data(
g_pool.pupil_data["notifications"], g_pool.timestamps
)
g_pool.pupil_positions_by_frame = [
[] for x in g_pool.timestamps
] # populated by producer`
g_pool.gaze_positions_by_frame = [
[] for x in g_pool.timestamps
] # populated by producer
g_pool.fixations_by_frame = [
[] for x in g_pool.timestamps
] # populated by the fixation detector plugin
# def next_frame(_):
# try:
# g_pool.capture.seek_to_frame(g_pool.capture.get_frame_index() + 1)
# except(FileSeekError):
# logger.warning("Could not seek to next frame.")
# else:
# g_pool.new_seek = True
# def prev_frame(_):
# try:
# g_pool.capture.seek_to_frame(g_pool.capture.get_frame_index() - 1)
# except(FileSeekError):
# logger.warning("Could not seek to previous frame.")
# else:
# g_pool.new_seek = True
# def toggle_play(new_state):
# if g_pool.capture.get_frame_index() >= g_pool.capture.get_frame_count()-5:
# g_pool.capture.seek_to_frame(1) # avoid pause set by hitting trimmark pause.
# logger.warning("End of video - restart at beginning.")
# g_pool.capture.play = new_state
def set_data_confidence(new_confidence):
g_pool.min_data_confidence = new_confidence
notification = {"subject": "min_data_confidence_changed"}
notification["_notify_time_"] = time() + 0.8
g_pool.ipc_pub.notify(notification)
def open_plugin(plugin):
if plugin == "Select to load":
return
g_pool.plugins.add(plugin)
def purge_plugins():
for p in g_pool.plugins:
if p.__class__ in user_plugins:
p.alive = False
g_pool.plugins.clean()
def do_export(_):
export_range = g_pool.seek_control.trim_left, g_pool.seek_control.trim_right
export_dir = os.path.join(
g_pool.rec_dir, "exports", "{}-{}".format(*export_range)
)
try:
os.makedirs(export_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.error("Could not create export dir")
raise e
else:
overwrite_warning = "Previous export for range [{}-{}] already exsits - overwriting."
logger.warning(overwrite_warning.format(*export_range))
else:
logger.info('Created export dir at "{}"'.format(export_dir))
notification = {
"subject": "should_export",
"range": export_range,
"export_dir": export_dir,
}
g_pool.ipc_pub.notify(notification)
def reset_restart():
logger.warning("Resetting all settings and restarting Player.")
glfw.glfwSetWindowShouldClose(main_window, True)
ipc_pub.notify({"subject": "clear_settings_process.should_start"})
ipc_pub.notify(
{
"subject": "player_process.should_start",
"rec_dir": rec_dir,
"delay": 2.0,
}
)
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0))
g_pool.timelines.horizontal_constraint = g_pool.menubar
g_pool.user_timelines = ui.Timeline_Menu(
"User Timelines", pos=(0.0, -150.0), size=(0.0, 0.0), header_pos="headline"
)
g_pool.user_timelines.color = RGBA(a=0.0)
g_pool.user_timelines.collapsed = True
# add container that constaints itself to the seekbar height
vert_constr = ui.Container((0, 0), (0, -50.0), (0, 0))
vert_constr.append(g_pool.user_timelines)
g_pool.timelines.append(vert_constr)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Button(
"Reset window size",
lambda: glfw.glfwSetWindowSize(
main_window,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
),
)
)
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2] + list(np.arange(1.5, 5.1, 0.5)),
label="Interface Size",
)
)
general_settings.append(
ui.Info_Text("Player Version: {}".format(g_pool.version))
)
general_settings.append(
ui.Info_Text(
"Capture Version: {}".format(meta_info["Capture Software Version"])
)
)
general_settings.append(
ui.Info_Text(
"Data Format Version: {}".format(meta_info["Data Format Version"])
)
)
general_settings.append(
ui.Slider(
"min_data_confidence",
g_pool,
setter=set_data_confidence,
step=0.05,
min=0.0,
max=1.0,
label="Confidence threshold",
)
)
general_settings.append(
ui.Button("Restart with default settings", reset_restart)
)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
user_plugin_separator = ui.Separator()
user_plugin_separator.order = 0.35
g_pool.iconbar.append(user_plugin_separator)
g_pool.quickbar = ui.Stretching_Menu("Quick Bar", (0, 100), (100, -100))
g_pool.export_button = ui.Thumb(
"export",
label=chr(0xE2C4),
getter=lambda: False,
setter=do_export,
hotkey="e",
label_font="pupil_icons",
)
g_pool.quickbar.extend([g_pool.export_button])
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.timelines)
g_pool.gui.append(g_pool.iconbar)
g_pool.gui.append(g_pool.quickbar)
# we always load these plugins
default_plugins = [
("Plugin_Manager", {}),
("Seek_Control", {}),
("Log_Display", {}),
("Vis_Scan_Path", {}),
("Vis_Polyline", {}),
("Vis_Circle", {}),
("System_Graphs", {}),
("Video_Export_Launcher", {}),
("Pupil_From_Recording", {}),
("Gaze_From_Recording", {}),
]
g_pool.plugins = Plugin_List(
g_pool, session_settings.get("loaded_plugins", default_plugins)
)
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
g_pool.gui.configuration = session_settings.get("ui_config", {})
# gl_state settings
gl_utils.basic_gl_setup()
g_pool.image_tex = Named_Texture()
# trigger on_resize
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
def handle_notifications(n):
subject = n["subject"]
if subject == "start_plugin":
g_pool.plugins.add(
g_pool.plugin_by_name[n["name"]], args=n.get("args", {})
)
elif subject.startswith("meta.should_doc"):
ipc_pub.notify(
{"subject": "meta.doc", "actor": g_pool.app, "doc": player.__doc__}
)
for p in g_pool.plugins:
if (
p.on_notify.__doc__
and p.__class__.on_notify != Plugin.on_notify
):
ipc_pub.notify(
{
"subject": "meta.doc",
"actor": p.class_name,
"doc": p.on_notify.__doc__,
}
)
while not glfw.glfwWindowShouldClose(main_window):
# fetch newest notifications
new_notifications = []
while notify_sub.new_data:
t, n = notify_sub.recv()
new_notifications.append(n)
# notify each plugin if there are new notifications:
for n in new_notifications:
handle_notifications(n)
for p in g_pool.plugins:
p.on_notify(n)
# grab new frame
if g_pool.capture.play or g_pool.new_seek:
g_pool.new_seek = False
try:
new_frame = g_pool.capture.get_frame()
except EndofVideoFileError:
# end of video logic: pause at last frame.
g_pool.capture.play = False
logger.warning("end of video")
frame = new_frame.copy()
events = {}
events["frame"] = frame
# report time between now and the last loop interation
events["dt"] = get_dt()
# pupil and gaze positions are added by their respective producer plugins
events["pupil_positions"] = []
events["gaze_positions"] = []
# allow each Plugin to do its work.
for p in g_pool.plugins:
p.recent_events(events)
# check if a plugin need to be destroyed
g_pool.plugins.clean()
glfw.glfwMakeContextCurrent(main_window)
# render visual feedback from loaded plugins
if gl_utils.is_window_visible(main_window):
gl_utils.glViewport(0, 0, *g_pool.camera_render_size)
g_pool.capture._recent_frame = frame
g_pool.capture.gl_display()
for p in g_pool.plugins:
p.gl_display()
gl_utils.glViewport(0, 0, *window_size)
unused_elements = g_pool.gui.update()
for b in unused_elements.buttons:
button, action, mods = b
pos = glfw.glfwGetCursorPos(main_window)
pos = normalize(pos, g_pool.camera_render_size)
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_click(pos, button, action)
for key, scancode, action, mods in unused_elements.keys:
for p in g_pool.plugins:
p.on_key(key, scancode, action, mods)
for char_ in unused_elements.chars:
for p in g_pool.plugins:
p.on_char(char_)
glfw.glfwSwapBuffers(main_window)
# present frames at appropriate speed
g_pool.capture.wait(frame)
glfw.glfwPollEvents()
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
session_settings["min_data_confidence"] = g_pool.min_data_confidence
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["window_size"] = glfw.glfwGetWindowSize(main_window)
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_settings["version"] = str(g_pool.version)
session_settings.close()
# de-init all running plugins
for p in g_pool.plugins:
p.alive = False
g_pool.plugins.clean()
g_pool.capture.cleanup()
g_pool.gui.terminate()
glfw.glfwDestroyWindow(main_window)
except:
import traceback
trace = traceback.format_exc()
logger.error("Process Player crashed with trace:\n{}".format(trace))
finally:
logger.info("Process shutting down.")
ipc_pub.notify({"subject": "player_process.stopped"})
sleep(1.0)
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def __init__(self, name, generator, args=(), kwargs={}):
super().__init__()
self._should_terminate_flag = mp.Value(c_bool, 0)
self._completed = False
self._canceled = False
pipe_recv, pipe_send = mp.Pipe(False)
wrapper_args = [pipe_send, self._should_terminate_flag, generator]
wrapper_args.extend(args)
self.process = mp.Process(
target=self._wrapper, name=name, args=wrapper_args, kwargs=kwargs
)
self.process.daemon = True
self.process.start()
self.pipe = pipe_recv
|
def __init__(self, name, generator, args=(), kwargs={}):
super().__init__()
self._should_terminate_flag = mp.Value(c_bool, 0)
self._completed = False
self._canceled = False
pipe_recv, pipe_send = mp.Pipe(False)
wrapper_args = [pipe_send, self._should_terminate_flag, generator]
wrapper_args.extend(args)
self.process = mp.Process(
target=self._wrapper, name=name, args=wrapper_args, kwargs=kwargs
)
self.process.start()
self.pipe = pipe_recv
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def fetch(self):
"""Fetches progress and available results from background"""
while self.pipe.poll(0):
try:
datum = self.pipe.recv()
except EOFError:
logger.debug("Process canceled be user.")
self._canceled = True
self.process = None
return
else:
if isinstance(datum, StopIteration):
self._completed = True
self.process = None
return
elif isinstance(datum, EarlyCancellationError):
self._canceled = True
self.process = None
return
elif isinstance(datum, Exception):
raise datum
else:
yield datum
|
def fetch(self):
"""Fetches progress and available results from background"""
while self.pipe.poll(0):
try:
datum = self.pipe.recv()
except EOFError:
logger.debug("Process canceled be user.")
return
else:
if isinstance(datum, StopIteration):
self._completed = True
return
elif isinstance(datum, EarlyCancellationError):
self._canceled = True
return
elif isinstance(datum, Exception):
raise datum
else:
yield datum
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def get_recording_dirs(data_dir):
"""
You can supply a data folder or any folder
- all folders within will be checked for necessary files
- in order to make a visualization
"""
if is_pupil_rec_dir(data_dir):
yield data_dir
for root, dirs, files in os.walk(data_dir):
for d in dirs:
joined = os.path.join(root, d)
if not d.startswith(".") and is_pupil_rec_dir(joined):
yield joined
|
def get_recording_dirs(data_dir):
"""
You can supply a data folder or any folder
- all folders within will be checked for necessary files
- in order to make a visualization
"""
filtered_recording_dirs = []
if is_pupil_rec_dir(data_dir):
filtered_recording_dirs.append(data_dir)
for root, dirs, files in os.walk(data_dir):
filtered_recording_dirs += [
os.path.join(root, d)
for d in dirs
if not d.startswith(".") and is_pupil_rec_dir(os.path.join(root, d))
]
logger.debug("Filtered Recording Dirs: {}".format(filtered_recording_dirs))
return filtered_recording_dirs
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def __init__(self, g_pool, source_dir="~/", destination_dir="~/"):
super().__init__(g_pool)
self.available_exports = []
self.queued_exports = []
self.active_exports = []
self.destination_dir = os.path.expanduser(destination_dir)
self.source_dir = os.path.expanduser(source_dir)
self.search_task = None
self.worker_count = cpu_count() - 1
logger.info(
"Using a maximum of {} CPUs to process visualizations in parallel...".format(
cpu_count() - 1
)
)
|
def __init__(self, g_pool):
super().__init__(g_pool)
# initialize empty menu
# and load menu configuration of last session
self.menu = None
self.exports = []
self.new_exports = []
self.active_exports = []
default_path = os.path.expanduser("~/work/pupil/recordings/demo")
self.destination_dir = default_path
self.source_dir = default_path
self.run = False
self.workers = [None for x in range(mp.cpu_count())]
logger.info(
"Using a maximum of {} CPUs to process visualizations in parallel...".format(
mp.cpu_count()
)
)
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def init_ui(self):
self.add_menu()
self.menu.label = "Batch Export Recordings"
self.menu.append(
ui.Info_Text(
"Search will walk through the source direcotry recursively and detect available Pupil recordings."
)
)
self.menu.append(
ui.Text_Input(
"source_dir", self, label="Source directory", setter=self.set_src_dir
)
)
self.search_button = ui.Button("Search", self.detect_recordings)
self.menu.append(self.search_button)
self.avail_recs_menu = ui.Growing_Menu("Available Recordings")
self._update_avail_recs_menu()
self.menu.append(self.avail_recs_menu)
self.menu.append(
ui.Text_Input(
"destination_dir",
self,
label="Destination directory",
setter=self.set_dest_dir,
)
)
self.menu.append(ui.Button("Export selected", self.queue_selected))
self.menu.append(ui.Button("Clear search results", self._clear_avail))
self.menu.append(ui.Separator())
self.menu.append(ui.Button("Cancel all exports", self.cancel_all))
|
def init_ui(self):
self.add_menu()
# initialize the menu
self.menu.label = "Batch Export Recordings"
# load the configuration of last session
# add menu to the window
self._update_ui()
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def deinit_ui(self):
self.menu.remove(self.avail_recs_menu)
self.avail_recs_menu = None
self.remove_menu()
|
def deinit_ui(self):
self.remove_menu()
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def set_src_dir(self, new_dir):
new_dir = os.path.expanduser(new_dir)
if os.path.isdir(new_dir):
self.source_dir = new_dir
else:
logger.warning('"{}" is not a directory'.format(new_dir))
return
|
def set_src_dir(self, new_dir):
new_dir = new_dir
self.new_exports = []
self.exports = []
new_dir = os.path.expanduser(new_dir)
if os.path.isdir(new_dir):
self.source_dir = new_dir
self.new_exports = get_recording_dirs(new_dir)
else:
logger.warning('"{}" is not a directory'.format(new_dir))
return
if self.new_exports is []:
logger.warning('"{}" does not contain recordings'.format(new_dir))
return
self.add_exports()
self._update_ui()
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def set_dest_dir(self, new_dir):
new_dir = os.path.expanduser(new_dir)
if os.path.isdir(new_dir):
self.destination_dir = new_dir
else:
logger.warning('"{}" is not a directory'.format(new_dir))
return
|
def set_dest_dir(self, new_dir):
new_dir = new_dir
new_dir = os.path.expanduser(new_dir)
if os.path.isdir(new_dir):
self.destination_dir = new_dir
else:
logger.warning('"{}" is not a directory'.format(new_dir))
return
self.exports = []
self.add_exports()
self._update_ui()
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def recent_events(self, events):
if self.search_task:
recent = [d for d in self.search_task.fetch()]
if recent:
currently_avail = [rec["source"] for rec in self.available_exports]
self.available_exports.extend(
[
{"source": rec, "selected": True}
for rec in recent
if rec not in currently_avail
]
)
self._update_avail_recs_menu()
if self.search_task.completed:
self.search_task = None
self.search_button.outer_label = ""
self.search_button.label = "Search"
|
def recent_events(self, events):
frame = events.get("frame")
if not frame:
return
if self.run:
for i in range(len(self.workers)):
if self.workers[i] and self.workers[i].is_alive():
pass
else:
logger.info("starting new job")
if self.active_exports:
self.workers[i] = self.active_exports.pop(0)
if not self.workers[i].is_alive():
self.workers[i].start()
else:
self.run = False
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def export(
rec_dir,
user_dir,
min_data_confidence,
start_frame=None,
end_frame=None,
plugin_initializers=(),
out_file_path=None,
pre_computed={},
):
PID = str(os.getpid())
logger = logging.getLogger(__name__ + " with pid: " + PID)
start_status = "Starting video export with pid: {}".format(PID)
print(start_status)
yield start_status, 0
try:
update_recording_to_recent(rec_dir)
vis_plugins = sorted(
[
Vis_Circle,
Vis_Cross,
Vis_Polyline,
Vis_Light_Points,
Vis_Watermark,
Vis_Scan_Path,
Vis_Eye_Video_Overlay,
],
key=lambda x: x.__name__,
)
analysis_plugins = [Offline_Fixation_Detector]
user_plugins = sorted(
import_runtime_plugins(os.path.join(user_dir, "plugins")),
key=lambda x: x.__name__,
)
available_plugins = vis_plugins + analysis_plugins + user_plugins
name_by_index = [p.__name__ for p in available_plugins]
plugin_by_name = dict(zip(name_by_index, available_plugins))
update_recording_to_recent(rec_dir)
video_path = [
f
for f in glob(os.path.join(rec_dir, "world.*"))
if os.path.splitext(f)[-1] in (".mp4", ".mkv", ".avi", ".mjpeg")
][0]
pupil_data_path = os.path.join(rec_dir, "pupil_data")
audio_path = os.path.join(rec_dir, "audio.mp4")
meta_info = load_meta_info(rec_dir)
g_pool = Global_Container()
g_pool.app = "exporter"
g_pool.min_data_confidence = min_data_confidence
cap = File_Source(g_pool, video_path)
timestamps = cap.timestamps
# Out file path verification, we do this before but if one uses a separate tool, this will kick in.
if out_file_path is None:
out_file_path = os.path.join(rec_dir, "world_viz.mp4")
else:
file_name = os.path.basename(out_file_path)
dir_name = os.path.dirname(out_file_path)
if not dir_name:
dir_name = rec_dir
if not file_name:
file_name = "world_viz.mp4"
out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))
if os.path.isfile(out_file_path):
logger.warning("Video out file already exsists. I will overwrite!")
os.remove(out_file_path)
logger.debug("Saving Video to {}".format(out_file_path))
# Trim mark verification
# make sure the trim marks (start frame, endframe) make sense:
# We define them like python list slices, thus we can test them like such.
trimmed_timestamps = timestamps[start_frame:end_frame]
if len(trimmed_timestamps) == 0:
warn = "Start and end frames are set such that no video will be exported."
logger.warning(warn)
yield warn, 0.0
return
if start_frame is None:
start_frame = 0
# these two vars are shared with the lauching process and give a job length and progress report.
frames_to_export = len(trimmed_timestamps)
current_frame = 0
exp_info = (
"Will export from frame {} to frame {}. This means I will export {} frames."
)
logger.debug(
exp_info.format(
start_frame, start_frame + frames_to_export, frames_to_export
)
)
# setup of writer
writer = AV_Writer(
out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True
)
cap.seek_to_frame(start_frame)
start_time = time()
g_pool.plugin_by_name = plugin_by_name
g_pool.capture = cap
g_pool.rec_dir = rec_dir
g_pool.user_dir = user_dir
g_pool.meta_info = meta_info
g_pool.timestamps = timestamps
g_pool.delayed_notifications = {}
g_pool.notifications = []
# load pupil_positions, gaze_positions
pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
g_pool.pupil_data = pupil_data
g_pool.pupil_positions = (
pre_computed.get("pupil_positions") or pupil_data["pupil_positions"]
)
g_pool.gaze_positions = (
pre_computed.get("gaze_positions") or pupil_data["gaze_positions"]
)
g_pool.fixations = [] # populated by the fixation detector plugin
g_pool.pupil_positions_by_frame = correlate_data(
g_pool.pupil_positions, g_pool.timestamps
)
g_pool.gaze_positions_by_frame = correlate_data(
g_pool.gaze_positions, g_pool.timestamps
)
g_pool.fixations_by_frame = [
[] for x in g_pool.timestamps
] # populated by the fixation detector plugin
# add plugins
g_pool.plugins = Plugin_List(g_pool, plugin_initializers)
while frames_to_export > current_frame:
try:
frame = cap.get_frame()
except EndofVideoFileError:
break
events = {"frame": frame}
# new positons and events
events["gaze_positions"] = g_pool.gaze_positions_by_frame[frame.index]
events["pupil_positions"] = g_pool.pupil_positions_by_frame[frame.index]
# publish delayed notifiactions when their time has come.
for n in list(g_pool.delayed_notifications.values()):
if n["_notify_time_"] < time():
del n["_notify_time_"]
del g_pool.delayed_notifications[n["subject"]]
g_pool.notifications.append(n)
# notify each plugin if there are new notifactions:
while g_pool.notifications:
n = g_pool.notifications.pop(0)
for p in g_pool.plugins:
p.on_notify(n)
# allow each Plugin to do its work.
for p in g_pool.plugins:
p.recent_events(events)
writer.write_video_frame(frame)
current_frame += 1
yield "Exporting with pid {}".format(PID), current_frame
writer.close()
writer = None
duration = time() - start_time
effective_fps = float(current_frame) / duration
result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
print(result.format(current_frame, out_file_path, duration, effective_fps))
yield "Export done. This took {:.0f} seconds.".format(duration), current_frame
except GeneratorExit:
print("Video export with pid {} was canceled.".format(os.getpid()))
except Exception as e:
from time import sleep
import traceback
trace = traceback.format_exc()
print(
"Process Export (pid: {}) crashed with trace:\n{}".format(
os.getpid(), trace
)
)
yield e
sleep(1.0)
|
def export(
rec_dir,
user_dir,
min_data_confidence,
start_frame=None,
end_frame=None,
plugin_initializers=(),
out_file_path=None,
pre_computed={},
):
logger = logging.getLogger(__name__ + " with pid: " + str(os.getpid()))
start_status = "Starting video export with pid: {}".format(os.getpid())
print(start_status)
yield start_status, 0
try:
update_recording_to_recent(rec_dir)
vis_plugins = sorted(
[
Vis_Circle,
Vis_Cross,
Vis_Polyline,
Vis_Light_Points,
Vis_Watermark,
Vis_Scan_Path,
Vis_Eye_Video_Overlay,
],
key=lambda x: x.__name__,
)
analysis_plugins = [Offline_Fixation_Detector]
user_plugins = sorted(
import_runtime_plugins(os.path.join(user_dir, "plugins")),
key=lambda x: x.__name__,
)
available_plugins = vis_plugins + analysis_plugins + user_plugins
name_by_index = [p.__name__ for p in available_plugins]
plugin_by_name = dict(zip(name_by_index, available_plugins))
update_recording_to_recent(rec_dir)
video_path = [
f
for f in glob(os.path.join(rec_dir, "world.*"))
if os.path.splitext(f)[-1] in (".mp4", ".mkv", ".avi", ".mjpeg")
][0]
pupil_data_path = os.path.join(rec_dir, "pupil_data")
audio_path = os.path.join(rec_dir, "audio.mp4")
meta_info = load_meta_info(rec_dir)
g_pool = Global_Container()
g_pool.app = "exporter"
g_pool.min_data_confidence = min_data_confidence
cap = File_Source(g_pool, video_path)
timestamps = cap.timestamps
# Out file path verification, we do this before but if one uses a separate tool, this will kick in.
if out_file_path is None:
out_file_path = os.path.join(rec_dir, "world_viz.mp4")
else:
file_name = os.path.basename(out_file_path)
dir_name = os.path.dirname(out_file_path)
if not dir_name:
dir_name = rec_dir
if not file_name:
file_name = "world_viz.mp4"
out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))
if os.path.isfile(out_file_path):
logger.warning("Video out file already exsists. I will overwrite!")
os.remove(out_file_path)
logger.debug("Saving Video to {}".format(out_file_path))
# Trim mark verification
# make sure the trim marks (start frame, endframe) make sense:
# We define them like python list slices, thus we can test them like such.
trimmed_timestamps = timestamps[start_frame:end_frame]
if len(trimmed_timestamps) == 0:
warn = "Start and end frames are set such that no video will be exported."
logger.warning(warn)
yield warn, 0.0
return
if start_frame is None:
start_frame = 0
# these two vars are shared with the lauching process and give a job length and progress report.
frames_to_export = len(trimmed_timestamps)
current_frame = 0
exp_info = (
"Will export from frame {} to frame {}. This means I will export {} frames."
)
logger.debug(
exp_info.format(
start_frame, start_frame + frames_to_export, frames_to_export
)
)
# setup of writer
writer = AV_Writer(
out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True
)
cap.seek_to_frame(start_frame)
start_time = time()
g_pool.plugin_by_name = plugin_by_name
g_pool.capture = cap
g_pool.rec_dir = rec_dir
g_pool.user_dir = user_dir
g_pool.meta_info = meta_info
g_pool.timestamps = timestamps
g_pool.delayed_notifications = {}
g_pool.notifications = []
# load pupil_positions, gaze_positions
pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
g_pool.pupil_data = pupil_data
g_pool.pupil_positions = (
pre_computed.get("pupil_positions") or pupil_data["pupil_positions"]
)
g_pool.gaze_positions = (
pre_computed.get("gaze_positions") or pupil_data["gaze_positions"]
)
g_pool.fixations = [] # populated by the fixation detector plugin
g_pool.pupil_positions_by_frame = correlate_data(
g_pool.pupil_positions, g_pool.timestamps
)
g_pool.gaze_positions_by_frame = correlate_data(
g_pool.gaze_positions, g_pool.timestamps
)
g_pool.fixations_by_frame = [
[] for x in g_pool.timestamps
] # populated by the fixation detector plugin
# add plugins
g_pool.plugins = Plugin_List(g_pool, plugin_initializers)
while frames_to_export > current_frame:
try:
frame = cap.get_frame()
except EndofVideoFileError:
break
events = {"frame": frame}
# new positons and events
events["gaze_positions"] = g_pool.gaze_positions_by_frame[frame.index]
events["pupil_positions"] = g_pool.pupil_positions_by_frame[frame.index]
# publish delayed notifiactions when their time has come.
for n in list(g_pool.delayed_notifications.values()):
if n["_notify_time_"] < time():
del n["_notify_time_"]
del g_pool.delayed_notifications[n["subject"]]
g_pool.notifications.append(n)
# notify each plugin if there are new notifactions:
while g_pool.notifications:
n = g_pool.notifications.pop(0)
for p in g_pool.plugins:
p.on_notify(n)
# allow each Plugin to do its work.
for p in g_pool.plugins:
p.recent_events(events)
writer.write_video_frame(frame)
current_frame += 1
yield "Exporting", current_frame
writer.close()
writer = None
duration = time() - start_time
effective_fps = float(current_frame) / duration
result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
print(result.format(current_frame, out_file_path, duration, effective_fps))
yield "Export done. This took {:.0f} seconds.".format(duration), current_frame
except GeneratorExit:
print("Video export with pid {} was canceled.".format(os.getpid()))
except:
from time import sleep
import traceback
trace = traceback.format_exc()
print(
"Process Export (pid: {}) crashed with trace:\n{}".format(
os.getpid(), trace
)
)
sleep(1.0)
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def recent_events(self, events):
for e in self.exports:
try:
recent = [d for d in e.fetch()]
except Exception as e:
self.status, self.progress = "{}: {}".format(type(e).__name__, e), 0
else:
if recent:
e.status, e.progress = recent[-1]
if e.canceled:
e.status = "Export has been canceled."
|
def recent_events(self, events):
for e in self.exports:
recent = [d for d in e.fetch()]
if recent:
e.status, e.progress = recent[-1]
if e.canceled:
e.status = "Export has been canceled."
|
https://github.com/pupil-labs/pupil/issues/913
|
Traceback (most recent call last):
File "/home/papr/work/pupil/pupil_src/launchables/player.py", line 441, in player
p.recent_events(events)
File "/home/papr/work/pupil/pupil_src/shared_modules/batch_exporter.py", line 190, in recent_events
self.workers[i].start()
File "/usr/lib/python3.5/multiprocessing/process.py", line 99, in start
assert self._popen is None, 'cannot start a process twice'
AssertionError: cannot start a process twice
|
AssertionError
|
def update_recording_bytes_to_unicode(rec_dir):
logger.info("Updating recording from bytes to unicode.")
# update to python 3
meta_info_path = os.path.join(rec_dir, "info.csv")
def convert(data):
if isinstance(data, bytes):
return data.decode()
elif isinstance(data, str) or isinstance(data, np.ndarray):
return data
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
for file in os.listdir(rec_dir):
rec_file = os.path.join(rec_dir, file)
try:
rec_object = load_object(rec_file)
converted_object = convert(rec_object)
if converted_object != rec_object:
logger.info("Converted `{}` from bytes to unicode".format(file))
save_object(rec_object, rec_file)
except (ValueError, IsADirectoryError):
continue
# except TypeError:
# logger.error('TypeError when parsing `{}`'.format(file))
# continue
with open(meta_info_path, "r", encoding="utf-8") as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info["Capture Software Version"] = "v0.8.8"
with open(meta_info_path, "w", newline="") as csvfile:
csv_utils.write_key_value_file(csvfile, meta_info)
|
def update_recording_bytes_to_unicode(rec_dir):
logger.info("Updating recording from bytes to unicode.")
# update to python 3
meta_info_path = os.path.join(rec_dir, "info.csv")
def convert(data):
if isinstance(data, bytes):
return data.decode()
elif isinstance(data, str) or isinstance(data, np.ndarray):
return data
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
for file in os.listdir(rec_dir):
rec_file = os.path.join(rec_dir, file)
try:
rec_object = load_object(rec_file)
converted_object = convert(rec_object)
if converted_object != rec_object:
logger.info("Converted `{}` from bytes to unicode".format(file))
save_object(rec_object, rec_file)
except ValueError:
continue
# except TypeError:
# logger.error('TypeError when parsing `{}`'.format(file))
# continue
with open(meta_info_path, "r", encoding="utf-8") as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info["Capture Software Version"] = "v0.8.8"
with open(meta_info_path, "w", newline="") as csvfile:
csv_utils.write_key_value_file(csvfile, meta_info)
|
https://github.com/pupil-labs/pupil/issues/591
|
Traceback (most recent call last):
File "main.py", line 626, in <module>
session(this_session_dir)
File "main.py", line 490, in session
p.on_notify(n)
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 247, in on_notify
self.export_annotations(notification['range'],notification['export_dir'])
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 217, in export_annotations
annotations_in_section.sort(key=lambda a:a['index'])
AttributeError: 'dict_values' object has no attribute 'sort'
|
AttributeError
|
def export_annotations(self, export_range, export_dir):
if not self.annotations:
logger.warning("No annotations in this recording nothing to export")
return
annotations_in_section = chain(*self.annotations_by_frame[export_range])
annotations_in_section = list(
{a["index"]: a for a in annotations_in_section}.values()
) # remove duplicates
annotations_in_section.sort(key=lambda a: a["index"])
with open(
os.path.join(export_dir, "annotations.csv"), "w", encoding="utf-8", newline=""
) as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(self.csv_representation_keys())
for a in annotations_in_section:
csv_writer.writerow(self.csv_representation_for_annotations(a))
logger.info("Created 'annotations.csv' file.")
|
def export_annotations(self, export_range, export_dir):
if not self.annotations:
logger.warning("No annotations in this recording nothing to export")
return
annotations_in_section = chain(*self.annotations_by_frame[export_range])
annotations_in_section = {
a["index"]: a for a in annotations_in_section
}.values() # remove dublicates
annotations_in_section.sort(key=lambda a: a["index"])
with open(
os.path.join(export_dir, "annotations.csv"), "w", encoding="utf-8", newline=""
) as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(self.csv_representation_keys())
for a in annotations_in_section:
csv_writer.writerow(self.csv_representation_for_annotations(a))
logger.info("Created 'annotations.csv' file.")
|
https://github.com/pupil-labs/pupil/issues/591
|
Traceback (most recent call last):
File "main.py", line 626, in <module>
session(this_session_dir)
File "main.py", line 490, in session
p.on_notify(n)
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 247, in on_notify
self.export_annotations(notification['range'],notification['export_dir'])
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 217, in export_annotations
annotations_in_section.sort(key=lambda a:a['index'])
AttributeError: 'dict_values' object has no attribute 'sort'
|
AttributeError
|
def load_object(file_path):
file_path = os.path.expanduser(file_path)
# reading to string and loads is 2.5x faster that using the file handle and load.
try:
with open(file_path, "rb") as fh:
return pickle.load(fh, encoding="bytes")
except pickle.UnpicklingError as e:
raise ValueError from e
|
def load_object(file_path):
file_path = os.path.expanduser(file_path)
# reading to string and loads is 2.5x faster that using the file handle and load.
try:
with open(file_path, "rb") as fh:
return pickle.load(fh, encoding="bytes")
except pickle.UnpicklingError as e:
raise ValueError(e)
|
https://github.com/pupil-labs/pupil/issues/591
|
Traceback (most recent call last):
File "main.py", line 626, in <module>
session(this_session_dir)
File "main.py", line 490, in session
p.on_notify(n)
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 247, in on_notify
self.export_annotations(notification['range'],notification['export_dir'])
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 217, in export_annotations
annotations_in_section.sort(key=lambda a:a['index'])
AttributeError: 'dict_values' object has no attribute 'sort'
|
AttributeError
|
def save_surface_statsics_to_file(self, export_range, export_dir):
"""
between in and out mark
report: gaze distribution:
- total gazepoints
- gaze points on surface x
- gaze points not on any surface
report: surface visisbility
- total frames
- surface x visible framecount
surface events:
frame_no, ts, surface "name", "id" enter/exit
for each surface:
fixations_on_name.csv
gaze_on_name_id.csv
positions_of_name_id.csv
"""
metrics_dir = os.path.join(export_dir, "surfaces")
section = export_range
in_mark = export_range.start
out_mark = export_range.stop
logger.info("exporting metrics to {}".format(metrics_dir))
if os.path.isdir(metrics_dir):
logger.info("Will overwrite previous export for this section")
else:
try:
os.mkdir(metrics_dir)
except:
logger.warning("Could not make metrics dir {}".format(metrics_dir))
return
with open(
os.path.join(metrics_dir, "surface_visibility.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# surface visibility report
frame_count = len(self.g_pool.timestamps[section])
csv_writer.writerow(("frame_count", frame_count))
csv_writer.writerow((""))
csv_writer.writerow(("surface_name", "visible_frame_count"))
for s in self.surfaces:
if s.cache == None:
logger.warning(
"The surface is not cached. Please wait for the cacher to collect data."
)
return
visible_count = s.visible_count_in_section(section)
csv_writer.writerow((s.name, visible_count))
logger.info("Created 'surface_visibility.csv' file")
with open(
os.path.join(metrics_dir, "surface_gaze_distribution.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# gaze distribution report
gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
not_on_any_srf = set([gp["timestamp"] for gp in gaze_in_section])
csv_writer.writerow(("total_gaze_point_count", len(gaze_in_section)))
csv_writer.writerow((""))
csv_writer.writerow(("surface_name", "gaze_count"))
for s in self.surfaces:
gaze_on_srf = s.gaze_on_srf_in_section(section)
gaze_on_srf = set([gp["base_data"]["timestamp"] for gp in gaze_on_srf])
not_on_any_srf -= gaze_on_srf
csv_writer.writerow((s.name, len(gaze_on_srf)))
csv_writer.writerow(("not_on_any_surface", len(not_on_any_srf)))
logger.info("Created 'surface_gaze_distribution.csv' file")
with open(
os.path.join(metrics_dir, "surface_events.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# surface events report
csv_writer.writerow(
("frame_number", "timestamp", "surface_name", "surface_uid", "event_type")
)
events = []
for s in self.surfaces:
for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
events.append(
{
"frame_id": enter_frame_id,
"srf_name": s.name,
"srf_uid": s.uid,
"event": "enter",
}
)
events.append(
{
"frame_id": exit_frame_id,
"srf_name": s.name,
"srf_uid": s.uid,
"event": "exit",
}
)
events.sort(key=lambda x: x["frame_id"])
for e in events:
csv_writer.writerow(
(
e["frame_id"],
self.g_pool.timestamps[e["frame_id"]],
e["srf_name"],
e["srf_uid"],
e["event"],
)
)
logger.info("Created 'surface_events.csv' file")
for s in self.surfaces:
# per surface names:
surface_name = "_" + s.name.replace("/", "") + "_" + s.uid
# save surface_positions as pickle file
save_object(
s.cache.to_list(), os.path.join(metrics_dir, "srf_positions" + surface_name)
)
# save surface_positions as csv
with open(
os.path.join(metrics_dir, "srf_positons" + surface_name + ".csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"frame_idx",
"timestamp",
"m_to_screen",
"m_from_screen",
"detected_markers",
)
)
for idx, ts, ref_srf_data in zip(
range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache
):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
csv_writer.writerow(
(
idx,
ts,
ref_srf_data["m_to_screen"],
ref_srf_data["m_from_screen"],
ref_srf_data["detected_markers"],
)
)
# save gaze on srf as csv.
with open(
os.path.join(
metrics_dir, "gaze_positions_on_surface" + surface_name + ".csv"
),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"world_timestamp",
"world_frame_idx",
"gaze_timestamp",
"x_norm",
"y_norm",
"x_scaled",
"y_scaled",
"on_srf",
)
)
for idx, ts, ref_srf_data in zip(
range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache
):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
for gp in s.gaze_on_srf_by_frame_idx(
idx, ref_srf_data["m_from_screen"]
):
csv_writer.writerow(
(
ts,
idx,
gp["base_data"]["timestamp"],
gp["norm_pos"][0],
gp["norm_pos"][1],
gp["norm_pos"][0] * s.real_world_size["x"],
gp["norm_pos"][1] * s.real_world_size["y"],
gp["on_srf"],
)
)
# save fixation on srf as csv.
with open(
os.path.join(metrics_dir, "fixations_on_surface" + surface_name + ".csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"id",
"start_timestamp",
"duration",
"start_frame",
"end_frame",
"norm_pos_x",
"norm_pos_y",
"x_scaled",
"y_scaled",
"on_srf",
)
)
fixations_on_surface = []
for idx, ref_srf_data in zip(range(len(self.g_pool.timestamps)), s.cache):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
for f in s.fixations_on_srf_by_frame_idx(
idx, ref_srf_data["m_from_screen"]
):
fixations_on_surface.append(f)
removed_duplicates = dict(
[(f["base_data"]["id"], f) for f in fixations_on_surface]
).values()
for f_on_s in removed_duplicates:
f = f_on_s["base_data"]
f_x, f_y = f_on_s["norm_pos"]
f_on_srf = f_on_s["on_srf"]
csv_writer.writerow(
(
f["id"],
f["timestamp"],
f["duration"],
f["start_frame_index"],
f["end_frame_index"],
f_x,
f_y,
f_x * s.real_world_size["x"],
f_y * s.real_world_size["y"],
f_on_srf,
)
)
logger.info(
"Saved surface positon gaze and fixation data for '{}' with uid:'{}'".format(
s.name, s.uid
)
)
if s.heatmap is not None:
logger.info("Saved Heatmap as .png file.")
cv2.imwrite(
os.path.join(metrics_dir, "heatmap" + surface_name + ".png"), s.heatmap
)
logger.info("Done exporting reference surface data.")
|
def save_surface_statsics_to_file(self, export_range, export_dir):
"""
between in and out mark
report: gaze distribution:
- total gazepoints
- gaze points on surface x
- gaze points not on any surface
report: surface visisbility
- total frames
- surface x visible framecount
surface events:
frame_no, ts, surface "name", "id" enter/exit
for each surface:
fixations_on_name.csv
gaze_on_name_id.csv
positions_of_name_id.csv
"""
metrics_dir = os.path.join(export_dir, "surfaces")
section = export_range
in_mark = export_range.start
out_mark = export_range.stop
logger.info("exporting metrics to {}".format(metrics_dir))
if os.path.isdir(metrics_dir):
logger.info("Will overwrite previous export for this section")
else:
try:
os.mkdir(metrics_dir)
except:
logger.warning("Could not make metrics dir {}".format(metrics_dir))
return
with open(
os.path.join(metrics_dir, "surface_visibility.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# surface visibility report
frame_count = len(self.g_pool.timestamps[section])
csv_writer.writerow(("frame_count", frame_count))
csv_writer.writerow((""))
csv_writer.writerow(("surface_name", "visible_frame_count"))
for s in self.surfaces:
if s.cache == None:
logger.warning(
"The surface is not cached. Please wait for the cacher to collect data."
)
return
visible_count = s.visible_count_in_section(section)
csv_writer.writerow((s.name, visible_count))
logger.info("Created 'surface_visibility.csv' file")
with open(
os.path.join(metrics_dir, "surface_gaze_distribution.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# gaze distribution report
gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
not_on_any_srf = set([gp["timestamp"] for gp in gaze_in_section])
csv_writer.writerow(("total_gaze_point_count", len(gaze_in_section)))
csv_writer.writerow((""))
csv_writer.writerow(("surface_name", "gaze_count"))
for s in self.surfaces:
gaze_on_srf = s.gaze_on_srf_in_section(section)
gaze_on_srf = set([gp["base_data"]["timestamp"] for gp in gaze_on_srf])
not_on_any_srf -= gaze_on_srf
csv_writer.writerow((s.name, len(gaze_on_srf)))
csv_writer.writerow(("not_on_any_surface", len(not_on_any_srf)))
logger.info("Created 'surface_gaze_distribution.csv' file")
with open(
os.path.join(metrics_dir, "surface_events.csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
# surface events report
csv_writer.writerow(
("frame_number", "timestamp", "surface_name", "surface_uid", "event_type")
)
events = []
for s in self.surfaces:
for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
events.append(
{
"frame_id": enter_frame_id,
"srf_name": s.name,
"srf_uid": s.uid,
"event": "enter",
}
)
events.append(
{
"frame_id": exit_frame_id,
"srf_name": s.name,
"srf_uid": s.uid,
"event": "exit",
}
)
events.sort(key=lambda x: x["frame_id"])
for e in events:
csv_writer.writerow(
(
e["frame_id"],
self.g_pool.timestamps[e["frame_id"]],
e["srf_name"],
e["srf_uid"],
e["event"],
)
)
logger.info("Created 'surface_events.csv' file")
for s in self.surfaces:
# per surface names:
surface_name = "_" + s.name.replace("/", "") + "_" + s.uid
# save surface_positions as pickle file
save_object(
s.cache.to_list(), os.path.join(metrics_dir, "srf_positions" + surface_name)
)
# save surface_positions as csv
with open(
os.path.join(metrics_dir, "srf_positons" + surface_name + ".csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"frame_idx",
"timestamp",
"m_to_screen",
"m_from_screen",
"detected_markers",
)
)
for idx, ts, ref_srf_data in zip(
range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache
):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
csv_writer.writerow(
(
idx,
ts,
ref_srf_data["m_to_screen"],
ref_srf_data["m_from_screen"],
ref_srf_data["detected_markers"],
)
)
# save gaze on srf as csv.
with open(
os.path.join(
metrics_dir, "gaze_positions_on_surface" + surface_name + ".csv"
),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"world_timestamp",
"world_frame_idx",
"gaze_timestamp",
"x_norm",
"y_norm",
"x_scaled",
"y_scaled",
"on_srf",
)
)
for idx, ts, ref_srf_data in zip(
range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache
):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
for gp in s.gaze_on_srf_by_frame_idx(
idx, ref_srf_data["m_from_screen"]
):
csv_writer.writerow(
(
ts,
idx,
gp["base_data"]["timestamp"],
gp["norm_pos"][0],
gp["norm_pos"][1],
gp["norm_pos"][0] * s.real_world_size["x"],
gp["norm_pos"][1] * s.real_world_size["y"],
gp["on_srf"],
)
)
# save fixation on srf as csv.
with open(
os.path.join(metrics_dir, "fixations_on_surface" + surface_name + ".csv"),
"w",
encoding="utf-8",
newline="",
) as csvfile:
csv_writer = csv.writer(csvfile, delimiter=",")
csv_writer.writerow(
(
"id",
"start_timestamp",
"duration",
"start_frame",
"end_frame",
"norm_pos_x",
"norm_pos_y",
"x_scaled",
"y_scaled",
"on_srf",
)
)
fixations_on_surface = []
for idx, ref_srf_data in zip(range(len(self.g_pool.timestamps)), s.cache):
if in_mark <= idx <= out_mark:
if ref_srf_data is not None and ref_srf_data is not False:
for f in s.fixations_on_srf_by_frame_idx(
idx, ref_srf_data["m_from_screen"]
):
fixations_on_surface.append(f)
removed_dublicates = dict(
[(f["base_data"]["id"], f) for f in fixations_on_surface]
).values()
for f_on_s in removed_dublicates:
f = f_on_s["base_data"]
f_x, f_y = f_on_s["norm_pos"]
f_on_srf = f_on_s["on_srf"]
csv_writer.writerow(
(
f["id"],
f["timestamp"],
f["duration"],
f["start_frame_index"],
f["end_frame_index"],
f_x,
f_y,
f_x * s.real_world_size["x"],
f_y * s.real_world_size["y"],
f_on_srf,
)
)
logger.info(
"Saved surface positon gaze and fixation data for '{}' with uid:'{}'".format(
s.name, s.uid
)
)
if s.heatmap is not None:
logger.info("Saved Heatmap as .png file.")
cv2.imwrite(
os.path.join(metrics_dir, "heatmap" + surface_name + ".png"), s.heatmap
)
logger.info("Done exporting reference surface data.")
|
https://github.com/pupil-labs/pupil/issues/591
|
Traceback (most recent call last):
File "main.py", line 626, in <module>
session(this_session_dir)
File "main.py", line 490, in session
p.on_notify(n)
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 247, in on_notify
self.export_annotations(notification['range'],notification['export_dir'])
File "/Users/pabloprietz/work/pupil/pupil_src/shared_modules/annotations.py", line 217, in export_annotations
annotations_in_section.sort(key=lambda a:a['index'])
AttributeError: 'dict_values' object has no attribute 'sort'
|
AttributeError
|
def set_items(self, data, new_items, indexes):
if type(data) in [list, tuple]:
data_out = data.copy() if isinstance(data, list) else list(data)
params = list_match_func[self.list_match_local]([indexes, new_items])
for ind, i in zip(*params):
if self.replace and len(data_out) > ind:
data_out.pop(ind)
data_out.insert(ind, i)
return data_out
elif type(data) == np.ndarray:
out_data = np.array(data)
ind, items = list_match_func[self.list_match_local]([indexes, new_items])
if self.replace:
out_data[ind] = items
else:
for i, item in zip(ind, items):
out_data = np.concatenate([data[:i], [item], data[i:]])
return out_data
elif type(data) == str:
ind, items = list_match_func[self.list_match_local]([indexes, new_items])
add_one = 1 if self.replace else 0
out_data = data
for i, item in zip(ind, items):
out_data = out_data[:i] + str(item) + out_data[i + add_one :]
return out_data
return None
|
def set_items(self, data, new_items, indexes):
if type(data) in [list, tuple]:
data_out = data.copy()
params = list_match_func[self.list_match_local]([indexes, new_items])
for ind, i in zip(*params):
if self.replace and len(data_out) > ind:
data_out.pop(ind)
data_out.insert(ind, i)
return data_out
elif type(data) == np.ndarray:
out_data = np.array(data)
ind, items = list_match_func[self.list_match_local]([indexes, new_items])
if self.replace:
out_data[ind] = items
else:
for i, item in zip(ind, items):
out_data = np.concatenate([data[:i], [item], data[i:]])
return out_data
elif type(data) == str:
ind, items = list_match_func[self.list_match_local]([indexes, new_items])
add_one = 1 if self.replace else 0
out_data = data
for i, item in zip(ind, items):
out_data = out_data[:i] + str(item) + out_data[i + add_one :]
return out_data
return None
|
https://github.com/nortikin/sverchok/issues/3834
|
Traceback (most recent call last):
File "\sverchok\core\update_system.py", line 384, in do_update_general
node.process()
File "\sverchok\nodes\list_struct\item_insert.py", line 99, in process
out = self.set_items(data, new_item, indexes[0])
File "\sverchok\nodes\list_struct\item_insert.py", line 105, in set_items
data_out = data.copy()
AttributeError: 'tuple' object has no attribute 'copy'
|
AttributeError
|
def simple28_grid_xy(x, y, args):
"""x and y are passed by default so you could add font content"""
geom, config = args
back_color, grid_color, line_color = config.palette
matrix = gpu.matrix.get_projection_matrix()
bg_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
bg_fragment_shader = """
uniform vec4 color;
out vec4 gl_FragColor;
void main()
{
gl_FragColor = color;
}
"""
shader = gpu.types.GPUShader(bg_vertex_shader, bg_fragment_shader)
batch = batch_for_shader(
shader, "TRIS", {"pos": geom.background_coords}, indices=geom.background_indices
)
shader.bind()
shader.uniform_float("color", back_color)
shader.uniform_float("x_offset", x)
shader.uniform_float("y_offset", y)
shader.uniform_float("viewProjectionMatrix", matrix)
batch.draw(shader)
# draw grid and graph
line_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
line_fragment_shader = """
in vec4 a_color;
void main()
{
gl_FragColor = a_color;
}
"""
shader2 = gpu.types.GPUShader(line_vertex_shader, line_fragment_shader)
batch2 = batch_for_shader(
shader2,
"LINES",
{"pos": geom.vertices, "color": geom.vertex_colors},
indices=geom.indices,
)
shader2.bind()
shader2.uniform_float("x_offset", x)
shader2.uniform_float("y_offset", y)
shader2.uniform_float("viewProjectionMatrix", matrix)
batch2.draw(shader2)
|
def simple28_grid_xy(x, y, args):
"""x and y are passed by default so you could add font content"""
geom, config = args
back_color, grid_color, line_color = config.palette
matrix = gpu.matrix.get_projection_matrix()
bg_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
bg_fragment_shader = """
uniform vec4 color;
void main()
{
gl_FragColor = color;
}
"""
shader = gpu.types.GPUShader(bg_vertex_shader, bg_fragment_shader)
batch = batch_for_shader(
shader, "TRIS", {"pos": geom.background_coords}, indices=geom.background_indices
)
shader.bind()
shader.uniform_float("color", back_color)
shader.uniform_float("x_offset", x)
shader.uniform_float("y_offset", y)
shader.uniform_float("viewProjectionMatrix", matrix)
batch.draw(shader)
# draw grid and graph
line_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
line_fragment_shader = """
in vec4 a_color;
void main()
{
gl_FragColor = a_color;
}
"""
shader2 = gpu.types.GPUShader(line_vertex_shader, line_fragment_shader)
batch2 = batch_for_shader(
shader2,
"LINES",
{"pos": geom.vertices, "color": geom.vertex_colors},
indices=geom.indices,
)
shader2.bind()
shader2.uniform_float("x_offset", x)
shader2.uniform_float("y_offset", y)
shader2.uniform_float("viewProjectionMatrix", matrix)
batch2.draw(shader2)
|
https://github.com/nortikin/sverchok/issues/3480
|
2020-08-25 10:58:36,292 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
EVENT: node_property_update IN: SvIcosphereNode INSTANCE: IcoSphere
GPUShader: compile error:
ERROR: 0:33: Use of undeclared identifier 'gl_FragColor'
2020-08-25 10:58:36,327 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
|
Exception
|
def get_2d_uniform_color_shader():
uniform_2d_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
uniform_2d_fragment_shader = """
uniform vec4 color;
out vec4 gl_FragColor;
void main()
{
gl_FragColor = color;
}
"""
return gpu.types.GPUShader(uniform_2d_vertex_shader, uniform_2d_fragment_shader)
|
def get_2d_uniform_color_shader():
uniform_2d_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
uniform_2d_fragment_shader = """
uniform vec4 color;
void main()
{
gl_FragColor = color;
}
"""
return gpu.types.GPUShader(uniform_2d_vertex_shader, uniform_2d_fragment_shader)
|
https://github.com/nortikin/sverchok/issues/3480
|
2020-08-25 10:58:36,292 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
EVENT: node_property_update IN: SvIcosphereNode INSTANCE: IcoSphere
GPUShader: compile error:
ERROR: 0:33: Use of undeclared identifier 'gl_FragColor'
2020-08-25 10:58:36,327 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
|
Exception
|
def get_2d_smooth_color_shader():
smooth_2d_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
smooth_2d_fragment_shader = """
in vec4 a_color;
out vec4 gl_FragColor;
void main()
{
gl_FragColor = a_color;
}
"""
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
|
def get_2d_smooth_color_shader():
smooth_2d_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
smooth_2d_fragment_shader = """
in vec4 a_color;
void main()
{
gl_FragColor = a_color;
}
"""
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
|
https://github.com/nortikin/sverchok/issues/3480
|
2020-08-25 10:58:36,292 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
EVENT: node_property_update IN: SvIcosphereNode INSTANCE: IcoSphere
GPUShader: compile error:
ERROR: 0:33: Use of undeclared identifier 'gl_FragColor'
2020-08-25 10:58:36,327 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
|
Exception
|
def get_2d_uniform_color_shader():
# return gpu.shader.from_builtin('2D_UNIFORM_COLOR')
uniform_2d_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
uniform_2d_fragment_shader = """
uniform vec4 color;
out vec4 gl_FragColor;
void main()
{
gl_FragColor = color;
}
"""
return gpu.types.GPUShader(uniform_2d_vertex_shader, uniform_2d_fragment_shader)
|
def get_2d_uniform_color_shader():
# return gpu.shader.from_builtin('2D_UNIFORM_COLOR')
uniform_2d_vertex_shader = """
in vec2 pos;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
}
"""
uniform_2d_fragment_shader = """
uniform vec4 color;
void main()
{
gl_FragColor = color;
}
"""
return gpu.types.GPUShader(uniform_2d_vertex_shader, uniform_2d_fragment_shader)
|
https://github.com/nortikin/sverchok/issues/3480
|
2020-08-25 10:58:36,292 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
EVENT: node_property_update IN: SvIcosphereNode INSTANCE: IcoSphere
GPUShader: compile error:
ERROR: 0:33: Use of undeclared identifier 'gl_FragColor'
2020-08-25 10:58:36,327 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
|
Exception
|
def get_2d_smooth_color_shader():
# return gpu.shader.from_builtin('2D_SMOOTH_COLOR')
smooth_2d_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
smooth_2d_fragment_shader = """
in vec4 a_color;
out vec4 gl_FragColor;
void main()
{
gl_FragColor = a_color;
}
"""
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
|
def get_2d_smooth_color_shader():
# return gpu.shader.from_builtin('2D_SMOOTH_COLOR')
smooth_2d_vertex_shader = """
in vec2 pos;
layout(location=1) in vec4 color;
uniform mat4 viewProjectionMatrix;
uniform float x_offset;
uniform float y_offset;
out vec4 a_color;
void main()
{
gl_Position = viewProjectionMatrix * vec4(pos.x + x_offset, pos.y + y_offset, 0.0f, 1.0f);
a_color = color;
}
"""
smooth_2d_fragment_shader = """
in vec4 a_color;
void main()
{
gl_FragColor = a_color;
}
"""
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
|
https://github.com/nortikin/sverchok/issues/3480
|
2020-08-25 10:58:36,292 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
EVENT: node_property_update IN: SvIcosphereNode INSTANCE: IcoSphere
GPUShader: compile error:
ERROR: 0:33: Use of undeclared identifier 'gl_FragColor'
2020-08-25 10:58:36,327 [ERROR] sverchok.core.update_system: Node Viewer 2D had exception: Shader Compile Error, see console for more details
Traceback (most recent call last):
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/core/update_system.py", line 383, in do_update_general
node.process()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 813, in process
geom = generate_mesh_geom(config, vecs)
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 478, in generate_mesh_geom
config.v_shader = get_2d_smooth_color_shader()
File "/Users/jimmygunawan/Library/Application Support/Blender/2.83/scripts/addons/sverchok-master/nodes/viz/viewer_2d.py", line 156, in get_2d_smooth_color_shader
return gpu.types.GPUShader(smooth_2d_vertex_shader, smooth_2d_fragment_shader)
Exception: Shader Compile Error, see console for more details
|
Exception
|
def perform_svtextin_node_object(node, node_ref):
"""
as it's a beta service, old IO json may not be compatible - in this interest
of neat code we assume it finds everything.
"""
texts = bpy.data.texts
params = node_ref.get("params")
# original textin used 'current_text', textin+ uses 'text'
current_text = params.get("current_text", params.get("text"))
# it's not clear from the exporter code why textmode parameter isn't stored
# in params.. for now this lets us look in both places. ugly but whatever.
textmode = params.get("textmode")
if not textmode:
textmode = node_ref.get("textmode")
node.textmode = textmode
if not current_text:
info("`%s' doesn't store a current_text in params", node.name)
elif not current_text in texts:
new_text = texts.new(current_text)
text_line_entry = node_ref["text_lines"]
if node.textmode == "JSON":
if isinstance(text_line_entry, str):
debug("loading old text json content / backward compatibility mode")
elif isinstance(text_line_entry, dict):
text_line_entry = json.dumps(text_line_entry["stored_as_json"])
new_text.from_string(text_line_entry)
else:
# reaches here if (current_text) and (current_text in texts)
# can probably skip this..
# texts[current_text].from_string(node_ref['text_lines'])
debug(
"%s seems to reuse a text block loaded by another node - skipping",
node.name,
)
|
def perform_svtextin_node_object(node, node_ref):
"""
as it's a beta service, old IO json may not be compatible - in this interest
of neat code we assume it finds everything.
"""
texts = bpy.data.texts
params = node_ref.get("params")
current_text = params["current_text"]
# it's not clear from the exporter code why textmode parameter isn't stored
# in params.. for now this lets us look in both places. ugly but whatever.
textmode = params.get("textmode")
if not textmode:
textmode = node_ref.get("textmode")
node.textmode = textmode
if not current_text:
info("`%s' doesn't store a current_text in params", node.name)
elif not current_text in texts:
new_text = texts.new(current_text)
text_line_entry = node_ref["text_lines"]
if node.textmode == "JSON":
if isinstance(text_line_entry, str):
debug("loading old text json content / backward compatibility mode")
pass
elif isinstance(text_line_entry, dict):
text_line_entry = json.dumps(text_line_entry["stored_as_json"])
new_text.from_string(text_line_entry)
else:
# reaches here if (current_text) and (current_text in texts)
# can probably skip this..
# texts[current_text].from_string(node_ref['text_lines'])
debug(
"%s seems to reuse a text block loaded by another node - skipping",
node.name,
)
|
https://github.com/nortikin/sverchok/issues/2126
|
======================================================================
ERROR: test_import_examples (tree_import_tests.ExamplesImportTest) (file='5.Sverchok_Spread.json')
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/tests/tree_import_tests.py", line 66, in test_import_examples
import_tree(new_tree, path)
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/utils/sv_IO_panel_tools.py", line 729, in import_tree
generate_layout(fullpath, nodes_json)
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/utils/sv_IO_panel_tools.py", line 711, in generate_layout
place_frames(ng, nodes_json, name_remap)
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/utils/sv_IO_panel_tools.py", line 638, in place_frames
ng.nodes[finalize(node_name)].parent = ng.nodes[finalize(parent)]
KeyError: 'bpy_prop_collection[key]: key "Text in+.001" not found'
|
KeyError
|
def storage_get_data(self, node_dict):
local_storage = {"knots": [], "knotsnames": []}
if "knots" in self.SvLists:
# implies "knotsnames" will be found too.. because that's how it works..
for knot in self.SvLists["knots"].SvSubLists:
local_storage["knots"].append([knot.SvX, knot.SvY, knot.SvZ])
for outname in self.SvLists["knotsnames"].SvSubLists:
local_storage["knotsnames"].append(outname.SvName)
# store anyway
node_dict["profile_sublist_storage"] = json.dumps(local_storage, sort_keys=True)
if self.filename:
node_dict["path_file"] = bpy.data.texts[self.filename].as_string()
else:
node_dict["path_file"] = ""
|
def storage_get_data(self, node_dict):
local_storage = {"knots": [], "knotsnames": []}
for knot in self.SvLists["knots"].SvSubLists:
local_storage["knots"].append([knot.SvX, knot.SvY, knot.SvZ])
for outname in self.SvLists["knotsnames"].SvSubLists:
local_storage["knotsnames"].append(outname.SvName)
node_dict["profile_sublist_storage"] = json.dumps(local_storage, sort_keys=True)
node_dict["path_file"] = bpy.data.texts[self.filename].as_string()
|
https://github.com/nortikin/sverchok/issues/2118
|
Traceback (most recent call last):
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/utils/sv_IO_panel_operators.py", line 58, in execute
layout_dict = create_dict_of_tree(ng)
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/utils/sv_IO_panel_tools.py", line 298, in create_dict_of_tree
node.storage_get_data(node_dict)
File "/home/portnov/.config/blender/2.79/scripts/addons/sverchok-master/nodes/generators_extended/profile_mk2.py", line 952, in storage_get_data
for knot in self.SvLists['knots'].SvSubLists:
KeyError: 'bpy_prop_collection[key]: key "knots" not found'
|
KeyError
|
def migrate_customfieldvalues(apps, schema_editor):
"""
Copy data from CustomFieldValues into the custom_field_data JSON field on each model instance.
"""
CustomFieldValue = apps.get_model("extras", "CustomFieldValue")
for cfv in CustomFieldValue.objects.prefetch_related("field").exclude(
serialized_value=""
):
model = apps.get_model(cfv.obj_type.app_label, cfv.obj_type.model)
# Read and update custom field value for each instance
# TODO: This can be done more efficiently once .update() is supported for JSON fields
cf_data = (
model.objects.filter(pk=cfv.obj_id).values("custom_field_data").first()
)
try:
cf_data["custom_field_data"][cfv.field.name] = deserialize_value(
cfv.field, cfv.serialized_value
)
except Exception as e:
print(
f"{cfv.field.name} ({cfv.field.type}): {cfv.serialized_value} ({cfv.pk})"
)
raise e
model.objects.filter(pk=cfv.obj_id).update(**cf_data)
|
def migrate_customfieldvalues(apps, schema_editor):
"""
Copy data from CustomFieldValues into the custom_field_data JSON field on each model instance.
"""
CustomFieldValue = apps.get_model("extras", "CustomFieldValue")
for cfv in CustomFieldValue.objects.prefetch_related("field").exclude(
serialized_value=""
):
model = apps.get_model(cfv.obj_type.app_label, cfv.obj_type.model)
# Read and update custom field value for each instance
# TODO: This can be done more efficiently once .update() is supported for JSON fields
cf_data = (
model.objects.filter(pk=cfv.obj_id).values("custom_field_data").first()
)
try:
cf_data["custom_field_data"][cfv.field.name] = deserialize_value(
cfv.field, cfv.serialized_value
)
except ValueError as e:
print(
f"{cfv.field.name} ({cfv.field.type}): {cfv.serialized_value} ({cfv.pk})"
)
raise e
model.objects.filter(pk=cfv.obj_id).update(**cf_data)
|
https://github.com/netbox-community/netbox/issues/5573
|
Applying extras.0051_migrate_customfields...Traceback (most recent call last):
File "netbox/manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in run_from_argv
self.execute(*args, **cmd_options)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/base.py", line 371, in execute
output = self.handle(*args, **options)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/base.py", line 85, in wrapped
res = handle_func(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/core/management/commands/migrate.py", line 245, in handle
fake_initial=fake_initial,
File "/opt/netbox/venv/lib/python3.6/site-packages/django/db/migrations/executor.py", line 117, in migrate
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
state = migration.apply(state, schema_editor)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/db/migrations/migration.py", line 124, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/opt/netbox/venv/lib/python3.6/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
self.code(from_state.apps, schema_editor)
File "/opt/netbox/netbox/extras/migrations/0051_migrate_customfields.py", line 69, in migrate_customfieldvalues
cf_data['custom_field_data'][cfv.field.name] = deserialize_value(cfv.field, cfv.serialized_value)
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def get_extra_context(self, request, instance):
related_paths = []
# If tracing a PathEndpoint, locate the CablePath (if one exists) by its origin
if isinstance(instance, PathEndpoint):
path = instance._path
# Otherwise, find all CablePaths which traverse the specified object
else:
related_paths = CablePath.objects.filter(
path__contains=instance
).prefetch_related("origin")
# Check for specification of a particular path (when tracing pass-through ports)
try:
path_id = int(request.GET.get("cablepath_id"))
except TypeError:
path_id = None
if path_id in list(related_paths.values_list("pk", flat=True)):
path = CablePath.objects.get(pk=path_id)
else:
path = related_paths.first()
return {
"path": path,
"related_paths": related_paths,
"total_length": path.get_total_length() if path else None,
}
|
def get_extra_context(self, request, instance):
related_paths = []
# If tracing a PathEndpoint, locate the CablePath (if one exists) by its origin
if isinstance(instance, PathEndpoint):
path = instance._path
# Otherwise, find all CablePaths which traverse the specified object
else:
related_paths = CablePath.objects.filter(path__contains=obj).prefetch_related(
"origin"
)
# Check for specification of a particular path (when tracing pass-through ports)
try:
path_id = int(request.GET.get("cablepath_id"))
except TypeError:
path_id = None
if path_id in list(related_paths.values_list("pk", flat=True)):
path = CablePath.objects.get(pk=path_id)
else:
path = related_paths.first()
return {
"path": path,
"related_paths": related_paths,
"total_length": path.get_total_length() if path else None,
}
|
https://github.com/netbox-community/netbox/issues/5374
|
Traceback (most recent call last):
File "/usr/local/netbox/venv/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/local/netbox/venv/lib64/python3.6/site-packages/django/core/handlers/base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/netbox/venv/lib64/python3.6/site-packages/django/views/generic/base.py", line 70, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/netbox/netbox/dcim/views.py", line 2068, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/usr/local/netbox/netbox/utilities/views.py", line 91, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/usr/local/netbox/venv/lib64/python3.6/site-packages/django/views/generic/base.py", line 98, in dispatch
return handler(request, *args, **kwargs)
File "/usr/local/netbox/netbox/netbox/views/generic.py", line 70, in get
**self.get_extra_context(request, instance),
File "/usr/local/netbox/netbox/dcim/views.py", line 2079, in get_extra_context
related_paths = CablePath.objects.filter(path__contains=obj).prefetch_related('origin')
Exception Type: NameError at /dcim/rear-ports/3950/trace/ Exception Value: name 'obj' is not defined
|
NameError
|
def custom_links(context, obj):
"""
Render all applicable links for the given object.
"""
content_type = ContentType.objects.get_for_model(obj)
custom_links = CustomLink.objects.filter(content_type=content_type)
if not custom_links:
return ""
# Pass select context data when rendering the CustomLink
link_context = {
"obj": obj,
"debug": context.get(
"debug", False
), # django.template.context_processors.debug
"request": context["request"], # django.template.context_processors.request
"user": context["user"], # django.contrib.auth.context_processors.auth
"perms": context["perms"], # django.contrib.auth.context_processors.auth
}
template_code = ""
group_names = OrderedDict()
for cl in custom_links:
# Organize custom links by group
if cl.group_name and cl.group_name in group_names:
group_names[cl.group_name].append(cl)
elif cl.group_name:
group_names[cl.group_name] = [cl]
# Add non-grouped links
else:
try:
text_rendered = render_jinja2(cl.text, link_context)
if text_rendered:
link_rendered = render_jinja2(cl.url, link_context)
link_target = ' target="_blank"' if cl.new_window else ""
template_code += LINK_BUTTON.format(
link_rendered, link_target, cl.button_class, text_rendered
)
except Exception as e:
template_code += (
'<a class="btn btn-sm btn-default" disabled="disabled" title="{}">'
'<i class="fa fa-warning"></i> {}</a>\n'.format(e, cl.name)
)
# Add grouped links to template
for group, links in group_names.items():
links_rendered = []
for cl in links:
try:
text_rendered = render_jinja2(cl.text, link_context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ""
link_rendered = render_jinja2(cl.url, link_context)
links_rendered.append(
GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
except Exception as e:
links_rendered.append(
'<li><a disabled="disabled" title="{}"><span class="text-muted">'
'<i class="fa fa-warning"></i> {}</span></a></li>'.format(
e, cl.name
)
)
if links_rendered:
template_code += GROUP_BUTTON.format(
links[0].button_class, group, "".join(links_rendered)
)
return mark_safe(template_code)
|
def custom_links(context, obj):
"""
Render all applicable links for the given object.
"""
content_type = ContentType.objects.get_for_model(obj)
custom_links = CustomLink.objects.filter(content_type=content_type)
if not custom_links:
return ""
# Pass select context data when rendering the CustomLink
link_context = {
"obj": obj,
"debug": context["debug"], # django.template.context_processors.debug
"request": context["request"], # django.template.context_processors.request
"user": context["user"], # django.contrib.auth.context_processors.auth
"perms": context["perms"], # django.contrib.auth.context_processors.auth
}
template_code = ""
group_names = OrderedDict()
for cl in custom_links:
# Organize custom links by group
if cl.group_name and cl.group_name in group_names:
group_names[cl.group_name].append(cl)
elif cl.group_name:
group_names[cl.group_name] = [cl]
# Add non-grouped links
else:
try:
text_rendered = render_jinja2(cl.text, link_context)
if text_rendered:
link_rendered = render_jinja2(cl.url, link_context)
link_target = ' target="_blank"' if cl.new_window else ""
template_code += LINK_BUTTON.format(
link_rendered, link_target, cl.button_class, text_rendered
)
except Exception as e:
template_code += (
'<a class="btn btn-sm btn-default" disabled="disabled" title="{}">'
'<i class="fa fa-warning"></i> {}</a>\n'.format(e, cl.name)
)
# Add grouped links to template
for group, links in group_names.items():
links_rendered = []
for cl in links:
try:
text_rendered = render_jinja2(cl.text, link_context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ""
link_rendered = render_jinja2(cl.url, link_context)
links_rendered.append(
GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
except Exception as e:
links_rendered.append(
'<li><a disabled="disabled" title="{}"><span class="text-muted">'
'<i class="fa fa-warning"></i> {}</span></a></li>'.format(
e, cl.name
)
)
if links_rendered:
template_code += GROUP_BUTTON.format(
links[0].button_class, group, "".join(links_rendered)
)
return mark_safe(template_code)
|
https://github.com/netbox-community/netbox/issues/5231
|
Traceback (most recent call last):
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/views/generic/base.py", line 73, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 123, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/views/generic/base.py", line 101, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox/netbox/dcim/views.py", line 1094, in get
'show_interface_graphs': Graph.objects.filter(type__model='interface').exists(),
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/shortcuts.py", line 19, in render
content = loader.render_to_string(template_name, context, request, using=using)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/loader.py", line 62, in render_to_string
return template.render(context, request)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/backends/django.py", line 61, in render
return self.template.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 170, in render
return self._render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 938, in render
bit = node.render_annotated(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 905, in render_annotated
return self.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 938, in render
bit = node.render_annotated(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 905, in render_annotated
return self.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 938, in render
bit = node.render_annotated(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/base.py", line 905, in render_annotated
return self.render(context)
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/library.py", line 192, in render
output = self.func(*resolved_args, **resolved_kwargs)
File "/opt/netbox/netbox/extras/templatetags/custom_links.py", line 36, in custom_links
'debug': context['debug'], # django.template.context_processors.debug
File "/opt/netbox-2.9.6/venv/lib/python3.7/site-packages/django/template/context.py", line 83, in __getitem__
raise KeyError(key)
Exception Type: KeyError at /dcim/devices/28/
Exception Value: 'debug'
|
KeyError
|
def handle(self, *args, **options):
# Gather all available reports
reports = get_reports()
# Run reports
for module_name, report_list in reports:
for report in report_list:
if (
module_name in options["reports"]
or report.full_name in options["reports"]
):
# Run the report and create a new ReportResult
self.stdout.write(
"[{:%H:%M:%S}] Running {}...".format(
timezone.now(), report.full_name
)
)
report_content_type = ContentType.objects.get(
app_label="extras", model="report"
)
job_result = JobResult.enqueue_job(
run_report, report.full_name, report_content_type, None
)
# Wait on the job to finish
while (
job_result.status
not in JobResultStatusChoices.TERMINAL_STATE_CHOICES
):
time.sleep(1)
job_result = JobResult.objects.get(pk=job_result.pk)
# Report on success/failure
if job_result.status == JobResultStatusChoices.STATUS_FAILED:
status = self.style.ERROR("FAILED")
elif job_result == JobResultStatusChoices.STATUS_ERRORED:
status = self.style.ERROR("ERRORED")
else:
status = self.style.SUCCESS("SUCCESS")
for test_name, attrs in job_result.data.items():
self.stdout.write(
"\t{}: {} success, {} info, {} warning, {} failure".format(
test_name,
attrs["success"],
attrs["info"],
attrs["warning"],
attrs["failure"],
)
)
self.stdout.write(
"[{:%H:%M:%S}] {}: {}".format(
timezone.now(), report.full_name, status
)
)
self.stdout.write(
"[{:%H:%M:%S}] {}: Duration {}".format(
timezone.now(), report.full_name, job_result.duration
)
)
# Wrap things up
self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
|
def handle(self, *args, **options):
# Gather all available reports
reports = get_reports()
# Run reports
for module_name, report_list in reports:
for report in report_list:
if (
module_name in options["reports"]
or report.full_name in options["reports"]
):
# Run the report and create a new ReportResult
self.stdout.write(
"[{:%H:%M:%S}] Running {}...".format(
timezone.now(), report.full_name
)
)
report.run()
# Report on success/failure
status = (
self.style.ERROR("FAILED")
if report.failed
else self.style.SUCCESS("SUCCESS")
)
for test_name, attrs in report.result.data.items():
self.stdout.write(
"\t{}: {} success, {} info, {} warning, {} failure".format(
test_name,
attrs["success"],
attrs["info"],
attrs["warning"],
attrs["failure"],
)
)
self.stdout.write(
"[{:%H:%M:%S}] {}: {}".format(
timezone.now(), report.full_name, status
)
)
# Wrap things up
self.stdout.write("[{:%H:%M:%S}] Finished".format(timezone.now()))
|
https://github.com/netbox-community/netbox/issues/5108
|
$ python3 manage.py runreport DeviceConnectionsReport
[17:50:53] Running DeviceConnectionsReport.DeviceConnectionsReport...
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.7/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.7/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.7/site-packages/django/core/management/base.py", line 330, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.7/site-packages/django/core/management/base.py", line 371, in execute
output = self.handle(*args, **options)
File "/opt/netbox/netbox/extras/management/commands/runreport.py", line 27, in handle
report.run()
TypeError: run() missing 1 required positional argument: 'job_result'
|
TypeError
|
def clean(self):
if self.address:
# /0 masks are not acceptable
if self.address.prefixlen == 0:
raise ValidationError({"address": "Cannot create IP address with /0 mask."})
# Enforce unique IP space (if applicable)
if self.role not in IPADDRESS_ROLES_NONUNIQUE and (
(self.vrf is None and settings.ENFORCE_GLOBAL_UNIQUE)
or (self.vrf and self.vrf.enforce_unique)
):
duplicate_ips = self.get_duplicates()
if duplicate_ips:
raise ValidationError(
{
"address": "Duplicate IP address found in {}: {}".format(
"VRF {}".format(self.vrf) if self.vrf else "global table",
duplicate_ips.first(),
)
}
)
# Check for primary IP assignment that doesn't match the assigned device/VM
if self.pk and type(self.assigned_object) is Interface:
device = Device.objects.filter(
Q(primary_ip4=self) | Q(primary_ip6=self)
).first()
if device:
if self.assigned_object is None:
raise ValidationError(
{
"interface": f"IP address is primary for device {device} but not assigned to an interface"
}
)
elif self.assigned_object.device != device:
raise ValidationError(
{
"interface": f"IP address is primary for device {device} but assigned to "
f"{self.assigned_object.device} ({self.assigned_object})"
}
)
elif self.pk and type(self.assigned_object) is VMInterface:
vm = VirtualMachine.objects.filter(
Q(primary_ip4=self) | Q(primary_ip6=self)
).first()
if vm:
if self.assigned_object is None:
raise ValidationError(
{
"vminterface": f"IP address is primary for virtual machine {vm} but not assigned to an "
f"interface"
}
)
elif self.assigned_object.virtual_machine != vm:
raise ValidationError(
{
"vminterface": f"IP address is primary for virtual machine {vm} but assigned to "
f"{self.assigned_object.virtual_machine} ({self.assigned_object})"
}
)
|
def clean(self):
if self.address:
# /0 masks are not acceptable
if self.address.prefixlen == 0:
raise ValidationError({"address": "Cannot create IP address with /0 mask."})
# Enforce unique IP space (if applicable)
if self.role not in IPADDRESS_ROLES_NONUNIQUE and (
(self.vrf is None and settings.ENFORCE_GLOBAL_UNIQUE)
or (self.vrf and self.vrf.enforce_unique)
):
duplicate_ips = self.get_duplicates()
if duplicate_ips:
raise ValidationError(
{
"address": "Duplicate IP address found in {}: {}".format(
"VRF {}".format(self.vrf) if self.vrf else "global table",
duplicate_ips.first(),
)
}
)
# Check for primary IP assignment that doesn't match the assigned device/VM
if self.pk and type(self.assigned_object) is Interface:
device = Device.objects.filter(
Q(primary_ip4=self) | Q(primary_ip6=self)
).first()
if device:
if self.assigned_object is None:
raise ValidationError(
{
"interface": f"IP address is primary for device {device} but not assigned to an interface"
}
)
elif self.assigned_object.device != device:
raise ValidationError(
{
"interface": f"IP address is primary for device {device} but assigned to "
f"{self.assigned_object.device} ({self.assigned_object})"
}
)
elif self.pk and type(self.assigned_object) is VMInterface:
vm = VirtualMachine.objects.filter(
Q(primary_ip4=self) | Q(primary_ip6=self)
).first()
if vm:
if self.assigned_object is None:
raise ValidationError(
{
"vminterface": f"IP address is primary for virtual machine {vm} but not assigned to an "
f"interface"
}
)
elif self.interface.virtual_machine != vm:
raise ValidationError(
{
"vminterface": f"IP address is primary for virtual machine {vm} but assigned to "
f"{self.assigned_object.virtual_machine} ({self.assigned_object})"
}
)
|
https://github.com/netbox-community/netbox/issues/5035
|
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/generic/base.py", line 73, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 392, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 123, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/generic/base.py", line 101, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 418, in post
if form.is_valid():
File "/opt/netbox/venv/lib/python3.7/site-packages/django/forms/forms.py", line 177, in is_valid
return self.is_bound and not self.errors
File "/opt/netbox/venv/lib/python3.7/site-packages/django/forms/forms.py", line 172, in errors
self.full_clean()
File "/opt/netbox/venv/lib/python3.7/site-packages/django/forms/forms.py", line 376, in full_clean
self._post_clean()
File "/opt/netbox/venv/lib/python3.7/site-packages/django/forms/models.py", line 405, in _post_clean
self.instance.full_clean(exclude=exclude, validate_unique=False)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/db/models/base.py", line 1213, in full_clean
self.clean()
File "/opt/netbox/netbox/ipam/models.py", line 748, in clean
elif self.interface.virtual_machine != vm:
Exception Type: AttributeError at /ipam/ip-addresses/4195/edit/
Exception Value: 'RelatedManager' object has no attribute 'virtual_machine'
|
AttributeError
|
def __init__(self, *args, **kwargs):
# Initialize helper selectors
instance = kwargs.get("instance")
initial = kwargs.get("initial", {}).copy()
if instance:
if type(instance.assigned_object) is Interface:
initial["device"] = instance.assigned_object.device
initial["interface"] = instance.assigned_object
elif type(instance.assigned_object) is VMInterface:
initial["virtual_machine"] = instance.assigned_object.virtual_machine
initial["vminterface"] = instance.assigned_object
if instance.nat_inside:
nat_inside_parent = instance.nat_inside.assigned_object
if type(nat_inside_parent) is Interface:
initial["nat_site"] = nat_inside_parent.device.site.pk
initial["nat_rack"] = nat_inside_parent.device.rack.pk
initial["nat_device"] = nat_inside_parent.device.pk
elif type(nat_inside_parent) is VMInterface:
initial["nat_cluster"] = nat_inside_parent.virtual_machine.cluster.pk
initial["nat_virtual_machine"] = nat_inside_parent.virtual_machine.pk
kwargs["initial"] = initial
super().__init__(*args, **kwargs)
self.fields["vrf"].empty_label = "Global"
# Initialize primary_for_parent if IP address is already assigned
if self.instance.pk and self.instance.assigned_object:
parent = self.instance.assigned_object.parent
if (
self.instance.address.version == 4
and parent.primary_ip4_id == self.instance.pk
or self.instance.address.version == 6
and parent.primary_ip6_id == self.instance.pk
):
self.initial["primary_for_parent"] = True
|
def __init__(self, *args, **kwargs):
# Initialize helper selectors
instance = kwargs.get("instance")
initial = kwargs.get("initial", {}).copy()
if instance:
if type(instance.assigned_object) is Interface:
initial["device"] = instance.assigned_object.device
initial["interface"] = instance.assigned_object
elif type(instance.assigned_object) is VMInterface:
initial["virtual_machine"] = instance.assigned_object.virtual_machine
initial["vminterface"] = instance.assigned_object
if instance.nat_inside and instance.nat_inside.device is not None:
initial["nat_site"] = instance.nat_inside.device.site
initial["nat_rack"] = instance.nat_inside.device.rack
initial["nat_device"] = instance.nat_inside.device
kwargs["initial"] = initial
super().__init__(*args, **kwargs)
self.fields["vrf"].empty_label = "Global"
# Initialize primary_for_parent if IP address is already assigned
if self.instance.pk and self.instance.assigned_object:
parent = self.instance.assigned_object.parent
if (
self.instance.address.version == 4
and parent.primary_ip4_id == self.instance.pk
or self.instance.address.version == 6
and parent.primary_ip6_id == self.instance.pk
):
self.initial["primary_for_parent"] = True
|
https://github.com/netbox-community/netbox/issues/5022
|
Traceback (most recent call last):
File "/home/jstretch/.virtualenvs/netbox-2.9/lib/python3.6/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/home/jstretch/.virtualenvs/netbox-2.9/lib/python3.6/site-packages/django/core/handlers/base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/jstretch/.virtualenvs/netbox-2.9/lib/python3.6/site-packages/django/views/generic/base.py", line 73, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 390, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 121, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox-2.9/lib/python3.6/site-packages/django/views/generic/base.py", line 101, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 396, in get
form = self.model_form(instance=obj, initial=initial_data)
File "/opt/netbox/netbox/ipam/forms.py", line 619, in __init__
if instance.nat_inside and instance.nat_inside.device is not None:
Exception Type: AttributeError at /ipam/ip-addresses/56298/edit/
Exception Value: 'IPAddress' object has no attribute 'device'
|
AttributeError
|
def clean(self):
# Validate that termination A exists
if not hasattr(self, "termination_a_type"):
raise ValidationError("Termination A type has not been specified")
try:
self.termination_a_type.model_class().objects.get(pk=self.termination_a_id)
except ObjectDoesNotExist:
raise ValidationError(
{"termination_a": "Invalid ID for type {}".format(self.termination_a_type)}
)
# Validate that termination B exists
if not hasattr(self, "termination_b_type"):
raise ValidationError("Termination B type has not been specified")
try:
self.termination_b_type.model_class().objects.get(pk=self.termination_b_id)
except ObjectDoesNotExist:
raise ValidationError(
{"termination_b": "Invalid ID for type {}".format(self.termination_b_type)}
)
# If editing an existing Cable instance, check that neither termination has been modified.
if self.pk:
err_msg = "Cable termination points may not be modified. Delete and recreate the cable instead."
if (
self.termination_a_type != self._orig_termination_a_type
or self.termination_a_id != self._orig_termination_a_id
):
raise ValidationError({"termination_a": err_msg})
if (
self.termination_b_type != self._orig_termination_b_type
or self.termination_b_id != self._orig_termination_b_id
):
raise ValidationError({"termination_b": err_msg})
type_a = self.termination_a_type.model
type_b = self.termination_b_type.model
# Validate interface types
if type_a == "interface" and self.termination_a.type in NONCONNECTABLE_IFACE_TYPES:
raise ValidationError(
{
"termination_a_id": "Cables cannot be terminated to {} interfaces".format(
self.termination_a.get_type_display()
)
}
)
if type_b == "interface" and self.termination_b.type in NONCONNECTABLE_IFACE_TYPES:
raise ValidationError(
{
"termination_b_id": "Cables cannot be terminated to {} interfaces".format(
self.termination_b.get_type_display()
)
}
)
# Check that termination types are compatible
if type_b not in COMPATIBLE_TERMINATION_TYPES.get(type_a):
raise ValidationError(
f"Incompatible termination types: {self.termination_a_type} and {self.termination_b_type}"
)
# A RearPort with multiple positions must be connected to a RearPort with an equal number of positions
for term_a, term_b in [
(self.termination_a, self.termination_b),
(self.termination_b, self.termination_a),
]:
if isinstance(term_a, RearPort) and term_a.positions > 1:
if not isinstance(term_b, RearPort):
raise ValidationError(
"Rear ports with multiple positions may only be connected to other rear ports"
)
elif term_a.positions != term_b.positions:
raise ValidationError(
f"{term_a} has {term_a.positions} position(s) but {term_b} has {term_b.positions}. "
f"Both terminations must have the same number of positions."
)
# A termination point cannot be connected to itself
if self.termination_a == self.termination_b:
raise ValidationError(f"Cannot connect {self.termination_a_type} to itself")
# A front port cannot be connected to its corresponding rear port
if (
type_a in ["frontport", "rearport"]
and type_b in ["frontport", "rearport"]
and (
getattr(self.termination_a, "rear_port", None) == self.termination_b
or getattr(self.termination_b, "rear_port", None) == self.termination_a
)
):
raise ValidationError(
"A front port cannot be connected to it corresponding rear port"
)
# Check for an existing Cable connected to either termination object
if self.termination_a.cable not in (None, self):
raise ValidationError(
"{} already has a cable attached (#{})".format(
self.termination_a, self.termination_a.cable_id
)
)
if self.termination_b.cable not in (None, self):
raise ValidationError(
"{} already has a cable attached (#{})".format(
self.termination_b, self.termination_b.cable_id
)
)
# Validate length and length_unit
if self.length is not None and not self.length_unit:
raise ValidationError("Must specify a unit when setting a cable length")
elif self.length is None:
self.length_unit = ""
|
def clean(self):
# Validate that termination A exists
if not hasattr(self, "termination_a_type"):
raise ValidationError("Termination A type has not been specified")
try:
self.termination_a_type.model_class().objects.get(pk=self.termination_a_id)
except ObjectDoesNotExist:
raise ValidationError(
{"termination_a": "Invalid ID for type {}".format(self.termination_a_type)}
)
# Validate that termination B exists
if not hasattr(self, "termination_b_type"):
raise ValidationError("Termination B type has not been specified")
try:
self.termination_b_type.model_class().objects.get(pk=self.termination_b_id)
except ObjectDoesNotExist:
raise ValidationError(
{"termination_b": "Invalid ID for type {}".format(self.termination_b_type)}
)
# If editing an existing Cable instance, check that neither termination has been modified.
if self.pk:
err_msg = "Cable termination points may not be modified. Delete and recreate the cable instead."
if (
self.termination_a_type != self._orig_termination_a_type
or self.termination_a_id != self._orig_termination_a_id
):
raise ValidationError({"termination_a": err_msg})
if (
self.termination_b_type != self._orig_termination_b_type
or self.termination_b_id != self._orig_termination_b_id
):
raise ValidationError({"termination_b": err_msg})
type_a = self.termination_a_type.model
type_b = self.termination_b_type.model
# Validate interface types
if type_a == "interface" and self.termination_a.type in NONCONNECTABLE_IFACE_TYPES:
raise ValidationError(
{
"termination_a_id": "Cables cannot be terminated to {} interfaces".format(
self.termination_a.get_type_display()
)
}
)
if type_b == "interface" and self.termination_b.type in NONCONNECTABLE_IFACE_TYPES:
raise ValidationError(
{
"termination_b_id": "Cables cannot be terminated to {} interfaces".format(
self.termination_b.get_type_display()
)
}
)
# Check that termination types are compatible
if type_b not in COMPATIBLE_TERMINATION_TYPES.get(type_a):
raise ValidationError(
"Incompatible termination types: {} and {}".format(
self.termination_a_type, self.termination_b_type
)
)
# A RearPort with multiple positions must be connected to a component with an equal number of positions
if isinstance(self.termination_a, RearPort) and isinstance(
self.termination_b, RearPort
):
if self.termination_a.positions != self.termination_b.positions:
raise ValidationError(
"{} has {} positions and {} has {}. Both terminations must have the same number of positions.".format(
self.termination_a,
self.termination_a.positions,
self.termination_b,
self.termination_b.positions,
)
)
# A termination point cannot be connected to itself
if self.termination_a == self.termination_b:
raise ValidationError(
"Cannot connect {} to itself".format(self.termination_a_type)
)
# A front port cannot be connected to its corresponding rear port
if (
type_a in ["frontport", "rearport"]
and type_b in ["frontport", "rearport"]
and (
getattr(self.termination_a, "rear_port", None) == self.termination_b
or getattr(self.termination_b, "rear_port", None) == self.termination_a
)
):
raise ValidationError(
"A front port cannot be connected to it corresponding rear port"
)
# Check for an existing Cable connected to either termination object
if self.termination_a.cable not in (None, self):
raise ValidationError(
"{} already has a cable attached (#{})".format(
self.termination_a, self.termination_a.cable_id
)
)
if self.termination_b.cable not in (None, self):
raise ValidationError(
"{} already has a cable attached (#{})".format(
self.termination_b, self.termination_b.cable_id
)
)
# Validate length and length_unit
if self.length is not None and not self.length_unit:
raise ValidationError("Must specify a unit when setting a cable length")
elif self.length is None:
self.length_unit = ""
|
https://github.com/netbox-community/netbox/issues/4604
|
Traceback (most recent call last):
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
The above exception (duplicate key value violates unique constraint "dcim_interface__connected_interface_id_key"
DETAIL: Key (_connected_interface_id)=(18403) already exists.
) was the direct cause of the following exception:
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/views/generic/base.py", line 71, in view
return self.dispatch(request, *args, **kwargs)
File "/home/jstretch/netbox/netbox/dcim/views.py", line 2101, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/contrib/auth/mixins.py", line 85, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/views/generic/base.py", line 97, in dispatch
return handler(request, *args, **kwargs)
File "/home/jstretch/netbox/netbox/dcim/views.py", line 2129, in post
obj = form.save()
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/forms/models.py", line 459, in save
self.instance.save()
File "/home/jstretch/netbox/netbox/dcim/models/__init__.py", line 2244, in save
super().save(*args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 746, in save
force_update=force_update, update_fields=update_fields)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 795, in save_base
update_fields=update_fields, raw=raw, using=using,
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/dispatch/dispatcher.py", line 175, in send
for receiver in self._live_receivers(sender)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/dispatch/dispatcher.py", line 175, in <listcomp>
for receiver in self._live_receivers(sender)
File "/home/jstretch/netbox/netbox/dcim/signals.py", line 70, in update_connected_endpoints
endpoint_a.save()
File "/home/jstretch/netbox/netbox/dcim/models/device_components.py", line 771, in save
return super().save(*args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 746, in save
force_update=force_update, update_fields=update_fields)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 784, in save_base
force_update, using, update_fields,
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 865, in _save_table
forced_update)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/base.py", line 917, in _do_update
return filtered._update(values) > 0
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/query.py", line 771, in _update
return query.get_compiler(self.db).execute_sql(CURSOR)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1499, in execute_sql
cursor = super().execute_sql(result_type)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1151, in execute_sql
cursor.execute(sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/debug_toolbar/panels/sql/tracking.py", line 198, in execute
return self._record(self.cursor.execute, sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/debug_toolbar/panels/sql/tracking.py", line 133, in _record
return method(sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/cacheops/transaction.py", line 99, in execute
result = self._no_monkey.execute(self, sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 68, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 77, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
Exception Type: IntegrityError at /dcim/front-ports/486/connect/interface/
Exception Value: duplicate key value violates unique constraint "dcim_interface__connected_interface_id_key"
DETAIL: Key (_connected_interface_id)=(18403) already exists.
|
IntegrityError
|
def validate(self, data):
# Validate uniqueness of (group, facility_id) since we omitted the automatically-created validator from Meta.
if data.get("facility_id", None):
validator = UniqueTogetherValidator(
queryset=Rack.objects.all(), fields=("group", "facility_id")
)
validator(data, self)
# Enforce model validation
super().validate(data)
return data
|
def validate(self, data):
# Validate uniqueness of (group, facility_id) since we omitted the automatically-created validator from Meta.
if data.get("facility_id", None):
validator = UniqueTogetherValidator(
queryset=Rack.objects.all(), fields=("group", "facility_id")
)
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
|
https://github.com/netbox-community/netbox/issues/4496
|
AttributeError at /api/ipam/vlans/
'UniqueTogetherValidator' object has no attribute 'set_context'
Request Method: POST
Request URL: http://10.0.192.20/api/ipam/vlans/
Django Version: 3.0.5
Python Executable: /opt/netbox/venv/bin/python3
Python Version: 3.7.3
Python Path: ['/opt/netbox/venv/bin', '/opt/netbox/netbox', '/', '/usr/lib/python37.zip', '/usr/lib/python3.7', '/usr/lib/python3.7/lib-dynload', '/opt/netbox/venv/lib/python3.7/site-packages']
Server time: Wed, 15 Apr 2020 10:01:42 +0000
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'taggit_serializer',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq',
'drf_yasg']
Installed Middleware:
['debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware']
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/viewsets.py", line 114, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/api.py", line 330, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 505, in dispatch
response = self.handle_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 465, in handle_exception
self.raise_uncaught_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 476, in raise_uncaught_exception
raise exc
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 502, in dispatch
response = handler(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/mixins.py", line 18, in create
serializer.is_valid(raise_exception=True)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 234, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 436, in run_validation
value = self.validate(value)
File "/opt/netbox/netbox/ipam/api/serializers.py", line 125, in validate
validator.set_context(self)
Exception Type: AttributeError at /api/ipam/vlans/
Exception Value: 'UniqueTogetherValidator' object has no attribute 'set_context'
|
AttributeError
|
def validate(self, data):
# Validate uniqueness of (rack, position, face) since we omitted the automatically-created validator from Meta.
if data.get("rack") and data.get("position") and data.get("face"):
validator = UniqueTogetherValidator(
queryset=Device.objects.all(), fields=("rack", "position", "face")
)
validator(data, self)
# Enforce model validation
super().validate(data)
return data
|
def validate(self, data):
# Validate uniqueness of (rack, position, face) since we omitted the automatically-created validator from Meta.
if data.get("rack") and data.get("position") and data.get("face"):
validator = UniqueTogetherValidator(
queryset=Device.objects.all(), fields=("rack", "position", "face")
)
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
|
https://github.com/netbox-community/netbox/issues/4496
|
AttributeError at /api/ipam/vlans/
'UniqueTogetherValidator' object has no attribute 'set_context'
Request Method: POST
Request URL: http://10.0.192.20/api/ipam/vlans/
Django Version: 3.0.5
Python Executable: /opt/netbox/venv/bin/python3
Python Version: 3.7.3
Python Path: ['/opt/netbox/venv/bin', '/opt/netbox/netbox', '/', '/usr/lib/python37.zip', '/usr/lib/python3.7', '/usr/lib/python3.7/lib-dynload', '/opt/netbox/venv/lib/python3.7/site-packages']
Server time: Wed, 15 Apr 2020 10:01:42 +0000
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'taggit_serializer',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq',
'drf_yasg']
Installed Middleware:
['debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware']
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/viewsets.py", line 114, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/api.py", line 330, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 505, in dispatch
response = self.handle_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 465, in handle_exception
self.raise_uncaught_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 476, in raise_uncaught_exception
raise exc
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 502, in dispatch
response = handler(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/mixins.py", line 18, in create
serializer.is_valid(raise_exception=True)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 234, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 436, in run_validation
value = self.validate(value)
File "/opt/netbox/netbox/ipam/api/serializers.py", line 125, in validate
validator.set_context(self)
Exception Type: AttributeError at /api/ipam/vlans/
Exception Value: 'UniqueTogetherValidator' object has no attribute 'set_context'
|
AttributeError
|
def validate(self, data):
# Validate uniqueness of name and slug if a site has been assigned.
if data.get("site", None):
for field in ["name", "slug"]:
validator = UniqueTogetherValidator(
queryset=VLANGroup.objects.all(), fields=("site", field)
)
validator(data, self)
# Enforce model validation
super().validate(data)
return data
|
def validate(self, data):
# Validate uniqueness of name and slug if a site has been assigned.
if data.get("site", None):
for field in ["name", "slug"]:
validator = UniqueTogetherValidator(
queryset=VLANGroup.objects.all(), fields=("site", field)
)
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
|
https://github.com/netbox-community/netbox/issues/4496
|
AttributeError at /api/ipam/vlans/
'UniqueTogetherValidator' object has no attribute 'set_context'
Request Method: POST
Request URL: http://10.0.192.20/api/ipam/vlans/
Django Version: 3.0.5
Python Executable: /opt/netbox/venv/bin/python3
Python Version: 3.7.3
Python Path: ['/opt/netbox/venv/bin', '/opt/netbox/netbox', '/', '/usr/lib/python37.zip', '/usr/lib/python3.7', '/usr/lib/python3.7/lib-dynload', '/opt/netbox/venv/lib/python3.7/site-packages']
Server time: Wed, 15 Apr 2020 10:01:42 +0000
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'taggit_serializer',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq',
'drf_yasg']
Installed Middleware:
['debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware']
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/viewsets.py", line 114, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/api.py", line 330, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 505, in dispatch
response = self.handle_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 465, in handle_exception
self.raise_uncaught_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 476, in raise_uncaught_exception
raise exc
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 502, in dispatch
response = handler(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/mixins.py", line 18, in create
serializer.is_valid(raise_exception=True)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 234, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 436, in run_validation
value = self.validate(value)
File "/opt/netbox/netbox/ipam/api/serializers.py", line 125, in validate
validator.set_context(self)
Exception Type: AttributeError at /api/ipam/vlans/
Exception Value: 'UniqueTogetherValidator' object has no attribute 'set_context'
|
AttributeError
|
def validate(self, data):
# Validate uniqueness of vid and name if a group has been assigned.
if data.get("group", None):
for field in ["vid", "name"]:
validator = UniqueTogetherValidator(
queryset=VLAN.objects.all(), fields=("group", field)
)
validator(data, self)
# Enforce model validation
super().validate(data)
return data
|
def validate(self, data):
# Validate uniqueness of vid and name if a group has been assigned.
if data.get("group", None):
for field in ["vid", "name"]:
validator = UniqueTogetherValidator(
queryset=VLAN.objects.all(), fields=("group", field)
)
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
|
https://github.com/netbox-community/netbox/issues/4496
|
AttributeError at /api/ipam/vlans/
'UniqueTogetherValidator' object has no attribute 'set_context'
Request Method: POST
Request URL: http://10.0.192.20/api/ipam/vlans/
Django Version: 3.0.5
Python Executable: /opt/netbox/venv/bin/python3
Python Version: 3.7.3
Python Path: ['/opt/netbox/venv/bin', '/opt/netbox/netbox', '/', '/usr/lib/python37.zip', '/usr/lib/python3.7', '/usr/lib/python3.7/lib-dynload', '/opt/netbox/venv/lib/python3.7/site-packages']
Server time: Wed, 15 Apr 2020 10:01:42 +0000
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'taggit_serializer',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq',
'drf_yasg']
Installed Middleware:
['debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware']
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/viewsets.py", line 114, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/api.py", line 330, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 505, in dispatch
response = self.handle_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 465, in handle_exception
self.raise_uncaught_exception(exc)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 476, in raise_uncaught_exception
raise exc
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/views.py", line 502, in dispatch
response = handler(request, *args, **kwargs)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/mixins.py", line 18, in create
serializer.is_valid(raise_exception=True)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 234, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/opt/netbox/venv/lib/python3.7/site-packages/rest_framework/serializers.py", line 436, in run_validation
value = self.validate(value)
File "/opt/netbox/netbox/ipam/api/serializers.py", line 125, in validate
validator.set_context(self)
Exception Type: AttributeError at /api/ipam/vlans/
Exception Value: 'UniqueTogetherValidator' object has no attribute 'set_context'
|
AttributeError
|
def get_bound_field(self, form, field_name):
bound_field = BoundField(form, self, field_name)
# Modify the QuerySet of the field before we return it. Limit choices to any data already bound: Options
# will be populated on-demand via the APISelect widget.
data = bound_field.data or bound_field.initial
if data:
filter = self.filter(
field_name=self.to_field_name or "pk", queryset=self.queryset
)
self.queryset = filter.filter(self.queryset, data)
else:
self.queryset = self.queryset.none()
return bound_field
|
def get_bound_field(self, form, field_name):
bound_field = BoundField(form, self, field_name)
# Modify the QuerySet of the field before we return it. Limit choices to any data already bound: Options
# will be populated on-demand via the APISelect widget.
field_name = "{}{}".format(self.to_field_name or "pk", self.field_modifier)
if bound_field.data:
self.queryset = self.queryset.filter(
**{field_name: self.prepare_value(bound_field.data)}
)
elif bound_field.initial:
self.queryset = self.queryset.filter(
**{field_name: self.prepare_value(bound_field.initial)}
)
else:
self.queryset = self.queryset.none()
return bound_field
|
https://github.com/netbox-community/netbox/issues/4240
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.6/site-packages/django/views/generic/base.py", line 71, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/contrib/auth/mixins.py", line 85, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/views/generic/base.py", line 97, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox-dev/netbox/utilities/views.py", line 187, in get
return render(request, self.template_name, context)
File "/usr/local/lib/python3.6/site-packages/django/shortcuts.py", line 36, in render
content = loader.render_to_string(template_name, context, request, using=using)
File "/usr/local/lib/python3.6/site-packages/django/template/loader.py", line 62, in render_to_string
return template.render(context, request)
File "/usr/local/lib/python3.6/site-packages/django/template/backends/django.py", line 61, in render
return self.template.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 171, in render
return self._render(context)
File "/usr/local/lib/python3.6/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/usr/local/lib/python3.6/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/usr/local/lib/python3.6/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/defaulttags.py", line 309, in render
return nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/loader_tags.py", line 188, in render
return template.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 173, in render
return self._render(context)
File "/usr/local/lib/python3.6/site-packages/django/test/utils.py", line 96, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/usr/local/lib/python3.6/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/usr/local/lib/python3.6/site-packages/django/template/defaulttags.py", line 165, in render
values = list(values)
File "/usr/local/lib/python3.6/site-packages/django/forms/forms.py", line 158, in __iter__
yield self[name]
File "/usr/local/lib/python3.6/site-packages/django/forms/forms.py", line 173, in __getitem__
self._bound_fields_cache[name] = field.get_bound_field(self, name)
File "/opt/netbox-dev/netbox/utilities/forms.py", line 576, in get_bound_field
self.queryset = self.queryset.filter(**{field_name: self.prepare_value(bound_field.data)})
File "/usr/local/lib/python3.6/site-packages/django/db/models/query.py", line 892, in filter
return self._filter_or_exclude(False, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/db/models/query.py", line 910, in _filter_or_exclude
clone.query.add_q(Q(*args, **kwargs))
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1290, in add_q
clause, _ = self._add_q(q_object, self.used_aliases)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1318, in _add_q
split_subq=split_subq, simple_col=simple_col,
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1251, in build_filter
condition = self.build_lookup(lookups, col, value)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1116, in build_lookup
lookup = lookup_class(lhs, rhs)
File "/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py", line 20, in __init__
self.rhs = self.get_prep_lookup()
File "/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py", line 210, in get_prep_lookup
rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
File "/usr/local/lib/python3.6/site-packages/django/db/models/fields/__init__.py", line 972, in get_prep_value
return int(value)
ValueError: invalid literal for int() with base 10: 'null'
|
ValueError
|
def post(self, request, **kwargs):
model = self.queryset.model
# If we are editing *all* objects in the queryset, replace the PK list with all matched objects.
if request.POST.get("_all") and self.filterset is not None:
pk_list = [
obj.pk for obj in self.filterset(request.GET, model.objects.only("pk")).qs
]
else:
pk_list = request.POST.getlist("pk")
if "_apply" in request.POST:
form = self.form(model, request.POST)
if form.is_valid():
custom_fields = form.custom_fields if hasattr(form, "custom_fields") else []
standard_fields = [
field for field in form.fields if field not in custom_fields + ["pk"]
]
nullified_fields = request.POST.getlist("_nullify")
try:
with transaction.atomic():
updated_count = 0
for obj in model.objects.filter(pk__in=form.cleaned_data["pk"]):
# Update standard fields. If a field is listed in _nullify, delete its value.
for name in standard_fields:
try:
model_field = model._meta.get_field(name)
except FieldDoesNotExist:
# This form field is used to modify a field rather than set its value directly
model_field = None
# Handle nullification
if (
name in form.nullable_fields
and name in nullified_fields
):
if isinstance(model_field, ManyToManyField):
getattr(obj, name).set([])
else:
setattr(obj, name, None if model_field.null else "")
# ManyToManyFields
elif isinstance(model_field, ManyToManyField):
getattr(obj, name).set(form.cleaned_data[name])
# Normal fields
elif form.cleaned_data[name] not in (None, ""):
setattr(obj, name, form.cleaned_data[name])
obj.full_clean()
obj.save()
# Update custom fields
obj_type = ContentType.objects.get_for_model(model)
for name in custom_fields:
field = form.fields[name].model
if (
name in form.nullable_fields
and name in nullified_fields
):
CustomFieldValue.objects.filter(
field=field, obj_type=obj_type, obj_id=obj.pk
).delete()
elif form.cleaned_data[name] not in [None, ""]:
try:
cfv = CustomFieldValue.objects.get(
field=field, obj_type=obj_type, obj_id=obj.pk
)
except CustomFieldValue.DoesNotExist:
cfv = CustomFieldValue(
field=field, obj_type=obj_type, obj_id=obj.pk
)
cfv.value = form.cleaned_data[name]
cfv.save()
# Add/remove tags
if form.cleaned_data.get("add_tags", None):
obj.tags.add(*form.cleaned_data["add_tags"])
if form.cleaned_data.get("remove_tags", None):
obj.tags.remove(*form.cleaned_data["remove_tags"])
updated_count += 1
if updated_count:
msg = "Updated {} {}".format(
updated_count, model._meta.verbose_name_plural
)
messages.success(self.request, msg)
return redirect(self.get_return_url(request))
except ValidationError as e:
messages.error(self.request, "{} failed validation: {}".format(obj, e))
else:
form = self.form(model, initial={"pk": pk_list})
# Retrieve objects being edited
table = self.table(self.queryset.filter(pk__in=pk_list), orderable=False)
if not table.rows:
messages.warning(
request, "No {} were selected.".format(model._meta.verbose_name_plural)
)
return redirect(self.get_return_url(request))
return render(
request,
self.template_name,
{
"form": form,
"table": table,
"obj_type_plural": model._meta.verbose_name_plural,
"return_url": self.get_return_url(request),
},
)
|
def post(self, request, **kwargs):
model = self.queryset.model
# Create a mutable copy of the POST data
post_data = request.POST.copy()
# If we are editing *all* objects in the queryset, replace the PK list with all matched objects.
if post_data.get("_all") and self.filterset is not None:
post_data["pk"] = [
obj.pk for obj in self.filterset(request.GET, model.objects.only("pk")).qs
]
if "_apply" in request.POST:
form = self.form(model, request.POST)
if form.is_valid():
custom_fields = form.custom_fields if hasattr(form, "custom_fields") else []
standard_fields = [
field for field in form.fields if field not in custom_fields + ["pk"]
]
nullified_fields = request.POST.getlist("_nullify")
try:
with transaction.atomic():
updated_count = 0
for obj in model.objects.filter(pk__in=form.cleaned_data["pk"]):
# Update standard fields. If a field is listed in _nullify, delete its value.
for name in standard_fields:
try:
model_field = model._meta.get_field(name)
except FieldDoesNotExist:
# This form field is used to modify a field rather than set its value directly
model_field = None
# Handle nullification
if (
name in form.nullable_fields
and name in nullified_fields
):
if isinstance(model_field, ManyToManyField):
getattr(obj, name).set([])
else:
setattr(obj, name, None if model_field.null else "")
# ManyToManyFields
elif isinstance(model_field, ManyToManyField):
getattr(obj, name).set(form.cleaned_data[name])
# Normal fields
elif form.cleaned_data[name] not in (None, ""):
setattr(obj, name, form.cleaned_data[name])
obj.full_clean()
obj.save()
# Update custom fields
obj_type = ContentType.objects.get_for_model(model)
for name in custom_fields:
field = form.fields[name].model
if (
name in form.nullable_fields
and name in nullified_fields
):
CustomFieldValue.objects.filter(
field=field, obj_type=obj_type, obj_id=obj.pk
).delete()
elif form.cleaned_data[name] not in [None, ""]:
try:
cfv = CustomFieldValue.objects.get(
field=field, obj_type=obj_type, obj_id=obj.pk
)
except CustomFieldValue.DoesNotExist:
cfv = CustomFieldValue(
field=field, obj_type=obj_type, obj_id=obj.pk
)
cfv.value = form.cleaned_data[name]
cfv.save()
# Add/remove tags
if form.cleaned_data.get("add_tags", None):
obj.tags.add(*form.cleaned_data["add_tags"])
if form.cleaned_data.get("remove_tags", None):
obj.tags.remove(*form.cleaned_data["remove_tags"])
updated_count += 1
if updated_count:
msg = "Updated {} {}".format(
updated_count, model._meta.verbose_name_plural
)
messages.success(self.request, msg)
return redirect(self.get_return_url(request))
except ValidationError as e:
messages.error(self.request, "{} failed validation: {}".format(obj, e))
else:
# Pass the PK list as initial data to avoid binding the form
initial_data = querydict_to_dict(post_data)
form = self.form(model, initial=initial_data)
# Retrieve objects being edited
table = self.table(
self.queryset.filter(pk__in=post_data.getlist("pk")), orderable=False
)
if not table.rows:
messages.warning(
request, "No {} were selected.".format(model._meta.verbose_name_plural)
)
return redirect(self.get_return_url(request))
return render(
request,
self.template_name,
{
"form": form,
"table": table,
"obj_type_plural": model._meta.verbose_name_plural,
"return_url": self.get_return_url(request),
},
)
|
https://github.com/netbox-community/netbox/issues/4239
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.6/site-packages/django/views/generic/base.py", line 71, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/contrib/auth/mixins.py", line 85, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/views/generic/base.py", line 97, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox-dev/netbox/utilities/views.py", line 723, in post
table = self.table(self.queryset.filter(pk__in=post_data.getlist('pk')), orderable=False)
File "/usr/local/lib/python3.6/site-packages/django/db/models/query.py", line 892, in filter
return self._filter_or_exclude(False, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/db/models/query.py", line 910, in _filter_or_exclude
clone.query.add_q(Q(*args, **kwargs))
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1290, in add_q
clause, _ = self._add_q(q_object, self.used_aliases)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1318, in _add_q
split_subq=split_subq, simple_col=simple_col,
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1251, in build_filter
condition = self.build_lookup(lookups, col, value)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1116, in build_lookup
lookup = lookup_class(lhs, rhs)
File "/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py", line 20, in __init__
self.rhs = self.get_prep_lookup()
File "/usr/local/lib/python3.6/site-packages/django/db/models/lookups.py", line 210, in get_prep_lookup
rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
File "/usr/local/lib/python3.6/site-packages/django/db/models/fields/__init__.py", line 972, in get_prep_value
return int(value)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'
|
TypeError
|
def process_webhook(webhook, data, model_name, event, timestamp, username, request_id):
"""
Make a POST request to the defined Webhook
"""
payload = {
"event": dict(ObjectChangeActionChoices)[event].lower(),
"timestamp": timestamp,
"model": model_name,
"username": username,
"request_id": request_id,
"data": data,
}
headers = {
"Content-Type": webhook.get_http_content_type_display(),
}
if webhook.additional_headers:
headers.update(webhook.additional_headers)
params = {"method": "POST", "url": webhook.payload_url, "headers": headers}
if webhook.http_content_type == WebhookContentTypeChoices.CONTENTTYPE_JSON:
params.update({"data": json.dumps(payload, cls=JSONEncoder)})
elif webhook.http_content_type == WebhookContentTypeChoices.CONTENTTYPE_FORMDATA:
params.update({"data": payload})
prepared_request = requests.Request(**params).prepare()
if webhook.secret != "":
# Sign the request with a hash of the secret key and its content.
hmac_prep = hmac.new(
key=webhook.secret.encode("utf8"),
msg=prepared_request.body.encode("utf8"),
digestmod=hashlib.sha512,
)
prepared_request.headers["X-Hook-Signature"] = hmac_prep.hexdigest()
with requests.Session() as session:
session.verify = webhook.ssl_verification
if webhook.ca_file_path:
session.verify = webhook.ca_file_path
response = session.send(prepared_request)
if response.status_code >= 200 and response.status_code <= 299:
return "Status {} returned, webhook successfully processed.".format(
response.status_code
)
else:
raise requests.exceptions.RequestException(
"Status {} returned with content '{}', webhook FAILED to process.".format(
response.status_code, response.content
)
)
|
def process_webhook(webhook, data, model_name, event, timestamp, username, request_id):
"""
Make a POST request to the defined Webhook
"""
payload = {
"event": dict(ObjectChangeActionChoices)[event].lower(),
"timestamp": timestamp,
"model": model_name,
"username": username,
"request_id": request_id,
"data": data,
}
headers = {
"Content-Type": webhook.get_http_content_type_display(),
}
if webhook.additional_headers:
headers.update(webhook.additional_headers)
params = {"method": "POST", "url": webhook.payload_url, "headers": headers}
if webhook.http_content_type == WEBHOOK_CT_JSON:
params.update({"data": json.dumps(payload, cls=JSONEncoder)})
elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:
params.update({"data": payload})
prepared_request = requests.Request(**params).prepare()
if webhook.secret != "":
# Sign the request with a hash of the secret key and its content.
hmac_prep = hmac.new(
key=webhook.secret.encode("utf8"),
msg=prepared_request.body.encode("utf8"),
digestmod=hashlib.sha512,
)
prepared_request.headers["X-Hook-Signature"] = hmac_prep.hexdigest()
with requests.Session() as session:
session.verify = webhook.ssl_verification
if webhook.ca_file_path:
session.verify = webhook.ca_file_path
response = session.send(prepared_request)
if response.status_code >= 200 and response.status_code <= 299:
return "Status {} returned, webhook successfully processed.".format(
response.status_code
)
else:
raise requests.exceptions.RequestException(
"Status {} returned with content '{}', webhook FAILED to process.".format(
response.status_code, response.content
)
)
|
https://github.com/netbox-community/netbox/issues/3951
|
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/rq/worker.py", line 812, in perform_job
rv = job.perform()
File "/usr/local/lib/python3.5/dist-packages/rq/job.py", line 588, in perform
self._result = self._execute()
File "/usr/local/lib/python3.5/dist-packages/rq/job.py", line 594, in _execute
return self.func(*self.args, **self.kwargs)
File "/opt/netbox/netbox/extras/webhooks_worker.py", line 49, in process_webhook
msg=prepared_request.body.encode('utf8'),
AttributeError: 'NoneType' object has no attribute 'encode'
|
AttributeError
|
def __str__(self):
try:
device = self.device
except Device.DoesNotExist:
device = None
if self.role and device and self.name:
return "{} for {} ({})".format(self.role, self.device, self.name)
# Return role and device if no name is set
if self.role and device:
return "{} for {}".format(self.role, self.device)
return "Secret"
|
def __str__(self):
if self.role and self.device and self.name:
return "{} for {} ({})".format(self.role, self.device, self.name)
# Return role and device if no name is set
if self.role and self.device:
return "{} for {}".format(self.role, self.device)
return "Secret"
|
https://github.com/netbox-community/netbox/issues/3927
|
Internal Server Error: /dcim/devices/1611/delete/
Traceback (most recent call last):
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/fields/related_descriptors.py", line 164, in __get__
rel_obj = self.field.get_cached_value(instance)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/fields/mixins.py", line 13, in get_cached_value
return instance._state.fields_cache[cache_name]
KeyError: 'device'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/home/jstretch/netbox/netbox/extras/middleware.py", line 110, in __call__
objectchange = instance.to_objectchange(action)
File "/home/jstretch/netbox/netbox/utilities/models.py", line 33, in to_objectchange
object_repr=str(self),
File "/home/jstretch/netbox/netbox/secrets/models.py", line 362, in __str__
if self.role and self.device and self.name:
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/fields/related_descriptors.py", line 178, in __get__
rel_obj = self.get_object(instance)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/fields/related_descriptors.py", line 145, in get_object
return qs.get(self.field.get_reverse_related_filter(instance))
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/cacheops/query.py", line 356, in get
return qs._no_monkey.get(qs, *args, **kwargs)
File "/home/jstretch/.virtualenvs/netbox/lib/python3.6/site-packages/django/db/models/query.py", line 408, in get
self.model._meta.object_name
dcim.models.Device.DoesNotExist: Device matching query does not exist.
|
KeyError
|
def assign_virtualchassis_master(instance, created, **kwargs):
"""
When a VirtualChassis is created, automatically assign its master device to the VC.
"""
# Default to 1 but don't overwrite an existing position (see #2087)
if instance.master.vc_position is not None:
vc_position = instance.master.vc_position
else:
vc_position = 1
if created:
Device.objects.filter(pk=instance.master.pk).update(
virtual_chassis=instance, vc_position=vc_position
)
|
def assign_virtualchassis_master(instance, created, **kwargs):
"""
When a VirtualChassis is created, automatically assign its master device to the VC.
"""
if created:
Device.objects.filter(pk=instance.master.pk).update(
virtual_chassis=instance, vc_position=1
)
|
https://github.com/netbox-community/netbox/issues/2087
|
[user@netboxserver:netbox] python manage.py dumpdata > ~/VCtest.json
[user@netboxserver:netbox] python manage.py flush --noinput
[user@netboxserver:netbox] python manage.py loaddata ~/VCtest.json
Traceback (most recent call last):
File "/var/netbox_env/lib/python3.6/site-packages/django/db/backends/utils.py", line 64, in execute
return self.cursor.execute(sql, params)
psycopg2.IntegrityError: duplicate key value violates unique constraint "dcim_device_virtual_chassis_id_vc_position_efea7133_uniq"
DETAIL: Key (virtual_chassis_id, vc_position)=(1, 1) already exists.
|
psycopg2.IntegrityError
|
def deserialize_value(self, serialized_value):
"""
Convert a string into the object it represents depending on the type of field
"""
if serialized_value == "":
return None
if self.type == CF_TYPE_INTEGER:
return int(serialized_value)
if self.type == CF_TYPE_BOOLEAN:
return bool(int(serialized_value))
if self.type == CF_TYPE_DATE:
# Read date as YYYY-MM-DD
return date(*[int(n) for n in serialized_value.split("-")])
if self.type == CF_TYPE_SELECT:
return self.choices.get(pk=int(serialized_value))
return serialized_value
|
def deserialize_value(self, serialized_value):
"""
Convert a string into the object it represents depending on the type of field
"""
if serialized_value is "":
return None
if self.type == CF_TYPE_INTEGER:
return int(serialized_value)
if self.type == CF_TYPE_BOOLEAN:
return bool(int(serialized_value))
if self.type == CF_TYPE_DATE:
# Read date as YYYY-MM-DD
return date(*[int(n) for n in serialized_value.split("-")])
if self.type == CF_TYPE_SELECT:
return self.choices.get(pk=int(serialized_value))
return serialized_value
|
https://github.com/netbox-community/netbox/issues/1980
|
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/django/contrib/auth/mixins.py", line 92, in dispatch
return super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/django/views/generic/base.py", line 88, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox/netbox/utilities/views.py", line 196, in post
obj = form.save()
File "/opt/netbox/netbox/extras/forms.py", line 123, in save
self._save_custom_fields()
File "/opt/netbox/netbox/extras/forms.py", line 116, in _save_custom_fields
cfv.save()
File "/opt/netbox/netbox/extras/models.py", line 169, in save
if self.pk and self.value is None:
File "/opt/netbox/netbox/extras/models.py", line 161, in value
return self.field.deserialize_value(self.serialized_value)
File "/opt/netbox/netbox/extras/models.py", line 140, in deserialize_value
return self.choices.get(pk=int(serialized_value))
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
def napalm(self, request, pk):
"""
Execute a NAPALM method on a Device
"""
device = get_object_or_404(Device, pk=pk)
if not device.primary_ip:
raise ServiceUnavailable(
"This device does not have a primary IP address configured."
)
if device.platform is None:
raise ServiceUnavailable("No platform is configured for this device.")
if not device.platform.napalm_driver:
raise ServiceUnavailable(
"No NAPALM driver is configured for this device's platform ().".format(
device.platform
)
)
# Check that NAPALM is installed
try:
import napalm
except ImportError:
raise ServiceUnavailable(
"NAPALM is not installed. Please see the documentation for instructions."
)
# TODO: Remove support for NAPALM < 2.0
try:
from napalm.base.exceptions import ConnectAuthError, ModuleImportError
except ImportError:
from napalm_base.exceptions import ConnectAuthError, ModuleImportError
# Validate the configured driver
try:
driver = napalm.get_network_driver(device.platform.napalm_driver)
except ModuleImportError:
raise ServiceUnavailable(
"NAPALM driver for platform {} not found: {}.".format(
device.platform, device.platform.napalm_driver
)
)
# Verify user permission
if not request.user.has_perm("dcim.napalm_read"):
return HttpResponseForbidden()
# Validate requested NAPALM methods
napalm_methods = request.GET.getlist("method")
for method in napalm_methods:
if not hasattr(driver, method):
return HttpResponseBadRequest("Unknown NAPALM method: {}".format(method))
elif not method.startswith("get_"):
return HttpResponseBadRequest(
"Unsupported NAPALM method: {}".format(method)
)
# Connect to the device and execute the requested methods
# TODO: Improve error handling
response = OrderedDict([(m, None) for m in napalm_methods])
ip_address = str(device.primary_ip.address.ip)
d = driver(
hostname=ip_address,
username=settings.NAPALM_USERNAME,
password=settings.NAPALM_PASSWORD,
timeout=settings.NAPALM_TIMEOUT,
optional_args=settings.NAPALM_ARGS,
)
try:
d.open()
for method in napalm_methods:
response[method] = getattr(d, method)()
except Exception as e:
raise ServiceUnavailable(
"Error connecting to the device at {}: {}".format(ip_address, e)
)
d.close()
return Response(response)
|
def napalm(self, request, pk):
"""
Execute a NAPALM method on a Device
"""
device = get_object_or_404(Device, pk=pk)
if not device.primary_ip:
raise ServiceUnavailable(
"This device does not have a primary IP address configured."
)
if device.platform is None:
raise ServiceUnavailable("No platform is configured for this device.")
if not device.platform.napalm_driver:
raise ServiceUnavailable(
"No NAPALM driver is configured for this device's platform ().".format(
device.platform
)
)
# Check that NAPALM is installed and verify the configured driver
try:
import napalm
from napalm_base.exceptions import ConnectAuthError, ModuleImportError
except ImportError:
raise ServiceUnavailable(
"NAPALM is not installed. Please see the documentation for instructions."
)
try:
driver = napalm.get_network_driver(device.platform.napalm_driver)
except ModuleImportError:
raise ServiceUnavailable(
"NAPALM driver for platform {} not found: {}.".format(
device.platform, device.platform.napalm_driver
)
)
# Verify user permission
if not request.user.has_perm("dcim.napalm_read"):
return HttpResponseForbidden()
# Validate requested NAPALM methods
napalm_methods = request.GET.getlist("method")
for method in napalm_methods:
if not hasattr(driver, method):
return HttpResponseBadRequest("Unknown NAPALM method: {}".format(method))
elif not method.startswith("get_"):
return HttpResponseBadRequest(
"Unsupported NAPALM method: {}".format(method)
)
# Connect to the device and execute the requested methods
# TODO: Improve error handling
response = OrderedDict([(m, None) for m in napalm_methods])
ip_address = str(device.primary_ip.address.ip)
d = driver(
hostname=ip_address,
username=settings.NAPALM_USERNAME,
password=settings.NAPALM_PASSWORD,
timeout=settings.NAPALM_TIMEOUT,
optional_args=settings.NAPALM_ARGS,
)
try:
d.open()
for method in napalm_methods:
response[method] = getattr(d, method)()
except Exception as e:
raise ServiceUnavailable(
"Error connecting to the device at {}: {}".format(ip_address, e)
)
d.close()
return Response(response)
|
https://github.com/netbox-community/netbox/issues/1696
|
/opt/netbox-2.2.4/netbox# ./manage.py nbshell
NetBox interactive shell
Python 2.7.9 | Django 1.11.7 | NetBox 2.2.4
lsmodels() will show available models. Use help(<model>) for more info.
import napalm
from napalm_base.exceptions import ConnectAuthError, ModuleImportError
Traceback (most recent call last):
File "<console>", line 1, in <module>
ImportError: No module named napalm_base.exceptions
|
ImportError
|
def get_return_url(self, request, imageattachment):
return imageattachment.parent.get_absolute_url()
|
def get_return_url(self, request, imageattachment):
return imageattachment.obj.get_absolute_url()
|
https://github.com/netbox-community/netbox/issues/1113
|
Internal Server Error: /extras/image-attachments/1/delete/
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/django/contrib/auth/mixins.py", line 92, in dispatch
return super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/django/views/generic/base.py", line 88, in dispatch
return handler(request, *args, **kwargs)
File "/opt/netbox/utilities/views.py", line 261, in get
'return_url': self.get_return_url(request, obj),
File "/opt/netbox/extras/views.py", line 30, in get_return_url
return imageattachment.obj.get_absolute_url()
AttributeError: 'ImageAttachment' object has no attribute 'obj'
|
AttributeError
|
def secret_add(request, pk):
# Retrieve device
device = get_object_or_404(Device, pk=pk)
secret = Secret(device=device)
session_key = get_session_key(request)
if request.method == "POST":
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# We need a valid session key in order to create a Secret
if session_key is None:
form.add_error(
None,
"No session key was provided with the request. Unable to encrypt secret data.",
)
# Create and encrypt the new Secret
else:
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data["plaintext"])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Added new secret: {}.".format(secret))
if "_addanother" in request.POST:
return redirect("dcim:device_addsecret", pk=device.pk)
else:
return redirect("secrets:secret", pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(
request,
"secrets/secret_edit.html",
{
"secret": secret,
"form": form,
"return_url": device.get_absolute_url(),
},
)
|
def secret_add(request, pk):
# Retrieve device
device = get_object_or_404(Device, pk=pk)
secret = Secret(device=device)
uk = UserKey.objects.get(user=request.user)
if request.method == "POST":
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# We need a valid session key in order to create a Secret
session_key = base64.b64decode(request.COOKIES.get("session_key", None))
if session_key is None:
form.add_error(
None,
"No session key was provided with the request. Unable to encrypt secret data.",
)
# Create and encrypt the new Secret
else:
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data["plaintext"])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Added new secret: {}.".format(secret))
if "_addanother" in request.POST:
return redirect("dcim:device_addsecret", pk=device.pk)
else:
return redirect("secrets:secret", pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(
request,
"secrets/secret_edit.html",
{
"secret": secret,
"form": form,
"return_url": device.get_absolute_url(),
},
)
|
https://github.com/netbox-community/netbox/issues/1049
|
Performing system checks...
System check identified no issues (0 silenced).
April 06, 2017 - 01:41:36
Django version 1.11, using settings 'netbox.settings'
Starting development server at http://0.0.0.0:8002/
Quit the server with CONTROL-C.
Internal Server Error: /dcim/devices/9/add-secret/
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python2.7/dist-packages/django/contrib/auth/decorators.py", line 23, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/opt/netbox-2.0-beta2/netbox/secrets/decorators.py", line 22, in wrapped_view
return view(request, *args, **kwargs)
File "/opt/netbox-2.0-beta2/netbox/secrets/views.py", line 83, in secret_add
session_key = base64.b64decode(request.COOKIES.get('session_key', None))
File "/usr/lib/python2.7/base64.py", line 73, in b64decode
return binascii.a2b_base64(s)
TypeError: must be string or buffer, not None
[06/Apr/2017 01:41:47] "POST /dcim/devices/9/add-secret/ HTTP/1.1" 500 85437
|
TypeError
|
def secret_edit(request, pk):
secret = get_object_or_404(Secret, pk=pk)
session_key = get_session_key(request)
if request.method == "POST":
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# Re-encrypt the Secret if a plaintext and session key have been provided.
if form.cleaned_data["plaintext"] and session_key is not None:
# Retrieve the master key using the provided session key
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
# Create and encrypt the new Secret
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data["plaintext"])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect("secrets:secret", pk=secret.pk)
else:
form.add_error(
None, "Invalid session key. Unable to encrypt secret data."
)
# We can't save the plaintext without a session key.
elif form.cleaned_data["plaintext"]:
form.add_error(
None,
"No session key was provided with the request. Unable to encrypt secret data.",
)
# If no new plaintext was specified, a session key is not needed.
else:
secret = form.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect("secrets:secret", pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(
request,
"secrets/secret_edit.html",
{
"secret": secret,
"form": form,
"return_url": reverse("secrets:secret", kwargs={"pk": secret.pk}),
},
)
|
def secret_edit(request, pk):
secret = get_object_or_404(Secret, pk=pk)
if request.method == "POST":
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# Re-encrypt the Secret if a plaintext and session key have been provided.
session_key = base64.b64decode(request.COOKIES.get("session_key", None))
if form.cleaned_data["plaintext"] and session_key is not None:
# Retrieve the master key using the provided session key
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
# Create and encrypt the new Secret
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data["plaintext"])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect("secrets:secret", pk=secret.pk)
else:
form.add_error(
None, "Invalid session key. Unable to encrypt secret data."
)
# We can't save the plaintext without a session key.
elif form.cleaned_data["plaintext"]:
form.add_error(
None,
"No session key was provided with the request. Unable to encrypt secret data.",
)
# If no new plaintext was specified, a session key is not needed.
else:
secret = form.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect("secrets:secret", pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(
request,
"secrets/secret_edit.html",
{
"secret": secret,
"form": form,
"return_url": reverse("secrets:secret", kwargs={"pk": secret.pk}),
},
)
|
https://github.com/netbox-community/netbox/issues/1049
|
Performing system checks...
System check identified no issues (0 silenced).
April 06, 2017 - 01:41:36
Django version 1.11, using settings 'netbox.settings'
Starting development server at http://0.0.0.0:8002/
Quit the server with CONTROL-C.
Internal Server Error: /dcim/devices/9/add-secret/
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python2.7/dist-packages/django/contrib/auth/decorators.py", line 23, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/opt/netbox-2.0-beta2/netbox/secrets/decorators.py", line 22, in wrapped_view
return view(request, *args, **kwargs)
File "/opt/netbox-2.0-beta2/netbox/secrets/views.py", line 83, in secret_add
session_key = base64.b64decode(request.COOKIES.get('session_key', None))
File "/usr/lib/python2.7/base64.py", line 73, in b64decode
return binascii.a2b_base64(s)
TypeError: must be string or buffer, not None
[06/Apr/2017 01:41:47] "POST /dcim/devices/9/add-secret/ HTTP/1.1" 500 85437
|
TypeError
|
def add_message_event(proto_message, span, message_event_type, message_id=1):
"""Adds a MessageEvent to the span based off of the given protobuf
message
"""
span.add_message_event(
time_event.MessageEvent(
datetime.utcnow(),
message_id,
type=message_event_type,
uncompressed_size_bytes=extract_byte_size(proto_message),
)
)
|
def add_message_event(proto_message, span, message_event_type, message_id=1):
"""Adds a MessageEvent to the span based off of the given protobuf
message
"""
span.add_message_event(
time_event.MessageEvent(
datetime.utcnow(),
message_id,
type=message_event_type,
uncompressed_size_bytes=proto_message.ByteSize(),
)
)
|
https://github.com/census-instrumentation/opencensus-python/issues/969
|
Traceback (most recent call last):
File "/redacted.py", line 167, in redacted
public_key = client.get_public_key(request={"name": key_name})
File "/usr/local/lib/python3.7/site-packages/google/cloud/kms_v1/services/key_management_service/client.py", line 1026, in get_public_key
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
File "/usr/local/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 145, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 286, in retry_wrapped_func
on_error=on_error,
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 184, in retry_target
return target()
File "/usr/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/grpc/_interceptor.py", line 221, in __call__
compression=compression)
File "/usr/local/lib/python3.7/site-packages/grpc/_interceptor.py", line 256, in _with_call
request)
File "/usr/local/lib/python3.7/site-packages/opencensus/ext/grpc/client_interceptor.py", line 166, in intercept_unary_unary
next(new_request))
File "/usr/local/lib/python3.7/site-packages/opencensus/ext/grpc/utils.py", line 33, in wrap_iter_with_message_events
message_id=message_id)
File "/usr/local/lib/python3.7/site-packages/opencensus/ext/grpc/utils.py", line 18, in add_message_event
uncompressed_size_bytes=proto_message.ByteSize()
File "/usr/local/lib/python3.7/site-packages/proto/message.py", line 525, in __getattr__
raise AttributeError(str(ex))
AttributeError: 'ByteSize'
|
AttributeError
|
def create_time_series_list(self, v_data, option_resource_type, metric_prefix):
"""Create the TimeSeries object based on the view data"""
time_series_list = []
aggregation_type = v_data.view.aggregation.aggregation_type
tag_agg = v_data.tag_value_aggregation_data_map
for tag_value, agg in tag_agg.items():
series = monitoring_v3.types.TimeSeries()
series.metric.type = namespaced_view_name(v_data.view.name, metric_prefix)
set_metric_labels(series, v_data.view, tag_value)
set_monitored_resource(series, option_resource_type)
point = series.points.add()
if aggregation_type is aggregation.Type.DISTRIBUTION:
dist_value = point.value.distribution_value
dist_value.count = agg.count_data
dist_value.mean = agg.mean_data
sum_of_sqd = agg.sum_of_sqd_deviations
dist_value.sum_of_squared_deviation = sum_of_sqd
# Uncomment this when stackdriver supports Range
# point.value.distribution_value.range.min = agg_data.min
# point.value.distribution_value.range.max = agg_data.max
bounds = dist_value.bucket_options.explicit_buckets.bounds
buckets = dist_value.bucket_counts
# Stackdriver expects a first bucket for samples in (-inf, 0),
# but we record positive samples only, and our first bucket is
# [0, first_bound).
bounds.extend([0])
buckets.extend([0])
bounds.extend(list(map(float, agg.bounds)))
buckets.extend(list(map(int, agg.counts_per_bucket)))
elif aggregation_type is aggregation.Type.COUNT:
point.value.int64_value = agg.count_data
elif aggregation_type is aggregation.Type.SUM:
if isinstance(v_data.view.measure, measure.MeasureInt):
# TODO: Add implementation of sum aggregation that does not
# store it's data as a float.
point.value.int64_value = int(agg.sum_data)
if isinstance(v_data.view.measure, measure.MeasureFloat):
point.value.double_value = float(agg.sum_data)
elif aggregation_type is aggregation.Type.LASTVALUE:
if isinstance(v_data.view.measure, measure.MeasureInt):
point.value.int64_value = int(agg.value)
if isinstance(v_data.view.measure, measure.MeasureFloat):
point.value.double_value = float(agg.value)
else:
raise TypeError(
"Unsupported aggregation type: %s" % type(v_data.view.aggregation)
)
start = datetime.strptime(v_data.start_time, EPOCH_PATTERN)
end = datetime.strptime(v_data.end_time, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
point.interval.end_time.seconds = int(timestamp_end)
secs = point.interval.end_time.seconds
point.interval.end_time.nanos = int((timestamp_end - secs) * 10**9)
if aggregation_type is not aggregation.Type.LASTVALUE:
if timestamp_start == timestamp_end:
# avoiding start_time and end_time to be equal
timestamp_start = timestamp_start - 1
else:
# For LastValue (Gauge), start and end time must be the same.
timestamp_start = timestamp_end
start_time = point.interval.start_time
start_time.seconds = int(timestamp_start)
start_secs = start_time.seconds
start_time.nanos = int((timestamp_start - start_secs) * 1e9)
time_series_list.append(series)
return time_series_list
|
def create_time_series_list(self, v_data, option_resource_type, metric_prefix):
"""Create the TimeSeries object based on the view data"""
time_series_list = []
aggregation_type = v_data.view.aggregation.aggregation_type
tag_agg = v_data.tag_value_aggregation_data_map
for tag_value, agg in tag_agg.items():
series = monitoring_v3.types.TimeSeries()
series.metric.type = namespaced_view_name(v_data.view.name, metric_prefix)
set_metric_labels(series, v_data.view, tag_value)
set_monitored_resource(series, option_resource_type)
point = series.points.add()
if aggregation_type is aggregation.Type.DISTRIBUTION:
dist_value = point.value.distribution_value
dist_value.count = agg.count_data
dist_value.mean = agg.mean_data
sum_of_sqd = agg.sum_of_sqd_deviations
dist_value.sum_of_squared_deviation = sum_of_sqd
# Uncomment this when stackdriver supports Range
# point.value.distribution_value.range.min = agg_data.min
# point.value.distribution_value.range.max = agg_data.max
bounds = dist_value.bucket_options.explicit_buckets.bounds
buckets = dist_value.bucket_counts
# Stackdriver expects a first bucket for samples in (-inf, 0),
# but we record positive samples only, and our first bucket is
# [0, first_bound).
bounds.extend([0])
buckets.extend([0])
bounds.extend(list(map(float, agg.bounds)))
buckets.extend(list(map(int, agg.counts_per_bucket)))
elif aggregation_type is aggregation.Type.COUNT:
point.value.int64_value = agg.count_data
elif aggregation_type is aggregation.Type.SUM:
if isinstance(v_data.view.measure, measure.MeasureInt):
# TODO: Add implementation of sum aggregation that does not
# store it's data as a float.
point.value.int64_value = int(agg.sum_data)
if isinstance(v_data.view.measure, measure.MeasureFloat):
point.value.double_value = float(agg.sum_data)
elif aggregation_type is aggregation.Type.LASTVALUE:
if isinstance(v_data.view.measure, measure.MeasureInt):
point.value.int64_value = int(agg.value)
if isinstance(v_data.view.measure, measure.MeasureFloat):
point.value.double_value = float(agg.value)
else:
raise TypeError(
"Unsupported aggregation type: %s" % type(v_data.view.aggregation)
)
start = datetime.strptime(v_data.start_time, EPOCH_PATTERN)
end = datetime.strptime(v_data.end_time, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
point.interval.end_time.seconds = int(timestamp_end)
secs = point.interval.end_time.seconds
point.interval.end_time.nanos = int((timestamp_end - secs) * 10**9)
if aggregation_type is not aggregation.Type.LASTVALUE:
if timestamp_start == timestamp_end:
# avoiding start_time and end_time to be equal
timestamp_start = timestamp_start - 1
start_time = point.interval.start_time
start_time.seconds = int(timestamp_start)
start_secs = start_time.seconds
start_time.nanos = int((timestamp_start - start_secs) * 1e9)
time_series_list.append(series)
return time_series_list
|
https://github.com/census-instrumentation/opencensus-python/issues/584
|
Traceback (most recent call last):
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/grpc/_channel.py", line 549, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/grpc/_channel.py", line 466, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.INVALID_ARGUMENT
details = "Field timeSeries[0].points[0].interval.start had an invalid value of "2019-03-27T14:35:55.669958-07:00": The start time must be equal to the end time (2019-03-27T14:37:33.72174-07:00) for the gauge metric 'custom.googleapis.com/opencensus/rsatesting_agglast/request_count'."
debug_error_string = "{"created":"@1553722655.800461000","description":"Error received from peer","file":"src/core/lib/surface/call.cc","file_line":1039,"grpc_message":"Field timeSeries[0].points[0].interval.start had an invalid value of "2019-03-27T14:35:55.669958-07:00": The start time must be equal to the end time (2019-03-27T14:37:33.72174-07:00) for the gauge metric 'custom.googleapis.com/opencensus/rsatesting_agglast/request_count'.","grpc_status":3}"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "bunk.py", line 5, in <module>
bunk.record(1, {'http_verb': 'GET', 'response_code': '200'})
File "/Users/rsa/dev/metrics/metrics.py", line 114, in record
mm.record(tagmap)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/stats/measurement_map.py", line 118, in record
attachments=self.attachments
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/stats/measure_to_view_map.py", line 123, in record
self.export(view_datas)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/stats/measure_to_view_map.py", line 134, in export
e.export(view_datas_copy)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/ext/stackdriver/stats_exporter/__init__.py", line 158, in export
self.transport.export(view_data)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/common/transports/sync.py", line 23, in export
self.exporter.emit(datas)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/ext/stackdriver/stats_exporter/__init__.py", line 153, in emit
self.handle_upload(view_data)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/ext/stackdriver/stats_exporter/__init__.py", line 165, in handle_upload
self.upload_stats(view_data)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/opencensus/ext/stackdriver/stats_exporter/__init__.py", line 177, in upload_stats
time_series_batch)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/cloud/monitoring_v3/gapic/metric_service_client.py", line 897, in create_time_series
request, retry=retry, timeout=timeout, metadata=metadata
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 143, in __call__
return wrapped_func(*args, **kwargs)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/retry.py", line 270, in retry_wrapped_func
on_error=on_error,
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/retry.py", line 179, in retry_target
return target()
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/Users/rsa/dev/metrics/.venv/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.InvalidArgument: 400 Field timeSeries[0].points[0].interval.start had an invalid value of "2019-03-27T14:35:55.669958-07:00": The start time must be equal to the end time (2019-03-27T14:37:33.72174-07:00) for the gauge metric 'custom.googleapis.com/opencensus/rsatesting_agglast/request_count'.
|
google.api_core.exceptions.InvalidArgument
|
def to_metric(self, desc, tag_values, agg_data):
"""to_metric translate the data that OpenCensus create
to Prometheus format, using Prometheus Metric object
:type desc: dict
:param desc: The map that describes view definition
:type tag_values: tuple of :class:
`~opencensus.tags.tag_value.TagValue`
:param object of opencensus.tags.tag_value.TagValue:
TagValue object used as label values
:type agg_data: object of :class:
`~opencensus.stats.aggregation_data.AggregationData`
:param object of opencensus.stats.aggregation_data.AggregationData:
Aggregated data that needs to be converted as Prometheus samples
:rtype: :class:`~prometheus_client.core.CounterMetricFamily` or
:class:`~prometheus_client.core.HistogramMetricFamily` or
:class:`~prometheus_client.core.UntypedMetricFamily` or
:class:`~prometheus_client.core.GaugeMetricFamily`
:returns: A Prometheus metric object
"""
metric_name = desc["name"]
metric_description = desc["documentation"]
label_keys = desc["labels"]
# Prometheus requires that all tag values be strings hence
# the need to cast none to the empty string before exporting. See
# https://github.com/census-instrumentation/opencensus-python/issues/480
tag_values = [tv if tv else "" for tv in tag_values]
if isinstance(agg_data, aggregation_data_module.CountAggregationData):
metric = CounterMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=tag_values, value=agg_data.count_data)
return metric
elif isinstance(agg_data, aggregation_data_module.DistributionAggregationData):
assert agg_data.bounds == sorted(agg_data.bounds)
points = {}
cum_count = 0
for ii, bound in enumerate(agg_data.bounds):
cum_count += agg_data.counts_per_bucket[ii]
points[str(bound)] = cum_count
metric = HistogramMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(
labels=tag_values,
buckets=list(points.items()),
sum_value=agg_data.sum,
)
return metric
elif isinstance(agg_data, aggregation_data_module.SumAggregationDataFloat):
metric = UntypedMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=tag_values, value=agg_data.sum_data)
return metric
elif isinstance(agg_data, aggregation_data_module.LastValueAggregationData):
metric = GaugeMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=tag_values, value=agg_data.value)
return metric
else:
raise ValueError("unsupported aggregation type %s" % type(agg_data))
|
def to_metric(self, desc, tag_values, agg_data):
"""to_metric translate the data that OpenCensus create
to Prometheus format, using Prometheus Metric object
:type desc: dict
:param desc: The map that describes view definition
:type tag_values: tuple of :class:
`~opencensus.tags.tag_value.TagValue`
:param object of opencensus.tags.tag_value.TagValue:
TagValue object used as label values
:type agg_data: object of :class:
`~opencensus.stats.aggregation_data.AggregationData`
:param object of opencensus.stats.aggregation_data.AggregationData:
Aggregated data that needs to be converted as Prometheus samples
:rtype: :class:`~prometheus_client.core.CounterMetricFamily` or
:class:`~prometheus_client.core.HistogramMetricFamily` or
:class:`~prometheus_client.core.UntypedMetricFamily` or
:class:`~prometheus_client.core.GaugeMetricFamily`
:returns: A Prometheus metric object
"""
metric_name = desc["name"]
metric_description = desc["documentation"]
label_keys = desc["labels"]
if isinstance(agg_data, aggregation_data_module.CountAggregationData):
metric = CounterMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=list(tag_values), value=agg_data.count_data)
return metric
elif isinstance(agg_data, aggregation_data_module.DistributionAggregationData):
assert agg_data.bounds == sorted(agg_data.bounds)
points = {}
cum_count = 0
for ii, bound in enumerate(agg_data.bounds):
cum_count += agg_data.counts_per_bucket[ii]
points[str(bound)] = cum_count
metric = HistogramMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(
labels=list(tag_values),
buckets=list(points.items()),
sum_value=agg_data.sum,
)
return metric
elif isinstance(agg_data, aggregation_data_module.SumAggregationDataFloat):
metric = UntypedMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=list(tag_values), value=agg_data.sum_data)
return metric
elif isinstance(agg_data, aggregation_data_module.LastValueAggregationData):
metric = GaugeMetricFamily(
name=metric_name, documentation=metric_description, labels=label_keys
)
metric.add_metric(labels=list(tag_values), value=agg_data.value)
return metric
else:
raise ValueError("unsupported aggregation type %s" % type(agg_data))
|
https://github.com/census-instrumentation/opencensus-python/issues/480
|
Exception happened during processing of request from ('127.0.0.1', 50691)
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 625, in process_request_thread
self.finish_request(request, client_address)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 354, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 681, in __init__
self.handle()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/http/server.py", line 422, in handle
self.handle_one_request()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/http/server.py", line 410, in handle_one_request
method()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prometheus_client/exposition.py", line 146, in do_GET
output = encoder(registry)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prometheus_client/openmetrics/exposition.py", line 26, in generate_latest
for k, v in sorted(s.labels.items())]))
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prometheus_client/openmetrics/exposition.py", line 26, in <listcomp>
for k, v in sorted(s.labels.items())]))
AttributeError: 'NoneType' object has no attribute 'replace'
|
AttributeError
|
def register_view(self, view):
"""register_view will create the needed structure
in order to be able to sent all data to Prometheus
"""
v_name = get_view_name(self.options.namespace, view)
if v_name not in self.registered_views:
desc = {
"name": v_name,
"documentation": view.description,
"labels": list(map(sanitize, view.columns)),
}
self.registered_views[v_name] = desc
self.registry.register(self)
|
def register_view(self, view):
"""register_view will create the needed structure
in order to be able to sent all data to Prometheus
"""
v_name = get_view_name(self.options.namespace, view)
if v_name not in self.registered_views:
desc = {
"name": v_name,
"documentation": view.description,
"labels": list(view.columns),
}
self.registered_views[v_name] = desc
self.registry.register(self)
|
https://github.com/census-instrumentation/opencensus-python/issues/468
|
Exception happened during processing of request from ('127.0.0.1', 64332)
Traceback (most recent call last):
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 596, in process_request_thread
self.finish_request(request, client_address)
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 331, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 652, in __init__
self.handle()
File "/Users/prakritibansal/anaconda2/lib/python2.7/BaseHTTPServer.py", line 340, in handle
self.handle_one_request()
File "/Users/prakritibansal/anaconda2/lib/python2.7/BaseHTTPServer.py", line 328, in handle_one_request
method()
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/exposition.py", line 146, in do_GET
output = encoder(registry)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/exposition.py", line 89, in generate_latest
for metric in registry.collect():
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/registry.py", line 75, in collect
for metric in collector.collect():
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/opencensus/stats/exporters/prometheus_exporter.py", line 226, in collect
metric = self.to_metric(desc, tag_values, agg_data)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/opencensus/stats/exporters/prometheus_exporter.py", line 186, in to_metric
labels=label_keys)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/metrics_core.py", line 185, in __init__
Metric.__init__(self, name, documentation, 'histogram', unit)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/metrics_core.py", line 27, in __init__
raise ValueError('Invalid metric name: ' + name)
ValueError: Invalid metric name: oc_python_demo/latency
|
ValueError
|
def get_view_name(namespace, view):
"""create the name for the view"""
name = ""
if namespace != "":
name = namespace + "_"
return sanitize(name + view.name)
|
def get_view_name(namespace, view):
"""create the name for the view"""
name = ""
if namespace != "":
name = namespace + "_"
return name + view.name
|
https://github.com/census-instrumentation/opencensus-python/issues/468
|
Exception happened during processing of request from ('127.0.0.1', 64332)
Traceback (most recent call last):
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 596, in process_request_thread
self.finish_request(request, client_address)
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 331, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/Users/prakritibansal/anaconda2/lib/python2.7/SocketServer.py", line 652, in __init__
self.handle()
File "/Users/prakritibansal/anaconda2/lib/python2.7/BaseHTTPServer.py", line 340, in handle
self.handle_one_request()
File "/Users/prakritibansal/anaconda2/lib/python2.7/BaseHTTPServer.py", line 328, in handle_one_request
method()
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/exposition.py", line 146, in do_GET
output = encoder(registry)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/exposition.py", line 89, in generate_latest
for metric in registry.collect():
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/registry.py", line 75, in collect
for metric in collector.collect():
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/opencensus/stats/exporters/prometheus_exporter.py", line 226, in collect
metric = self.to_metric(desc, tag_values, agg_data)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/opencensus/stats/exporters/prometheus_exporter.py", line 186, in to_metric
labels=label_keys)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/metrics_core.py", line 185, in __init__
Metric.__init__(self, name, documentation, 'histogram', unit)
File "/Users/prakritibansal/anaconda2/lib/python2.7/site-packages/prometheus_client/metrics_core.py", line 27, in __init__
raise ValueError('Invalid metric name: ' + name)
ValueError: Invalid metric name: oc_python_demo/latency
|
ValueError
|
def _extract_logs_from_span(span):
if span.time_events is None:
return None
logs = []
for time_event in span.time_events:
annotation = time_event.annotation
if not annotation:
continue
fields = []
if annotation.attributes is not None:
fields = _extract_tags(annotation.attributes.attributes)
fields.append(
jaeger.Tag(
key="message", vType=jaeger.TagType.STRING, vStr=annotation.description
)
)
event_time = datetime.datetime.strptime(
time_event.timestamp, ISO_DATETIME_REGEX
)
timestamp = calendar.timegm(event_time.timetuple()) * 1000
logs.append(jaeger.Log(timestamp=timestamp, fields=fields))
return logs
|
def _extract_logs_from_span(span):
if span.time_events is None:
return None
logs = []
for time_event in span.time_events:
annotation = time_event.annotation
if not annotation:
continue
fields = _extract_tags(annotation.attributes)
fields.append(
jaeger.Tag(
key="message", vType=jaeger.TagType.STRING, vStr=annotation.description
)
)
event_time = datetime.datetime.strptime(
time_event.timestamp, ISO_DATETIME_REGEX
)
timestamp = calendar.timegm(event_time.timetuple()) * 1000
logs.append(jaeger.Log(timestamp=timestamp, fields=fields))
return logs
|
https://github.com/census-instrumentation/opencensus-python/issues/314
|
Traceback (most recent call last):
File "main.py", line 37, in <module>
main()
File "main.py", line 33, in main
more_work()
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/span.py", line 251, in __exit__
self.context_tracer.end_span()
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/tracers/context_tracer.py", line 123, in end_span
self.exporter.export(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 160, in export
self.transport.export(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/transports/sync.py", line 23, in export
self.exporter.emit(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 140, in emit
jaeger_spans = self.translate_to_jaeger(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 208, in translate_to_jaeger
logs = _extract_logs_from_span(span)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 283, in _extract_logs_from_span
fields = _extract_tags(annotation.attributes)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 302, in _extract_tags
for attribute_key, attribute_value in attr.items():
AttributeError: 'Attributes' object has no attribute 'items'
|
AttributeError
|
def _extract_tags(attr):
if attr is None:
return []
tags = []
for attribute_key, attribute_value in attr.items():
tag = _convert_attribute_to_tag(attribute_key, attribute_value)
if tag is None:
continue
tags.append(tag)
return tags
|
def _extract_tags(attr):
if attr is None:
return None
tags = []
for attribute_key, attribute_value in attr.items():
tag = _convert_attribute_to_tag(attribute_key, attribute_value)
if tag is None:
continue
tags.append(tag)
return tags
|
https://github.com/census-instrumentation/opencensus-python/issues/314
|
Traceback (most recent call last):
File "main.py", line 37, in <module>
main()
File "main.py", line 33, in main
more_work()
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/span.py", line 251, in __exit__
self.context_tracer.end_span()
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/tracers/context_tracer.py", line 123, in end_span
self.exporter.export(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 160, in export
self.transport.export(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/transports/sync.py", line 23, in export
self.exporter.emit(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 140, in emit
jaeger_spans = self.translate_to_jaeger(span_datas)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 208, in translate_to_jaeger
logs = _extract_logs_from_span(span)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 283, in _extract_logs_from_span
fields = _extract_tags(annotation.attributes)
File "/Users/tw/.local/share/virtualenvs/census-K4PTEclS/lib/python3.6/site-packages/opencensus/trace/exporters/jaeger_exporter.py", line 302, in _extract_tags
for attribute_key, attribute_value in attr.items():
AttributeError: 'Attributes' object has no attribute 'items'
|
AttributeError
|
def parse_get_bucket_location(parsed, http_response, **kwargs):
# s3.GetBucketLocation cannot be modeled properly. To
# account for this we just manually parse the XML document.
# The "parsed" passed in only has the ResponseMetadata
# filled out. This handler will fill in the LocationConstraint
# value.
if http_response.raw is None:
return
response_body = http_response.content
parser = xml.etree.cElementTree.XMLParser(
target=xml.etree.cElementTree.TreeBuilder(), encoding="utf-8"
)
parser.feed(response_body)
root = parser.close()
region = root.text
parsed["LocationConstraint"] = region
|
def parse_get_bucket_location(parsed, http_response, **kwargs):
# s3.GetBucketLocation cannot be modeled properly. To
# account for this we just manually parse the XML document.
# The "parsed" passed in only has the ResponseMetadata
# filled out. This handler will fill in the LocationConstraint
# value.
if "LocationConstraint" in parsed:
# Response already set - a stub?
return
response_body = http_response.content
parser = xml.etree.cElementTree.XMLParser(
target=xml.etree.cElementTree.TreeBuilder(), encoding="utf-8"
)
parser.feed(response_body)
root = parser.close()
region = root.text
parsed["LocationConstraint"] = region
|
https://github.com/boto/botocore/issues/1884
|
Traceback (most recent call last):
File "untitled.py", line 24, in <module>
client.get_bucket_location(Bucket="meh")
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 655, in _make_api_call
model=operation_model, context=request_context
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/handlers.py", line 485, in parse_get_bucket_location
response_body = http_response.content
File "/usr/local/lib/python3.7/site-packages/botocore/awsrequest.py", line 587, in content
self._content = bytes().join(self.raw.stream()) or bytes()
AttributeError: 'NoneType' object has no attribute 'stream'
|
AttributeError
|
def _send_request(self, method, url, body, headers, *args, **kwargs):
self._response_received = False
if headers.get("Expect", b"") == b"100-continue":
self._expect_header_set = True
else:
self._expect_header_set = False
self.response_class = self._original_response_cls
rval = HTTPConnection._send_request(
self, method, url, body, headers, *args, **kwargs
)
self._expect_header_set = False
return rval
|
def _send_request(self, method, url, body, headers):
self._response_received = False
if headers.get("Expect", b"") == b"100-continue":
self._expect_header_set = True
else:
self._expect_header_set = False
self.response_class = self._original_response_cls
rval = HTTPConnection._send_request(self, method, url, body, headers)
self._expect_header_set = False
return rval
|
https://github.com/boto/botocore/issues/1079
|
Python 3.6.0b2 (default, Nov 1 2016, 00:18:55)
[GCC 5.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import boto3
s3 = boto3.client('s3')
s3.list_buckets()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 251, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 526, in _make_api_call
operation_model, request_dict)
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 170, in _send_request
success_response, exception):
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 249, in _needs_retry
caught_exception=caught_exception, request_dict=request_dict)
File "/usr/local/lib/python3.6/site-packages/botocore/hooks.py", line 227, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/hooks.py", line 210, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 251, in __call__
caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 317, in __call__
caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 223, in __call__
attempt_number, caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 204, in _get_response
proxies=self.proxies, timeout=self.timeout)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/sessions.py", line 573, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/adapters.py", line 370, in send
timeout=timeout
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/packages/urllib3/connectionpool.py", line 544, in urlopen
body=body, headers=headers)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/packages/urllib3/connectionpool.py", line 349, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/local/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
TypeError: _send_request() takes 5 positional arguments but 6 were given
|
TypeError
|
def _send_output(self, message_body=None, *args, **kwargs):
self._buffer.extend((b"", b""))
msg = self._convert_to_bytes(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if self._expect_header_set:
# This is our custom behavior. If the Expect header was
# set, it will trigger this custom behavior.
logger.debug("Waiting for 100 Continue response.")
# Wait for 1 second for the server to send a response.
read, write, exc = select.select([self.sock], [], [self.sock], 1)
if read:
self._handle_expect_response(message_body)
return
else:
# From the RFC:
# Because of the presence of older implementations, the
# protocol allows ambiguous situations in which a client may
# send "Expect: 100-continue" without receiving either a 417
# (Expectation Failed) status or a 100 (Continue) status.
# Therefore, when a client sends this header field to an origin
# server (possibly via a proxy) from which it has never seen a
# 100 (Continue) status, the client SHOULD NOT wait for an
# indefinite period before sending the request body.
logger.debug(
"No response seen from server, continuing to send the response body."
)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
|
def _send_output(self, message_body=None):
self._buffer.extend((b"", b""))
msg = self._convert_to_bytes(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if self._expect_header_set:
# This is our custom behavior. If the Expect header was
# set, it will trigger this custom behavior.
logger.debug("Waiting for 100 Continue response.")
# Wait for 1 second for the server to send a response.
read, write, exc = select.select([self.sock], [], [self.sock], 1)
if read:
self._handle_expect_response(message_body)
return
else:
# From the RFC:
# Because of the presence of older implementations, the
# protocol allows ambiguous situations in which a client may
# send "Expect: 100-continue" without receiving either a 417
# (Expectation Failed) status or a 100 (Continue) status.
# Therefore, when a client sends this header field to an origin
# server (possibly via a proxy) from which it has never seen a
# 100 (Continue) status, the client SHOULD NOT wait for an
# indefinite period before sending the request body.
logger.debug(
"No response seen from server, continuing to send the response body."
)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
|
https://github.com/boto/botocore/issues/1079
|
Python 3.6.0b2 (default, Nov 1 2016, 00:18:55)
[GCC 5.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import boto3
s3 = boto3.client('s3')
s3.list_buckets()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 251, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 526, in _make_api_call
operation_model, request_dict)
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 170, in _send_request
success_response, exception):
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 249, in _needs_retry
caught_exception=caught_exception, request_dict=request_dict)
File "/usr/local/lib/python3.6/site-packages/botocore/hooks.py", line 227, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/hooks.py", line 210, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 251, in __call__
caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 317, in __call__
caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 223, in __call__
attempt_number, caught_exception)
File "/usr/local/lib/python3.6/site-packages/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/usr/local/lib/python3.6/site-packages/botocore/endpoint.py", line 204, in _get_response
proxies=self.proxies, timeout=self.timeout)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/sessions.py", line 573, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/adapters.py", line 370, in send
timeout=timeout
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/packages/urllib3/connectionpool.py", line 544, in urlopen
body=body, headers=headers)
File "/usr/local/lib/python3.6/site-packages/botocore/vendored/requests/packages/urllib3/connectionpool.py", line 349, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/local/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
TypeError: _send_request() takes 5 positional arguments but 6 were given
|
TypeError
|
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
|
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = str(params)
|
https://github.com/boto/botocore/issues/868
|
import boto3
client = boto3.client('s3')
bucket = '<your-bucket-name>'
key = u'日本語でおk'
client.put_object(Bucket=bucket, Key=key)
client.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key}]})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/client.py", line 236, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/client.py", line 476, in _make_api_call
api_params, operation_model, context=request_context)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/client.py", line 529, in _convert_to_request_dict
api_params, operation_model)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/validate.py", line 271, in serialize_to_request
operation_model)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 415, in serialize_to_request
serialized, shape, shape_members)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 457, in _serialize_payload
shape_members[payload_member])
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 532, in _serialize_body_params
self._serialize(shape, params, pseudo_root, root_name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 539, in _serialize
method(xmlnode, params, shape, name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 565, in _serialize_type_structure
self._serialize(member_shape, value, structure_node, member_name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 539, in _serialize
method(xmlnode, params, shape, name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 576, in _serialize_type_list
self._serialize(member_shape, item, list_node, element_name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 539, in _serialize
method(xmlnode, params, shape, name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 565, in _serialize_type_structure
self._serialize(member_shape, value, structure_node, member_name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 539, in _serialize
method(xmlnode, params, shape, name)
File "/home/ec2-user/Workspace/test/local/lib/python2.7/site-packages/botocore/serialize.py", line 618, in _default_serialize
node.text = str(params)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-5: ordinal not in range(128)
|
UnicodeEncodeError
|
def _parse_error_from_body(self, response):
xml_contents = response["body"]
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
if root.tag == "Error":
# This is an S3 error response. First we'll populate the
# response metadata.
metadata = self._populate_response_metadata(response)
# The RequestId and the HostId are already in the
# ResponseMetadata, but are also duplicated in the XML
# body. We don't need these values in both places,
# we'll just remove them from the parsed XML body.
parsed.pop("RequestId", "")
parsed.pop("HostId", "")
return {"Error": parsed, "ResponseMetadata": metadata}
elif "RequestId" in parsed:
# Other rest-xml serivces:
parsed["ResponseMetadata"] = {"RequestId": parsed.pop("RequestId")}
default = {"Error": {"Message": "", "Code": ""}}
merge_dicts(default, parsed)
return default
|
def _parse_error_from_body(self, response):
xml_contents = response["body"]
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
if root.tag == "Error":
# This is an S3 error response. First we'll populate the
# response metadata.
metadata = self._populate_response_metadata(response)
# The RequestId and the HostId are already in the
# ResponseMetadata, but are also duplicated in the XML
# body. We don't need these values in both places,
# we'll just remove them from the parsed XML body.
parsed.pop("RequestId", "")
parsed.pop("HostId", "")
return {"Error": parsed, "ResponseMetadata": metadata}
elif "RequestId" in parsed:
# Other rest-xml serivces:
parsed["ResponseMetadata"] = {"RequestId": parsed.pop("RequestId")}
return parsed
|
https://github.com/boto/botocore/issues/532
|
from boto3.session import Session
sess = Session(region_name='us-east-1')
r53 = sess.client('route53')
zone = r53.list_hosted_zones_by_name(DNSName='example.com.')['HostedZones'][0]
zid = zone['Id']
r53.list_resource_record_sets(HostedZoneId=zid, StartRecordType='SOA')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/mike/venv/stacker/lib/python2.7/site-packages/botocore-0.106.0-py2.7.egg/botocore/client.py", line 199, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/mike/venv/stacker/lib/python2.7/site-packages/botocore-0.106.0-py2.7.egg/botocore/client.py", line 254, in _make_api_call
raise ClientError(parsed_response, operation_name)
File "/Users/mike/venv/stacker/lib/python2.7/site-packages/botocore-0.106.0-py2.7.egg/botocore/exceptions.py", line 300, in __init__
error_message=error_response['Error']['Message'],
KeyError: 'Message'
|
KeyError
|
def startElement(self, name, attrs):
if name == "elliot":
self.elliottag = True
if name == "exploit" and self.elliottag:
self.exploittag = True
if self.exploittag:
self.tag = name
if self.tag == "name":
self.nametag = True
self.name = ""
elif self.tag == "url":
self.urltag = True
self.url = ""
elif self.tag == "ref":
self.reftag = True
self.reftype = attrs.getValue("type")
if self.reftype == "CVE":
self.refcvetag = True
self.cveref = ""
elif self.reftype != "CVE":
self.refcvetag = False
self.cveref = False
|
def startElement(self, name, attrs):
if name == "elliot":
self.elliottag = True
if name == "exploit" and self.elliottag:
self.exploittag = True
if self.exploittag:
self.tag = name
if self.tag == "name":
self.nametag = True
self.name = ""
elif self.tag == "url":
self.urltag = True
self.url = ""
elif self.tag == "ref":
self.reftag = True
self.reftype = attrs.getValue("type")
if self.reftype == "CVE":
self.refcvetag = True
self.cveref = ""
elif self.reftype != "CVE":
self.refcvetag = False
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def endElement(self, name):
if name == "ref":
if self.cveref != "" and self.cveref:
self.refl.append(self.cveref.rstrip())
self.reftag = False
if name == "name":
self.nametag = False
if name == "url":
self.urltag = False
if name == "ref":
self.reftag = False
if name == "exploit":
for refl in self.refl:
self.d2sec.append({"name": self.name, "url": self.url, "id": refl})
self.exploittag = False
self.refl = []
if name == "elliot":
self.elliottag = False
|
def endElement(self, name):
if name == "ref":
if self.cveref != "":
self.refl.append(self.cveref.rstrip())
self.reftag = False
if name == "name":
self.nametag = False
if name == "url":
self.urltag = False
if name == "ref":
self.reftag = False
if name == "exploit":
for refl in self.refl:
self.d2sec.append({"name": self.name, "url": self.url, "id": refl})
self.exploittag = False
self.refl = []
if name == "elliot":
self.elliottag = False
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apiCVEFor(cpe):
cpe = urllib.parse.unquote_plus(cpe)
cpe = toStringFormattedCPE(cpe)
if not cpe:
cpe = "None"
r = []
cvesp = cves.last(
rankinglookup=False, namelookup=False, vfeedlookup=True, capeclookup=False
)
for x in dbLayer.cvesForCPE(cpe):
r.append(cvesp.getcve(x["id"]))
return json.dumps(r, default=json_util.default)
|
def apiCVEFor(cpe):
cpe = urllib.parse.unquote_plus(cpe)
cpe = toStringFormattedCPE(cpe)
if not cpe:
cpe = "None"
r = []
cvesp = cves.last(
rankinglookup=False, namelookup=False, vfeedlookup=True, capeclookup=False
)
for x in dbLayer.cvesForCPE(cpe):
r.append(cvesp.getcve(x["id"]))
return json.dumps(r)
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apiCVE(cveid):
cvesp = cves.last(
rankinglookup=True, namelookup=True, vfeedlookup=True, capeclookup=True
)
cve = cvesp.getcve(cveid=cveid)
if cve is None:
cve = {}
return jsonify(cve)
|
def apiCVE(cveid):
cvesp = cves.last(
rankinglookup=True, namelookup=True, vfeedlookup=True, capeclookup=True
)
cve = cvesp.getcve(cveid=cveid)
if cve is None:
cve = {}
return jsonify(cve)
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apibrowse(vendor=None):
if vendor is not None:
vendor = urllib.parse.quote_plus(vendor).lower()
browseList = getBrowseList(vendor)
if isinstance(browseList, dict):
return jsonify(browseList)
else:
return jsonify({})
|
def apibrowse(vendor=None):
if vendor is not None:
vendor = urllib.parse.quote_plus(vendor).lower()
browseList = getBrowseList(vendor)
if isinstance(browseList, dict):
return jsonify(browseList)
else:
return jsonify({})
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apilast():
limit = 30
cvesp = cves.last(
rankinglookup=True, namelookup=True, vfeedlookup=True, capeclookup=True
)
cve = cvesp.get(limit=limit)
return jsonify({"results": cve})
|
def apilast():
limit = 30
cvesp = cves.last(
rankinglookup=True, namelookup=True, vfeedlookup=True, capeclookup=True
)
cve = cvesp.get(limit=limit)
return jsonify({"results": cve})
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apisearch(vendor=None, product=None):
if vendor is None or product is None:
return jsonify({})
search = vendor + ":" + product
return json.dumps(dbLayer.cvesForCPE(search), default=json_util.default)
|
def apisearch(vendor=None, product=None):
if vendor is None or product is None:
return jsonify({})
search = vendor + ":" + product
return json.dumps(dbLayer.cvesForCPE(search))
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apidbInfo():
return json.dumps(dbLayer.getDBStats(), default=json_util.default)
|
def apidbInfo():
return json.dumps(dbLayer.getDBStats())
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apiCVEFor(cpe):
cpe = urllib.parse.unquote_plus(cpe)
cpe = toStringFormattedCPE(cpe)
r = []
cvesp = cves.last(
rankinglookup=False, namelookup=False, vfeedlookup=True, capeclookup=False
)
for x in db.cvesForCPE(cpe):
r.append(cvesp.getcve(x["id"]))
return json.dumps(r, default=json_util.default)
|
def apiCVEFor(cpe):
cpe = urllib.parse.unquote_plus(cpe)
cpe = toStringFormattedCPE(cpe)
r = []
cvesp = cves.last(
rankinglookup=False, namelookup=False, vfeedlookup=True, capeclookup=False
)
for x in db.cvesForCPE(cpe):
r.append(cvesp.getcve(x["id"]))
return json.dumps(r)
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apisearch(vendor=None, product=None):
if vendor is None or product is None:
return jsonify({})
search = vendor + ":" + product
return json.dumps(db.cvesForCPE(search), default=json_util.default)
|
def apisearch(vendor=None, product=None):
if vendor is None or product is None:
return jsonify({})
search = vendor + ":" + product
return json.dumps(db.cvesForCPE(search))
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def apidbInfo():
return json.dumps(db.getDBStats(), default=json_util.default)
|
def apidbInfo():
return json.dumps(db.getDBStats())
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def getFilterSettingsFromPost(r):
filters = dict(request.form)
filters = {x: filters[x][0] for x in filters.keys()}
errors = False
# retrieving data
try:
cve = filter_logic(filters, pageLength, r)
except:
cve = dbLayer.getCVEs(limit=pageLength, skip=r)
errors = True
return (filters, cve, errors)
|
def getFilterSettingsFromPost(r):
filters = dict(request.form)
filters = {x: filters[x][0] for x in filters.keys()}
errors = False
# retrieving data
try:
cve = filter_logic(filters, pageLength, r)
except:
cve = db.getCVEs(limit=pageLength, skip=r)
errors = True
return (filters, cve, errors)
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def isURLFilter(s):
return isinstance(s, list)
|
def isURLFilter(string):
return isURL(string)
|
https://github.com/cve-search/cve-search/issues/129
|
Traceback (most recent call last):
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 109, in <module>
parser.parse(f)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/usr/lib/python3.4/xml/sax/xmlreader.py", line 123, in parse
self.feed(buffer)
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 207, in feed
self._parser.Parse(data, isFinal)
File "../Modules/pyexpat.c", line 459, in EndElement
File "/usr/lib/python3.4/xml/sax/expatreader.py", line 307, in end_element
self._cont_handler.endElement(name)
File "/home/PidgeyL/git/PidgeyL/cve-search/sbin/db_mgmt_d2sec.py", line 74, in endElement
if self.cveref != "":
AttributeError: 'ExploitHandler' object has no attribute 'cveref'
|
AttributeError
|
def masterLogin():
master = input("Master account username: ")
masterPass = buildPassword(getpass.getpass("Master password:"), user=master)
if (
collection.find(
{"username": master, "password": masterPass, "master": True}
).count()
== 0
):
sys.exit("Master user/password combination does not exist")
return True
|
def masterLogin():
master = input("Master account username: ")
masterPass = hashlib.sha256(
bytes(getpass.getpass("Master password:"), "utf-8")
).hexdigest()
if (
collection.find(
{"username": master, "password": masterPass, "master": True}
).count()
== 0
):
sys.exit("Master user/password combination does not exist")
return True
|
https://github.com/cve-search/cve-search/issues/34
|
Starting vendor
Traceback (most recent call last): ] 0/1440
File ".../cve-search/db_mgmt_vendorstatements.py", line 83, in <module>
bulk.find({'id': statement['id']}).upsert().update({'id': statement['id']}, {"$set":{'statement': statement['statement'], 'id': statement['id'], 'organization': statement['organization'], 'contributor': statement['contributor'], 'lastmodified': statement['lastmodified']}})
TypeError: update() takes 2 positional arguments but 3 were given
|
TypeError
|
def existsInDB(user):
return True if collection.find({"username": user}).count() > 0 else False
|
def existsInDB(user):
return True if collection.find({"username": username}).count() > 0 else False
|
https://github.com/cve-search/cve-search/issues/34
|
Starting vendor
Traceback (most recent call last): ] 0/1440
File ".../cve-search/db_mgmt_vendorstatements.py", line 83, in <module>
bulk.find({'id': statement['id']}).upsert().update({'id': statement['id']}, {"$set":{'statement': statement['statement'], 'id': statement['id'], 'organization': statement['organization'], 'contributor': statement['contributor'], 'lastmodified': statement['lastmodified']}})
TypeError: update() takes 2 positional arguments but 3 were given
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.