after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _update_block(blk):
"""
This method will construct any additional indices in a block
resulting from the discretization of a ContinuousSet. For
Block-derived components we check if the Block construct method has
been overridden. If not then we update it like a regular block. If
construct has been overridden then we try to call the component's
update_after_discretization method. If the component hasn't
implemented this method then we throw a warning and try to update it
like a normal block. The issue, when construct is overridden, is that
anything could be happening and we can't automatically assume that
treating the block-derived component like a normal block will be
sufficient to update it correctly.
"""
# Check if Block construct method is overridden
# getattr needed below for Python 2, 3 compatibility
if blk.construct.__func__ is not getattr(
IndexedBlock.construct, "__func__", IndexedBlock.construct
):
# check for custom update function
if hasattr(blk, "update_after_discretization"):
blk.update_after_discretization()
return
else:
logger.warning(
"DAE(misc): Attempting to apply a discretization "
'transformation to the Block-derived component "%s". The '
"component overrides the Block construct method but no "
"update_after_discretization() function was found. Will "
"attempt to update as a standard Block but user should verify "
"that the component was expanded correctly. To suppress this "
"warning, please provide an update_after_discretization() "
"function on Block-derived components that override "
"construct()" % blk.name
)
# Code taken from the construct() method of Block
missing_idx = getattr(blk, "_dae_missing_idx", set([]))
for idx in list(missing_idx):
_block = blk[idx]
obj = apply_indexed_rule(blk, blk._rule, _block, idx, blk._options)
if isinstance(obj, _BlockData) and obj is not _block:
# If the user returns a block, use their block instead
# of the empty one we just created.
for c in list(obj.component_objects(descend_into=False)):
obj.del_component(c)
_block.add_component(c.local_name, c)
# transfer over any other attributes that are not components
for name, val in iteritems(obj.__dict__):
if not hasattr(_block, name) and not hasattr(blk, name):
super(_BlockData, _block).__setattr__(name, val)
# Remove book-keeping data after Block is discretized
if hasattr(blk, "_dae_missing_idx"):
del blk._dae_missing_idx
|
def _update_block(blk):
"""
This method will construct any additional indices in a block
resulting from the discretization of a ContinuousSet. For
Block-derived components we check if the Block construct method has
been overridden. If not then we update it like a regular block. If
construct has been overridden then we try to call the component's
update_after_discretization method. If the component hasn't
implemented this method then we throw a warning and try to update it
like a normal block. The issue, when construct is overridden, is that
anything could be happening and we can't automatically assume that
treating the block-derived component like a normal block will be
sufficient to update it correctly.
"""
# Check if Block construct method is overridden
# getattr needed below for Python 2, 3 compatibility
if blk.construct.__func__ is not getattr(
IndexedBlock.construct, "__func__", IndexedBlock.construct
):
# check for custom update function
try:
blk.update_after_discretization()
return
except AttributeError:
logger.warning(
"DAE(misc): Attempting to apply a discretization "
'transformation to the Block-derived component "%s". The '
"component overrides the Block construct method but no "
"update_after_discretization() function was found. Will "
"attempt to update as a standard Block but user should verify "
"that the component was expanded correctly. To suppress this "
"warning, please provide an update_after_discretization() "
"function on Block-derived components that override "
"construct()" % blk.name
)
# Code taken from the construct() method of Block
missing_idx = set(blk._index) - set(iterkeys(blk._data))
for idx in list(missing_idx):
_block = blk[idx]
obj = apply_indexed_rule(blk, blk._rule, _block, idx, blk._options)
if isinstance(obj, _BlockData) and obj is not _block:
# If the user returns a block, use their block instead
# of the empty one we just created.
for c in list(obj.component_objects(descend_into=False)):
obj.del_component(c)
_block.add_component(c.local_name, c)
# transfer over any other attributes that are not components
for name, val in iteritems(obj.__dict__):
if not hasattr(_block, name) and not hasattr(blk, name):
super(_BlockData, _block).__setattr__(name, val)
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def _apply_to(self, instance, **kwds):
"""
Applies specified collocation transformation to a modeling instance
Keyword Arguments:
nfe The desired number of finite element points to be
included in the discretization.
ncp The desired number of collocation points over each
finite element.
wrt Indicates which ContinuousSet the transformation
should be applied to. If this keyword argument is not
specified then the same scheme will be applied to all
ContinuousSets.
scheme Indicates which finite difference method to apply.
Options are LAGRANGE-RADAU, LAGRANGE-LEGENDRE, or
HERMITE-CUBIC. The default scheme is Lagrange polynomials
with Radau roots.
"""
tmpnfe = kwds.pop("nfe", 10)
tmpncp = kwds.pop("ncp", 3)
tmpds = kwds.pop("wrt", None)
tmpscheme = kwds.pop("scheme", "LAGRANGE-RADAU")
self._scheme_name = tmpscheme.upper()
if tmpds is not None:
if tmpds.type() is not ContinuousSet:
raise TypeError(
"The component specified using the 'wrt' "
"keyword must be a continuous set"
)
elif "scheme" in tmpds.get_discretization_info():
raise ValueError(
"The discretization scheme '%s' has already "
"been applied to the ContinuousSet '%s'"
% (tmpds.get_discretization_info()["scheme"], tmpds.name)
)
if tmpnfe <= 0:
raise ValueError("The number of finite elements must be at least 1")
if tmpncp <= 0:
raise ValueError("The number of collocation points must be at least 1")
if None in self._nfe:
raise ValueError(
"A general discretization scheme has already been applied to "
"to every differential set in the model. If you would like to "
"specify a specific discretization scheme for one of the "
"differential sets you must discretize each differential set "
"individually. If you would like to apply a different "
"discretization scheme to all differential sets you must "
"declare a new transformation object"
)
if len(self._nfe) == 0 and tmpds is None:
# Same discretization on all differentialsets
self._nfe[None] = tmpnfe
self._ncp[None] = tmpncp
currentds = None
else:
self._nfe[tmpds.name] = tmpnfe
self._ncp[tmpds.name] = tmpncp
currentds = tmpds.name
self._scheme = self.all_schemes.get(self._scheme_name, None)
if self._scheme is None:
raise ValueError(
"Unknown collocation scheme '%s' specified using "
"the 'scheme' keyword. Valid schemes are "
"'LAGRANGE-RADAU', 'LAGRANGE-LEGENDRE', and "
"'HERMITE-CUBIC'" % tmpscheme
)
if self._scheme_name == "LAGRANGE-RADAU":
self._get_radau_constants(currentds)
elif self._scheme_name == "LAGRANGE-LEGENDRE":
self._get_legendre_constants(currentds)
self._transformBlock(instance, currentds)
return instance
|
def _apply_to(self, instance, **kwds):
"""
Applies specified collocation transformation to a modeling instance
Keyword Arguments:
nfe The desired number of finite element points to be
included in the discretization.
ncp The desired number of collocation points over each
finite element.
wrt Indicates which ContinuousSet the transformation
should be applied to. If this keyword argument is not
specified then the same scheme will be applied to all
ContinuousSets.
scheme Indicates which finite difference method to apply.
Options are LAGRANGE-RADAU, LAGRANGE-LEGENDRE, or
HERMITE-CUBIC. The default scheme is Lagrange polynomials
with Radau roots.
"""
tmpnfe = kwds.pop("nfe", 10)
tmpncp = kwds.pop("ncp", 3)
tmpds = kwds.pop("wrt", None)
tmpscheme = kwds.pop("scheme", "LAGRANGE-RADAU")
self._scheme_name = tmpscheme.upper()
if tmpds is not None:
if tmpds.type() is not ContinuousSet:
raise TypeError(
"The component specified using the 'wrt' "
"keyword must be a continuous set"
)
elif "scheme" in tmpds.get_discretization_info():
raise ValueError(
"The discretization scheme '%s' has already "
"been applied to the ContinuousSet '%s'"
% (tmpds.get_discretization_info()["scheme"], tmpds.name)
)
if tmpnfe <= 0:
raise ValueError("The number of finite elements must be at least 1")
if tmpncp <= 0:
raise ValueError("The number of collocation points must be at least 1")
if None in self._nfe:
raise ValueError(
"A general discretization scheme has already been applied to "
"to every differential set in the model. If you would like to "
"specify a specific discretization scheme for one of the "
"differential sets you must discretize each differential set "
"individually. If you would like to apply a different "
"discretization scheme to all differential sets you must "
"declare a new transformation object"
)
if len(self._nfe) == 0 and tmpds is None:
# Same discretization on all differentialsets
self._nfe[None] = tmpnfe
self._ncp[None] = tmpncp
currentds = None
else:
self._nfe[tmpds.name] = tmpnfe
self._ncp[tmpds.name] = tmpncp
currentds = tmpds.name
self._scheme = self.all_schemes.get(self._scheme_name, None)
if self._scheme is None:
raise ValueError(
"Unknown collocation scheme '%s' specified using "
"the 'scheme' keyword. Valid schemes are "
"'LAGRANGE-RADAU', 'LAGRANGE-LEGENDRE', and "
"'HERMITE-CUBIC'" % tmpscheme
)
if self._scheme_name == "LAGRANGE-RADAU":
self._get_radau_constants(currentds)
elif self._scheme_name == "LAGRANGE-LEGENDRE":
self._get_legendre_constants(currentds)
for block in instance.block_data_objects(active=True):
self._transformBlock(block, currentds)
return instance
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def _transformBlock(self, block, currentds):
self._fe = {}
for ds in block.component_objects(ContinuousSet, descend_into=True):
if currentds is None or currentds == ds.name:
generate_finite_elements(ds, self._nfe[currentds])
if not ds.get_changed():
if len(ds) - 1 > self._nfe[currentds]:
print(
"***WARNING: More finite elements were found in "
"ContinuousSet '%s' than the number of finite "
"elements specified in apply. The larger number "
"of finite elements will be used." % ds.name
)
self._nfe[ds.name] = len(ds) - 1
self._fe[ds.name] = sorted(ds)
generate_colloc_points(ds, self._tau[currentds])
# Adding discretization information to the continuousset
# object itself so that it can be accessed outside of the
# discretization object
disc_info = ds.get_discretization_info()
disc_info["nfe"] = self._nfe[ds.name]
disc_info["ncp"] = self._ncp[currentds]
disc_info["tau_points"] = self._tau[currentds]
disc_info["adot"] = self._adot[currentds]
disc_info["adotdot"] = self._adotdot[currentds]
disc_info["afinal"] = self._afinal[currentds]
disc_info["scheme"] = self._scheme_name
expand_components(block)
for d in block.component_objects(DerivativeVar, descend_into=True):
dsets = d.get_continuousset_list()
for i in set(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
count = dsets.count(i)
if count >= 3:
raise DAE_Error(
"Error discretizing '%s' with respect to '%s'. "
"Current implementation only allows for taking the"
" first or second derivative with respect to a "
"particular ContinuousSet" % (d.name, i.name)
)
scheme = self._scheme[count - 1]
# print("%s %s" % (i.name, scheme.__name__))
newexpr = create_partial_expression(scheme, oldexpr, i, loc)
d.set_derivative_expression(newexpr)
if self._scheme_name == "LAGRANGE-LEGENDRE":
# Add continuity equations to DerivativeVar's parent
# block
add_continuity_equations(d.parent_block(), d, i, loc)
# Reclassify DerivativeVar if all indexing ContinuousSets have
# been discretized. Add discretization equations to the
# DerivativeVar's parent block.
if d.is_fully_discretized():
add_discretization_equations(d.parent_block(), d)
d.parent_block().reclassify_component_type(d, Var)
# Reclassify Integrals if all ContinuousSets have been discretized
if block_fully_discretized(block):
if block.contains_component(Integral):
for i in block.component_objects(Integral, descend_into=True):
i.reconstruct()
i.parent_block().reclassify_component_type(i, Expression)
# If a model contains integrals they are most likely to appear
# in the objective function which will need to be reconstructed
# after the model is discretized.
for k in block.component_objects(Objective, descend_into=True):
# TODO: check this, reconstruct might not work
k.reconstruct()
|
def _transformBlock(self, block, currentds):
self._fe = {}
for ds in itervalues(block.component_map(ContinuousSet)):
if currentds is None or currentds == ds.name:
generate_finite_elements(ds, self._nfe[currentds])
if not ds.get_changed():
if len(ds) - 1 > self._nfe[currentds]:
print(
"***WARNING: More finite elements were found in "
"ContinuousSet '%s' than the number of finite "
"elements specified in apply. The larger number "
"of finite elements will be used." % ds.name
)
self._nfe[ds.name] = len(ds) - 1
self._fe[ds.name] = sorted(ds)
generate_colloc_points(ds, self._tau[currentds])
# Adding discretization information to the continuousset
# object itself so that it can be accessed outside of the
# discretization object
disc_info = ds.get_discretization_info()
disc_info["nfe"] = self._nfe[ds.name]
disc_info["ncp"] = self._ncp[currentds]
disc_info["tau_points"] = self._tau[currentds]
disc_info["adot"] = self._adot[currentds]
disc_info["adotdot"] = self._adotdot[currentds]
disc_info["afinal"] = self._afinal[currentds]
disc_info["scheme"] = self._scheme_name
for c in itervalues(block.component_map()):
update_contset_indexed_component(c)
for d in itervalues(block.component_map(DerivativeVar)):
dsets = d.get_continuousset_list()
for i in set(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
count = dsets.count(i)
if count >= 3:
raise DAE_Error(
"Error discretizing '%s' with respect to '%s'. "
"Current implementation only allows for taking the"
" first or second derivative with respect to a "
"particular ContinuousSet" % (d.name, i.name)
)
scheme = self._scheme[count - 1]
# print("%s %s" % (i.name, scheme.__name__))
newexpr = create_partial_expression(scheme, oldexpr, i, loc)
d.set_derivative_expression(newexpr)
if self._scheme_name == "LAGRANGE-LEGENDRE":
add_continuity_equations(block, d, i, loc)
# Reclassify DerivativeVar if all indexing ContinuousSets have
# been discretized
if d.is_fully_discretized():
add_discretization_equations(block, d)
block.reclassify_component_type(d, Var)
# Reclassify Integrals if all ContinuousSets have been discretized
if block_fully_discretized(block):
if block.contains_component(Integral):
for i in itervalues(block.component_map(Integral)):
i.reconstruct()
block.reclassify_component_type(i, Expression)
# If a model contains integrals they are most likely to appear
# in the objective function which will need to be reconstructed
# after the model is discretized.
for k in itervalues(block.component_map(Objective)):
k.reconstruct()
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def _apply_to(self, instance, **kwds):
"""
Applies the transformation to a modeling instance
Keyword Arguments:
nfe The desired number of finite element points to be
included in the discretization.
wrt Indicates which ContinuousSet the transformation
should be applied to. If this keyword argument is not
specified then the same scheme will be applied to all
ContinuousSets.
scheme Indicates which finite difference method to apply.
Options are BACKWARD, CENTRAL, or FORWARD. The default
scheme is the backward difference method
"""
tmpnfe = kwds.pop("nfe", 10)
tmpds = kwds.pop("wrt", None)
tmpscheme = kwds.pop("scheme", "BACKWARD")
self._scheme_name = tmpscheme.upper()
if tmpds is not None:
if tmpds.type() is not ContinuousSet:
raise TypeError(
"The component specified using the 'wrt' "
"keyword must be a continuous set"
)
elif "scheme" in tmpds.get_discretization_info():
raise ValueError(
"The discretization scheme '%s' has already "
"been applied to the ContinuousSet '%s'"
% (tmpds.get_discretization_info()["scheme"], tmpds.name)
)
if None in self._nfe:
raise ValueError(
"A general discretization scheme has already been applied to "
"to every continuous set in the model. If you would like to "
"apply a different discretization scheme to each continuous "
"set, you must declare a new transformation object"
)
if len(self._nfe) == 0 and tmpds is None:
# Same discretization on all differentialsets
self._nfe[None] = tmpnfe
currentds = None
else:
self._nfe[tmpds.name] = tmpnfe
currentds = tmpds.name
self._scheme = self.all_schemes.get(self._scheme_name, None)
if self._scheme is None:
raise ValueError(
"Unknown finite difference scheme '%s' specified "
"using the 'scheme' keyword. Valid schemes are "
"'BACKWARD', 'CENTRAL', and 'FORWARD'" % tmpscheme
)
self._transformBlock(instance, currentds)
return instance
|
def _apply_to(self, instance, **kwds):
"""
Applies the transformation to a modeling instance
Keyword Arguments:
nfe The desired number of finite element points to be
included in the discretization.
wrt Indicates which ContinuousSet the transformation
should be applied to. If this keyword argument is not
specified then the same scheme will be applied to all
ContinuousSets.
scheme Indicates which finite difference method to apply.
Options are BACKWARD, CENTRAL, or FORWARD. The default
scheme is the backward difference method
"""
tmpnfe = kwds.pop("nfe", 10)
tmpds = kwds.pop("wrt", None)
tmpscheme = kwds.pop("scheme", "BACKWARD")
self._scheme_name = tmpscheme.upper()
if tmpds is not None:
if tmpds.type() is not ContinuousSet:
raise TypeError(
"The component specified using the 'wrt' "
"keyword must be a continuous set"
)
elif "scheme" in tmpds.get_discretization_info():
raise ValueError(
"The discretization scheme '%s' has already "
"been applied to the ContinuousSet '%s'"
% (tmpds.get_discretization_info()["scheme"], tmpds.name)
)
if None in self._nfe:
raise ValueError(
"A general discretization scheme has already been applied to "
"to every continuous set in the model. If you would like to "
"apply a different discretization scheme to each continuous "
"set, you must declare a new transformation object"
)
if len(self._nfe) == 0 and tmpds is None:
# Same discretization on all differentialsets
self._nfe[None] = tmpnfe
currentds = None
else:
self._nfe[tmpds.name] = tmpnfe
currentds = tmpds.name
self._scheme = self.all_schemes.get(self._scheme_name, None)
if self._scheme is None:
raise ValueError(
"Unknown finite difference scheme '%s' specified "
"using the 'scheme' keyword. Valid schemes are "
"'BACKWARD', 'CENTRAL', and 'FORWARD'" % tmpscheme
)
for block in instance.block_data_objects(active=True):
self._transformBlock(block, currentds)
return instance
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def _transformBlock(self, block, currentds):
self._fe = {}
for ds in block.component_objects(ContinuousSet):
if currentds is None or currentds == ds.name or currentds is ds:
generate_finite_elements(ds, self._nfe[currentds])
if not ds.get_changed():
if len(ds) - 1 > self._nfe[currentds]:
print(
"***WARNING: More finite elements were found in "
"ContinuousSet '%s' than the number of finite "
"elements specified in apply. The larger number "
"of finite elements will be used." % ds.name
)
self._nfe[ds.name] = len(ds) - 1
self._fe[ds.name] = sorted(ds)
# Adding discretization information to the differentialset
# object itself so that it can be accessed outside of the
# discretization object
disc_info = ds.get_discretization_info()
disc_info["nfe"] = self._nfe[ds.name]
disc_info["scheme"] = self._scheme_name + " Difference"
# Maybe check to see if any of the ContinuousSets have been changed,
# if they haven't then the model components need not be updated
# or even iterated through
expand_components(block)
for d in block.component_objects(DerivativeVar, descend_into=True):
dsets = d.get_continuousset_list()
for i in set(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
count = dsets.count(i)
if count >= 3:
raise DAE_Error(
"Error discretizing '%s' with respect to '%s'. "
"Current implementation only allows for taking the"
" first or second derivative with respect to "
"a particular ContinuousSet" % (d.name, i.name)
)
scheme = self._scheme[count - 1]
newexpr = create_partial_expression(scheme, oldexpr, i, loc)
d.set_derivative_expression(newexpr)
# Reclassify DerivativeVar if all indexing ContinuousSets have
# been discretized. Add discretization equations to the
# DerivativeVar's parent block.
if d.is_fully_discretized():
add_discretization_equations(d.parent_block(), d)
d.parent_block().reclassify_component_type(d, Var)
# Reclassify Integrals if all ContinuousSets have been discretized
if block_fully_discretized(block):
if block.contains_component(Integral):
for i in block.component_objects(Integral, descend_into=True):
i.reconstruct()
i.parent_block().reclassify_component_type(i, Expression)
# If a model contains integrals they are most likely to
# appear in the objective function which will need to be
# reconstructed after the model is discretized.
for k in block.component_objects(Objective, descend_into=True):
# TODO: check this, reconstruct might not work
k.reconstruct()
|
def _transformBlock(self, block, currentds):
self._fe = {}
for ds in itervalues(block.component_map(ContinuousSet)):
if currentds is None or currentds == ds.name:
generate_finite_elements(ds, self._nfe[currentds])
if not ds.get_changed():
if len(ds) - 1 > self._nfe[currentds]:
print(
"***WARNING: More finite elements were found in "
"ContinuousSet '%s' than the number of finite "
"elements specified in apply. The larger number "
"of finite elements will be used." % ds.name
)
self._nfe[ds.name] = len(ds) - 1
self._fe[ds.name] = sorted(ds)
# Adding discretization information to the differentialset
# object itself so that it can be accessed outside of the
# discretization object
disc_info = ds.get_discretization_info()
disc_info["nfe"] = self._nfe[ds.name]
disc_info["scheme"] = self._scheme_name + " Difference"
# Maybe check to see if any of the ContinuousSets have been changed,
# if they haven't then the model components need not be updated
# or even iterated through
for c in itervalues(block.component_map()):
update_contset_indexed_component(c)
for d in itervalues(block.component_map(DerivativeVar)):
dsets = d.get_continuousset_list()
for i in set(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
count = dsets.count(i)
if count >= 3:
raise DAE_Error(
"Error discretizing '%s' with respect to '%s'. "
"Current implementation only allows for taking the"
" first or second derivative with respect to "
"a particular ContinuousSet" % (d.name, i.name)
)
scheme = self._scheme[count - 1]
newexpr = create_partial_expression(scheme, oldexpr, i, loc)
d.set_derivative_expression(newexpr)
# Reclassify DerivativeVar if all indexing ContinuousSets have
# been discretized
if d.is_fully_discretized():
add_discretization_equations(block, d)
block.reclassify_component_type(d, Var)
# Reclassify Integrals if all ContinuousSets have been discretized
if block_fully_discretized(block):
if block.contains_component(Integral):
for i in itervalues(block.component_map(Integral)):
i.reconstruct()
block.reclassify_component_type(i, Expression)
# If a model contains integrals they are most likely to
# appear in the objective function which will need to be
# reconstructed after the model is discretized.
for k in itervalues(block.component_map(Objective)):
k.reconstruct()
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def expand_components(block):
"""
Loop over block components and try expanding them. If expansion fails
then save the component and try again later. This function has some
built-in robustness for block-hierarchical models with circular
references but will not work for all cases.
"""
# Used to map components to the functions used to expand them so that
# the update_contset_indexed_component function logic only has to be
# called once even in the case where we have to re-try expanding
# components due to circular references
expansion_map = ComponentMap()
redo_expansion = list()
# Record the missing BlockData before expanding components. This is for
# the case where a ContinuousSet indexed Block is used in a Constraint.
# If the Constraint is expanded before the Block then the missing
# BlockData will be added to the indexed Block but will not be
# constructed correctly.
for blk in block.component_objects(Block, descend_into=True):
missing_idx = set(blk._index) - set(iterkeys(blk._data))
if missing_idx:
blk._dae_missing_idx = missing_idx
# Identify components that need to be expanded and try expanding them
for c in block.component_objects(descend_into=True, sort=SortComponents.declOrder):
try:
update_contset_indexed_component(c, expansion_map)
except AttributeError:
redo_expansion.append(c)
N = len(redo_expansion)
while N:
for i in range(N):
c = redo_expansion.pop()
try:
expansion_map[c](c)
except AttributeError:
redo_expansion.append(c)
if len(redo_expansion) == N:
raise DAE_Error(
"Unable to fully discretize %s. Possible "
"circular references detected between components "
"%s. Reformulate your model to remove circular "
"references or apply a discretization "
"transformation before linking blocks together."
% (block, str(redo_expansion))
)
N = len(redo_expansion)
|
def expand_components(block):
"""
Loop over block components and try expanding them. If expansion fails
then save the component and try again later. This function has some
built-in robustness for block-hierarchical models with circular
references but will not work for all cases.
"""
expansion_map = ComponentMap()
redo_expansion = list()
# Identify components that need to be expanded and try expanding them
for c in block.component_objects(descend_into=True, sort=SortComponents.declOrder):
try:
update_contset_indexed_component(c, expansion_map)
except AttributeError:
redo_expansion.append(c)
print("Completed first discretization pass")
N = len(redo_expansion)
print("Number of components to re-expand: ", N)
while N:
print(redo_expansion)
for i in range(N):
c = redo_expansion.pop()
print("Re-expanding component ", str(c))
expansion_map[c](c)
# try:
# expansion_map[c](c)
# except AttributeError:
# redo_expansion.append(c)
print(redo_expansion)
if len(redo_expansion) == N:
raise DAE_Error(
"Unable to fully discretize %s. Possible "
"circular references detected between components "
"%s. Reformulate your model to remove circular "
"references or apply a discretization "
"transformation before linking blocks together."
% (block, str(redo_expansion))
)
N = len(redo_expansion)
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def update_contset_indexed_component(comp, expansion_map):
"""
Update any model components which are indexed by a ContinuousSet that
has changed
"""
# This implemenation will *NOT* check for or update
# components which use a ContinuousSet implicitly. ex) an
# objective function which iterates through a ContinuousSet and
# sums the squared error. If you use a ContinuousSet implicitly
# you must initialize it with every index you would like to have
# access to!
if comp.type() is Suffix:
return
# Params indexed by a ContinuousSet should include an initialize
# and/or default rule which will be called automatically when the
# parameter value at a new point in the ContinuousSet is
# requested. Therefore, no special processing is required for
# Params.
if comp.type() is Param:
return
# Components indexed by a ContinuousSet must have a dimension of at
# least 1
if comp.dim() == 0:
return
# Extract the indexing sets. Must treat components with a single
# index separately from components with multiple indexing sets.
if comp._implicit_subsets is None:
indexset = [comp._index]
else:
indexset = comp._implicit_subsets
for s in indexset:
if s.type() == ContinuousSet and s.get_changed():
if isinstance(comp, Var): # Don't use the type() method here
# because we want to catch DerivativeVar components as well
# as Var components
expansion_map[comp] = _update_var
_update_var(comp)
elif comp.type() == Constraint:
expansion_map[comp] = _update_constraint
_update_constraint(comp)
elif comp.type() == Expression:
expansion_map[comp] = _update_expression
_update_expression(comp)
elif isinstance(comp, Piecewise):
expansion_map[comp] = _update_piecewise
_update_piecewise(comp)
elif comp.type() == Block:
expansion_map[comp] = _update_block
_update_block(comp)
else:
raise TypeError(
"Found component %s of type %s indexed "
"by a ContinuousSet. Components of this type are "
"not currently supported by the automatic "
"discretization transformation in pyomo.dae. "
"Try adding the component to the model "
"after discretizing. Alert the pyomo developers "
"for more assistance." % (str(comp), comp.type())
)
|
def update_contset_indexed_component(comp, expansion_map):
"""
Update any model components which are indexed by a ContinuousSet that
has changed
"""
# This implemenation will *NOT* check for or update
# components which use a ContinuousSet implicitly. ex) an
# objective function which iterates through a ContinuousSet and
# sums the squared error. If you use a ContinuousSet implicitly
# you must initialize it with every index you would like to have
# access to!
if comp.type() is Suffix:
return
# Params indexed by a ContinuousSet should include an initialize
# and/or default rule which will be called automatically when the
# parameter value at a new point in the ContinuousSet is
# requested. Therefore, no special processing is required for
# Params.
if comp.type() is Param:
return
# Components indexed by a ContinuousSet must have a dimension of at
# least 1
if comp.dim() == 0:
return
# Extract the indexing sets. Must treat components with a single
# index separately from components with multiple indexing sets.
if comp._implicit_subsets is None:
indexset = [comp._index]
else:
indexset = comp._implicit_subsets
for s in indexset:
if s.type() == ContinuousSet and s.get_changed():
print("Expanding component ", str(comp))
if isinstance(comp, Var): # Don't use the type() method here
# because we want to catch DerivativeVar components as well
# as Var components
expansion_map[comp] = _update_var
_update_var(comp)
elif comp.type() == Constraint:
expansion_map[comp] = _update_constraint
_update_constraint(comp)
elif comp.type() == Expression:
expansion_map[comp] = _update_expression
_update_expression(comp)
elif isinstance(comp, Piecewise):
expansion_map[comp] = _update_piecewise
_update_piecewise(comp)
elif comp.type() == Block:
expansion_map[comp] = _update_block
_update_block(comp)
else:
raise TypeError(
"Found component %s of type %s indexed "
"by a ContinuousSet. Components of this type are "
"not currently supported by the automatic "
"discretization transformation in pyomo.dae. "
"Try adding the component to the model "
"after discretizing. Alert the pyomo developers "
"for more assistance." % (str(comp), comp.type())
)
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def expand_components(block):
"""
Loop over block components and try expanding them. If expansion fails
then save the component and try again later. This function has some
built-in robustness for block-hierarchical models with circular
references but will not work for all cases.
"""
# expansion_map is used to map components to the functions used to
# expand them so that the update_contset_indexed_component function
# logic only has to be called once even in the case where we have to
# re-try expanding components due to circular references
expansion_map = ComponentMap()
redo_expansion = list()
# Record the missing BlockData before expanding components. This is for
# the case where a ContinuousSet indexed Block is used in a Constraint.
# If the Constraint is expanded before the Block then the missing
# BlockData will be added to the indexed Block but will not be
# constructed correctly.
for blk in block.component_objects(Block, descend_into=True):
missing_idx = set(blk._index) - set(iterkeys(blk._data))
if missing_idx:
blk._dae_missing_idx = missing_idx
# Wrap this whole process in a try block in order to ensure that errors
# swallowed by the LoggingIntercept context below are re-raised if the
# discretization encounters an error it isn't expecting.
try:
# Intercept logging to suppress Error messages arising from failed
# constraint rules. These error messages get logged even though the
# AttributeError causing the error is caught and handled by this
# function when expanding discretized models. We maintain a stream
# of the intercepted logging messages which will be printed if an
# unexpected exception is raised.
buf = StringIO()
with LoggingIntercept(buf, "pyomo.core", logging.ERROR):
# Identify components that need to be expanded and try expanding
# them
for c in block.component_objects(
descend_into=True, sort=SortComponents.declOrder
):
try:
update_contset_indexed_component(c, expansion_map)
except AttributeError:
redo_expansion.append(c)
# Re-try expansion on any components that failed the first time.
# This is indicative of circular component references and not
# expanding components in the correct order the first time
# through.
N = len(redo_expansion)
while N:
for i in range(N):
c = redo_expansion.pop()
try:
expansion_map[c](c)
except AttributeError:
redo_expansion.append(c)
if len(redo_expansion) == N:
raise DAE_Error(
"Unable to fully discretize %s. Possible "
"circular references detected between "
"components %s. Reformulate your model to"
" remove circular references or apply a "
"discretization transformation before "
"linking blocks together." % (block, str(redo_expansion))
)
N = len(redo_expansion)
except Exception as e:
logger.error(buf.getvalue())
raise
|
def expand_components(block):
"""
Loop over block components and try expanding them. If expansion fails
then save the component and try again later. This function has some
built-in robustness for block-hierarchical models with circular
references but will not work for all cases.
"""
# Used to map components to the functions used to expand them so that
# the update_contset_indexed_component function logic only has to be
# called once even in the case where we have to re-try expanding
# components due to circular references
expansion_map = ComponentMap()
redo_expansion = list()
# Record the missing BlockData before expanding components. This is for
# the case where a ContinuousSet indexed Block is used in a Constraint.
# If the Constraint is expanded before the Block then the missing
# BlockData will be added to the indexed Block but will not be
# constructed correctly.
for blk in block.component_objects(Block, descend_into=True):
missing_idx = set(blk._index) - set(iterkeys(blk._data))
if missing_idx:
blk._dae_missing_idx = missing_idx
# Identify components that need to be expanded and try expanding them
for c in block.component_objects(descend_into=True, sort=SortComponents.declOrder):
try:
update_contset_indexed_component(c, expansion_map)
except AttributeError:
redo_expansion.append(c)
N = len(redo_expansion)
while N:
for i in range(N):
c = redo_expansion.pop()
try:
expansion_map[c](c)
except AttributeError:
redo_expansion.append(c)
if len(redo_expansion) == N:
raise DAE_Error(
"Unable to fully discretize %s. Possible "
"circular references detected between components "
"%s. Reformulate your model to remove circular "
"references or apply a discretization "
"transformation before linking blocks together."
% (block, str(redo_expansion))
)
N = len(redo_expansion)
|
https://github.com/Pyomo/pyomo/issues/353
|
$ python -i temp.py
Traceback (most recent call last):
File "temp.py", line 26, in <module>
disc.apply_to(m, nfe=2)
File "/home/blnicho/Research/pyomo/pyomo/core/base/plugin.py", line 334, in apply_to
self._apply_to(model, **kwds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 187, in _apply_to
self._transformBlock(block, currentds)
File "/home/blnicho/Research/pyomo/pyomo/dae/plugins/finitedifference.py", line 218, in _transformBlock
update_contset_indexed_component(c)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 145, in update_contset_indexed_component
_update_constraint(comp)
File "/home/blnicho/Research/pyomo/pyomo/dae/misc.py", line 189, in _update_constraint
con.add(i, apply_indexed_rule(con, _rule, _parent, i))
File "/home/blnicho/Research/pyomo/pyomo/core/base/misc.py", line 61, in apply_indexed_rule
return rule(model, index)
File "temp.py", line 22, in _ratio_rule
return b.ratioP[t] <= b.holdup.properties_in[t].pressure - b.holdup.properties_out[t].pressure
File "/home/blnicho/Research/pyomo/pyomo/core/base/block.py", line 521, in __getattr__
% (self.__class__.__name__, val))
AttributeError: '_BlockData' object has no attribute 'pressure'
|
AttributeError
|
def to_parquet( # pylint: disable=too-many-arguments,too-many-locals
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
max_rows_by_file: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, Any]] = None,
sanitize_columns: bool = False,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
bucketing_info: Optional[Tuple[List[str], int]] = None,
concurrent_partitioning: bool = False,
mode: Optional[str] = None,
catalog_versioning: bool = False,
schema_evolution: bool = True,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
regular_partitions: bool = True,
projection_enabled: bool = False,
projection_types: Optional[Dict[str, str]] = None,
projection_ranges: Optional[Dict[str, str]] = None,
projection_values: Optional[Dict[str, str]] = None,
projection_intervals: Optional[Dict[str, str]] = None,
projection_digits: Optional[Dict[str, str]] = None,
catalog_id: Optional[str] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of ordinary files and enable more
complex features like partitioning and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
If `database` and `table` arguments are passed, the table name and all column names
will be automatically sanitized using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Please, pass `sanitize_columns=True` to enforce this behaviour always.
Note
----
On `append` mode, the `parameters` will be upsert on an existing table.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
max_rows_by_file : int
Max number of rows in each file.
Default is None i.e. dont split the files.
(e.g. 33554432, 268435456)
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs : Optional[Dict[str, Any]]
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
sanitize_columns : bool
True to sanitize columns names (using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`)
or False to keep it as is.
True value behaviour is enforced if `database` and `table` arguments are passed.
dataset : bool
If True store a parquet dataset instead of a ordinary file(s)
If True, enable all follow arguments:
partition_cols, mode, database, table, description, parameters, columns_comments, concurrent_partitioning,
catalog_versioning, projection_enabled, projection_types, projection_ranges, projection_values,
projection_intervals, projection_digits, catalog_id, schema_evolution.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
bucketing_info: Tuple[List[str], int], optional
Tuple consisting of the column names used for bucketing as the first element and the number of buckets as the
second element.
Only `str`, `int` and `bool` are supported as column data types for bucketing.
concurrent_partitioning: bool
If True will increase the parallelism level during the partitions writing. It will decrease the
writing time and increase the memory usage.
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/022%20-%20Writing%20Partitions%20Concurrently.ipynb
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
For details check the related tutorial:
https://aws-data-wrangler.readthedocs.io/en/2.4.0-docs/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet
catalog_versioning : bool
If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
schema_evolution : bool
If True allows schema evolution (new or missing columns), otherwise a exception will be raised.
(Only considered if dataset=True and mode in ("append", "overwrite_partitions"))
Related tutorial:
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/014%20-%20Schema%20Evolution.ipynb
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype : Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description : str, optional
Glue/Athena catalog: Table description
parameters : Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments : Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
regular_partitions : bool
Create regular partitions (Non projected partitions) on Glue Catalog.
Disable when you will work only with Partition Projection.
Keep enabled even when working with projections is useful to keep
Redshift Spectrum working with the regular partitions.
projection_enabled : bool
Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
projection_types : Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections types.
Valid types: "enum", "integer", "date", "injected"
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'enum', 'col2_name': 'integer'})
projection_ranges: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections ranges.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
projection_values: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections values.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
projection_intervals: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections intervals.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '5'})
projection_digits: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections digits.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '2'})
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing bucketed dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... bucketing_info=(["col2"], 2)
... )
{
'paths': ['s3://.../x_bucket-00000.csv', 's3://.../col2=B/x_bucket-00001.csv'],
'partitions_values: {}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
'partitions_values: {}
}
"""
_validate_args(
df=df,
table=table,
database=database,
dataset=dataset,
path=path,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
description=description,
parameters=parameters,
columns_comments=columns_comments,
)
# Evaluating compression
if _COMPRESSION_2_EXT.get(compression, None) is None:
raise exceptions.InvalidCompression(
f"{compression} is invalid, please use None, 'snappy' or 'gzip'."
)
compression_ext: str = _COMPRESSION_2_EXT[compression]
# Initializing defaults
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
partitions_values: Dict[str, List[str]] = {}
mode = "append" if mode is None else mode
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# Sanitize table to respect Athena's standards
if (sanitize_columns is True) or (database is not None and table is not None):
df, dtype, partition_cols = _sanitize(
df=df, dtype=dtype, partition_cols=partition_cols
)
# Evaluating dtype
catalog_table_input: Optional[Dict[str, Any]] = None
if database is not None and table is not None:
catalog_table_input = catalog._get_table_input( # pylint: disable=protected-access
database=database, table=table, boto3_session=session, catalog_id=catalog_id
)
df = _apply_dtype(
df=df, dtype=dtype, catalog_table_input=catalog_table_input, mode=mode
)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug("schema: \n%s", schema)
if dataset is False:
paths = _to_parquet(
df=df,
path=path,
schema=schema,
index=index,
cpus=cpus,
compression=compression,
compression_ext=compression_ext,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
dtype=dtype,
max_rows_by_file=max_rows_by_file,
use_threads=use_threads,
)
else:
columns_types: Dict[str, str] = {}
partitions_types: Dict[str, str] = {}
if (database is not None) and (table is not None):
columns_types, partitions_types = (
_data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
)
if schema_evolution is False:
_check_schema_changes(
columns_types=columns_types,
table_input=catalog_table_input,
mode=mode,
)
paths, partitions_values = _to_dataset(
func=_to_parquet,
concurrent_partitioning=concurrent_partitioning,
df=df,
path_root=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
use_threads=use_threads,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
dtype=dtype,
mode=mode,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
schema=schema,
max_rows_by_file=max_rows_by_file,
)
if (database is not None) and (table is not None):
try:
catalog._create_parquet_table( # pylint: disable=protected-access
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
bucketing_info=bucketing_info,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode=mode,
catalog_versioning=catalog_versioning,
projection_enabled=projection_enabled,
projection_types=projection_types,
projection_ranges=projection_ranges,
projection_values=projection_values,
projection_intervals=projection_intervals,
projection_digits=projection_digits,
catalog_id=catalog_id,
catalog_table_input=catalog_table_input,
)
if partitions_values and (regular_partitions is True):
_logger.debug("partitions_values:\n%s", partitions_values)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
bucketing_info=bucketing_info,
compression=compression,
boto3_session=session,
catalog_id=catalog_id,
columns_types=columns_types,
)
except Exception:
_logger.debug(
"Catalog write failed, cleaning up S3 (paths: %s).", paths
)
delete_objects(
path=paths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
raise
return {"paths": paths, "partitions_values": partitions_values}
|
def to_parquet( # pylint: disable=too-many-arguments,too-many-locals
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
max_rows_by_file: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, Any]] = None,
sanitize_columns: bool = False,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
bucketing_info: Optional[Tuple[List[str], int]] = None,
concurrent_partitioning: bool = False,
mode: Optional[str] = None,
catalog_versioning: bool = False,
schema_evolution: bool = True,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
regular_partitions: bool = True,
projection_enabled: bool = False,
projection_types: Optional[Dict[str, str]] = None,
projection_ranges: Optional[Dict[str, str]] = None,
projection_values: Optional[Dict[str, str]] = None,
projection_intervals: Optional[Dict[str, str]] = None,
projection_digits: Optional[Dict[str, str]] = None,
catalog_id: Optional[str] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of ordinary files and enable more
complex features like partitioning and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
If `database` and `table` arguments are passed, the table name and all column names
will be automatically sanitized using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Please, pass `sanitize_columns=True` to enforce this behaviour always.
Note
----
On `append` mode, the `parameters` will be upsert on an existing table.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
max_rows_by_file : int
Max number of rows in each file.
Default is None i.e. dont split the files.
(e.g. 33554432, 268435456)
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs : Optional[Dict[str, Any]]
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
sanitize_columns : bool
True to sanitize columns names (using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`)
or False to keep it as is.
True value behaviour is enforced if `database` and `table` arguments are passed.
dataset : bool
If True store a parquet dataset instead of a ordinary file(s)
If True, enable all follow arguments:
partition_cols, mode, database, table, description, parameters, columns_comments, concurrent_partitioning,
catalog_versioning, projection_enabled, projection_types, projection_ranges, projection_values,
projection_intervals, projection_digits, catalog_id, schema_evolution.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
bucketing_info: Tuple[List[str], int], optional
Tuple consisting of the column names used for bucketing as the first element and the number of buckets as the
second element.
Only `str`, `int` and `bool` are supported as column data types for bucketing.
concurrent_partitioning: bool
If True will increase the parallelism level during the partitions writing. It will decrease the
writing time and increase the memory usage.
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/022%20-%20Writing%20Partitions%20Concurrently.ipynb
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
For details check the related tutorial:
https://aws-data-wrangler.readthedocs.io/en/stable/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet
catalog_versioning : bool
If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
schema_evolution : bool
If True allows schema evolution (new or missing columns), otherwise a exception will be raised.
(Only considered if dataset=True and mode in ("append", "overwrite_partitions"))
Related tutorial:
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/014%20-%20Schema%20Evolution.ipynb
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype : Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description : str, optional
Glue/Athena catalog: Table description
parameters : Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments : Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
regular_partitions : bool
Create regular partitions (Non projected partitions) on Glue Catalog.
Disable when you will work only with Partition Projection.
Keep enabled even when working with projections is useful to keep
Redshift Spectrum working with the regular partitions.
projection_enabled : bool
Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
projection_types : Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections types.
Valid types: "enum", "integer", "date", "injected"
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'enum', 'col2_name': 'integer'})
projection_ranges: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections ranges.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
projection_values: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections values.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
projection_intervals: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections intervals.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '5'})
projection_digits: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections digits.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '2'})
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing bucketed dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... bucketing_info=(["col2"], 2)
... )
{
'paths': ['s3://.../x_bucket-00000.csv', 's3://.../col2=B/x_bucket-00001.csv'],
'partitions_values: {}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
'partitions_values: {}
}
"""
_validate_args(
df=df,
table=table,
database=database,
dataset=dataset,
path=path,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
description=description,
parameters=parameters,
columns_comments=columns_comments,
)
# Evaluating compression
if _COMPRESSION_2_EXT.get(compression, None) is None:
raise exceptions.InvalidCompression(
f"{compression} is invalid, please use None, 'snappy' or 'gzip'."
)
compression_ext: str = _COMPRESSION_2_EXT[compression]
# Initializing defaults
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
partitions_values: Dict[str, List[str]] = {}
mode = "append" if mode is None else mode
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# Sanitize table to respect Athena's standards
if (sanitize_columns is True) or (database is not None and table is not None):
df, dtype, partition_cols = _sanitize(
df=df, dtype=dtype, partition_cols=partition_cols
)
# Evaluating dtype
catalog_table_input: Optional[Dict[str, Any]] = None
if database is not None and table is not None:
catalog_table_input = catalog._get_table_input( # pylint: disable=protected-access
database=database, table=table, boto3_session=session, catalog_id=catalog_id
)
df = _apply_dtype(
df=df, dtype=dtype, catalog_table_input=catalog_table_input, mode=mode
)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug("schema: \n%s", schema)
if dataset is False:
paths = _to_parquet(
df=df,
path=path,
schema=schema,
index=index,
cpus=cpus,
compression=compression,
compression_ext=compression_ext,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
dtype=dtype,
max_rows_by_file=max_rows_by_file,
use_threads=use_threads,
)
else:
columns_types: Dict[str, str] = {}
partitions_types: Dict[str, str] = {}
if (database is not None) and (table is not None):
columns_types, partitions_types = (
_data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
)
if schema_evolution is False:
_check_schema_changes(
columns_types=columns_types,
table_input=catalog_table_input,
mode=mode,
)
paths, partitions_values = _to_dataset(
func=_to_parquet,
concurrent_partitioning=concurrent_partitioning,
df=df,
path_root=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
use_threads=use_threads,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
dtype=dtype,
mode=mode,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
schema=schema,
max_rows_by_file=max_rows_by_file,
)
if (database is not None) and (table is not None):
try:
catalog._create_parquet_table( # pylint: disable=protected-access
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
bucketing_info=bucketing_info,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode=mode,
catalog_versioning=catalog_versioning,
projection_enabled=projection_enabled,
projection_types=projection_types,
projection_ranges=projection_ranges,
projection_values=projection_values,
projection_intervals=projection_intervals,
projection_digits=projection_digits,
catalog_id=catalog_id,
catalog_table_input=catalog_table_input,
)
if partitions_values and (regular_partitions is True):
_logger.debug("partitions_values:\n%s", partitions_values)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
bucketing_info=bucketing_info,
compression=compression,
boto3_session=session,
catalog_id=catalog_id,
columns_types=columns_types,
)
except Exception:
_logger.debug(
"Catalog write failed, cleaning up S3 (paths: %s).", paths
)
delete_objects(
path=paths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
raise
return {"paths": paths, "partitions_values": partitions_values}
|
https://github.com/awslabs/aws-data-wrangler/issues/549
|
Collecting awswrangler
Downloading https://files.pythonhosted.org/packages/28/57/debf8f714d5b1a14ce39a6e8bbd1105beaab8490f96c2673fe04bf27cedb/awswrangler-2.4.0-py3-none-any.whl (171kB)
Requirement already satisfied: pandas<1.3.0,>=1.1.0 in /usr/local/lib64/python3.7/site-packages (from awswrangler)
Requirement already satisfied: boto3<2.0.0,>=1.12.49 in /usr/local/lib/python3.7/site-packages (from awswrangler)
Collecting pyarrow<3.1.0,>=2.0.0 (from awswrangler)
Downloading https://files.pythonhosted.org/packages/62/d3/a482d8a4039bf931ed6388308f0cc0541d0cab46f0bbff7c897a74f1c576/pyarrow-3.0.0.tar.gz (682kB)
Complete output from command python setup.py egg_info:
/usr/lib64/python3.7/distutils/dist.py:274: UserWarning: Unknown distribution option: 'long_description_content_type'
warnings.warn(msg)
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/mnt/tmp/pip-build-00riuili/pyarrow/setup.py", line 624, in <module>
url='https://arrow.apache.org/'
File "/usr/lib/python3.7/site-packages/setuptools/__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "/usr/lib64/python3.7/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/usr/lib/python3.7/site-packages/setuptools/dist.py", line 333, in __init__
_Distribution.__init__(self, attrs)
File "/usr/lib64/python3.7/distutils/dist.py", line 292, in __init__
self.finalize_options()
File "/usr/lib/python3.7/site-packages/setuptools/dist.py", line 476, in finalize_options
ep.load()(self, ep.name, value)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/integration.py", line 26, in version_keyword
dist.metadata.version = _get_version(config)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 173, in _get_version
parsed_version = _do_parse(config)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 119, in _do_parse
parse_result = _call_entrypoint_fn(config.absolute_root, config, config.parse)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 54, in _call_entrypoint_fn
return fn(root)
File "/mnt/tmp/pip-build-00riuili/pyarrow/setup.py", line 542, in parse_git
return parse(root, **kwargs)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/git.py", line 95, in parse
require_command("git")
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/utils.py", line 151, in require_command
raise EnvironmentError("%r was not found" % name)
OSError: 'git' was not found
----------------------------------------
|
OSError
|
def to_csv( # pylint: disable=too-many-arguments,too-many-locals,too-many-statements
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, Any]] = None,
sanitize_columns: bool = False,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
bucketing_info: Optional[Tuple[List[str], int]] = None,
concurrent_partitioning: bool = False,
mode: Optional[str] = None,
catalog_versioning: bool = False,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
regular_partitions: bool = True,
projection_enabled: bool = False,
projection_types: Optional[Dict[str, str]] = None,
projection_ranges: Optional[Dict[str, str]] = None,
projection_values: Optional[Dict[str, str]] = None,
projection_intervals: Optional[Dict[str, str]] = None,
projection_digits: Optional[Dict[str, str]] = None,
catalog_id: Optional[str] = None,
**pandas_kwargs: Any,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of ordinary files and enable more
complex features like partitioning and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
If database` and `table` arguments are passed, the table name and all column names
will be automatically sanitized using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Please, pass `sanitize_columns=True` to enforce this behaviour always.
Note
----
If `table` and `database` arguments are passed, `pandas_kwargs` will be ignored due
restrictive quoting, date_format, escapechar and encoding required by Athena/Glue Catalog.
Note
----
Compression: The minimum acceptable version to achive it is Pandas 1.2.0 that requires Python >= 3.7.1.
Note
----
On `append` mode, the `parameters` will be upsert on an existing table.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : Optional[List[str]]
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receive None.
s3_additional_kwargs : Optional[Dict[str, Any]]
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
sanitize_columns : bool
True to sanitize columns names or False to keep it as is.
True value is forced if `dataset=True`.
dataset : bool
If True store a parquet dataset instead of a ordinary file(s)
If True, enable all follow arguments:
partition_cols, mode, database, table, description, parameters, columns_comments, concurrent_partitioning,
catalog_versioning, projection_enabled, projection_types, projection_ranges, projection_values,
projection_intervals, projection_digits, catalog_id, schema_evolution.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
bucketing_info: Tuple[List[str], int], optional
Tuple consisting of the column names used for bucketing as the first element and the number of buckets as the
second element.
Only `str`, `int` and `bool` are supported as column data types for bucketing.
concurrent_partitioning: bool
If True will increase the parallelism level during the partitions writing. It will decrease the
writing time and increase the memory usage.
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/022%20-%20Writing%20Partitions%20Concurrently.ipynb
mode : str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
For details check the related tutorial:
https://aws-data-wrangler.readthedocs.io/en/2.4.0-docs/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet
catalog_versioning : bool
If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype : Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description : str, optional
Glue/Athena catalog: Table description
parameters : Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments : Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
regular_partitions : bool
Create regular partitions (Non projected partitions) on Glue Catalog.
Disable when you will work only with Partition Projection.
Keep enabled even when working with projections is useful to keep
Redshift Spectrum working with the regular partitions.
projection_enabled : bool
Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
projection_types : Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections types.
Valid types: "enum", "integer", "date", "injected"
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'enum', 'col2_name': 'integer'})
projection_ranges: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections ranges.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
projection_values: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections values.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
projection_intervals: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections intervals.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '5'})
projection_digits: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections digits.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '2'})
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
pandas_kwargs :
KEYWORD arguments forwarded to pandas.DataFrame.to_csv(). You can NOT pass `pandas_kwargs` explicit, just add
valid Pandas arguments in the function call and Wrangler will accept it.
e.g. wr.s3.to_csv(df, path, sep='|', na_rep='NULL', decimal=',')
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file with pandas_kwargs
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... sep='|',
... na_rep='NULL',
... decimal=','
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing bucketed dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... bucketing_info=(["col2"], 2)
... )
{
'paths': ['s3://.../x_bucket-00000.csv', 's3://.../col2=B/x_bucket-00001.csv'],
'partitions_values: {}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
'partitions_values: {}
}
"""
if "pandas_kwargs" in pandas_kwargs:
raise exceptions.InvalidArgument(
"You can NOT pass `pandas_kwargs` explicit, just add valid "
"Pandas arguments in the function call and Wrangler will accept it."
"e.g. wr.s3.to_csv(df, path, sep='|', na_rep='NULL', decimal=',', compression='gzip')"
)
if pandas_kwargs.get("compression") and str(pd.__version__) < LooseVersion("1.2.0"):
raise exceptions.InvalidArgument(
f"CSV compression on S3 is not supported for Pandas version {pd.__version__}. "
"The minimum acceptable version to achive it is Pandas 1.2.0 that requires Python >=3.7.1."
)
_validate_args(
df=df,
table=table,
database=database,
dataset=dataset,
path=path,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
description=description,
parameters=parameters,
columns_comments=columns_comments,
)
# Initializing defaults
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
partitions_values: Dict[str, List[str]] = {}
mode = "append" if mode is None else mode
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# Sanitize table to respect Athena's standards
if (sanitize_columns is True) or (database is not None and table is not None):
df, dtype, partition_cols = _sanitize(
df=df, dtype=dtype, partition_cols=partition_cols
)
# Evaluating dtype
catalog_table_input: Optional[Dict[str, Any]] = None
if database and table:
catalog_table_input = catalog._get_table_input( # pylint: disable=protected-access
database=database, table=table, boto3_session=session, catalog_id=catalog_id
)
if pandas_kwargs.get("compression") not in ("gzip", "bz2", None):
raise exceptions.InvalidArgumentCombination(
"If database and table are given, you must use one of these compressions: gzip, bz2 or None."
)
df = _apply_dtype(
df=df, dtype=dtype, catalog_table_input=catalog_table_input, mode=mode
)
if dataset is False:
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(
file_format="csv",
df=df,
use_threads=use_threads,
path=path,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
paths = [path]
else:
if database and table:
quoting: Optional[int] = csv.QUOTE_NONE
escapechar: Optional[str] = "\\"
header: Union[bool, List[str]] = False
date_format: Optional[str] = "%Y-%m-%d %H:%M:%S.%f"
pd_kwargs: Dict[str, Any] = {}
compression: Optional[str] = pandas_kwargs.get("compression", None)
else:
quoting = pandas_kwargs.get("quoting", None)
escapechar = pandas_kwargs.get("escapechar", None)
header = pandas_kwargs.get("header", True)
date_format = pandas_kwargs.get("date_format", None)
compression = pandas_kwargs.get("compression", None)
pd_kwargs = pandas_kwargs.copy()
pd_kwargs.pop("quoting", None)
pd_kwargs.pop("escapechar", None)
pd_kwargs.pop("header", None)
pd_kwargs.pop("date_format", None)
pd_kwargs.pop("compression", None)
df = df[columns] if columns else df
paths, partitions_values = _to_dataset(
func=_to_text,
concurrent_partitioning=concurrent_partitioning,
df=df,
path_root=path,
index=index,
sep=sep,
compression=compression,
use_threads=use_threads,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
file_format="csv",
quoting=quoting,
escapechar=escapechar,
header=header,
date_format=date_format,
**pd_kwargs,
)
if database and table:
try:
columns_types, partitions_types = (
_data_types.athena_types_from_pandas_partitioned(
df=df,
index=index,
partition_cols=partition_cols,
dtype=dtype,
index_left=True,
)
)
catalog._create_csv_table( # pylint: disable=protected-access
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
bucketing_info=bucketing_info,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode=mode,
catalog_versioning=catalog_versioning,
sep=sep,
projection_enabled=projection_enabled,
projection_types=projection_types,
projection_ranges=projection_ranges,
projection_values=projection_values,
projection_intervals=projection_intervals,
projection_digits=projection_digits,
catalog_table_input=catalog_table_input,
catalog_id=catalog_id,
compression=pandas_kwargs.get("compression"),
skip_header_line_count=None,
)
if partitions_values and (regular_partitions is True):
_logger.debug("partitions_values:\n%s", partitions_values)
catalog.add_csv_partitions(
database=database,
table=table,
partitions_values=partitions_values,
bucketing_info=bucketing_info,
boto3_session=session,
sep=sep,
catalog_id=catalog_id,
columns_types=columns_types,
compression=pandas_kwargs.get("compression"),
)
except Exception:
_logger.debug(
"Catalog write failed, cleaning up S3 (paths: %s).", paths
)
delete_objects(
path=paths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
raise
return {"paths": paths, "partitions_values": partitions_values}
|
def to_csv( # pylint: disable=too-many-arguments,too-many-locals,too-many-statements
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, Any]] = None,
sanitize_columns: bool = False,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
bucketing_info: Optional[Tuple[List[str], int]] = None,
concurrent_partitioning: bool = False,
mode: Optional[str] = None,
catalog_versioning: bool = False,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
regular_partitions: bool = True,
projection_enabled: bool = False,
projection_types: Optional[Dict[str, str]] = None,
projection_ranges: Optional[Dict[str, str]] = None,
projection_values: Optional[Dict[str, str]] = None,
projection_intervals: Optional[Dict[str, str]] = None,
projection_digits: Optional[Dict[str, str]] = None,
catalog_id: Optional[str] = None,
**pandas_kwargs: Any,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of ordinary files and enable more
complex features like partitioning and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
If database` and `table` arguments are passed, the table name and all column names
will be automatically sanitized using `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Please, pass `sanitize_columns=True` to enforce this behaviour always.
Note
----
If `table` and `database` arguments are passed, `pandas_kwargs` will be ignored due
restrictive quoting, date_format, escapechar and encoding required by Athena/Glue Catalog.
Note
----
Compression: The minimum acceptable version to achive it is Pandas 1.2.0 that requires Python >= 3.7.1.
Note
----
On `append` mode, the `parameters` will be upsert on an existing table.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : Optional[List[str]]
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receive None.
s3_additional_kwargs : Optional[Dict[str, Any]]
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
sanitize_columns : bool
True to sanitize columns names or False to keep it as is.
True value is forced if `dataset=True`.
dataset : bool
If True store a parquet dataset instead of a ordinary file(s)
If True, enable all follow arguments:
partition_cols, mode, database, table, description, parameters, columns_comments, concurrent_partitioning,
catalog_versioning, projection_enabled, projection_types, projection_ranges, projection_values,
projection_intervals, projection_digits, catalog_id, schema_evolution.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
bucketing_info: Tuple[List[str], int], optional
Tuple consisting of the column names used for bucketing as the first element and the number of buckets as the
second element.
Only `str`, `int` and `bool` are supported as column data types for bucketing.
concurrent_partitioning: bool
If True will increase the parallelism level during the partitions writing. It will decrease the
writing time and increase the memory usage.
https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/022%20-%20Writing%20Partitions%20Concurrently.ipynb
mode : str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
For details check the related tutorial:
https://aws-data-wrangler.readthedocs.io/en/stable/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet
catalog_versioning : bool
If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype : Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description : str, optional
Glue/Athena catalog: Table description
parameters : Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments : Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
regular_partitions : bool
Create regular partitions (Non projected partitions) on Glue Catalog.
Disable when you will work only with Partition Projection.
Keep enabled even when working with projections is useful to keep
Redshift Spectrum working with the regular partitions.
projection_enabled : bool
Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
projection_types : Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections types.
Valid types: "enum", "integer", "date", "injected"
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'enum', 'col2_name': 'integer'})
projection_ranges: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections ranges.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
projection_values: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections values.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
projection_intervals: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections intervals.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '5'})
projection_digits: Optional[Dict[str, str]]
Dictionary of partitions names and Athena projections digits.
https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
(e.g. {'col_name': '1', 'col2_name': '2'})
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
pandas_kwargs :
KEYWORD arguments forwarded to pandas.DataFrame.to_csv(). You can NOT pass `pandas_kwargs` explicit, just add
valid Pandas arguments in the function call and Wrangler will accept it.
e.g. wr.s3.to_csv(df, path, sep='|', na_rep='NULL', decimal=',')
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file with pandas_kwargs
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... sep='|',
... na_rep='NULL',
... decimal=','
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing bucketed dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... bucketing_info=(["col2"], 2)
... )
{
'paths': ['s3://.../x_bucket-00000.csv', 's3://.../col2=B/x_bucket-00001.csv'],
'partitions_values: {}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values: {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
'partitions_values: {}
}
"""
if "pandas_kwargs" in pandas_kwargs:
raise exceptions.InvalidArgument(
"You can NOT pass `pandas_kwargs` explicit, just add valid "
"Pandas arguments in the function call and Wrangler will accept it."
"e.g. wr.s3.to_csv(df, path, sep='|', na_rep='NULL', decimal=',', compression='gzip')"
)
if pandas_kwargs.get("compression") and str(pd.__version__) < LooseVersion("1.2.0"):
raise exceptions.InvalidArgument(
f"CSV compression on S3 is not supported for Pandas version {pd.__version__}. "
"The minimum acceptable version to achive it is Pandas 1.2.0 that requires Python >=3.7.1."
)
_validate_args(
df=df,
table=table,
database=database,
dataset=dataset,
path=path,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
description=description,
parameters=parameters,
columns_comments=columns_comments,
)
# Initializing defaults
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
partitions_values: Dict[str, List[str]] = {}
mode = "append" if mode is None else mode
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# Sanitize table to respect Athena's standards
if (sanitize_columns is True) or (database is not None and table is not None):
df, dtype, partition_cols = _sanitize(
df=df, dtype=dtype, partition_cols=partition_cols
)
# Evaluating dtype
catalog_table_input: Optional[Dict[str, Any]] = None
if database and table:
catalog_table_input = catalog._get_table_input( # pylint: disable=protected-access
database=database, table=table, boto3_session=session, catalog_id=catalog_id
)
if pandas_kwargs.get("compression") not in ("gzip", "bz2", None):
raise exceptions.InvalidArgumentCombination(
"If database and table are given, you must use one of these compressions: gzip, bz2 or None."
)
df = _apply_dtype(
df=df, dtype=dtype, catalog_table_input=catalog_table_input, mode=mode
)
if dataset is False:
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(
file_format="csv",
df=df,
use_threads=use_threads,
path=path,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
paths = [path]
else:
if database and table:
quoting: Optional[int] = csv.QUOTE_NONE
escapechar: Optional[str] = "\\"
header: Union[bool, List[str]] = False
date_format: Optional[str] = "%Y-%m-%d %H:%M:%S.%f"
pd_kwargs: Dict[str, Any] = {}
compression: Optional[str] = pandas_kwargs.get("compression", None)
else:
quoting = pandas_kwargs.get("quoting", None)
escapechar = pandas_kwargs.get("escapechar", None)
header = pandas_kwargs.get("header", True)
date_format = pandas_kwargs.get("date_format", None)
compression = pandas_kwargs.get("compression", None)
pd_kwargs = pandas_kwargs.copy()
pd_kwargs.pop("quoting", None)
pd_kwargs.pop("escapechar", None)
pd_kwargs.pop("header", None)
pd_kwargs.pop("date_format", None)
pd_kwargs.pop("compression", None)
df = df[columns] if columns else df
paths, partitions_values = _to_dataset(
func=_to_text,
concurrent_partitioning=concurrent_partitioning,
df=df,
path_root=path,
index=index,
sep=sep,
compression=compression,
use_threads=use_threads,
partition_cols=partition_cols,
bucketing_info=bucketing_info,
mode=mode,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
file_format="csv",
quoting=quoting,
escapechar=escapechar,
header=header,
date_format=date_format,
**pd_kwargs,
)
if database and table:
try:
columns_types, partitions_types = (
_data_types.athena_types_from_pandas_partitioned(
df=df,
index=index,
partition_cols=partition_cols,
dtype=dtype,
index_left=True,
)
)
catalog._create_csv_table( # pylint: disable=protected-access
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
bucketing_info=bucketing_info,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode=mode,
catalog_versioning=catalog_versioning,
sep=sep,
projection_enabled=projection_enabled,
projection_types=projection_types,
projection_ranges=projection_ranges,
projection_values=projection_values,
projection_intervals=projection_intervals,
projection_digits=projection_digits,
catalog_table_input=catalog_table_input,
catalog_id=catalog_id,
compression=pandas_kwargs.get("compression"),
skip_header_line_count=None,
)
if partitions_values and (regular_partitions is True):
_logger.debug("partitions_values:\n%s", partitions_values)
catalog.add_csv_partitions(
database=database,
table=table,
partitions_values=partitions_values,
bucketing_info=bucketing_info,
boto3_session=session,
sep=sep,
catalog_id=catalog_id,
columns_types=columns_types,
compression=pandas_kwargs.get("compression"),
)
except Exception:
_logger.debug(
"Catalog write failed, cleaning up S3 (paths: %s).", paths
)
delete_objects(
path=paths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
raise
return {"paths": paths, "partitions_values": partitions_values}
|
https://github.com/awslabs/aws-data-wrangler/issues/549
|
Collecting awswrangler
Downloading https://files.pythonhosted.org/packages/28/57/debf8f714d5b1a14ce39a6e8bbd1105beaab8490f96c2673fe04bf27cedb/awswrangler-2.4.0-py3-none-any.whl (171kB)
Requirement already satisfied: pandas<1.3.0,>=1.1.0 in /usr/local/lib64/python3.7/site-packages (from awswrangler)
Requirement already satisfied: boto3<2.0.0,>=1.12.49 in /usr/local/lib/python3.7/site-packages (from awswrangler)
Collecting pyarrow<3.1.0,>=2.0.0 (from awswrangler)
Downloading https://files.pythonhosted.org/packages/62/d3/a482d8a4039bf931ed6388308f0cc0541d0cab46f0bbff7c897a74f1c576/pyarrow-3.0.0.tar.gz (682kB)
Complete output from command python setup.py egg_info:
/usr/lib64/python3.7/distutils/dist.py:274: UserWarning: Unknown distribution option: 'long_description_content_type'
warnings.warn(msg)
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/mnt/tmp/pip-build-00riuili/pyarrow/setup.py", line 624, in <module>
url='https://arrow.apache.org/'
File "/usr/lib/python3.7/site-packages/setuptools/__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "/usr/lib64/python3.7/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/usr/lib/python3.7/site-packages/setuptools/dist.py", line 333, in __init__
_Distribution.__init__(self, attrs)
File "/usr/lib64/python3.7/distutils/dist.py", line 292, in __init__
self.finalize_options()
File "/usr/lib/python3.7/site-packages/setuptools/dist.py", line 476, in finalize_options
ep.load()(self, ep.name, value)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/integration.py", line 26, in version_keyword
dist.metadata.version = _get_version(config)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 173, in _get_version
parsed_version = _do_parse(config)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 119, in _do_parse
parse_result = _call_entrypoint_fn(config.absolute_root, config, config.parse)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/__init__.py", line 54, in _call_entrypoint_fn
return fn(root)
File "/mnt/tmp/pip-build-00riuili/pyarrow/setup.py", line 542, in parse_git
return parse(root, **kwargs)
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/git.py", line 95, in parse
require_command("git")
File "/mnt/tmp/pip-build-00riuili/pyarrow/.eggs/setuptools_scm-5.0.1-py3.7.egg/setuptools_scm/utils.py", line 151, in require_command
raise EnvironmentError("%r was not found" % name)
OSError: 'git' was not found
----------------------------------------
|
OSError
|
def unload(
sql: str,
path: str,
con: redshift_connector.Connection,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region: Optional[str] = None,
max_file_size: Optional[float] = None,
kms_key_id: Optional[str] = None,
categories: Optional[List[str]] = None,
chunked: Union[bool, int] = False,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Load Pandas DataFrame from a Amazon Redshift query result using Parquet files on s3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to
`wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` to extract large
Amazon Redshift data into a Pandas DataFrames through the **UNLOAD command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` function,
so it is only recommended to fetch 1k+ rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
Note
----
``Batching`` (`chunked` argument) (Memory Friendly):
Will anable the function to return a Iterable of DataFrames instead of a regular DataFrame.
There are two batching strategies on Wrangler:
- If **chunked=True**, a new DataFrame will be returned for each file in your path/dataset.
- If **chunked=INTEGER**, Wrangler will iterate on the data by number of rows igual the received INTEGER.
`P.S.` `chunked=True` if faster and uses less memory while `chunked=INTEGER` is more precise
in number of rows for each Dataframe.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
sql: str
SQL query.
path : Union[str, List[str]]
S3 path to write stage files (e.g. s3://bucket_name/any_name/)
con : redshift_connector.Connection
Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
region : str, optional
Specifies the AWS Region where the target Amazon S3 bucket is located.
REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
assumes that the target Amazon S3 bucket is located in the same AWS
Region as the Amazon Redshift cluster.
max_file_size : float, optional
Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default
maximum file size is 6200.0 MB.
kms_key_id : str, optional
Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
used to encrypt data files on Amazon S3.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
keep_files : bool
Should keep stage files?
chunked : Union[int, bool]
If passed will split the data in a Iterable of DataFrames (Memory friendly).
If `True` wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize.
If an `INTEGER` is passed Wrangler will iterate on the data by number of rows igual the received INTEGER.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs:
Forward to botocore requests, only "SSECustomerAlgorithm" and "SSECustomerKey" arguments will be considered.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> df = wr.db.unload(
... sql="SELECT * FROM public.mytable",
... path="s3://bucket/extracted_parquet_files/",
... con=con,
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
unload_to_files(
sql=sql,
path=path,
con=con,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region=region,
max_file_size=max_file_size,
kms_key_id=kms_key_id,
manifest=False,
use_threads=use_threads,
boto3_session=session,
)
if chunked is False:
df: pd.DataFrame = s3.read_parquet(
path=path,
categories=categories,
chunked=chunked,
dataset=False,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
if keep_files is False:
s3.delete_objects(path=path, use_threads=use_threads, boto3_session=session)
return df
return _read_parquet_iterator(
path=path,
categories=categories,
chunked=chunked,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
keep_files=keep_files,
)
|
def unload(
sql: str,
path: str,
con: redshift_connector.Connection,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region: Optional[str] = None,
max_file_size: Optional[float] = None,
kms_key_id: Optional[str] = None,
categories: Optional[List[str]] = None,
chunked: Union[bool, int] = False,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Load Pandas DataFrame from a Amazon Redshift query result using Parquet files on s3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to
`wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` to extract large
Amazon Redshift data into a Pandas DataFrames through the **UNLOAD command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` function,
so it is only recommended to fetch 1k+ rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
Note
----
``Batching`` (`chunked` argument) (Memory Friendly):
Will anable the function to return a Iterable of DataFrames instead of a regular DataFrame.
There are two batching strategies on Wrangler:
- If **chunked=True**, a new DataFrame will be returned for each file in your path/dataset.
- If **chunked=INTEGER**, Wrangler will iterate on the data by number of rows igual the received INTEGER.
`P.S.` `chunked=True` if faster and uses less memory while `chunked=INTEGER` is more precise
in number of rows for each Dataframe.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
sql: str
SQL query.
path : Union[str, List[str]]
S3 path to write stage files (e.g. s3://bucket_name/any_name/)
con : redshift_connector.Connection
Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
region : str, optional
Specifies the AWS Region where the target Amazon S3 bucket is located.
REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
assumes that the target Amazon S3 bucket is located in the same AWS
Region as the Amazon Redshift cluster.
max_file_size : float, optional
Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default
maximum file size is 6200.0 MB.
kms_key_id : str, optional
Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
used to encrypt data files on Amazon S3.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
keep_files : bool
Should keep the stage files?
chunked : Union[int, bool]
If passed will split the data in a Iterable of DataFrames (Memory friendly).
If `True` wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize.
If an `INTEGER` is passed Wrangler will iterate on the data by number of rows igual the received INTEGER.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs:
Forward to botocore requests, only "SSECustomerAlgorithm" and "SSECustomerKey" arguments will be considered.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> df = wr.db.unload(
... sql="SELECT * FROM public.mytable",
... path="s3://bucket/extracted_parquet_files/",
... con=con,
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
unload_to_files(
sql=sql,
path=path,
con=con,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region=region,
max_file_size=max_file_size,
kms_key_id=kms_key_id,
manifest=False,
use_threads=use_threads,
boto3_session=session,
)
if chunked is False:
df: pd.DataFrame = s3.read_parquet(
path=path,
categories=categories,
chunked=chunked,
dataset=False,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
if keep_files is False:
s3.delete_objects(path=path, use_threads=use_threads, boto3_session=session)
return df
return _read_parquet_iterator(
path=path,
categories=categories,
chunked=chunked,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
keep_files=keep_files,
)
|
https://github.com/awslabs/aws-data-wrangler/issues/505
|
---------------------------------------------------------------------------
ProgrammingError Traceback (most recent call last)
<ipython-input-39-489fc07fb9d7> in <module>
2
3
----> 4 wr.redshift.copy(
5 df=df,
6 path=path,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in copy(df, path, con, table, schema, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, index, dtype, mode, diststyle, distkey, sortstyle, sortkey, primary_keys, varchar_lengths_default, varchar_lengths, keep_files, use_threads, boto3_session, s3_additional_kwargs, max_rows_by_file)
1345 max_rows_by_file=max_rows_by_file,
1346 )
-> 1347 copy_from_files(
1348 path=path,
1349 con=con,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in copy_from_files(path, con, table, schema, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, parquet_infer_sampling, mode, diststyle, distkey, sortstyle, sortkey, primary_keys, varchar_lengths_default, varchar_lengths, path_suffix, path_ignore_suffix, use_threads, boto3_session, s3_additional_kwargs)
1170 s3_additional_kwargs=s3_additional_kwargs,
1171 )
-> 1172 _copy(
1173 cursor=cursor,
1174 path=path,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in _copy(cursor, path, table, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, boto3_session, schema)
112 sql: str = f"COPY {table_name} FROM '{path}'{auth_str}\nFORMAT AS PARQUET"
113 _logger.debug("copy query:\n%s", sql)
--> 114 cursor.execute(sql)
115
116
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/cursor.py in execute(self, operation, args, stream, merge_socket_read)
160 self._c.execute(self, "begin transaction", None)
161 self._c.merge_socket_read = merge_socket_read
--> 162 self._c.execute(self, operation, args)
163 except AttributeError as e:
164 raise e
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/core.py in execute(self, cursor, operation, vals)
1095 self.handle_messages_merge_socket_read(cursor)
1096 else:
-> 1097 self.handle_messages(cursor)
1098
1099 def _send_message(self: "Connection", code: bytes, data: bytes) -> None:
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/core.py in handle_messages(self, cursor)
1170
1171 if self.error is not None:
-> 1172 raise self.error
1173
1174 def handle_messages_merge_socket_read(self: "Connection", cursor: Cursor):
ProgrammingError: {'S': 'ERROR', 'C': 'XX000', 'M': 'Spectrum Scan Error', 'D': '\n -----------------------------------------------\n error: Spectrum Scan Error\n code: 15001\n context: The length of the data column data is longer than the length defined in the table. Table: 256, Data: 2737, File name: https://s3.us-west-2.amazonaws.com/{**}/3a13c036d1ac_0.snappy.parquet?&amp;X-Amz-Al\n query: 69281625\n location: dory_util.cpp:945\n process: fetchtask_thread [pid=1668]\n -----------------------------------------------\n', 'F': '/home/ec2-user/padb/src/sys/xen_execute.cpp', 'L': '8948', 'R': 'pg_throw'}
|
ProgrammingError
|
def copy( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
con: redshift_connector.Connection,
table: str,
schema: str,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
index: bool = False,
dtype: Optional[Dict[str, str]] = None,
mode: str = "append",
diststyle: str = "AUTO",
distkey: Optional[str] = None,
sortstyle: str = "COMPOUND",
sortkey: Optional[List[str]] = None,
primary_keys: Optional[List[str]] = None,
varchar_lengths_default: int = 256,
varchar_lengths: Optional[Dict[str, int]] = None,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
max_rows_by_file: Optional[int] = 10_000_000,
) -> None:
"""Load Pandas DataFrame as a Table on Amazon Redshift using parquet files on S3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.to_sql()` to load large
DataFrames into Amazon Redshift through the ** SQL COPY command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.to_sql()` function, so it is only recommended
to inserting +1K rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
Note
----
If the table does not exist yet,
it will be automatically created for you
using the Parquet metadata to
infer the columns data types.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame.
path : str
S3 path to write stage files (e.g. s3://bucket_name/any_name/).
Note: This path must be empty.
con : redshift_connector.Connection
Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
table : str
Table name
schema : str
Schema name
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
dtype: Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
mode : str
Append, overwrite or upsert.
diststyle : str
Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
distkey : str, optional
Specifies a column name or positional number for the distribution key.
sortstyle : str
Sorting can be "COMPOUND" or "INTERLEAVED".
https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
sortkey : List[str], optional
List of columns to be sorted.
primary_keys : List[str], optional
Primary keys.
varchar_lengths_default : int
The size that will be set for all VARCHAR columns not specified with varchar_lengths.
varchar_lengths : Dict[str, int], optional
Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
keep_files : bool
Should keep stage files?
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs:
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
max_rows_by_file : int
Max number of rows in each file.
Default is None i.e. dont split the files.
(e.g. 33554432, 268435456)
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> import pandas as pd
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> wr.db.copy(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path="s3://bucket/my_parquet_files/",
... con=con,
... table="my_table",
... schema="public"
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
path = path[:-1] if path.endswith("*") else path
path = path if path.endswith("/") else f"{path}/"
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if s3.list_objects(path=path, boto3_session=session):
raise exceptions.InvalidArgument(
f"The received S3 path ({path}) is not empty. "
"Please, provide a different path or use wr.s3.delete_objects() to clean up the current one."
)
try:
s3.to_parquet(
df=df,
path=path,
index=index,
dataset=True,
mode="append",
dtype=dtype,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
max_rows_by_file=max_rows_by_file,
)
copy_from_files(
path=path,
con=con,
table=table,
schema=schema,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
mode=mode,
diststyle=diststyle,
distkey=distkey,
sortstyle=sortstyle,
sortkey=sortkey,
primary_keys=primary_keys,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
finally:
if keep_files is False:
s3.delete_objects(path=path, use_threads=use_threads, boto3_session=session)
|
def copy( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
con: redshift_connector.Connection,
table: str,
schema: str,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
index: bool = False,
dtype: Optional[Dict[str, str]] = None,
mode: str = "append",
diststyle: str = "AUTO",
distkey: Optional[str] = None,
sortstyle: str = "COMPOUND",
sortkey: Optional[List[str]] = None,
primary_keys: Optional[List[str]] = None,
varchar_lengths_default: int = 256,
varchar_lengths: Optional[Dict[str, int]] = None,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
max_rows_by_file: Optional[int] = 10_000_000,
) -> None:
"""Load Pandas DataFrame as a Table on Amazon Redshift using parquet files on S3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.to_sql()` to load large
DataFrames into Amazon Redshift through the ** SQL COPY command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.to_sql()` function, so it is only recommended
to inserting +1K rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
Note
----
If the table does not exist yet,
it will be automatically created for you
using the Parquet metadata to
infer the columns data types.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame.
path : str
S3 path to write stage files (e.g. s3://bucket_name/any_name/).
Note: This path must be empty.
con : redshift_connector.Connection
Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
table : str
Table name
schema : str
Schema name
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
dtype: Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
mode : str
Append, overwrite or upsert.
diststyle : str
Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
distkey : str, optional
Specifies a column name or positional number for the distribution key.
sortstyle : str
Sorting can be "COMPOUND" or "INTERLEAVED".
https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
sortkey : List[str], optional
List of columns to be sorted.
primary_keys : List[str], optional
Primary keys.
varchar_lengths_default : int
The size that will be set for all VARCHAR columns not specified with varchar_lengths.
varchar_lengths : Dict[str, int], optional
Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
keep_files : bool
Should keep the stage files?
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
s3_additional_kwargs:
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
max_rows_by_file : int
Max number of rows in each file.
Default is None i.e. dont split the files.
(e.g. 33554432, 268435456)
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> import pandas as pd
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> wr.db.copy(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path="s3://bucket/my_parquet_files/",
... con=con,
... table="my_table",
... schema="public"
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
path = path[:-1] if path.endswith("*") else path
path = path if path.endswith("/") else f"{path}/"
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if s3.list_objects(path=path, boto3_session=session):
raise exceptions.InvalidArgument(
f"The received S3 path ({path}) is not empty. "
"Please, provide a different path or use wr.s3.delete_objects() to clean up the current one."
)
s3.to_parquet(
df=df,
path=path,
index=index,
dataset=True,
mode="append",
dtype=dtype,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
max_rows_by_file=max_rows_by_file,
)
copy_from_files(
path=path,
con=con,
table=table,
schema=schema,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
mode=mode,
diststyle=diststyle,
distkey=distkey,
sortstyle=sortstyle,
sortkey=sortkey,
primary_keys=primary_keys,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
if keep_files is False:
s3.delete_objects(path=path, use_threads=use_threads, boto3_session=session)
|
https://github.com/awslabs/aws-data-wrangler/issues/505
|
---------------------------------------------------------------------------
ProgrammingError Traceback (most recent call last)
<ipython-input-39-489fc07fb9d7> in <module>
2
3
----> 4 wr.redshift.copy(
5 df=df,
6 path=path,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in copy(df, path, con, table, schema, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, index, dtype, mode, diststyle, distkey, sortstyle, sortkey, primary_keys, varchar_lengths_default, varchar_lengths, keep_files, use_threads, boto3_session, s3_additional_kwargs, max_rows_by_file)
1345 max_rows_by_file=max_rows_by_file,
1346 )
-> 1347 copy_from_files(
1348 path=path,
1349 con=con,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in copy_from_files(path, con, table, schema, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, parquet_infer_sampling, mode, diststyle, distkey, sortstyle, sortkey, primary_keys, varchar_lengths_default, varchar_lengths, path_suffix, path_ignore_suffix, use_threads, boto3_session, s3_additional_kwargs)
1170 s3_additional_kwargs=s3_additional_kwargs,
1171 )
-> 1172 _copy(
1173 cursor=cursor,
1174 path=path,
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/awswrangler/redshift.py in _copy(cursor, path, table, iam_role, aws_access_key_id, aws_secret_access_key, aws_session_token, boto3_session, schema)
112 sql: str = f"COPY {table_name} FROM '{path}'{auth_str}\nFORMAT AS PARQUET"
113 _logger.debug("copy query:\n%s", sql)
--> 114 cursor.execute(sql)
115
116
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/cursor.py in execute(self, operation, args, stream, merge_socket_read)
160 self._c.execute(self, "begin transaction", None)
161 self._c.merge_socket_read = merge_socket_read
--> 162 self._c.execute(self, operation, args)
163 except AttributeError as e:
164 raise e
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/core.py in execute(self, cursor, operation, vals)
1095 self.handle_messages_merge_socket_read(cursor)
1096 else:
-> 1097 self.handle_messages(cursor)
1098
1099 def _send_message(self: "Connection", code: bytes, data: bytes) -> None:
~/miniconda3/envs/leadgen/lib/python3.8/site-packages/redshift_connector/core.py in handle_messages(self, cursor)
1170
1171 if self.error is not None:
-> 1172 raise self.error
1173
1174 def handle_messages_merge_socket_read(self: "Connection", cursor: Cursor):
ProgrammingError: {'S': 'ERROR', 'C': 'XX000', 'M': 'Spectrum Scan Error', 'D': '\n -----------------------------------------------\n error: Spectrum Scan Error\n code: 15001\n context: The length of the data column data is longer than the length defined in the table. Table: 256, Data: 2737, File name: https://s3.us-west-2.amazonaws.com/{**}/3a13c036d1ac_0.snappy.parquet?&amp;X-Amz-Al\n query: 69281625\n location: dory_util.cpp:945\n process: fetchtask_thread [pid=1668]\n -----------------------------------------------\n', 'F': '/home/ec2-user/padb/src/sys/xen_execute.cpp', 'L': '8948', 'R': 'pg_throw'}
|
ProgrammingError
|
def try_it(
f: Callable[..., Any],
ex: Any,
ex_code: Optional[str] = None,
base: float = 1.0,
max_num_tries: int = 3,
**kwargs: Any,
) -> Any:
"""Run function with decorrelated Jitter.
Reference: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
delay: float = base
for i in range(max_num_tries):
try:
return f(**kwargs)
except ex as exception:
if ex_code is not None and hasattr(exception, "response"):
if exception.response["Error"]["Code"] != ex_code:
raise
if i == (max_num_tries - 1):
raise
delay = random.uniform(base, delay * 3)
_logger.error(
"Retrying %s | Fail number %s/%s | Exception: %s",
f,
i + 1,
max_num_tries,
exception,
)
time.sleep(delay)
|
def try_it(
f: Callable[..., Any],
ex: Any,
base: float = 1.0,
max_num_tries: int = 3,
**kwargs: Any,
) -> Any:
"""Run function with decorrelated Jitter.
Reference: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
delay: float = base
for i in range(max_num_tries):
try:
return f(**kwargs)
except ex as exception:
if i == (max_num_tries - 1):
raise exception
delay = random.uniform(base, delay * 3)
_logger.error(
"Retrying %s | Fail number %s/%s | Exception: %s",
f,
i + 1,
max_num_tries,
exception,
)
time.sleep(delay)
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def _start_query_execution(
sql: str,
wg_config: _WorkGroupConfig,
database: Optional[str] = None,
data_source: Optional[str] = None,
s3_output: Optional[str] = None,
workgroup: Optional[str] = None,
encryption: Optional[str] = None,
kms_key: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> str:
args: Dict[str, Any] = {"QueryString": sql}
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# s3_output
args["ResultConfiguration"] = {
"OutputLocation": _get_s3_output(
s3_output=s3_output, wg_config=wg_config, boto3_session=session
)
}
# encryption
if wg_config.enforced is True:
if wg_config.encryption is not None:
args["ResultConfiguration"]["EncryptionConfiguration"] = {
"EncryptionOption": wg_config.encryption
}
if wg_config.kms_key is not None:
args["ResultConfiguration"]["EncryptionConfiguration"]["KmsKey"] = (
wg_config.kms_key
)
else:
if encryption is not None:
args["ResultConfiguration"]["EncryptionConfiguration"] = {
"EncryptionOption": encryption
}
if kms_key is not None:
args["ResultConfiguration"]["EncryptionConfiguration"]["KmsKey"] = (
kms_key
)
# database
if database is not None:
args["QueryExecutionContext"] = {"Database": database}
if data_source is not None:
args["QueryExecutionContext"]["Catalog"] = data_source
# workgroup
if workgroup is not None:
args["WorkGroup"] = workgroup
client_athena: boto3.client = _utils.client(service_name="athena", session=session)
_logger.debug("args: \n%s", pprint.pformat(args))
response: Dict[str, Any] = _utils.try_it(
f=client_athena.start_query_execution,
ex=botocore.exceptions.ClientError,
ex_code="ThrottlingException",
max_num_tries=5,
**args,
)
return cast(str, response["QueryExecutionId"])
|
def _start_query_execution(
sql: str,
wg_config: _WorkGroupConfig,
database: Optional[str] = None,
data_source: Optional[str] = None,
s3_output: Optional[str] = None,
workgroup: Optional[str] = None,
encryption: Optional[str] = None,
kms_key: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> str:
args: Dict[str, Any] = {"QueryString": sql}
session: boto3.Session = _utils.ensure_session(session=boto3_session)
# s3_output
args["ResultConfiguration"] = {
"OutputLocation": _get_s3_output(
s3_output=s3_output, wg_config=wg_config, boto3_session=session
)
}
# encryption
if wg_config.enforced is True:
if wg_config.encryption is not None:
args["ResultConfiguration"]["EncryptionConfiguration"] = {
"EncryptionOption": wg_config.encryption
}
if wg_config.kms_key is not None:
args["ResultConfiguration"]["EncryptionConfiguration"]["KmsKey"] = (
wg_config.kms_key
)
else:
if encryption is not None:
args["ResultConfiguration"]["EncryptionConfiguration"] = {
"EncryptionOption": encryption
}
if kms_key is not None:
args["ResultConfiguration"]["EncryptionConfiguration"]["KmsKey"] = (
kms_key
)
# database
if database is not None:
args["QueryExecutionContext"] = {"Database": database}
if data_source is not None:
args["QueryExecutionContext"]["Catalog"] = data_source
# workgroup
if workgroup is not None:
args["WorkGroup"] = workgroup
client_athena: boto3.client = _utils.client(service_name="athena", session=session)
_logger.debug("args: \n%s", pprint.pformat(args))
response: Dict[str, Any] = client_athena.start_query_execution(**args)
return cast(str, response["QueryExecutionId"])
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def get_work_group(
workgroup: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Return information about the workgroup with the specified name.
Parameters
----------
workgroup : str
Work Group name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/athena.html#Athena.Client.get_work_group
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.get_work_group(workgroup='workgroup_name')
"""
client_athena: boto3.client = _utils.client(
service_name="athena", session=boto3_session
)
return cast(
Dict[str, Any],
_utils.try_it(
f=client_athena.get_work_group,
ex=botocore.exceptions.ClientError,
ex_code="ThrottlingException",
max_num_tries=5,
WorkGroup=workgroup,
),
)
|
def get_work_group(
workgroup: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Return information about the workgroup with the specified name.
Parameters
----------
workgroup : str
Work Group name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/athena.html#Athena.Client.get_work_group
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.get_work_group(workgroup='workgroup_name')
"""
client_athena: boto3.client = _utils.client(
service_name="athena", session=boto3_session
)
return cast(Dict[str, Any], client_athena.get_work_group(WorkGroup=workgroup))
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def wait_query(
query_execution_id: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Wait for the query end.
Parameters
----------
query_execution_id : str
Athena query execution ID.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
Dictionary with the get_query_execution response.
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.wait_query(query_execution_id='query-execution-id')
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
response: Dict[str, Any] = get_query_execution(
query_execution_id=query_execution_id, boto3_session=session
)
state: str = response["Status"]["State"]
while state not in _QUERY_FINAL_STATES:
time.sleep(_QUERY_WAIT_POLLING_DELAY)
response = get_query_execution(
query_execution_id=query_execution_id, boto3_session=session
)
state = response["Status"]["State"]
_logger.debug("state: %s", state)
_logger.debug("StateChangeReason: %s", response["Status"].get("StateChangeReason"))
if state == "FAILED":
raise exceptions.QueryFailed(response["Status"].get("StateChangeReason"))
if state == "CANCELLED":
raise exceptions.QueryCancelled(response["Status"].get("StateChangeReason"))
return response
|
def wait_query(
query_execution_id: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Wait for the query end.
Parameters
----------
query_execution_id : str
Athena query execution ID.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
Dictionary with the get_query_execution response.
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.wait_query(query_execution_id='query-execution-id')
"""
client_athena: boto3.client = _utils.client(
service_name="athena", session=boto3_session
)
response: Dict[str, Any] = client_athena.get_query_execution(
QueryExecutionId=query_execution_id
)
state: str = response["QueryExecution"]["Status"]["State"]
while state not in _QUERY_FINAL_STATES:
time.sleep(_QUERY_WAIT_POLLING_DELAY)
response = client_athena.get_query_execution(
QueryExecutionId=query_execution_id
)
state = response["QueryExecution"]["Status"]["State"]
_logger.debug("state: %s", state)
_logger.debug(
"StateChangeReason: %s",
response["QueryExecution"]["Status"].get("StateChangeReason"),
)
if state == "FAILED":
raise exceptions.QueryFailed(
response["QueryExecution"]["Status"].get("StateChangeReason")
)
if state == "CANCELLED":
raise exceptions.QueryCancelled(
response["QueryExecution"]["Status"].get("StateChangeReason")
)
return cast(Dict[str, Any], response["QueryExecution"])
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def get_query_execution(
query_execution_id: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Fetch query execution details.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/athena.html#Athena.Client.get_query_execution
Parameters
----------
query_execution_id : str
Athena query execution ID.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
Dictionary with the get_query_execution response.
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.get_query_execution(query_execution_id='query-execution-id')
"""
client_athena: boto3.client = _utils.client(
service_name="athena", session=boto3_session
)
response: Dict[str, Any] = _utils.try_it(
f=client_athena.get_query_execution,
ex=botocore.exceptions.ClientError,
ex_code="ThrottlingException",
max_num_tries=5,
QueryExecutionId=query_execution_id,
)
return cast(Dict[str, Any], response["QueryExecution"])
|
def get_query_execution(
query_execution_id: str, boto3_session: Optional[boto3.Session] = None
) -> Dict[str, Any]:
"""Fetch query execution details.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/athena.html#Athena.Client.get_query_execution
Parameters
----------
query_execution_id : str
Athena query execution ID.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
Dictionary with the get_query_execution response.
Examples
--------
>>> import awswrangler as wr
>>> res = wr.athena.get_query_execution(query_execution_id='query-execution-id')
"""
client_athena: boto3.client = _utils.client(
service_name="athena", session=boto3_session
)
response: Dict[str, Any] = client_athena.get_query_execution(
QueryExecutionId=query_execution_id
)
return cast(Dict[str, Any], response["QueryExecution"])
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def get_connection(
name: str,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Any]:
"""Get Glue connection details.
Parameters
----------
name : str
Connection name.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
API Response for:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.get_connection
Examples
--------
>>> import awswrangler as wr
>>> res = wr.catalog.get_connection(name='my_connection')
"""
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
res = _utils.try_it(
f=client_glue.get_connection,
ex=botocore.exceptions.ClientError,
ex_code="ThrottlingException",
max_num_tries=3,
**_catalog_id(catalog_id=catalog_id, Name=name, HidePassword=False),
)["Connection"]
if "ENCRYPTED_PASSWORD" in res["ConnectionProperties"]:
client_kms = _utils.client(service_name="kms", session=boto3_session)
pwd = client_kms.decrypt(
CiphertextBlob=base64.b64decode(
res["ConnectionProperties"]["ENCRYPTED_PASSWORD"]
)
)["Plaintext"]
res["ConnectionProperties"]["PASSWORD"] = pwd
return cast(Dict[str, Any], res)
|
def get_connection(
name: str,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Any]:
"""Get Glue connection details.
Parameters
----------
name : str
Connection name.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Dict[str, Any]
API Response for:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.get_connection
Examples
--------
>>> import awswrangler as wr
>>> res = wr.catalog.get_connection(name='my_connection')
"""
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
res = client_glue.get_connection(
**_catalog_id(catalog_id=catalog_id, Name=name, HidePassword=False)
)["Connection"]
if "ENCRYPTED_PASSWORD" in res["ConnectionProperties"]:
client_kms = _utils.client(service_name="kms", session=boto3_session)
pwd = client_kms.decrypt(
CiphertextBlob=base64.b64decode(
res["ConnectionProperties"]["ENCRYPTED_PASSWORD"]
)
)["Plaintext"]
res["ConnectionProperties"]["PASSWORD"] = pwd
return cast(Dict[str, Any], res)
|
https://github.com/awslabs/aws-data-wrangler/issues/465
|
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-19-1ccc4a4f07b9>", line 11, in query_convert_save
wr.athena.read_sql_query(sql=query,database='sensor-data-ingest',boto3_session=sess).to_csv(f'{eq_type}/{s_id}.csv.gz',index=False,compression='gzip')
File "/usr/local/lib/python3.7/site-packages/awswrangler/_config.py", line 361, in wrapper
return function(**args)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 744, in read_sql_query
boto3_session=session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 500, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_read.py", line 385, in _resolve_query_without_cache_ctas
categories=categories,
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 176, in _get_query_metadata
_query_execution_payload = wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)
File "/usr/local/lib/python3.7/site-packages/awswrangler/athena/_utils.py", line 668, in wait_query
response = client_athena.get_query_execution(QueryExecutionId=query_execution_id)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the GetQueryExecution operation (reached max retries: 5): Rate exceeded
|
botocore.exceptions.ClientError
|
def client(service_name: str, session: Optional[boto3.Session] = None) -> boto3.client:
"""Create a valid boto3.client."""
return ensure_session(session=session).client(
service_name=service_name, use_ssl=True, config=botocore_config()
)
|
def client(service_name: str, session: Optional[boto3.Session] = None) -> boto3.client:
"""Create a valid boto3.client."""
return ensure_session(session=session).client(
service_name=service_name,
use_ssl=True,
config=botocore.config.Config(
retries={"max_attempts": 5}, connect_timeout=10, max_pool_connections=10
),
)
|
https://github.com/awslabs/aws-data-wrangler/issues/403
|
File "/glue/lib/installation/okra_datalake/scraping.py", line 235, in copy_files
wr.s3.copy_objects(filtered_files, s3_src_dir, s3_dst_dir)
File "/glue/lib/installation/awswrangler/s3/_copy.py", line 187, in copy_objects
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
File "/glue/lib/installation/awswrangler/s3/_copy.py", line 19, in _copy_objects
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
File "/glue/lib/installation/awswrangler/_utils.py", line 78, in resource
retries=
{
"max_attempts": 10,
"mode": "adaptive"
}
, connect_timeout=10, max_pool_connections=30
File "/usr/local/lib/python3.6/site-packages/botocore/config.py", line 158, in __init__
self._validate_retry_configuration(self.retries)
File "/usr/local/lib/python3.6/site-packages/botocore/config.py", line 205, in _validate_retry_configuration
retry_config_option=key)
botocore.exceptions.InvalidRetryConfigurationError: Cannot provide retry configuration for "mode". Valid retry configuration options are: 'max_attempts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/runscript.py", line 142, in <module>
raise e_type(e_value).with_traceback(new_stack)
TypeError: __init__() takes 1 positional argument but 2 were given
|
botocore.exceptions.InvalidRetryConfigurationError
|
def resource(
service_name: str, session: Optional[boto3.Session] = None
) -> boto3.resource:
"""Create a valid boto3.resource."""
return ensure_session(session=session).resource(
service_name=service_name, use_ssl=True, config=botocore_config()
)
|
def resource(
service_name: str, session: Optional[boto3.Session] = None
) -> boto3.resource:
"""Create a valid boto3.resource."""
return ensure_session(session=session).resource(
service_name=service_name,
use_ssl=True,
config=botocore.config.Config(
retries={"max_attempts": 10, "mode": "adaptive"},
connect_timeout=10,
max_pool_connections=30,
),
)
|
https://github.com/awslabs/aws-data-wrangler/issues/403
|
File "/glue/lib/installation/okra_datalake/scraping.py", line 235, in copy_files
wr.s3.copy_objects(filtered_files, s3_src_dir, s3_dst_dir)
File "/glue/lib/installation/awswrangler/s3/_copy.py", line 187, in copy_objects
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
File "/glue/lib/installation/awswrangler/s3/_copy.py", line 19, in _copy_objects
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
File "/glue/lib/installation/awswrangler/_utils.py", line 78, in resource
retries=
{
"max_attempts": 10,
"mode": "adaptive"
}
, connect_timeout=10, max_pool_connections=30
File "/usr/local/lib/python3.6/site-packages/botocore/config.py", line 158, in __init__
self._validate_retry_configuration(self.retries)
File "/usr/local/lib/python3.6/site-packages/botocore/config.py", line 205, in _validate_retry_configuration
retry_config_option=key)
botocore.exceptions.InvalidRetryConfigurationError: Cannot provide retry configuration for "mode". Valid retry configuration options are: 'max_attempts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/runscript.py", line 142, in <module>
raise e_type(e_value).with_traceback(new_stack)
TypeError: __init__() takes 1 positional argument but 2 were given
|
botocore.exceptions.InvalidRetryConfigurationError
|
def _fetch_csv_result(
query_metadata: _QueryMetadata,
keep_files: bool,
chunksize: Optional[int],
use_threads: bool,
boto3_session: boto3.Session,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
_chunksize: Optional[int] = chunksize if isinstance(chunksize, int) else None
_logger.debug("_chunksize: %s", _chunksize)
if (
query_metadata.output_location is None
or query_metadata.output_location.endswith(".csv") is False
):
chunked = _chunksize is not None
return _empty_dataframe_response(chunked, query_metadata)
path: str = query_metadata.output_location
s3.wait_objects_exist(paths=[path], use_threads=False, boto3_session=boto3_session)
_logger.debug("Start CSV reading from %s", path)
ret = s3.read_csv(
path=[path],
dtype=query_metadata.dtype,
parse_dates=query_metadata.parse_timestamps,
converters=query_metadata.converters,
quoting=csv.QUOTE_ALL,
keep_default_na=False,
na_values=["", "NaN"],
chunksize=_chunksize,
skip_blank_lines=False,
use_threads=False,
boto3_session=boto3_session,
)
_logger.debug("Start type casting...")
_logger.debug(type(ret))
if _chunksize is None:
df = _fix_csv_types(
df=ret,
parse_dates=query_metadata.parse_dates,
binaries=query_metadata.binaries,
)
df = _apply_query_metadata(df=df, query_metadata=query_metadata)
if keep_files is False:
s3.delete_objects(
path=[path, f"{path}.metadata"],
use_threads=use_threads,
boto3_session=boto3_session,
)
return df
dfs = _fix_csv_types_generator(
dfs=ret,
parse_dates=query_metadata.parse_dates,
binaries=query_metadata.binaries,
)
dfs = _add_query_metadata_generator(dfs=dfs, query_metadata=query_metadata)
if keep_files is False:
return _delete_after_iterate(
dfs=dfs,
paths=[path, f"{path}.metadata"],
use_threads=use_threads,
boto3_session=boto3_session,
)
return dfs
|
def _fetch_csv_result(
query_metadata: _QueryMetadata,
keep_files: bool,
chunksize: Optional[int],
use_threads: bool,
boto3_session: boto3.Session,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
_chunksize: Optional[int] = chunksize if isinstance(chunksize, int) else None
_logger.debug("_chunksize: %s", _chunksize)
if (
query_metadata.output_location is None
or query_metadata.output_location.endswith(".csv") is False
):
chunked = _chunksize is not None
return _empty_dataframe_response(chunked, query_metadata)
path: str = query_metadata.output_location
s3.wait_objects_exist(paths=[path], use_threads=False, boto3_session=boto3_session)
_logger.debug("Start CSV reading from %s", path)
ret = s3.read_csv(
path=[path],
dtype=query_metadata.dtype,
parse_dates=query_metadata.parse_timestamps,
converters=query_metadata.converters,
quoting=csv.QUOTE_ALL,
keep_default_na=False,
na_values=[""],
chunksize=_chunksize,
skip_blank_lines=False,
use_threads=False,
boto3_session=boto3_session,
)
_logger.debug("Start type casting...")
_logger.debug(type(ret))
if _chunksize is None:
df = _fix_csv_types(
df=ret,
parse_dates=query_metadata.parse_dates,
binaries=query_metadata.binaries,
)
df = _apply_query_metadata(df=df, query_metadata=query_metadata)
if keep_files is False:
s3.delete_objects(
path=[path, f"{path}.metadata"],
use_threads=use_threads,
boto3_session=boto3_session,
)
return df
dfs = _fix_csv_types_generator(
dfs=ret,
parse_dates=query_metadata.parse_dates,
binaries=query_metadata.binaries,
)
dfs = _add_query_metadata_generator(dfs=dfs, query_metadata=query_metadata)
if keep_files is False:
return _delete_after_iterate(
dfs=dfs,
paths=[path, f"{path}.metadata"],
use_threads=use_threads,
boto3_session=boto3_session,
)
return dfs
|
https://github.com/awslabs/aws-data-wrangler/issues/351
|
TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "runner.py", line 35, in <module>
}, {})
File "/Users/quarentine/dev/upside/ami-reports/report-generators/athena-report-generator/src/aws.py", line 19, in lambda_handler
reportGenerator.generate_report(request)
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/aws_embedded_metrics/metric_scope/__init__.py", line 46, in wrapper
raise e
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/aws_embedded_metrics/metric_scope/__init__.py", line 44, in wrapper
return fn(*args, **kwargs)
File "/Users/quarentine/dev/upside/ami-reports/report-generators/athena-report-generator/src/app.py", line 84, in generate_report
df = wr.athena.read_sql_query(query, database=self._database_name, ctas_approach=False)
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/_config.py", line 263, in wrapper
return function(**args)
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/athena/_read.py", line 700, in read_sql_query
boto3_session=session,
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/athena/_read.py", line 504, in _resolve_query_without_cache
boto3_session=boto3_session,
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/athena/_read.py", line 441, in _resolve_query_without_cache_regular
boto3_session=boto3_session,
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/athena/_read.py", line 273, in _fetch_csv_result
boto3_session=boto3_session,
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/s3/_read_text.py", line 265, in read_csv
**pandas_kwargs,
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/s3/_read_text.py", line 128, in _read_text
ret = _read_text_file(path=paths[0], **args)
File "/Users/quarentine/dev/upside/ami-reports/report-generators/aws-data-wrangler/awswrangler/s3/_read_text.py", line 77, in _read_text_file
df: pd.DataFrame = parser_func(f, **pandas_kwargs)
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/pandas/io/parsers.py", line 676, in parser_f
return _read(filepath_or_buffer, kwds)
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/pandas/io/parsers.py", line 454, in _read
data = parser.read(nrows)
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/pandas/io/parsers.py", line 1133, in read
ret = self._engine.read(nrows)
File "/Users/quarentine/.virtualenvs/ami-report-generators/lib/python3.7/site-packages/pandas/io/parsers.py", line 2037, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 860, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 875, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 952, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 1084, in pandas._libs.parsers.TextReader._convert_column_data
File "pandas/_libs/parsers.pyx", line 1160, in pandas._libs.parsers.TextReader._convert_tokens
ValueError: cannot safely convert passed user dtype of float64 for object dtyped data in column 2
|
ValueError
|
def _get_table_input(
database: str,
table: str,
boto3_session: Optional[boto3.Session],
catalog_id: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
args: Dict[str, str] = {}
if catalog_id is not None:
args["CatalogId"] = catalog_id # pragma: no cover
args["DatabaseName"] = database
args["Name"] = table
try:
response: Dict[str, Any] = client_glue.get_table(**args)
except client_glue.exceptions.EntityNotFoundException:
return None
table_input: Dict[str, Any] = {}
for k, v in response["Table"].items():
if k in [
"Name",
"Description",
"Owner",
"LastAccessTime",
"LastAnalyzedTime",
"Retention",
"StorageDescriptor",
"PartitionKeys",
"ViewOriginalText",
"ViewExpandedText",
"TableType",
"Parameters",
"TargetTable",
]:
table_input[k] = v
return table_input
|
def _get_table_input(
database: str,
table: str,
boto3_session: Optional[boto3.Session],
catalog_id: Optional[str] = None,
) -> Optional[Dict[str, str]]:
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
args: Dict[str, str] = {}
if catalog_id is not None:
args["CatalogId"] = catalog_id # pragma: no cover
args["DatabaseName"] = database
args["Name"] = table
try:
response: Dict[str, Any] = client_glue.get_table(**args)
except client_glue.exceptions.EntityNotFoundException:
return None
if "DatabaseName" in response["Table"]:
del response["Table"]["DatabaseName"]
if "CreateTime" in response["Table"]:
del response["Table"]["CreateTime"]
if "UpdateTime" in response["Table"]:
del response["Table"]["UpdateTime"]
if "CreatedBy" in response["Table"]:
del response["Table"]["CreatedBy"]
if "IsRegisteredWithLakeFormation" in response["Table"]:
del response["Table"]["IsRegisteredWithLakeFormation"]
return response["Table"]
|
https://github.com/awslabs/aws-data-wrangler/issues/315
|
Traceback (most recent call last):
File "./src/upload_data_to_s3.py", line 88, in <module>
upload_local_files_to_dataset(full_refresh=False)
File "./src/upload_data_to_s3.py", line 83, in upload_local_files_to_dataset
partition_cols=['year'])
File "/home/circleci/project/venv/lib/python3.7/site-packages/awswrangler/s3.py", line 1217, in to_parquet
projection_digits=projection_digits,
File "/home/circleci/project/venv/lib/python3.7/site-packages/awswrangler/catalog.py", line 227, in create_parquet_table
projection_digits=projection_digits,
File "/home/circleci/project/venv/lib/python3.7/site-packages/awswrangler/catalog.py", line 1146, in _create_table
upsert_table_parameters(parameters=parameters, database=database, table=table, boto3_session=session)
File "/home/circleci/project/venv/lib/python3.7/site-packages/awswrangler/catalog.py", line 1660, in upsert_table_parameters
parameters=pars, database=database, table=table, catalog_id=catalog_id, boto3_session=session
File "/home/circleci/project/venv/lib/python3.7/site-packages/awswrangler/catalog.py", line 1715, in overwrite_table_parameters
client_glue.update_table(**args2)
File "/home/circleci/project/venv/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/circleci/project/venv/lib/python3.7/site-packages/botocore/client.py", line 608, in _make_api_call
api_params, operation_model, context=request_context)
File "/home/circleci/project/venv/lib/python3.7/site-packages/botocore/client.py", line 656, in _convert_to_request_dict
api_params, operation_model)
File "/home/circleci/project/venv/lib/python3.7/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Unknown parameter in TableInput: "CatalogId", must be one of: Name, Description, Owner, LastAccessTime, LastAnalyzedTime, Retention, StorageDescriptor, PartitionKeys, ViewOriginalText, ViewExpandedText, TableType, Parameters, TargetTable
|
botocore.exceptions.ParamValidationError
|
def databases(
limit: int = 100,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> pd.DataFrame:
"""Get a Pandas DataFrame with all listed databases.
Parameters
----------
limit : int, optional
Max number of tables to be returned.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
pandas.DataFrame
Pandas DataFrame filled by formatted infos.
Examples
--------
>>> import awswrangler as wr
>>> df_dbs = wr.catalog.databases()
"""
database_iter: Iterator[Dict[str, Any]] = get_databases(
catalog_id=catalog_id, boto3_session=boto3_session
)
dbs = itertools.islice(database_iter, limit)
df_dict: Dict[str, List] = {"Database": [], "Description": []}
for db in dbs:
df_dict["Database"].append(db["Name"])
df_dict["Description"].append(db.get("Description", ""))
return pd.DataFrame(data=df_dict)
|
def databases(
limit: int = 100,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> pd.DataFrame:
"""Get a Pandas DataFrame with all listed databases.
Parameters
----------
limit : int, optional
Max number of tables to be returned.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
pandas.DataFrame
Pandas DataFrame filled by formatted infos.
Examples
--------
>>> import awswrangler as wr
>>> df_dbs = wr.catalog.databases()
"""
database_iter: Iterator[Dict[str, Any]] = get_databases(
catalog_id=catalog_id, boto3_session=boto3_session
)
dbs = itertools.islice(database_iter, limit)
df_dict: Dict[str, List] = {"Database": [], "Description": []}
for db in dbs:
df_dict["Database"].append(db["Name"])
if "Description" in db:
df_dict["Description"].append(db["Description"])
else: # pragma: no cover
df_dict["Description"].append("")
return pd.DataFrame(data=df_dict)
|
https://github.com/awslabs/aws-data-wrangler/issues/294
|
wr.catalog.get_table_parameters(database="sa-m2", table="item")
{'CrawlerSchemaDeserializerVersion': '1.0', 'CrawlerSchemaSerializerVersion': '1.0', 'UPDATED_BY_CRAWLER': 'sa-m2', 'averageRecordSize': '25', 'classification': 'parquet', 'compressionType': 'none', 'objectCount': '54', 'recordCount': '12349020', 'sizeKey': '174628122', 'typeOfData': 'file'}
wr.catalog.get_table_description(database="sa-m2", table="item")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.8/site-packages/awswrangler/catalog.py", line 1670, in get_table_description
desc: str = response["Table"]["Description"]
KeyError: 'Description'
|
KeyError
|
def tables(
limit: int = 100,
catalog_id: Optional[str] = None,
database: Optional[str] = None,
search_text: Optional[str] = None,
name_contains: Optional[str] = None,
name_prefix: Optional[str] = None,
name_suffix: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> pd.DataFrame:
"""Get a DataFrame with tables filtered by a search term, prefix, suffix.
Parameters
----------
limit : int, optional
Max number of tables to be returned.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
database : str, optional
Database name.
search_text : str, optional
Select only tables with the given string in table's properties.
name_contains : str, optional
Select by a specific string on table name
name_prefix : str, optional
Select by a specific prefix on table name
name_suffix : str, optional
Select by a specific suffix on table name
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Iterator[Dict[str, Any]]
Pandas Dataframe filled by formatted infos.
Examples
--------
>>> import awswrangler as wr
>>> df_tables = wr.catalog.tables()
"""
if search_text is None:
table_iter = get_tables(
catalog_id=catalog_id,
database=database,
name_contains=name_contains,
name_prefix=name_prefix,
name_suffix=name_suffix,
boto3_session=boto3_session,
)
tbls: List[Dict[str, Any]] = list(itertools.islice(table_iter, limit))
else:
tbls = list(
search_tables(
text=search_text, catalog_id=catalog_id, boto3_session=boto3_session
)
)
if database is not None:
tbls = [x for x in tbls if x["DatabaseName"] == database]
if name_contains is not None:
tbls = [x for x in tbls if name_contains in x["Name"]]
if name_prefix is not None:
tbls = [x for x in tbls if x["Name"].startswith(name_prefix)]
if name_suffix is not None:
tbls = [x for x in tbls if x["Name"].endswith(name_suffix)]
tbls = tbls[:limit]
df_dict: Dict[str, List] = {
"Database": [],
"Table": [],
"Description": [],
"Columns": [],
"Partitions": [],
}
for tbl in tbls:
df_dict["Database"].append(tbl["DatabaseName"])
df_dict["Table"].append(tbl["Name"])
df_dict["Description"].append(tbl.get("Description", ""))
if "Columns" in tbl["StorageDescriptor"]:
df_dict["Columns"].append(
", ".join([x["Name"] for x in tbl["StorageDescriptor"]["Columns"]])
)
else:
df_dict["Columns"].append("") # pragma: no cover
if "PartitionKeys" in tbl:
df_dict["Partitions"].append(
", ".join([x["Name"] for x in tbl["PartitionKeys"]])
)
else:
df_dict["Partitions"].append("") # pragma: no cover
return pd.DataFrame(data=df_dict)
|
def tables(
limit: int = 100,
catalog_id: Optional[str] = None,
database: Optional[str] = None,
search_text: Optional[str] = None,
name_contains: Optional[str] = None,
name_prefix: Optional[str] = None,
name_suffix: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> pd.DataFrame:
"""Get a DataFrame with tables filtered by a search term, prefix, suffix.
Parameters
----------
limit : int, optional
Max number of tables to be returned.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
database : str, optional
Database name.
search_text : str, optional
Select only tables with the given string in table's properties.
name_contains : str, optional
Select by a specific string on table name
name_prefix : str, optional
Select by a specific prefix on table name
name_suffix : str, optional
Select by a specific suffix on table name
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Iterator[Dict[str, Any]]
Pandas Dataframe filled by formatted infos.
Examples
--------
>>> import awswrangler as wr
>>> df_tables = wr.catalog.tables()
"""
if search_text is None:
table_iter = get_tables(
catalog_id=catalog_id,
database=database,
name_contains=name_contains,
name_prefix=name_prefix,
name_suffix=name_suffix,
boto3_session=boto3_session,
)
tbls: List[Dict[str, Any]] = list(itertools.islice(table_iter, limit))
else:
tbls = list(
search_tables(
text=search_text, catalog_id=catalog_id, boto3_session=boto3_session
)
)
if database is not None:
tbls = [x for x in tbls if x["DatabaseName"] == database]
if name_contains is not None:
tbls = [x for x in tbls if name_contains in x["Name"]]
if name_prefix is not None:
tbls = [x for x in tbls if x["Name"].startswith(name_prefix)]
if name_suffix is not None:
tbls = [x for x in tbls if x["Name"].endswith(name_suffix)]
tbls = tbls[:limit]
df_dict: Dict[str, List] = {
"Database": [],
"Table": [],
"Description": [],
"Columns": [],
"Partitions": [],
}
for tbl in tbls:
df_dict["Database"].append(tbl["DatabaseName"])
df_dict["Table"].append(tbl["Name"])
if "Description" in tbl:
df_dict["Description"].append(tbl["Description"])
else:
df_dict["Description"].append("")
if "Columns" in tbl["StorageDescriptor"]:
df_dict["Columns"].append(
", ".join([x["Name"] for x in tbl["StorageDescriptor"]["Columns"]])
)
else:
df_dict["Columns"].append("") # pragma: no cover
if "PartitionKeys" in tbl:
df_dict["Partitions"].append(
", ".join([x["Name"] for x in tbl["PartitionKeys"]])
)
else:
df_dict["Partitions"].append("") # pragma: no cover
return pd.DataFrame(data=df_dict)
|
https://github.com/awslabs/aws-data-wrangler/issues/294
|
wr.catalog.get_table_parameters(database="sa-m2", table="item")
{'CrawlerSchemaDeserializerVersion': '1.0', 'CrawlerSchemaSerializerVersion': '1.0', 'UPDATED_BY_CRAWLER': 'sa-m2', 'averageRecordSize': '25', 'classification': 'parquet', 'compressionType': 'none', 'objectCount': '54', 'recordCount': '12349020', 'sizeKey': '174628122', 'typeOfData': 'file'}
wr.catalog.get_table_description(database="sa-m2", table="item")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.8/site-packages/awswrangler/catalog.py", line 1670, in get_table_description
desc: str = response["Table"]["Description"]
KeyError: 'Description'
|
KeyError
|
def get_table_description(
database: str,
table: str,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Optional[str]:
"""Get table description.
Parameters
----------
database : str
Database name.
table : str
Table name.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
Optional[str]
Description if exists.
Examples
--------
>>> import awswrangler as wr
>>> desc = wr.catalog.get_table_description(database="...", table="...")
"""
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
args: Dict[str, str] = {}
if catalog_id is not None:
args["CatalogId"] = catalog_id # pragma: no cover
args["DatabaseName"] = database
args["Name"] = table
response: Dict[str, Any] = client_glue.get_table(**args)
desc: Optional[str] = response["Table"].get("Description", None)
return desc
|
def get_table_description(
database: str,
table: str,
catalog_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> str:
"""Get table description.
Parameters
----------
database : str
Database name.
table : str
Table name.
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
str
Description.
Examples
--------
>>> import awswrangler as wr
>>> desc = wr.catalog.get_table_description(database="...", table="...")
"""
client_glue: boto3.client = _utils.client(
service_name="glue", session=boto3_session
)
args: Dict[str, str] = {}
if catalog_id is not None:
args["CatalogId"] = catalog_id # pragma: no cover
args["DatabaseName"] = database
args["Name"] = table
response: Dict[str, Any] = client_glue.get_table(**args)
desc: str = response["Table"]["Description"]
return desc
|
https://github.com/awslabs/aws-data-wrangler/issues/294
|
wr.catalog.get_table_parameters(database="sa-m2", table="item")
{'CrawlerSchemaDeserializerVersion': '1.0', 'CrawlerSchemaSerializerVersion': '1.0', 'UPDATED_BY_CRAWLER': 'sa-m2', 'averageRecordSize': '25', 'classification': 'parquet', 'compressionType': 'none', 'objectCount': '54', 'recordCount': '12349020', 'sizeKey': '174628122', 'typeOfData': 'file'}
wr.catalog.get_table_description(database="sa-m2", table="item")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.8/site-packages/awswrangler/catalog.py", line 1670, in get_table_description
desc: str = response["Table"]["Description"]
KeyError: 'Description'
|
KeyError
|
def _ensure_workgroup(
session: boto3.Session, workgroup: Optional[str] = None
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
if workgroup is not None:
res: Dict[str, Any] = get_work_group(workgroup=workgroup, boto3_session=session)
config: Dict[str, Any] = res["WorkGroup"]["Configuration"][
"ResultConfiguration"
]
wg_s3_output: Optional[str] = config.get("OutputLocation")
encrypt_config: Optional[Dict[str, str]] = config.get("EncryptionConfiguration")
wg_encryption: Optional[str] = (
None if encrypt_config is None else encrypt_config.get("EncryptionOption")
)
wg_kms_key: Optional[str] = (
None if encrypt_config is None else encrypt_config.get("KmsKey")
)
else:
wg_s3_output, wg_encryption, wg_kms_key = None, None, None
return wg_s3_output, wg_encryption, wg_kms_key
|
def _ensure_workgroup(
session: boto3.Session, workgroup: Optional[str] = None
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
if workgroup:
res: Dict[str, Any] = get_work_group(workgroup=workgroup, boto3_session=session)
config: Dict[str, Any] = res["WorkGroup"]["Configuration"][
"ResultConfiguration"
]
wg_s3_output: Optional[str] = config.get("OutputLocation")
wg_encryption: Optional[str] = config["EncryptionConfiguration"].get(
"EncryptionOption"
)
wg_kms_key: Optional[str] = config["EncryptionConfiguration"].get("KmsKey")
else:
wg_s3_output, wg_encryption, wg_kms_key = None, None, None
return wg_s3_output, wg_encryption, wg_kms_key
|
https://github.com/awslabs/aws-data-wrangler/issues/159
|
Traceback (most recent call last):
File "sample.py", line 6, in <module>
workgroup="bar",
File "/home/nagomiso/develop/.venv/lib/python3.7/site-packages/awswrangler/athena.py", line 408, in read_sql_query
wg_s3_output, _, _ = _ensure_workgroup(session=session, workgroup=workgroup)
File "/home/nagomiso/develop/.venv/lib/python3.7/site-packages/awswrangler/athena.py", line 546, in _ensure_workgroup
wg_encryption: Optional[str] = config["EncryptionConfiguration"].get("EncryptionOption")
KeyError: 'EncryptionConfiguration'
|
KeyError
|
def _read_parquet_path(
session_primitives: "SessionPrimitives",
path: str,
columns: Optional[List[str]] = None,
filters: Optional[Union[List[Tuple[Any]], List[List[Tuple[Any]]]]] = None,
procs_cpu_bound: Optional[int] = None,
wait_objects: bool = False,
wait_objects_timeout: Optional[float] = 10.0,
) -> pd.DataFrame:
"""
Read parquet data from S3
:param session_primitives: SessionPrimitives()
:param path: AWS S3 path (E.g. s3://bucket-name/folder_name/)
:param columns: Names of columns to read from the file
:param filters: List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
:param procs_cpu_bound: Number of cores used for CPU bound tasks
:param wait_objects: Wait for all files exists (Not valid when path is a directory) (Useful for eventual consistency situations)
:param wait_objects: Wait objects Timeout (seconds)
:return: Pandas DataFrame
"""
session: Session = session_primitives.session
if wait_objects is True:
logger.debug(f"waiting {path}...")
session.s3.wait_object_exists(path=path, timeout=wait_objects_timeout)
is_file: bool = True
else:
logger.debug(f"checking if {path} exists...")
is_file = session.s3.does_object_exists(path=path)
if is_file is False:
path = path[:-1] if path[-1] == "/" else path
logger.debug(f"is_file: {is_file}")
procs_cpu_bound = (
procs_cpu_bound
if procs_cpu_bound is not None
else session_primitives.procs_cpu_bound
if session_primitives.procs_cpu_bound is not None
else 1
)
use_threads: bool = True if procs_cpu_bound > 1 else False
logger.debug(f"Reading Parquet: {path}")
if is_file is True:
client_s3 = session.boto3_session.client(
service_name="s3", use_ssl=True, config=session.botocore_config
)
bucket, key = path.replace("s3://", "").split("/", 1)
obj = client_s3.get_object(Bucket=bucket, Key=key)
table = pq.ParquetFile(source=BytesIO(obj["Body"].read())).read(
columns=columns, use_threads=use_threads
)
else:
fs: S3FileSystem = get_fs(session_primitives=session_primitives)
fs = pa.filesystem._ensure_filesystem(fs)
fs.invalidate_cache()
table = pq.read_table(
source=path,
columns=columns,
filters=filters,
filesystem=fs,
use_threads=use_threads,
)
# Check if we lose some integer during the conversion (Happens when has some null value)
integers = [
field.name
for field in table.schema
if str(field.type).startswith("int") and field.name != "__index_level_0__"
]
logger.debug(f"Converting to Pandas: {path}")
df = table.to_pandas(use_threads=use_threads, integer_object_nulls=True)
logger.debug(f"Casting Int64 columns: {path}")
for c in integers:
if not str(df[c].dtype).startswith("int"):
df[c] = df[c].astype("Int64")
logger.debug(f"Done: {path}")
return df
|
def _read_parquet_path(
session_primitives: "SessionPrimitives",
path: str,
columns: Optional[List[str]] = None,
filters: Optional[Union[List[Tuple[Any]], List[List[Tuple[Any]]]]] = None,
procs_cpu_bound: Optional[int] = None,
wait_objects: bool = False,
wait_objects_timeout: Optional[float] = 10.0,
) -> pd.DataFrame:
"""
Read parquet data from S3
:param session_primitives: SessionPrimitives()
:param path: AWS S3 path (E.g. s3://bucket-name/folder_name/)
:param columns: Names of columns to read from the file
:param filters: List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
:param procs_cpu_bound: Number of cores used for CPU bound tasks
:param wait_objects: Wait for all files exists (Not valid when path is a directory) (Useful for eventual consistency situations)
:param wait_objects: Wait objects Timeout (seconds)
:return: Pandas DataFrame
"""
session: Session = session_primitives.session
if wait_objects is True:
logger.debug(f"waiting {path}...")
session.s3.wait_object_exists(path=path, timeout=wait_objects_timeout)
is_file: bool = True
else:
logger.debug(f"checking if {path} exists...")
is_file = session.s3.does_object_exists(path=path)
if is_file is False:
path = path[:-1] if path[-1] == "/" else path
logger.debug(f"is_file: {is_file}")
procs_cpu_bound = (
procs_cpu_bound
if procs_cpu_bound is not None
else session_primitives.procs_cpu_bound
if session_primitives.procs_cpu_bound is not None
else 1
)
use_threads: bool = True if procs_cpu_bound > 1 else False
logger.debug(f"Reading Parquet: {path}")
if is_file is True:
client_s3 = session.boto3_session.client(
service_name="s3", use_ssl=True, config=session.botocore_config
)
bucket, key = path.replace("s3://", "").split("/", 1)
obj = client_s3.get_object(Bucket=bucket, Key=key)
table = pq.ParquetFile(source=BytesIO(obj["Body"].read())).read(
columns=columns, use_threads=use_threads
)
else:
fs: S3FileSystem = get_fs(session_primitives=session_primitives)
fs = pa.filesystem._ensure_filesystem(fs)
fs.invalidate_cache()
table = pq.read_table(
source=path,
columns=columns,
filters=filters,
filesystem=fs,
use_threads=use_threads,
)
# Check if we lose some integer during the conversion (Happens when has some null value)
integers = [
field.name for field in table.schema if str(field.type).startswith("int")
]
logger.debug(f"Converting to Pandas: {path}")
df = table.to_pandas(use_threads=use_threads, integer_object_nulls=True)
for c in integers:
if not str(df[c].dtype).startswith("int"):
df[c] = df[c].astype("Int64")
logger.debug(f"Done: {path}")
return df
|
https://github.com/awslabs/aws-data-wrangler/issues/111
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
2896 try:
-> 2897 return self._engine.get_loc(key)
2898 except KeyError:
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
KeyError: '__index_level_0__'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-67-9e66c4b6764b> in <module>
----> 1 wr.pandas.read_parquet(path=PATH)
~/miniconda3/lib/python3.7/site-packages/awswrangler/pandas.py in read_parquet(self, path, columns, filters, procs_cpu_bound, wait_objects, wait_objects_timeout)
1373 procs_cpu_bound=procs_cpu_bound,
1374 wait_objects=wait_objects,
-> 1375 wait_objects_timeout=wait_objects_timeout)
1376 else:
1377 procs = []
~/miniconda3/lib/python3.7/site-packages/awswrangler/pandas.py in _read_parquet_paths(session_primitives, path, columns, filters, procs_cpu_bound, wait_objects, wait_objects_timeout)
1460 procs_cpu_bound=procs_cpu_bound,
1461 wait_objects=wait_objects,
-> 1462 wait_objects_timeout=wait_objects_timeout)
1463 return [df]
1464 else:
~/miniconda3/lib/python3.7/site-packages/awswrangler/pandas.py in _read_parquet_path(session_primitives, path, columns, filters, procs_cpu_bound, wait_objects, wait_objects_timeout)
1524 df = table.to_pandas(use_threads=use_threads, integer_object_nulls=True)
1525 for c in integers:
-> 1526 if not str(df[c].dtype).startswith("int"):
1527 df[c] = df[c].astype("Int64")
1528 logger.debug(f"Done: {path}")
~/miniconda3/lib/python3.7/site-packages/pandas/core/frame.py in __getitem__(self, key)
2993 if self.columns.nlevels > 1:
2994 return self._getitem_multilevel(key)
-> 2995 indexer = self.columns.get_loc(key)
2996 if is_integer(indexer):
2997 indexer = [indexer]
~/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
2897 return self._engine.get_loc(key)
2898 except KeyError:
-> 2899 return self._engine.get_loc(self._maybe_cast_indexer(key))
2900 indexer = self.get_indexer([key], method=method, tolerance=tolerance)
2901 if indexer.ndim > 1 or indexer.size > 1:
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
KeyError: '__index_level_0__'
|
KeyError
|
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
|
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos.get("video_duration")
if self.duration is None:
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def _reset_state(self):
"""Reinitializes the state of the parser. Used internally at
initialization and at the end of the parsing process.
"""
# could be 2 possible types of metadata:
# - file_metadata: Metadata of the container. Here are the tags setted
# by the user using `-metadata` ffmpeg option
# - stream_metadata: Metadata for each stream of the container.
self._inside_file_metadata = False
# this state is neeeded if `duration_tag_separator == "time="` because
# execution of ffmpeg decoding the whole file using `-f null -` appends
# to the output a the blocks "Stream mapping:" and "Output:", which
# should be ignored
self._inside_output = False
# flag which indicates that a default stream has not been found yet
self._default_stream_found = False
# current input file, stream and chapter, which will be built at runtime
self._current_input_file = {"streams": []}
self._current_stream = None
self._current_chapter = None
# resulting data of the parsing process
self.result = {
"video_found": False,
"audio_found": False,
"metadata": {},
"inputs": [],
}
|
def _reset_state(self):
"""Reinitializes the state of the parser. Used internally at
initialization and at the end of the parsing process.
"""
# could be 2 possible types of metadata:
# - file_metadata: Metadata of the container. Here are the tags setted
# by the user using `-metadata` ffmpeg option
# - stream_metadata: Metadata for each stream of the container.
self._inside_file_metadata = False
# this state is neeeded if `duration_tag_separator == "time="` because
# execution of ffmpeg decoding the whole file using `-f null -` appends
# to the output a the blocks "Stream mapping:" and "Output:", which
# should be ignored
self._inside_output = False
# flag which indicates that a default stream has not been found yet
self._default_stream_found = False
# current input file, stream and chapter, which will be built at runtime
self._current_input_file = {"streams": []}
self._current_stream = None
self._current_chapter = None
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def parse(self):
"""Parses the information returned by FFmpeg in stderr executing their binary
for a file with ``-i`` option and returns a dictionary with all data needed
by MoviePy.
"""
# chapters by input file
input_chapters = []
for line in self.infos.splitlines()[1:]:
if (
self.duration_tag_separator == "time="
and self.check_duration
and "time=" in line
):
# parse duration using file decodification
self.result["duration"] = self.parse_duration(line)
elif self._inside_output or line[0] != " ":
if self.duration_tag_separator == "time=" and not self._inside_output:
self._inside_output = True
# skip lines like "At least one output file must be specified"
elif not self._inside_file_metadata and line.startswith(" Metadata:"):
# enter " Metadata:" group
self._inside_file_metadata = True
elif line.startswith(" Duration:"):
# exit " Metadata:" group
self._inside_file_metadata = False
if self.check_duration and self.duration_tag_separator == "Duration: ":
self.result["duration"] = self.parse_duration(line)
# parse global bitrate (in kb/s)
bitrate_match = re.search(r"bitrate: (\d+) kb/s", line)
self.result["bitrate"] = (
int(bitrate_match.group(1)) if bitrate_match else None
)
# parse start time (in seconds)
start_match = re.search(r"start: (\d+\.?\d+)", line)
self.result["start"] = float(start_match.group(1)) if start_match else None
elif self._inside_file_metadata:
# file metadata line
field, value = self.parse_metadata_field_value(line)
self.result["metadata"].update({field: value})
elif line.startswith(" Stream "):
# exit stream " Metadata:"
if self._current_stream:
self._current_input_file["streams"].append(self._current_stream)
# get input number, stream number, language and type
main_info_match = re.search(
r"^\s{4}Stream\s#(\d+):(\d+)\(?(\w+)?\)?:\s(\w+):", line
)
(
input_number,
stream_number,
language,
stream_type,
) = main_info_match.groups()
input_number = int(input_number)
stream_number = int(stream_number)
stream_type_lower = stream_type.lower()
# start builiding the current stream
self._current_stream = {
"input_number": input_number,
"stream_number": stream_number,
"stream_type": stream_type_lower,
"language": language if language != "und" else None,
"default": not self._default_stream_found or line.endswith("(default)"),
}
self._default_stream_found = True
# for default streams, set their numbers globally, so it's
# easy to get without iterating all
if self._current_stream["default"]:
self.result[f"default_{stream_type_lower}_input_number"] = input_number
self.result[f"default_{stream_type_lower}_stream_number"] = (
stream_number
)
# exit chapter
if self._current_chapter:
input_chapters[input_number].append(self._current_chapter)
self._current_chapter = None
if "input_number" not in self._current_input_file:
# first input file
self._current_input_file["input_number"] = input_number
elif self._current_input_file["input_number"] != input_number:
# new input file
# include their chapters if there are for this input file
if len(input_chapters) >= input_number + 1:
self._current_input_file["chapters"] = input_chapters[input_number]
# add new input file to self.result
self.result["inputs"].append(self._current_input_file)
self._current_input_file = {"input_number": input_number}
# parse relevant data by stream type
try:
global_data, stream_data = self.parse_data_by_stream_type(
stream_type, line
)
except NotImplementedError as exc:
warnings.warn(
f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning
)
else:
self.result.update(global_data)
self._current_stream.update(stream_data)
elif line.startswith(" Metadata:"):
# enter group " Metadata:"
continue
elif self._current_stream:
# stream metadata line
if "metadata" not in self._current_stream:
self._current_stream["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
if self._current_stream["stream_type"] == "video":
field, value = self.video_metadata_type_casting(field, value)
if field == "rotate":
self.result["video_rotation"] = value
self._current_stream["metadata"][field] = value
elif line.startswith(" Chapter"):
# Chapter data line
if self._current_chapter:
# there is a previews chapter?
if len(input_chapters) < self._current_chapter["input_number"] + 1:
input_chapters.append([])
# include in the chapters by input matrix
input_chapters[self._current_chapter["input_number"]].append(
self._current_chapter
)
# extract chapter data
chapter_data_match = re.search(
r"^ Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)",
line,
)
input_number, chapter_number, start, end = chapter_data_match.groups()
# start building the chapter
self._current_chapter = {
"input_number": int(input_number),
"chapter_number": int(chapter_number),
"start": float(start),
"end": float(end),
}
elif self._current_chapter:
# inside chapter metadata
if "metadata" not in self._current_chapter:
self._current_chapter["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
self._current_chapter["metadata"][field] = value
# last input file, must be included in self.result
if self._current_input_file:
self._current_input_file["streams"].append(self._current_stream)
# include their chapters, if there are
if len(input_chapters) == self._current_input_file["input_number"] + 1:
self._current_input_file["chapters"] = input_chapters[
self._current_input_file["input_number"]
]
self.result["inputs"].append(self._current_input_file)
# some video duration utilities
if self.result["video_found"] and self.check_duration:
self.result["video_n_frames"] = int(
self.result["duration"] * self.result["video_fps"]
)
self.result["video_duration"] = self.result["duration"]
else:
self.result["video_n_frames"] = 1
self.result["video_duration"] = None
# We could have also recomputed duration from the number of frames, as follows:
# >>> result['video_duration'] = result['video_n_frames'] / result['video_fps']
result = self.result
# reset state of the parser
self._reset_state()
return result
|
def parse(self):
"""Parses the information returned by FFmpeg in stderr executing their binary
for a file with ``-i`` option and returns a dictionary with all data needed
by MoviePy.
"""
result = {
"video_found": False,
"audio_found": False,
"metadata": {},
"inputs": [],
}
# chapters by input file
input_chapters = []
for line in self.infos.splitlines()[1:]:
if (
self.duration_tag_separator == "time="
and self.check_duration
and "time=" in line
):
# parse duration using file decodification
result["duration"] = self.parse_duration(line)
elif self._inside_output or line[0] != " ":
if self.duration_tag_separator == "time=" and not self._inside_output:
self._inside_output = True
# skip lines like "At least one output file must be specified"
elif not self._inside_file_metadata and line.startswith(" Metadata:"):
# enter " Metadata:" group
self._inside_file_metadata = True
elif line.startswith(" Duration:"):
# exit " Metadata:" group
self._inside_file_metadata = False
if self.check_duration and self.duration_tag_separator == "Duration: ":
result["duration"] = self.parse_duration(line)
# parse global bitrate (in kb/s)
bitrate_match = re.search(r"bitrate: (\d+) kb/s", line)
result["bitrate"] = int(bitrate_match.group(1)) if bitrate_match else None
# parse start time (in seconds)
start_match = re.search(r"start: (\d+\.?\d+)", line)
result["start"] = float(start_match.group(1)) if start_match else None
elif self._inside_file_metadata:
# file metadata line
field, value = self.parse_metadata_field_value(line)
result["metadata"].update({field: value})
elif line.startswith(" Stream "):
# exit stream " Metadata:"
if self._current_stream:
self._current_input_file["streams"].append(self._current_stream)
# get input number, stream number, language and type
main_info_match = re.search(
r"^\s{4}Stream\s#(\d+):(\d+)\(?(\w+)?\)?:\s(\w+):", line
)
(
input_number,
stream_number,
language,
stream_type,
) = main_info_match.groups()
input_number = int(input_number)
stream_number = int(stream_number)
stream_type_lower = stream_type.lower()
# start builiding the current stream
self._current_stream = {
"input_number": input_number,
"stream_number": stream_number,
"stream_type": stream_type_lower,
"language": language if language != "und" else None,
"default": not self._default_stream_found or line.endswith("(default)"),
}
self._default_stream_found = True
# for default streams, set their numbers globally, so it's
# easy to get without iterating all
if self._current_stream["default"]:
result[f"default_{stream_type_lower}_input_number"] = input_number
result[f"default_{stream_type_lower}_stream_number"] = stream_number
# exit chapter
if self._current_chapter:
input_chapters[input_number].append(self._current_chapter)
self._current_chapter = None
if "input_number" not in self._current_input_file:
# first input file
self._current_input_file["input_number"] = input_number
elif self._current_input_file["input_number"] != input_number:
# new input file
# include their chapters if there are for this input file
if len(input_chapters) >= input_number + 1:
self._current_input_file["chapters"] = input_chapters[input_number]
# add new input file to result
result["inputs"].append(self._current_input_file)
self._current_input_file = {"input_number": input_number}
# parse relevant data by stream type
try:
global_data, stream_data = self.parse_data_by_stream_type(
stream_type, line
)
except NotImplementedError as exc:
warnings.warn(
f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning
)
else:
result.update(global_data)
self._current_stream.update(stream_data)
elif line.startswith(" Metadata:"):
# enter group " Metadata:"
continue
elif self._current_stream:
# stream metadata line
if "metadata" not in self._current_stream:
self._current_stream["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
if self._current_stream["stream_type"] == "video":
field, value = self.video_metadata_type_casting(field, value)
if field == "rotate":
result["video_rotation"] = value
self._current_stream["metadata"][field] = value
elif line.startswith(" Chapter"):
# Chapter data line
if self._current_chapter:
# there is a previews chapter?
if len(input_chapters) < self._current_chapter["input_number"] + 1:
input_chapters.append([])
# include in the chapters by input matrix
input_chapters[self._current_chapter["input_number"]].append(
self._current_chapter
)
# extract chapter data
chapter_data_match = re.search(
r"^ Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)",
line,
)
input_number, chapter_number, start, end = chapter_data_match.groups()
# start building the chapter
self._current_chapter = {
"input_number": int(input_number),
"chapter_number": int(chapter_number),
"start": float(start),
"end": float(end),
}
elif self._current_chapter:
# inside chapter metadata
if "metadata" not in self._current_chapter:
self._current_chapter["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
self._current_chapter["metadata"][field] = value
# last input file, must be included in the result
if self._current_input_file:
self._current_input_file["streams"].append(self._current_stream)
# include their chapters, if there are
if len(input_chapters) == self._current_input_file["input_number"] + 1:
self._current_input_file["chapters"] = input_chapters[
self._current_input_file["input_number"]
]
result["inputs"].append(self._current_input_file)
# some video duration utilities
if result["video_found"] and self.check_duration:
result["video_n_frames"] = int(result["duration"] * result["video_fps"])
result["video_duration"] = result["duration"]
else:
result["video_n_frames"] = 1
result["video_duration"] = None
# We could have also recomputed duration from the number of frames, as follows:
# >>> result['video_duration'] = result['video_n_frames'] / result['video_fps']
# reset state of the parser
self._reset_state()
return result
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def parse_video_stream_data(self, line):
"""Parses data from "Stream ... Video" line."""
global_data, stream_data = ({"video_found": True}, {})
try:
match_video_size = re.search(r" (\d+)x(\d+)[,\s]", line)
if match_video_size:
# size, of the form 460x320 (w x h)
stream_data["size"] = [int(num) for num in match_video_size.groups()]
except Exception:
raise IOError(
(
"MoviePy error: failed to read video dimensions in"
" file '%s'.\nHere are the file infos returned by"
"ffmpeg:\n\n%s"
)
% (self.filename, self.infos)
)
match_bitrate = re.search(r"(\d+) kb/s", line)
stream_data["bitrate"] = int(match_bitrate.group(1)) if match_bitrate else None
# Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
# tbc, and sometimes tbc/2...
# Current policy: Trust fps first, then tbr unless fps_source is
# specified as 'tbr' in which case try tbr then fps
# If result is near from x*1000/1001 where x is 23,24,25,50,
# replace by x*1000/1001 (very common case for the fps).
if self.fps_source == "fps":
try:
fps = self.parse_fps(line)
except (AttributeError, ValueError):
fps = self.parse_tbr(line)
elif self.fps_source == "tbr":
try:
fps = self.parse_tbr(line)
except (AttributeError, ValueError):
fps = self.parse_fps(line)
else:
raise ValueError(
("fps source '%s' not supported parsing the video '%s'")
% (self.fps_source, self.filename)
)
# It is known that a fps of 24 is often written as 24000/1001
# but then ffmpeg nicely rounds it to 23.98, which we hate.
coef = 1000.0 / 1001.0
for x in [23, 24, 25, 30, 50]:
if (fps != x) and abs(fps - x * coef) < 0.01:
fps = x * coef
stream_data["fps"] = fps
if self._current_stream["default"] or "video_size" not in self.result:
global_data["video_size"] = stream_data.get("size", None)
if self._current_stream["default"] or "video_bitrate" not in self.result:
global_data["video_bitrate"] = stream_data.get("bitrate", None)
if self._current_stream["default"] or "video_fps" not in self.result:
global_data["video_fps"] = stream_data["fps"]
return (global_data, stream_data)
|
def parse_video_stream_data(self, line):
"""Parses data from "Stream ... Video" line."""
global_data, stream_data = ({"video_found": True}, {})
try:
match_video_size = re.search(r" (\d+)x(\d+)[,\s]", line)
if match_video_size:
# size, of the form 460x320 (w x h)
stream_data["size"] = [int(num) for num in match_video_size.groups()]
except Exception:
raise IOError(
(
"MoviePy error: failed to read video dimensions in"
" file '%s'.\nHere are the file infos returned by"
"ffmpeg:\n\n%s"
)
% (self.filename, self.infos)
)
match_bitrate = re.search(r"(\d+) kb/s", line)
stream_data["bitrate"] = int(match_bitrate.group(1)) if match_bitrate else None
# Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
# tbc, and sometimes tbc/2...
# Current policy: Trust fps first, then tbr unless fps_source is
# specified as 'tbr' in which case try tbr then fps
# If result is near from x*1000/1001 where x is 23,24,25,50,
# replace by x*1000/1001 (very common case for the fps).
if self.fps_source == "fps":
try:
fps = self.parse_fps(line)
except (AttributeError, ValueError):
fps = self.parse_tbr(line)
elif self.fps_source == "tbr":
try:
fps = self.parse_tbr(line)
except (AttributeError, ValueError):
fps = self.parse_fps(line)
else:
raise ValueError(
("fps source '%s' not supported parsing the video '%s'")
% (self.fps_source, self.filename)
)
# It is known that a fps of 24 is often written as 24000/1001
# but then ffmpeg nicely rounds it to 23.98, which we hate.
coef = 1000.0 / 1001.0
for x in [23, 24, 25, 30, 50]:
if (fps != x) and abs(fps - x * coef) < 0.01:
fps = x * coef
stream_data["fps"] = fps
if self._current_stream["default"]:
global_data["video_size"] = stream_data.get("size", None)
global_data["video_bitrate"] = stream_data.get("bitrate", None)
global_data["video_fps"] = stream_data["fps"]
return (global_data, stream_data)
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def parse(self):
"""Parses the information returned by FFmpeg in stderr executing their binary
for a file with ``-i`` option and returns a dictionary with all data needed
by MoviePy.
"""
# chapters by input file
input_chapters = []
for line in self.infos.splitlines()[1:]:
if (
self.duration_tag_separator == "time="
and self.check_duration
and "time=" in line
):
# parse duration using file decodification
self.result["duration"] = self.parse_duration(line)
elif self._inside_output or line[0] != " ":
if self.duration_tag_separator == "time=" and not self._inside_output:
self._inside_output = True
# skip lines like "At least one output file must be specified"
elif not self._inside_file_metadata and line.startswith(" Metadata:"):
# enter " Metadata:" group
self._inside_file_metadata = True
elif line.startswith(" Duration:"):
# exit " Metadata:" group
self._inside_file_metadata = False
if self.check_duration and self.duration_tag_separator == "Duration: ":
self.result["duration"] = self.parse_duration(line)
# parse global bitrate (in kb/s)
bitrate_match = re.search(r"bitrate: (\d+) kb/s", line)
self.result["bitrate"] = (
int(bitrate_match.group(1)) if bitrate_match else None
)
# parse start time (in seconds)
start_match = re.search(r"start: (\d+\.?\d+)", line)
self.result["start"] = float(start_match.group(1)) if start_match else None
elif self._inside_file_metadata:
# file metadata line
field, value = self.parse_metadata_field_value(line)
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self.result["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self.result["metadata"][field] = value
elif line.startswith(" Stream "):
# exit stream " Metadata:"
if self._current_stream:
self._current_input_file["streams"].append(self._current_stream)
# get input number, stream number, language and type
main_info_match = re.search(
r"^\s{4}Stream\s#(\d+):(\d+)\(?(\w+)?\)?:\s(\w+):", line
)
(
input_number,
stream_number,
language,
stream_type,
) = main_info_match.groups()
input_number = int(input_number)
stream_number = int(stream_number)
stream_type_lower = stream_type.lower()
# start builiding the current stream
self._current_stream = {
"input_number": input_number,
"stream_number": stream_number,
"stream_type": stream_type_lower,
"language": language if language != "und" else None,
"default": not self._default_stream_found or line.endswith("(default)"),
}
self._default_stream_found = True
# for default streams, set their numbers globally, so it's
# easy to get without iterating all
if self._current_stream["default"]:
self.result[f"default_{stream_type_lower}_input_number"] = input_number
self.result[f"default_{stream_type_lower}_stream_number"] = (
stream_number
)
# exit chapter
if self._current_chapter:
input_chapters[input_number].append(self._current_chapter)
self._current_chapter = None
if "input_number" not in self._current_input_file:
# first input file
self._current_input_file["input_number"] = input_number
elif self._current_input_file["input_number"] != input_number:
# new input file
# include their chapters if there are for this input file
if len(input_chapters) >= input_number + 1:
self._current_input_file["chapters"] = input_chapters[input_number]
# add new input file to self.result
self.result["inputs"].append(self._current_input_file)
self._current_input_file = {"input_number": input_number}
# parse relevant data by stream type
try:
global_data, stream_data = self.parse_data_by_stream_type(
stream_type, line
)
except NotImplementedError as exc:
warnings.warn(
f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning
)
else:
self.result.update(global_data)
self._current_stream.update(stream_data)
elif line.startswith(" Metadata:"):
# enter group " Metadata:"
continue
elif self._current_stream:
# stream metadata line
if "metadata" not in self._current_stream:
self._current_stream["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
if self._current_stream["stream_type"] == "video":
field, value = self.video_metadata_type_casting(field, value)
if field == "rotate":
self.result["video_rotation"] = value
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self._current_stream["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self._current_stream["metadata"][field] = value
elif line.startswith(" Chapter"):
# Chapter data line
if self._current_chapter:
# there is a previews chapter?
if len(input_chapters) < self._current_chapter["input_number"] + 1:
input_chapters.append([])
# include in the chapters by input matrix
input_chapters[self._current_chapter["input_number"]].append(
self._current_chapter
)
# extract chapter data
chapter_data_match = re.search(
r"^ Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)",
line,
)
input_number, chapter_number, start, end = chapter_data_match.groups()
# start building the chapter
self._current_chapter = {
"input_number": int(input_number),
"chapter_number": int(chapter_number),
"start": float(start),
"end": float(end),
}
elif self._current_chapter:
# inside chapter metadata
if "metadata" not in self._current_chapter:
self._current_chapter["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self._current_chapter["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self._current_chapter["metadata"][field] = value
# last input file, must be included in self.result
if self._current_input_file:
self._current_input_file["streams"].append(self._current_stream)
# include their chapters, if there are
if len(input_chapters) == self._current_input_file["input_number"] + 1:
self._current_input_file["chapters"] = input_chapters[
self._current_input_file["input_number"]
]
self.result["inputs"].append(self._current_input_file)
# some video duration utilities
if self.result["video_found"] and self.check_duration:
self.result["video_n_frames"] = int(
self.result["duration"] * self.result["video_fps"]
)
self.result["video_duration"] = self.result["duration"]
else:
self.result["video_n_frames"] = 1
self.result["video_duration"] = None
# We could have also recomputed duration from the number of frames, as follows:
# >>> result['video_duration'] = result['video_n_frames'] / result['video_fps']
# not default audio found, assume first audio stream is the default
if self.result["audio_found"] and not self.result.get("audio_bitrate"):
for streams_input in self.result["inputs"]:
for stream in streams_input["streams"]:
if stream["stream_type"] == "audio" and stream.get("bitrate"):
self.result["audio_bitrate"] = stream["bitrate"]
break
if self.result.get("audio_bitrate"):
break
result = self.result
# reset state of the parser
self._reset_state()
return result
|
def parse(self):
"""Parses the information returned by FFmpeg in stderr executing their binary
for a file with ``-i`` option and returns a dictionary with all data needed
by MoviePy.
"""
# chapters by input file
input_chapters = []
for line in self.infos.splitlines()[1:]:
if (
self.duration_tag_separator == "time="
and self.check_duration
and "time=" in line
):
# parse duration using file decodification
self.result["duration"] = self.parse_duration(line)
elif self._inside_output or line[0] != " ":
if self.duration_tag_separator == "time=" and not self._inside_output:
self._inside_output = True
# skip lines like "At least one output file must be specified"
elif not self._inside_file_metadata and line.startswith(" Metadata:"):
# enter " Metadata:" group
self._inside_file_metadata = True
elif line.startswith(" Duration:"):
# exit " Metadata:" group
self._inside_file_metadata = False
if self.check_duration and self.duration_tag_separator == "Duration: ":
self.result["duration"] = self.parse_duration(line)
# parse global bitrate (in kb/s)
bitrate_match = re.search(r"bitrate: (\d+) kb/s", line)
self.result["bitrate"] = (
int(bitrate_match.group(1)) if bitrate_match else None
)
# parse start time (in seconds)
start_match = re.search(r"start: (\d+\.?\d+)", line)
self.result["start"] = float(start_match.group(1)) if start_match else None
elif self._inside_file_metadata:
# file metadata line
field, value = self.parse_metadata_field_value(line)
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self.result["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self.result["metadata"][field] = value
elif line.startswith(" Stream "):
# exit stream " Metadata:"
if self._current_stream:
self._current_input_file["streams"].append(self._current_stream)
# get input number, stream number, language and type
main_info_match = re.search(
r"^\s{4}Stream\s#(\d+):(\d+)\(?(\w+)?\)?:\s(\w+):", line
)
(
input_number,
stream_number,
language,
stream_type,
) = main_info_match.groups()
input_number = int(input_number)
stream_number = int(stream_number)
stream_type_lower = stream_type.lower()
# start builiding the current stream
self._current_stream = {
"input_number": input_number,
"stream_number": stream_number,
"stream_type": stream_type_lower,
"language": language if language != "und" else None,
"default": not self._default_stream_found or line.endswith("(default)"),
}
self._default_stream_found = True
# for default streams, set their numbers globally, so it's
# easy to get without iterating all
if self._current_stream["default"]:
self.result[f"default_{stream_type_lower}_input_number"] = input_number
self.result[f"default_{stream_type_lower}_stream_number"] = (
stream_number
)
# exit chapter
if self._current_chapter:
input_chapters[input_number].append(self._current_chapter)
self._current_chapter = None
if "input_number" not in self._current_input_file:
# first input file
self._current_input_file["input_number"] = input_number
elif self._current_input_file["input_number"] != input_number:
# new input file
# include their chapters if there are for this input file
if len(input_chapters) >= input_number + 1:
self._current_input_file["chapters"] = input_chapters[input_number]
# add new input file to self.result
self.result["inputs"].append(self._current_input_file)
self._current_input_file = {"input_number": input_number}
# parse relevant data by stream type
try:
global_data, stream_data = self.parse_data_by_stream_type(
stream_type, line
)
except NotImplementedError as exc:
warnings.warn(
f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning
)
else:
self.result.update(global_data)
self._current_stream.update(stream_data)
elif line.startswith(" Metadata:"):
# enter group " Metadata:"
continue
elif self._current_stream:
# stream metadata line
if "metadata" not in self._current_stream:
self._current_stream["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
if self._current_stream["stream_type"] == "video":
field, value = self.video_metadata_type_casting(field, value)
if field == "rotate":
self.result["video_rotation"] = value
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self._current_stream["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self._current_stream["metadata"][field] = value
elif line.startswith(" Chapter"):
# Chapter data line
if self._current_chapter:
# there is a previews chapter?
if len(input_chapters) < self._current_chapter["input_number"] + 1:
input_chapters.append([])
# include in the chapters by input matrix
input_chapters[self._current_chapter["input_number"]].append(
self._current_chapter
)
# extract chapter data
chapter_data_match = re.search(
r"^ Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)",
line,
)
input_number, chapter_number, start, end = chapter_data_match.groups()
# start building the chapter
self._current_chapter = {
"input_number": int(input_number),
"chapter_number": int(chapter_number),
"start": float(start),
"end": float(end),
}
elif self._current_chapter:
# inside chapter metadata
if "metadata" not in self._current_chapter:
self._current_chapter["metadata"] = {}
field, value = self.parse_metadata_field_value(line)
# multiline metadata value parsing
if field == "":
field = self._last_metadata_field_added
value = self._current_chapter["metadata"][field] + "\n" + value
else:
self._last_metadata_field_added = field
self._current_chapter["metadata"][field] = value
# last input file, must be included in self.result
if self._current_input_file:
self._current_input_file["streams"].append(self._current_stream)
# include their chapters, if there are
if len(input_chapters) == self._current_input_file["input_number"] + 1:
self._current_input_file["chapters"] = input_chapters[
self._current_input_file["input_number"]
]
self.result["inputs"].append(self._current_input_file)
# some video duration utilities
if self.result["video_found"] and self.check_duration:
self.result["video_n_frames"] = int(
self.result["duration"] * self.result["video_fps"]
)
self.result["video_duration"] = self.result["duration"]
else:
self.result["video_n_frames"] = 1
self.result["video_duration"] = None
# We could have also recomputed duration from the number of frames, as follows:
# >>> result['video_duration'] = result['video_n_frames'] / result['video_fps']
result = self.result
# reset state of the parser
self._reset_state()
return result
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos.get("video_duration")
if self.duration is None:
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
|
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def ffmpeg_parse_infos(
filename,
check_duration=True,
fps_source="fps",
decode_file=False,
print_infos=False,
):
"""Get the information of a file using ffmpeg.
Returns a dictionary with next fields:
- ``"duration"``
- ``"metadata"``
- ``"inputs"``
- ``"video_found"``
- ``"video_fps"``
- ``"video_n_frames"``
- ``"video_duration"``
- ``"video_bitrate"``
- ``"video_metadata"``
- ``"audio_found"``
- ``"audio_fps"``
- ``"audio_bitrate"``
- ``"audio_metadata"``
Note that "video_duration" is slightly smaller than "duration" to avoid
fetching the uncomplete frames at the end, which raises an error.
Parameters
----------
filename
Name of the file parsed, only used to raise accurate error messages.
infos
Information returned by FFmpeg.
fps_source
Indicates what source data will be preferably used to retrieve fps data.
check_duration
Enable or disable the parsing of the duration of the file. Useful to
skip the duration check, for example, for images.
decode_file
Indicates if the whole file must be read to retrieve their duration.
This is needed for some files in order to get the correct duration (see
https://github.com/Zulko/moviepy/pull/1222).
"""
# Open the file in a pipe, read output
cmd = [FFMPEG_BINARY, "-hide_banner", "-i", filename]
if decode_file:
cmd.extend(["-f", "null", "-"])
popen_params = cross_platform_popen_params(
{
"bufsize": 10**5,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": sp.DEVNULL,
}
)
proc = sp.Popen(cmd, **popen_params)
(output, error) = proc.communicate()
infos = error.decode("utf8", errors="ignore")
proc.terminate()
del proc
if print_infos:
# print the whole info text returned by FFMPEG
print(infos)
try:
return FFmpegInfosParser(
infos,
filename,
fps_source=fps_source,
check_duration=check_duration,
decode_file=decode_file,
).parse()
except Exception as exc:
if os.path.isdir(filename):
raise IsADirectoryError(f"'{filename}' is a directory")
elif not os.path.exists(filename):
raise FileNotFoundError(f"'{filename}' not found")
raise IOError(f"Error pasing `ffmpeg -i` command output:\n\n{infos}") from exc
|
def ffmpeg_parse_infos(
filename,
check_duration=True,
fps_source="fps",
decode_file=False,
print_infos=False,
):
"""Get the information of a file using ffmpeg.
Returns a dictionary with next fields:
- ``"duration"``
- ``"metadata"``
- ``"inputs"``
- ``"video_found"``
- ``"video_fps"``
- ``"video_n_frames"``
- ``"video_duration"``
- ``"video_bitrate"``
- ``"video_metadata"``
- ``"audio_found"``
- ``"audio_fps"``
- ``"audio_bitrate"``
- ``"audio_metadata"``
Note that "video_duration" is slightly smaller than "duration" to avoid
fetching the uncomplete frames at the end, which raises an error.
Parameters
----------
filename
Name of the file parsed, only used to raise accurate error messages.
infos
Information returned by FFmpeg.
fps_source
Indicates what source data will be preferably used to retrieve fps data.
check_duration
Enable or disable the parsing of the duration of the file. Useful to
skip the duration check, for example, for images.
decode_file
Indicates if the whole file must be read to retrieve their duration.
This is needed for some files in order to get the correct duration (see
https://github.com/Zulko/moviepy/pull/1222).
"""
# Open the file in a pipe, read output
cmd = [FFMPEG_BINARY, "-hide_banner", "-i", filename]
if decode_file:
cmd.extend(["-f", "null", "-"])
popen_params = cross_platform_popen_params(
{
"bufsize": 10**5,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": sp.DEVNULL,
}
)
proc = sp.Popen(cmd, **popen_params)
(output, error) = proc.communicate()
infos = error.decode("utf8", errors="ignore")
proc.terminate()
del proc
if print_infos:
# print the whole info text returned by FFMPEG
print(infos)
try:
return FFmpegInfosParser(
infos,
filename,
fps_source=fps_source,
check_duration=check_duration,
decode_file=decode_file,
).parse()
except Exception as exc:
if os.path.isdir(filename):
raise IsADirectoryError(f"'{filename}' is a directory")
elif not os.path.exists(filename):
raise FileNotFoundError(f"'{filename}' not found")
raise exc
|
https://github.com/Zulko/moviepy/issues/1487
|
Exception ignored in: <bound method FFMPEG_AudioReader.__del__ of <moviepy.audio.io.readers.FFMPEG_AudioReader object at 0x7fc339261cf8>>
Traceback (most recent call last):
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 264, in __del__
self.close()
File "/home/vagrant/.tox/py36/lib/python3.6/site-packages/moviepy/audio/io/readers.py", line 254, in close
if self.proc:
AttributeError: 'FFMPEG_AudioReader' object has no attribute 'proc'
|
AttributeError
|
def make_frame(t):
"""complicated, but must be able to handle the case where t
is a list of the form sin(t)"""
if isinstance(t, np.ndarray):
array_inds = np.round(self.fps * t).astype(int)
in_array = (array_inds >= 0) & (array_inds < len(self.array))
result = np.zeros((len(t), 2))
result[in_array] = self.array[array_inds[in_array]]
return result
else:
i = int(self.fps * t)
if i < 0 or i >= len(self.array):
return 0 * self.array[0]
else:
return self.array[i]
|
def make_frame(t):
played_parts = [clip.is_playing(t) for clip in self.clips]
sounds = [
clip.get_frame(t - clip.start) * np.array([part]).T
for clip, part in zip(self.clips, played_parts)
if (part is not False)
]
if isinstance(t, np.ndarray):
zero = np.zeros((len(t), self.nchannels))
else:
zero = np.zeros(self.nchannels)
return zero + sum(sounds)
|
https://github.com/Zulko/moviepy/issues/1457
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "<decorator-gen-126>", line 2, in preview
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/io/preview.py", line 50, in preview
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
File "<decorator-gen-81>", line 2, in to_soundarray
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/AudioClip.py", line 119, in to_soundarray
fps = self.fps
AttributeError: 'CompositeAudioClip' object has no attribute 'fps'
|
AttributeError
|
def __init__(self, clips):
self.clips = clips
self.nchannels = max(clip.nchannels for clip in self.clips)
# self.duration is setted at AudioClip
duration = None
for end in self.ends:
if end is None:
break
duration = max(end, duration or 0)
# self.fps is setted at AudioClip
fps = None
for clip in self.clips:
if hasattr(clip, "fps") and isinstance(clip.fps, numbers.Number):
fps = max(clip.fps, fps or 0)
super().__init__(duration=duration, fps=fps)
|
def __init__(self, clips):
Clip.__init__(self)
self.clips = clips
ends = [clip.end for clip in self.clips]
self.nchannels = max([clip.nchannels for clip in self.clips])
if not any([(end is None) for end in ends]):
self.duration = max(ends)
self.end = max(ends)
def make_frame(t):
played_parts = [clip.is_playing(t) for clip in self.clips]
sounds = [
clip.get_frame(t - clip.start) * np.array([part]).T
for clip, part in zip(self.clips, played_parts)
if (part is not False)
]
if isinstance(t, np.ndarray):
zero = np.zeros((len(t), self.nchannels))
else:
zero = np.zeros(self.nchannels)
return zero + sum(sounds)
self.make_frame = make_frame
|
https://github.com/Zulko/moviepy/issues/1457
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "<decorator-gen-126>", line 2, in preview
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/io/preview.py", line 50, in preview
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
File "<decorator-gen-81>", line 2, in to_soundarray
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/AudioClip.py", line 119, in to_soundarray
fps = self.fps
AttributeError: 'CompositeAudioClip' object has no attribute 'fps'
|
AttributeError
|
def concatenate_audioclips(clips):
"""Concatenates one AudioClip after another, in the order that are passed
to ``clips`` parameter.
Parameters
----------
clips
List of audio clips, which will be played one after other.
"""
# start, end/start2, end2/start3... end
starts_end = np.cumsum([0, *[clip.duration for clip in clips]])
newclips = [clip.with_start(t) for clip, t in zip(clips, starts_end[:-1])]
return CompositeAudioClip(newclips).with_duration(starts_end[-1])
|
def concatenate_audioclips(clips):
"""
The clip with the highest FPS will be the FPS of the result clip.
"""
durations = [clip.duration for clip in clips]
timings = np.cumsum([0] + durations) # start times, and end time.
newclips = [clip.with_start(t) for clip, t in zip(clips, timings)]
result = CompositeAudioClip(newclips).with_duration(timings[-1])
fpss = [clip.fps for clip in clips if getattr(clip, "fps", None)]
result.fps = max(fpss) if fpss else None
return result
|
https://github.com/Zulko/moviepy/issues/1457
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "<decorator-gen-126>", line 2, in preview
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/io/preview.py", line 50, in preview
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
File "<decorator-gen-81>", line 2, in to_soundarray
File "./moviepy/decorators.py", line 56, in requires_duration
return func(clip, *args, **kwargs)
File "./moviepy/audio/AudioClip.py", line 119, in to_soundarray
fps = self.fps
AttributeError: 'CompositeAudioClip' object has no attribute 'fps'
|
AttributeError
|
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
"""Draw a linear, bilinear, or radial gradient.
The result is a picture of size ``size``, whose color varies
gradually from color `color_1` in position ``p1`` to color ``color_2``
in position ``p2``.
If it is a RGB picture the result must be transformed into
a 'uint8' array to be displayed normally:
Parameters
------------
size
Size (width, height) in pixels of the final picture/array.
p1, p2
Coordinates (x,y) in pixels of the limit point for ``color_1``
and ``color_2``. The color 'before' ``p1`` is ``color_1`` and it
gradually changes in the direction of ``p2`` until it is ``color_2``
when it reaches ``p2``.
vector
A vector [x,y] in pixels that can be provided instead of ``p2``.
``p2`` is then defined as (p1 + vector).
color_1, color_2
Either floats between 0 and 1 (for gradients used in masks)
or [R,G,B] arrays (for colored gradients).
shape
'linear', 'bilinear', or 'circular'.
In a linear gradient the color varies in one direction,
from point ``p1`` to point ``p2``.
In a bilinear gradient it also varies symetrically from ``p1``
in the other direction.
In a circular gradient it goes from ``color_1`` to ``color_2`` in all
directions.
offset
Real number between 0 and 1 indicating the fraction of the vector
at which the gradient actually starts. For instance if ``offset``
is 0.9 in a gradient going from p1 to p2, then the gradient will
only occur near p2 (before that everything is of color ``color_1``)
If the offset is 0.9 in a radial gradient, the gradient will
occur in the region located between 90% and 100% of the radius,
this creates a blurry disc of radius d(p1,p2).
Returns
--------
image
An Numpy array of dimensions (W,H,ncolors) of type float
representing the image of the gradient.
Examples
---------
>>> grad = color_gradient(blabla).astype('uint8')
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
if p2 is None:
raise ValueError("You must provide either 'p2' or 'vector'")
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, -vector]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
if vector is None:
if p2 is not None:
vector = np.array(p2[::-1]) - p1
else:
raise ValueError("You must provide either 'p2' or 'vector'")
else:
vector = np.array(vector[::-1])
norm = np.linalg.norm(vector)
n_vec = vector / norm**2 # norm 1/norm(vector)
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if (radius or 0) == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
|
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
"""Draw a linear, bilinear, or radial gradient.
The result is a picture of size ``size``, whose color varies
gradually from color `color_1` in position ``p1`` to color ``color_2``
in position ``p2``.
If it is a RGB picture the result must be transformed into
a 'uint8' array to be displayed normally:
Parameters
------------
size
Size (width, height) in pixels of the final picture/array.
p1, p2
Coordinates (x,y) in pixels of the limit point for ``color_1``
and ``color_2``. The color 'before' ``p1`` is ``color_1`` and it
gradually changes in the direction of ``p2`` until it is ``color_2``
when it reaches ``p2``.
vector
A vector [x,y] in pixels that can be provided instead of ``p2``.
``p2`` is then defined as (p1 + vector).
color_1, color_2
Either floats between 0 and 1 (for gradients used in masks)
or [R,G,B] arrays (for colored gradients).
shape
'linear', 'bilinear', or 'circular'.
In a linear gradient the color varies in one direction,
from point ``p1`` to point ``p2``.
In a bilinear gradient it also varies symetrically from ``p1``
in the other direction.
In a circular gradient it goes from ``color_1`` to ``color_2`` in all
directions.
offset
Real number between 0 and 1 indicating the fraction of the vector
at which the gradient actually starts. For instance if ``offset``
is 0.9 in a gradient going from p1 to p2, then the gradient will
only occur near p2 (before that everything is of color ``color_1``)
If the offset is 0.9 in a radial gradient, the gradient will
occur in the region located between 90% and 100% of the radius,
this creates a blurry disc of radius d(p1,p2).
Returns
--------
image
An Numpy array of dimensions (W,H,ncolors) of type float
representing the image of the gradient.
Examples
---------
>>> grad = color_gradient(blabla).astype('uint8')
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, -vector]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
if vector is None and p2:
p2 = np.array(p2[::-1])
vector = p2 - p1
else:
vector = np.array(vector[::-1])
p2 = p1 + vector
if vector is not None:
norm = np.linalg.norm(vector)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
n_vec = vector / norm**2 # norm 1/norm(vector)
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if radius is None:
radius = norm
if radius == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
|
https://github.com/Zulko/moviepy/issues/1256
|
Traceback (most recent call last):
File "theend.py", line 34, in <module>
video.write_videofile("theEnd.mp4")
File "<decorator-gen-61>", line 2, in write_videofile
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/decorators.py", line 56, in requires_duration
return f(clip, *a, **k)
File "<decorator-gen-60>", line 2, in write_videofile
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/decorators.py", line 135, in use_clip_fps_by_default
return f(clip, *new_a, **new_kw)
File "<decorator-gen-59>", line 2, in write_videofile
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/decorators.py", line 24, in convert_masks_to_RGB
return f(clip, *a, **k)
File "<decorator-gen-58>", line 2, in write_videofile
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/decorators.py", line 89, in wrapper
return f(*new_a, **new_kw)
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/VideoClip.py", line 350, in write_videofile
logger=logger,
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/io/ffmpeg_writer.py", line 245, in ffmpeg_write_video
logger=logger, with_times=True, fps=fps, dtype="uint8"
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/Clip.py", line 473, in iter_frames
frame = self.get_frame(t)
File "<decorator-gen-11>", line 2, in get_frame
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/decorators.py", line 89, in wrapper
return f(*new_a, **new_kw)
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/Clip.py", line 98, in get_frame
return self.make_frame(t)
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/compositing/CompositeVideoClip.py", line 123, in make_frame
f = c.blit_on(f, t)
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/VideoClip.py", line 599, in blit_on
mask = self.mask.get_frame(ct) if self.mask else None
File "movie.py", line 15, in <lambda>
col1=1, col2=0,blur=4,)
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/tools/drawing.py", line 280, in circle
offset=offset,
File "/home/pandahub/.local/lib/python3.6/site-packages/moviepy/video/tools/drawing.py", line 146, in color_gradient
vector = np.array(vector[::-1])
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def audio_normalize(clip):
"""Return a clip whose volume is normalized to 0db.
Return an audio (or video) clip whose audio volume is normalized
so that the maximum volume is at 0db, the maximum achievable volume.
Examples
========
>>> from moviepy.editor import *
>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
"""
max_volume = clip.max_volume()
if max_volume == 0:
# Nothing to normalize.
# Avoids a divide by zero error.
return clip.copy()
else:
return volumex(clip, 1 / max_volume)
|
def audio_normalize(clip):
"""Return a clip whose volume is normalized to 0db.
Return an audio (or video) clip whose audio volume is normalized
so that the maximum volume is at 0db, the maximum achievable volume.
Examples
========
>>> from moviepy.editor import *
>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
"""
max_volume = clip.max_volume()
return volumex(clip, 1 / max_volume)
|
https://github.com/Zulko/moviepy/issues/1388
|
Traceback (most recent call last):
File "test.py", line 141, in <module>
ytclip = ytclip.subclip(0, 10).audio_normalize()
File "<decorator-gen-96>", line 2, in audio_normalize
File "/Users/xxx/lib/python3.7/site-packages/moviepy/decorators.py", line 70, in audio_video_fx
newclip.audio = f(clip.audio, *a, **k)
File "/Users/xxxx/lib/python3.7/site-packages/moviepy/audio/fx/audio_normalize.py", line 23, in audio_normalize
return volumex(clip, 1 / mv)
**ZeroDivisionError: division by zero**
|
ZeroDivisionError
|
def fx(self, func, *args, **kwargs):
"""
Returns the result of ``func(self, *args, **kwargs)``.
for instance
>>> newclip = clip.fx(resize, 0.2, method="bilinear")
is equivalent to
>>> newclip = resize(clip, 0.2, method="bilinear")
The motivation of fx is to keep the name of the effect near its
parameters when the effects are chained:
>>> from moviepy.video.fx import volumex, resize, mirrorx
>>> clip.fx(volumex, 0.5).fx(resize, 0.3).fx(mirrorx)
>>> # Is equivalent, but clearer than
>>> mirrorx(resize(volumex(clip, 0.5), 0.3))
"""
return func(self, *args, **kwargs)
|
def fx(self, func, *args, **kwargs):
"""
Returns the result of ``func(self, *args, **kwargs)``.
for instance
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
is equivalent to
>>> newclip = resize(clip, 0.2, method='bilinear')
The motivation of fx is to keep the name of the effect near its
parameters, when the effects are chained:
>>> from moviepy.video.fx import volumex, resize, mirrorx
>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
>>> # Is equivalent, but clearer than
>>> resize( volumex( mirrorx( clip ), 0.5), 0.3)
"""
return func(self, *args, **kwargs)
|
https://github.com/Zulko/moviepy/issues/1209
|
Traceback (most recent call last):
File "<pyshell#5>", line 1, in <module>
newclip.write_videofile(r"F:\video\WinBasedWorkHard_new.mp4")
File "<decorator-gen-55>", line 2, in write_videofile
File "C:\Program Files\Python37\lib\site-packages\moviepy\decorators.py", line 52, in requires_duration
raise ValueError("Attribute 'duration' not set")
ValueError: Attribute 'duration' not set
|
ValueError
|
def __init__(self, size, color=None, ismask=False, duration=None):
w, h = size
if ismask:
shape = (h, w)
if color is None:
color = 0
elif not np.isscalar(color):
raise Exception("Color has to be a scalar when mask is true")
else:
if color is None:
color = (0, 0, 0)
elif not hasattr(color, "__getitem__"):
raise Exception("Color has to contain RGB of the clip")
shape = (h, w, len(color))
super().__init__(
np.tile(color, w * h).reshape(shape), ismask=ismask, duration=duration
)
|
def __init__(self, size, color=None, ismask=False, duration=None):
w, h = size
shape = (h, w) if np.isscalar(color) else (h, w, len(color))
super().__init__(
np.tile(color, w * h).reshape(shape), ismask=ismask, duration=duration
)
|
https://github.com/Zulko/moviepy/issues/1131
|
In [1]: from moviepy.editor import ColorClip
In [2]: c = ColorClip((10,10), duration=1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-5c98bac84586> in <module>
----> 1 c = ColorClip((10,10), duration=1)
~/lab/moviepy/moviepy/video/VideoClip.py in __init__(self, size, color, ismask, duration, col)
1094 color = col
1095 w, h = size
-> 1096 shape = (h, w) if np.isscalar(color) else (h, w, len(color))
1097 ImageClip.__init__(
1098 self, np.tile(color, w * h).reshape(shape), ismask=ismask, duration=duration
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False, ismask=False):
if size is None:
size = clips[0].size
if use_bgclip and (clips[0].mask is None):
transparent = False
else:
transparent = bg_color is None
if bg_color is None:
bg_color = 0.0 if ismask else (0, 0, 0)
fpss = [c.fps for c in clips if getattr(c, "fps", None)]
self.fps = max(fpss) if fpss else None
VideoClip.__init__(self)
self.size = size
self.ismask = ismask
self.clips = clips
self.bg_color = bg_color
if use_bgclip:
self.bg = clips[0]
self.clips = clips[1:]
self.created_bg = False
else:
self.clips = clips
self.bg = ColorClip(size, color=self.bg_color, ismask=ismask)
self.created_bg = True
# compute duration
ends = [c.end for c in self.clips]
if None not in ends:
duration = max(ends)
self.duration = duration
self.end = duration
# compute audio
audioclips = [v.audio for v in self.clips if v.audio is not None]
if audioclips:
self.audio = CompositeAudioClip(audioclips)
# compute mask if necessary
if transparent:
maskclips = [
(c.mask if (c.mask is not None) else c.add_mask().mask)
.set_position(c.pos)
.set_end(c.end)
.set_start(c.start, change_end=False)
for c in self.clips
]
self.mask = CompositeVideoClip(maskclips, self.size, ismask=True, bg_color=0.0)
def make_frame(t):
"""The clips playing at time `t` are blitted over one
another."""
f = self.bg.get_frame(t)
for c in self.playing_clips(t):
f = c.blit_on(f, t)
return f
self.make_frame = make_frame
|
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False, ismask=False):
if size is None:
size = clips[0].size
if use_bgclip and (clips[0].mask is None):
transparent = False
else:
transparent = bg_color is None
if bg_color is None:
bg_color = 0.0 if ismask else (0, 0, 0)
fpss = [c.fps for c in clips if getattr(c, "fps", None)]
self.fps = max(fpss) if fpss else None
VideoClip.__init__(self)
self.size = size
self.ismask = ismask
self.clips = clips
self.bg_color = bg_color
if use_bgclip:
self.bg = clips[0]
self.clips = clips[1:]
self.created_bg = False
else:
self.clips = clips
self.bg = ColorClip(size, color=self.bg_color)
self.created_bg = True
# compute duration
ends = [c.end for c in self.clips]
if None not in ends:
duration = max(ends)
self.duration = duration
self.end = duration
# compute audio
audioclips = [v.audio for v in self.clips if v.audio is not None]
if audioclips:
self.audio = CompositeAudioClip(audioclips)
# compute mask if necessary
if transparent:
maskclips = [
(c.mask if (c.mask is not None) else c.add_mask().mask)
.set_position(c.pos)
.set_end(c.end)
.set_start(c.start, change_end=False)
for c in self.clips
]
self.mask = CompositeVideoClip(maskclips, self.size, ismask=True, bg_color=0.0)
def make_frame(t):
"""The clips playing at time `t` are blitted over one
another."""
f = self.bg.get_frame(t)
for c in self.playing_clips(t):
f = c.blit_on(f, t)
return f
self.make_frame = make_frame
|
https://github.com/Zulko/moviepy/issues/1131
|
In [1]: from moviepy.editor import ColorClip
In [2]: c = ColorClip((10,10), duration=1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-5c98bac84586> in <module>
----> 1 c = ColorClip((10,10), duration=1)
~/lab/moviepy/moviepy/video/VideoClip.py in __init__(self, size, color, ismask, duration, col)
1094 color = col
1095 w, h = size
-> 1096 shape = (h, w) if np.isscalar(color) else (h, w, len(color))
1097 ImageClip.__init__(
1098 self, np.tile(color, w * h).reshape(shape), ismask=ismask, duration=duration
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def pil_rotater(pic, angle, resample, expand):
# Ensures that pic is of the correct type
return np.array(
Image.fromarray(np.array(pic).astype(np.uint8)).rotate(
angle, expand=expand, resample=resample
)
)
|
def pil_rotater(pic, angle, resample, expand):
return np.array(
Image.fromarray(pic).rotate(angle, expand=expand, resample=resample)
)
|
https://github.com/Zulko/moviepy/issues/1140
|
Traceback (most recent call last):
MoviePy\venv\lib\site-packages\PIL\Image.py", line 2680, in fromarray
mode, rawmode = _fromarray_typemap[typekey]
KeyError: ((1, 1, 3), '<i4')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
rotation_test.py", line 10, in <module>
rotated_image = colour_clip.rotate(20)
MoviePy\venv\lib\site-packages\moviepy\video\fx\rotate.py", line 71, in rotate
return clip.fl(fl, apply_to=["mask"])
MoviePy\venv\lib\site-packages\moviepy\video\VideoClip.py", line 951, in fl
keep_duration=keep_duration)
MoviePy\venv\lib\site-packages\moviepy\Clip.py", line 138, in fl
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
File "<decorator-gen-57>", line 2, in set_make_frame
MoviePy\venv\lib\site-packages\moviepy\decorators.py", line 14, in outplace
f(newclip, *a, **k)
MoviePy\venv\lib\site-packages\moviepy\video\VideoClip.py", line 673, in set_make_frame
self.size = self.get_frame(0).shape[:2][::-1]
File "<decorator-gen-10>", line 2, in get_frame
MoviePy\venv\lib\site-packages\moviepy\decorators.py", line 89, in wrapper
return f(*new_a, **new_kw)
MoviePy\venv\lib\site-packages\moviepy\Clip.py", line 95, in get_frame
return self.make_frame(t)
MoviePy\venv\lib\site-packages\moviepy\Clip.py", line 138, in <lambda>
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
MoviePy\venv\lib\site-packages\moviepy\video\fx\rotate.py", line 69, in fl
return pil_rotater(im, a, resample=resample, expand=expand)
MoviePy\venv\lib\site-packages\moviepy\video\fx\rotate.py", line 7, in pil_rotater
return np.array( Image.fromarray(pic).rotate(angle, expand=expand,
MoviePy\venv\lib\site-packages\PIL\Image.py", line 2682, in fromarray
raise TypeError("Cannot handle this data type: %s, %s" % typekey)
TypeError: Cannot handle this data type: (1, 1, 3), <i4
Process finished with exit code 1
|
KeyError
|
def get_frame(self, tt):
buffersize = self.buffersize
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
# elements of t that are actually in the range of the
# audio file.
in_time = (tt >= 0) & (tt < self.duration)
# Check that the requested time is in the valid range
if not in_time.any():
raise IOError(
"Error in file %s, " % (self.filename)
+ "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "with clip duration=%d seconds, " % self.duration
)
# The np.round in the next line is super-important.
# Removing it results in artifacts in the noise.
frames = np.round((self.fps * tt)).astype(int)[in_time]
fr_min, fr_max = frames.min(), frames.max()
if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_min)
elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_max)
try:
result = np.zeros((len(tt), self.nchannels))
indices = frames - self.buffer_startframe
result[in_time] = self.buffer[indices]
return result
except IndexError as error:
warnings.warn(
"Error in file %s, " % (self.filename)
+ "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "indices wanted: %d-%d, " % (indices.min(), indices.max())
+ "but len(buffer)=%d\n" % (len(self.buffer))
+ str(error),
UserWarning,
)
# repeat the last frame instead
indices[indices >= len(self.buffer)] = len(self.buffer) - 1
result[in_time] = self.buffer[indices]
return result
else:
ind = int(self.fps * tt)
if ind < 0 or ind > self.nframes: # out of time: return 0
return np.zeros(self.nchannels)
if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
# out of the buffer: recenter the buffer
self.buffer_around(ind)
# read the frame in the buffer
return self.buffer[ind - self.buffer_startframe]
|
def get_frame(self, tt):
buffersize = self.buffersize
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
# elements of t that are actually in the range of the
# audio file.
in_time = (tt >= 0) & (tt < self.duration)
# The np.round in the next line is super-important.
# Removing it results in artifacts in the noise.
frames = np.round((self.fps * tt)).astype(int)[in_time]
fr_min, fr_max = frames.min(), frames.max()
if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_min)
elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_max)
try:
result = np.zeros((len(tt), self.nchannels))
indices = frames - self.buffer_startframe
result[in_time] = self.buffer[indices]
return result
except IndexError as error:
if indices.max() > len(self.buffer):
raise IOError(
"Error reading file '%s', " % self.filename
+ "trying to access beyond the end of the file"
)
else:
raise IOError(
"Error in file %s, " % (self.filename)
+ "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "indices wanted: %d-%d, " % (indices.min(), indices.max())
+ "but len(buffer)=%d\n" % (len(self.buffer))
+ str(error)
)
else:
ind = int(self.fps * tt)
if ind < 0 or ind > self.nframes: # out of time: return 0
return np.zeros(self.nchannels)
if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
# out of the buffer: recenter the buffer
self.buffer_around(ind)
# read the frame in the buffer
return self.buffer[ind - self.buffer_startframe]
|
https://github.com/Zulko/moviepy/issues/246
|
IndexError Traceback (most recent call last)
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/io/readers.py in get_frame(self, tt)
186 indices = frames - self.buffer_startframe
--> 187 result[in_time] = self.buffer[indices]
188 return result
IndexError: index 120575 is out of bounds for axis 0 with size 120575
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-2-7b8ec45b76f0> in <module>()
2
3 sound = AudioFileClip("sample_mono_short.flac")
----> 4 samples = sound.to_soundarray()
<decorator-gen-194> in to_soundarray(self, tt, fps, quantize, nbytes, buffersize)
/home/xxx/venv/lib/python3.4/site-packages/moviepy/decorators.py in requires_duration(f, clip, *a, **k)
52 raise ValueError("Attribute 'duration' not set")
53 else:
---> 54 return f(clip, *a, **k)
55
56
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/AudioClip.py in to_soundarray(self, tt, fps, quantize, nbytes, buffersize)
112 if self.duration>max_duration:
113 return stacker(self.iter_chunks(fps=fps, quantize=quantize, nbytes=2,
--> 114 chunksize=buffersize))
115 else:
116 tt = np.arange(0, self.duration, 1.0/fps)
/home/xxx/venv/lib/python3.4/site-packages/numpy/core/shape_base.py in vstack(tup)
228
229 """
--> 230 return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
231
232 def hstack(tup):
/home/xxx/venv/lib/python3.4/site-packages/numpy/core/shape_base.py in <listcomp>(.0)
228
229 """
--> 230 return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
231
232 def hstack(tup):
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/AudioClip.py in generator()
79 tt = (1.0/fps)*np.arange(pospos[i],pospos[i+1])
80 yield self.to_soundarray(tt, nbytes= nbytes, quantize=quantize, fps=fps,
---> 81 buffersize=chunksize)
82
83 if progress_bar:
<decorator-gen-194> in to_soundarray(self, tt, fps, quantize, nbytes, buffersize)
/home/xxx/venv/lib/python3.4/site-packages/moviepy/decorators.py in requires_duration(f, clip, *a, **k)
52 raise ValueError("Attribute 'duration' not set")
53 else:
---> 54 return f(clip, *a, **k)
55
56
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/AudioClip.py in to_soundarray(self, tt, fps, quantize, nbytes, buffersize)
125 #print tt.max() - tt.min(), tt.min(), tt.max()
126
--> 127 snd_array = self.get_frame(tt)
128
129 if quantize:
<decorator-gen-136> in get_frame(self, t)
/home/xxx/venv/lib/python3.4/site-packages/moviepy/decorators.py in wrapper(f, *a, **kw)
87 new_kw = {k: fun(v) if k in varnames else v
88 for (k,v) in kw.items()}
---> 89 return f(*new_a, **new_kw)
90 return decorator.decorator(wrapper)
91
/home/xxx/venv/lib/python3.4/site-packages/moviepy/Clip.py in get_frame(self, t)
93 return frame
94 else:
---> 95 return self.make_frame(t)
96
97 def fl(self, fun, apply_to=[] , keep_duration=True):
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/io/AudioFileClip.py in <lambda>(t)
69
70
---> 71 self.make_frame = lambda t: reader.get_frame(t)
72 self.nchannels = reader.nchannels
73
/home/xxx/venv/lib/python3.4/site-packages/moviepy/audio/io/readers.py in get_frame(self, tt)
191 "At time t=%.02f-%.02f seconds, "%(tt[0], tt[-1])+
192 "indices wanted: %d-%d, "%(indices.min(), indices.max())+
--> 193 "but len(buffer)=%d\n"%(len(self.buffer))+ str(error))
194
195 else:
OSError: Error in file sample_mono_short.flac, At time t=27.21-27.68 seconds, indices wanted: 100000-120687, but len(buffer)=120575
index 120575 is out of bounds for axis 0 with size 120575
|
IndexError
|
def process_template(self, cfg, minimize=None):
"""
Process the Policy Sentry template as a dict. This auto-detects whether or not the file is in CRUD mode or
Actions mode.
Arguments:
cfg: The loaded YAML as a dict. Must follow Policy Sentry dictated format.
minimize: Minimize the resulting statement with *safe* usage of wildcards to reduce policy length. Set this to the character length you want - for example, 0, or 4. Defaults to none.
Returns:
Dictionary: The rendered IAM JSON Policy
"""
if cfg.get("mode") == "crud":
logger.debug("CRUD mode selected")
check_crud_schema(cfg)
# EXCLUDE ACTIONS
if cfg.get("exclude-actions"):
if cfg.get("exclude-actions")[0] != "":
self.add_exclude_actions(cfg["exclude-actions"])
# WILDCARD ONLY SECTION
if cfg.get("wildcard-only"):
if cfg.get("wildcard-only").get("single-actions"):
if cfg["wildcard-only"]["single-actions"][0] != "":
provided_wildcard_actions = cfg["wildcard-only"]["single-actions"]
logger.debug(
f"Requested wildcard-only actions: {str(provided_wildcard_actions)}"
)
self.wildcard_only_single_actions = provided_wildcard_actions
if cfg.get("wildcard-only").get("service-read"):
if cfg["wildcard-only"]["service-read"][0] != "":
service_read = cfg["wildcard-only"]["service-read"]
logger.debug(
f"Requested wildcard-only actions: {str(service_read)}"
)
self.wildcard_only_service_read = service_read
if cfg.get("wildcard-only").get("service-write"):
if cfg["wildcard-only"]["service-write"][0] != "":
service_write = cfg["wildcard-only"]["service-write"]
logger.debug(
f"Requested wildcard-only actions: {str(service_write)}"
)
self.wildcard_only_service_write = service_write
if cfg.get("wildcard-only").get("service-list"):
if cfg["wildcard-only"]["service-list"][0] != "":
service_list = cfg["wildcard-only"]["service-list"]
logger.debug(
f"Requested wildcard-only actions: {str(service_list)}"
)
self.wildcard_only_service_list = service_list
if cfg.get("wildcard-only").get("service-tagging"):
if cfg["wildcard-only"]["service-tagging"][0] != "":
service_tagging = cfg["wildcard-only"]["service-tagging"]
logger.debug(
f"Requested wildcard-only actions: {str(service_tagging)}"
)
self.wildcard_only_service_tagging = service_tagging
if cfg.get("wildcard-only").get("service-permissions-management"):
if cfg["wildcard-only"]["service-permissions-management"][0] != "":
service_permissions_management = cfg["wildcard-only"][
"service-permissions-management"
]
logger.debug(
f"Requested wildcard-only actions: {str(service_permissions_management)}"
)
self.wildcard_only_service_permissions_management = (
service_permissions_management
)
# Process the wildcard-only section
self.process_wildcard_only_actions()
# Standard access levels
if cfg.get("read"):
if cfg["read"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['read'])}")
self.add_by_arn_and_access_level(cfg["read"], "Read")
if cfg.get("write"):
if cfg["write"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['write'])}")
self.add_by_arn_and_access_level(cfg["write"], "Write")
if cfg.get("list"):
if cfg["list"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['list'])}")
self.add_by_arn_and_access_level(cfg["list"], "List")
if cfg.get("tagging"):
if cfg["tagging"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['tagging'])}")
self.add_by_arn_and_access_level(cfg["tagging"], "Tagging")
if cfg.get("permissions-management"):
if cfg["permissions-management"][0] != "":
logger.debug(
f"Requested access to arns: {str(cfg['permissions-management'])}"
)
self.add_by_arn_and_access_level(
cfg["permissions-management"], "Permissions management"
)
# SKIP RESOURCE CONSTRAINTS
if cfg.get("skip-resource-constraints"):
if cfg["skip-resource-constraints"][0] != "":
logger.debug(
f"Requested override: the actions {str(cfg['skip-resource-constraints'])} will "
f"skip resource constraints."
)
self.add_skip_resource_constraints(cfg["skip-resource-constraints"])
for skip_resource_constraints_action in self.skip_resource_constraints:
self.add_action_without_resource_constraint(
skip_resource_constraints_action, "SkipResourceConstraints"
)
elif cfg.get("mode") == "actions":
check_actions_schema(cfg)
if "actions" in cfg.keys():
if cfg["actions"] is not None and cfg["actions"][0] != "":
self.add_by_list_of_actions(cfg["actions"])
rendered_policy = self.get_rendered_policy(minimize)
return rendered_policy
|
def process_template(self, cfg, minimize=None):
"""
Process the Policy Sentry template as a dict. This auto-detects whether or not the file is in CRUD mode or
Actions mode.
Arguments:
cfg: The loaded YAML as a dict. Must follow Policy Sentry dictated format.
minimize: Minimize the resulting statement with *safe* usage of wildcards to reduce policy length. Set this to the character length you want - for example, 0, or 4. Defaults to none.
Returns:
Dictionary: The rendered IAM JSON Policy
"""
if "mode" in cfg.keys():
if cfg["mode"] == "crud":
logger.debug("CRUD mode selected")
check_crud_schema(cfg)
if "exclude-actions" in cfg:
if cfg["exclude-actions"]:
if cfg["exclude-actions"][0] != "":
self.add_exclude_actions(cfg["exclude-actions"])
if "wildcard-only" in cfg.keys():
if "single-actions" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["single-actions"]:
if cfg["wildcard-only"]["single-actions"][0] != "":
provided_wildcard_actions = cfg["wildcard-only"][
"single-actions"
]
logger.debug(
f"Requested wildcard-only actions: {str(provided_wildcard_actions)}"
)
self.wildcard_only_single_actions = (
provided_wildcard_actions
)
if "service-read" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["service-read"]:
if cfg["wildcard-only"]["service-read"][0] != "":
service_read = cfg["wildcard-only"]["service-read"]
logger.debug(
f"Requested wildcard-only actions: {str(service_read)}"
)
self.wildcard_only_service_read = service_read
if "service-write" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["service-write"]:
if cfg["wildcard-only"]["service-write"][0] != "":
service_write = cfg["wildcard-only"]["service-write"]
logger.debug(
f"Requested wildcard-only actions: {str(service_write)}"
)
self.wildcard_only_service_write = service_write
if "service-list" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["service-list"]:
if cfg["wildcard-only"]["service-list"][0] != "":
service_list = cfg["wildcard-only"]["service-list"]
logger.debug(
f"Requested wildcard-only actions: {str(service_list)}"
)
self.wildcard_only_service_list = service_list
if "service-tagging" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["service-tagging"]:
if cfg["wildcard-only"]["service-tagging"][0] != "":
service_tagging = cfg["wildcard-only"]["service-tagging"]
logger.debug(
f"Requested wildcard-only actions: {str(service_tagging)}"
)
self.wildcard_only_service_tagging = service_tagging
if "service-permissions-management" in cfg["wildcard-only"]:
if cfg["wildcard-only"]["service-permissions-management"]:
if (
cfg["wildcard-only"]["service-permissions-management"][0]
!= ""
):
service_permissions_management = cfg["wildcard-only"][
"service-permissions-management"
]
logger.debug(
f"Requested wildcard-only actions: {str(service_permissions_management)}"
)
self.wildcard_only_service_permissions_management = (
service_permissions_management
)
self.process_wildcard_only_actions()
if "read" in cfg.keys():
if cfg["read"] is not None and cfg["read"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['read'])}")
self.add_by_arn_and_access_level(cfg["read"], "Read")
if "write" in cfg.keys():
if cfg["write"] is not None and cfg["write"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['write'])}")
self.add_by_arn_and_access_level(cfg["write"], "Write")
if "list" in cfg.keys():
if cfg["list"] is not None and cfg["list"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['list'])}")
self.add_by_arn_and_access_level(cfg["list"], "List")
if "permissions-management" in cfg.keys():
if (
cfg["permissions-management"] is not None
and cfg["permissions-management"][0] != ""
):
logger.debug(
f"Requested access to arns: {str(cfg['permissions-management'])}"
)
self.add_by_arn_and_access_level(
cfg["permissions-management"],
"Permissions management",
)
if "tagging" in cfg.keys():
if cfg["tagging"] is not None and cfg["tagging"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['tagging'])}")
self.add_by_arn_and_access_level(cfg["tagging"], "Tagging")
if "skip-resource-constraints" in cfg.keys():
if cfg["skip-resource-constraints"]:
if cfg["skip-resource-constraints"][0] != "":
logger.debug(
f"Requested override: the actions {str(cfg['skip-resource-constraints'])} will "
f"skip resource constraints."
)
self.add_skip_resource_constraints(
cfg["skip-resource-constraints"]
)
for (
skip_resource_constraints_action
) in self.skip_resource_constraints:
self.add_action_without_resource_constraint(
skip_resource_constraints_action,
"SkipResourceConstraints",
)
if cfg["mode"] == "actions":
check_actions_schema(cfg)
if "actions" in cfg.keys():
if cfg["actions"] is not None and cfg["actions"][0] != "":
self.add_by_list_of_actions(cfg["actions"])
rendered_policy = self.get_rendered_policy(minimize)
return rendered_policy
|
https://github.com/salesforce/policy_sentry/issues/211
|
Traceback (most recent call last):
File "./examples/library-usage/writing/write_policy_with_access_levels.py", line 18, in <module>
policy = write_policy_with_template(crud_template)
File "./policy_sentry/venv/lib/python3.8/site-packages/policy_sentry/command/write_policy.py", line 77, in write_policy_with_template
policy = sid_group.process_template(cfg, minimize)
File "./policy_sentry/venv/lib/python3.8/site-packages/policy_sentry/writing/sid_group.py", line 472, in process_template
and cfg["permissions-management"][0] != ""
IndexError: list index out of range
|
IndexError
|
def transform_parameter(self):
# Depreciated placeholders:
# - $[taskcat_gets3contents]
# - $[taskcat_geturl]
for param_name, param_value in self._param_dict.items():
if isinstance(param_value, list):
_results_list = []
_nested_param_dict = {}
for idx, value in enumerate(param_value):
_nested_param_dict[idx] = value
nested_pg = ParamGen(
_nested_param_dict,
self.bucket_name,
self.region,
self._boto_client,
self.az_excludes,
)
nested_pg.transform_parameter()
for result_value in nested_pg.results.values():
_results_list.append(result_value)
self.param_value = _results_list
self.results.update({param_name: _results_list})
continue
# Setting the instance variables to reflect key/value pair we're working on.
self.param_name = param_name
self.param_value = param_value
# Convert from bytes to string.
self.convert_to_str()
# $[taskcat_random-numbers]
self._regex_replace_param_value(self.RE_GENNUMB, self._gen_rand_num(20))
# $[taskcat_random-string]
self._regex_replace_param_value(self.RE_GENRANDSTR, self._gen_rand_str(20))
# $[taskcat_autobucket]
self._regex_replace_param_value(self.RE_GENAUTOBUCKET, self._gen_autobucket())
# $[taskcat_genpass_X]
self._gen_password_wrapper(self.RE_GENPW, self.RE_PWTYPE, self.RE_COUNT)
# $[taskcat_ge[nt]az_#]
self._gen_az_wrapper(self.RE_GENAZ, self.RE_COUNT)
# $[taskcat_ge[nt]singleaz_#]
self._gen_single_az_wrapper(self.RE_GENAZ_SINGLE)
# $[taskcat_getkeypair]
self._regex_replace_param_value(self.RE_QSKEYPAIR, "cikey")
# $[taskcat_getlicensebucket]
self._regex_replace_param_value(self.RE_QSLICBUCKET, "override_this")
# $[taskcat_getmediabucket]
self._regex_replace_param_value(self.RE_QSMEDIABUCKET, "override_this")
# $[taskcat_getlicensecontent]
self._get_license_content_wrapper(self.RE_GETLICCONTENT)
# $[taskcat_getpresignedurl]
self._get_license_content_wrapper(self.RE_GETPRESIGNEDURL)
# $[taskcat_getval_X]
self._getval_wrapper(self.RE_GETVAL)
# $[taskcat_genuuid]
self._regex_replace_param_value(self.RE_GENUUID, self._gen_uuid())
self.results.update({self.param_name: self.param_value})
|
def transform_parameter(self):
# Depreciated placeholders:
# - $[taskcat_gets3contents]
# - $[taskcat_geturl]
for param_name, param_value in self._param_dict.items():
# Setting the instance variables to reflect key/value pair we're working on.
self.param_name = param_name
self.param_value = param_value
# Convert from bytes to string.
self.convert_to_str()
# $[taskcat_random-numbers]
self._regex_replace_param_value(self.RE_GENNUMB, self._gen_rand_num(20))
# $[taskcat_random-string]
self._regex_replace_param_value(self.RE_GENRANDSTR, self._gen_rand_str(20))
# $[taskcat_autobucket]
self._regex_replace_param_value(self.RE_GENAUTOBUCKET, self._gen_autobucket())
# $[taskcat_genpass_X]
self._gen_password_wrapper(self.RE_GENPW, self.RE_PWTYPE, self.RE_COUNT)
# $[taskcat_ge[nt]az_#]
self._gen_az_wrapper(self.RE_GENAZ, self.RE_COUNT)
# $[taskcat_ge[nt]singleaz_#]
self._gen_single_az_wrapper(self.RE_GENAZ_SINGLE)
# $[taskcat_getkeypair]
self._regex_replace_param_value(self.RE_QSKEYPAIR, "cikey")
# $[taskcat_getlicensebucket]
self._regex_replace_param_value(self.RE_QSLICBUCKET, "override_this")
# $[taskcat_getmediabucket]
self._regex_replace_param_value(self.RE_QSMEDIABUCKET, "override_this")
# $[taskcat_getlicensecontent]
self._get_license_content_wrapper(self.RE_GETLICCONTENT)
# $[taskcat_getpresignedurl]
self._get_license_content_wrapper(self.RE_GETPRESIGNEDURL)
# $[taskcat_getval_X]
self._getval_wrapper(self.RE_GETVAL)
# $[taskcat_genuuid]
self._regex_replace_param_value(self.RE_GENUUID, self._gen_uuid())
self.results.update({self.param_name: self.param_value})
|
https://github.com/aws-quickstart/taskcat/issues/443
|
taskcat -d test run
_ _ _
| |_ __ _ ___| | _____ __ _| |_
| __/ _` / __| |/ / __/ _` | __|
| || (_| \__ \ < (_| (_| | |_
\__\__,_|___/_|\_\___\__,_|\__|
version 0.9.8
/home/ubuntu/.local/lib/python3.6/site-packages/dataclasses_jsonschema/__init__.py:457: UserWarning: Unable to decode value for 'parameters: str'
warnings.warn(f"Unable to decode value for '{field}: {field_type_name}'")
[INFO ] : Lint passed for test mytest on template /home/ubuntu/taskcatrun/Create_ALB.template
[S3: -> ] s3://tcat-taskcat-poc-nlsq599i/taskcat-poc/Create_ALB.template
[ERROR ] : TypeError expected string or bytes-like object
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_cli.py", line 70, in main
cli.run()
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_cli_modules/test.py", line 75, in run
parameters = config.get_rendered_parameters(buckets, regions, templates)
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_config.py", line 295, in get_rendered_parameters
region_params, s3bucket.name, region.name, region.client
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_template_params.py", line 61, in __init__
self.transform_parameter()
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_template_params.py", line 76, in transform_parameter
self._regex_replace_param_value(self.RE_GENNUMB, self._gen_rand_num(20))
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_template_params.py", line 348, in _regex_replace_param_value
if self.regxfind(regex_pattern, self.param_value):
File "/home/ubuntu/.local/lib/python3.6/site-packages/taskcat/_common_utils.py", line 75, in regxfind
security_group = re_object.search(data_line)
TypeError: expected string or bytes-like object
ubuntu@ubuntu-VirtualBox:~/taskcatrun$
|
TypeError
|
def _template_url_to_path(self, template_url):
# TODO: this code assumes a specific url schema, should rather attempt to
# resolve values from params/defaults
template_path = None
if isinstance(template_url, dict):
if "Fn::Sub" in template_url.keys():
if isinstance(template_url["Fn::Sub"], str):
template_path = template_url["Fn::Sub"].split("}")[-1]
else:
template_path = template_url["Fn::Sub"][0].split("}")[-1]
elif "Fn::Join" in list(template_url.keys())[0]:
template_path = template_url["Fn::Join"][1][-1]
elif isinstance(template_url, str):
template_path = "/".join(template_url.split("/")[-2:])
if isinstance(template_path, str):
template_path = self.project_root / template_path
if template_path.is_file():
return template_path
LOG.warning(
"Failed to discover path for %s, path %s does not exist",
template_url,
template_path,
)
return ""
|
def _template_url_to_path(self, template_url):
# TODO: this code assumes a specific url schema, should rather attempt to
# resolve values from params/defaults
if isinstance(template_url, dict):
if "Fn::Sub" in template_url.keys():
if isinstance(template_url["Fn::Sub"], str):
template_path = template_url["Fn::Sub"].split("}")[-1]
else:
template_path = template_url["Fn::Sub"][0].split("}")[-1]
elif "Fn::Join" in list(template_url.keys())[0]:
template_path = template_url["Fn::Join"][1][-1]
elif isinstance(template_url, str):
template_path = "/".join(template_url.split("/")[-2:])
template_path = self.project_root / template_path
if template_path.is_file():
return template_path
LOG.warning(
"Failed to discover path for %s, path %s does not exist",
template_url,
template_path,
)
return ""
|
https://github.com/aws-quickstart/taskcat/issues/432
|
version 0.9.8
[ERROR ] : TypeError expected str, bytes or os.PathLike object, not dict_node
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli.py", line 70, in main
cli.run()
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_modules/test.py", line 60, in run
templates = config.get_templates(project_root_path)
File "/usr/local/lib/python3.7/site-packages/taskcat/_config.py", line 312, in get_templates
s3_key_prefix=f"{self.config.project.name}/",
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 30, in __init__
self._find_children()
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 111, in _find_children
resource["Properties"]["TemplateURL"]
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 72, in _template_url_to_path
template_path = self.project_root / template_path
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 908, in __truediv__
return self._make_child((key,))
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 695, in _make_child
drv, root, parts = self._parse_args(args)
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 649, in _parse_args
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not dict_node
|
TypeError
|
def _get_relative_url(self, path: str) -> str:
suffix = str(path).replace(str(self.project_root), "")
url = self.url_prefix() + suffix
return url
|
def _get_relative_url(self, path: str) -> str:
if not self.url:
return ""
suffix = str(self.template_path).replace(str(self.project_root), "")
suffix_length = len(suffix.lstrip("/").split("/"))
url_prefix = "/".join(self.url.split("/")[0:-suffix_length])
suffix = str(path).replace(str(self.project_root), "")
url = url_prefix + suffix
return url
|
https://github.com/aws-quickstart/taskcat/issues/432
|
version 0.9.8
[ERROR ] : TypeError expected str, bytes or os.PathLike object, not dict_node
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli.py", line 70, in main
cli.run()
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_modules/test.py", line 60, in run
templates = config.get_templates(project_root_path)
File "/usr/local/lib/python3.7/site-packages/taskcat/_config.py", line 312, in get_templates
s3_key_prefix=f"{self.config.project.name}/",
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 30, in __init__
self._find_children()
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 111, in _find_children
resource["Properties"]["TemplateURL"]
File "/usr/local/lib/python3.7/site-packages/taskcat/_cfn/template.py", line 72, in _template_url_to_path
template_path = self.project_root / template_path
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 908, in __truediv__
return self._make_child((key,))
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 695, in _make_child
drv, root, parts = self._parse_args(args)
File "/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 649, in _parse_args
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not dict_node
|
TypeError
|
def __init__(self, param_dict, bucket_name, region, boto_client, az_excludes=None):
self.regxfind = CommonTools.regxfind
self._param_dict = param_dict
_missing_params = []
for param_name, param_value in param_dict.items():
if param_value is None:
_missing_params.append(param_name)
if _missing_params:
raise TaskCatException(
(
f"The following parameters have no value whatsoever. "
f"The CloudFormation stack will fail to launch. "
f"Please address. str({_missing_params})"
)
)
self.results = {}
self.mutated_params = {}
self.param_name = None
self.param_value = None
self.bucket_name = bucket_name
self._boto_client = boto_client
self.region = region
if not az_excludes:
self.az_excludes: Set[str] = set()
else:
self.az_excludes: Set[str] = az_excludes
self.transform_parameter()
|
def __init__(self, param_dict, bucket_name, region, boto_client, az_excludes=None):
self.regxfind = CommonTools.regxfind
self._param_dict = param_dict
self.results = {}
self.mutated_params = {}
self.param_name = None
self.param_value = None
self.bucket_name = bucket_name
self._boto_client = boto_client
self.region = region
if not az_excludes:
self.az_excludes: Set[str] = set()
else:
self.az_excludes: Set[str] = az_excludes
self.transform_parameter()
|
https://github.com/aws-quickstart/taskcat/issues/391
|
[ERROR ] : TypeError expected string or bytes-like object
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli.py", line 70, in main
cli.run()
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/usr/local/lib/python3.7/site-packages/taskcat/_cli_modules/test.py", line 75, in run
parameters = config.get_rendered_parameters(buckets, regions, templates)
File "/usr/local/lib/python3.7/site-packages/taskcat/_config.py", line 295, in get_rendered_parameters
region_params, s3bucket.name, region.name, region.client
File "/usr/local/lib/python3.7/site-packages/taskcat/_template_params.py", line 49, in __init__
self.transform_parameter()
File "/usr/local/lib/python3.7/site-packages/taskcat/_template_params.py", line 64, in transform_parameter
self._regex_replace_param_value(self.RE_GENNUMB, self._gen_rand_num(20))
File "/usr/local/lib/python3.7/site-packages/taskcat/_template_params.py", line 336, in _regex_replace_param_value
if self.regxfind(regex_pattern, self.param_value):
File "/usr/local/lib/python3.7/site-packages/taskcat/_common_utils.py", line 76, in regxfind
security_group = re_object.search(data_line)
TypeError: expected string or bytes-like object
|
TypeError
|
def _import_child( # pylint: disable=too-many-locals
cls, stack_properties: dict, parent_stack: "Stack"
) -> Optional["Stack"]:
url = ""
for event in parent_stack.events():
if event.physical_id == stack_properties["StackId"] and event.properties:
url = event.properties["TemplateURL"]
if url.startswith(parent_stack.template.url_prefix()):
# Template is part of the project, discovering path
relative_path = url.replace(parent_stack.template.url_prefix(), "").lstrip("/")
absolute_path = parent_stack.template.project_root / relative_path
else:
try:
# Assuming template is remote to project and downloading it
cfn_client = parent_stack.client
tempate_body = cfn_client.get_template(
StackName=stack_properties["StackId"]
)["TemplateBody"]
path = parent_stack.template.project_root / Stack.REMOTE_TEMPLATE_PATH
os.makedirs(path, exist_ok=True)
fname = (
"".join(
random.choice(string.ascii_lowercase) # nosec
for _ in range(16)
)
+ ".template"
)
absolute_path = path / fname
template_str = ordered_dump(tempate_body, Dumper=yaml.SafeDumper)
if not absolute_path.exists():
with open(absolute_path, "w") as fh:
fh.write(template_str)
except Exception as e: # pylint: disable=broad-except
LOG.warning(
f"Failed to attach child stack {stack_properties['StackId']} {str(e)}"
)
LOG.debug("traceback", exc_info=True)
return None
template = Template(
template_path=str(absolute_path),
project_root=parent_stack.template.project_root,
url=url,
)
stack = cls(
parent_stack.region,
stack_properties["StackId"],
template,
parent_stack.name,
parent_stack.uuid,
)
stack.set_stack_properties(stack_properties)
return stack
|
def _import_child(
cls, stack_properties: dict, parent_stack: "Stack"
) -> Optional["Stack"]:
url = ""
for event in parent_stack.events():
if event.physical_id == stack_properties["StackId"] and event.properties:
url = event.properties["TemplateURL"]
if url.startswith(parent_stack.template.url_prefix()):
# Template is part of the project, discovering path
relative_path = url.replace(parent_stack.template.url_prefix(), "").lstrip("/")
absolute_path = parent_stack.template.project_root / relative_path
else:
# Assuming template is remote to project and downloading it
cfn_client = parent_stack.client
tempate_body = cfn_client.get_template(StackName=stack_properties["StackId"])[
"TemplateBody"
]
path = parent_stack.template.project_root / Stack.REMOTE_TEMPLATE_PATH
os.makedirs(path, exist_ok=True)
fname = (
"".join(
random.choice(string.ascii_lowercase)
for _ in range(16) # nosec
)
+ ".template"
)
absolute_path = path / fname
if not absolute_path.exists():
with open(absolute_path, "w") as fh:
fh.write(tempate_body)
template = Template(
template_path=str(absolute_path),
project_root=parent_stack.template.project_root,
url=url,
)
stack = cls(
parent_stack.region,
stack_properties["StackId"],
template,
parent_stack.name,
parent_stack.uuid,
)
stack.set_stack_properties(stack_properties)
return stack
|
https://github.com/aws-quickstart/taskcat/issues/366
|
[ERROR ] : TypeError write() argument must be str, not collections.OrderedDict
Traceback (most recent call last):
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli.py", line 66, in main
cli.run()
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli_modules/test.py", line 81, in run
terminal_printer.report_test_progress(stacker=test_definition)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_tui.py", line 24, in report_test_progress
self._print_stack_tree(stack, buffer=self.buffer)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_tui.py", line 37, in _print_stack_tree
if stack.descendants():
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 495, in descendants
self._fetch_children()
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 481, in _fetch_children
stack_obj = Stack._import_child(stack, self)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 335, in _import_child
fh.write(tempate_body)
TypeError: write() argument must be str, not collections.OrderedDict
|
TypeError
|
def _fetch_children(self) -> None:
self._last_child_refresh = datetime.now()
for page in self.client.get_paginator("describe_stacks").paginate():
for stack in page["Stacks"]:
if self._children.filter(id=stack["StackId"]):
continue
if "ParentId" in stack.keys():
if self.id == stack["ParentId"]:
stack_obj = Stack._import_child(stack, self)
if stack_obj:
self._children.append(stack_obj)
|
def _fetch_children(self) -> None:
self._last_child_refresh = datetime.now()
for page in self.client.get_paginator("describe_stacks").paginate():
for stack in page["Stacks"]:
if self._children.filter(id=stack["StackId"]):
continue
if "ParentId" in stack.keys():
if self.id == stack["ParentId"]:
stack_obj = Stack._import_child(stack, self)
self._children.append(stack_obj)
|
https://github.com/aws-quickstart/taskcat/issues/366
|
[ERROR ] : TypeError write() argument must be str, not collections.OrderedDict
Traceback (most recent call last):
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli.py", line 66, in main
cli.run()
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli_core.py", line 228, in run
return getattr(command(), subcommand)(**args)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cli_modules/test.py", line 81, in run
terminal_printer.report_test_progress(stacker=test_definition)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_tui.py", line 24, in report_test_progress
self._print_stack_tree(stack, buffer=self.buffer)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_tui.py", line 37, in _print_stack_tree
if stack.descendants():
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 495, in descendants
self._fetch_children()
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 481, in _fetch_children
stack_obj = Stack._import_child(stack, self)
File "/home/ags/venv/venv-taskcat/lib/python3.6/site-packages/taskcat/_cfn/stack.py", line 335, in _import_child
fh.write(tempate_body)
TypeError: write() argument must be str, not collections.OrderedDict
|
TypeError
|
def deep_cleanup(self, testdata_list):
"""
This function deletes the AWS resources which were not deleted
by deleting CloudFormation stacks.
:param testdata_list: List of TestData objects
"""
for test in testdata_list:
failed_stack_ids = []
for stack in test.get_test_stacks():
if str(stack["status"]) == "DELETE_FAILED":
failed_stack_ids.append(stack["StackId"])
if len(failed_stack_ids) == 0:
print(
PrintMsg.INFO
+ "All stacks deleted successfully. Deep clean-up not required."
)
continue
print(
PrintMsg.INFO
+ "Few stacks failed to delete. Collecting resources for deep clean-up."
)
# get test region from the stack id
stackdata = CommonTools(failed_stack_ids[0]).parse_stack_info()
region = stackdata["region"]
session = boto3.session.Session(region_name=region)
s = Reaper(session)
failed_stacks = CfnResourceTools(self._boto_client).get_all_resources(
failed_stack_ids, region
)
# print all resources which failed to delete
if self.verbose:
print(PrintMsg.DEBUG + "Resources which failed to delete:\n")
for failed_stack in failed_stacks:
print(PrintMsg.DEBUG + "Stack Id: " + failed_stack["stackId"])
for res in failed_stack["resources"]:
print(
PrintMsg.DEBUG
+ "{0} = {1}, {2} = {3}, {4} = {5}".format(
"\n\t\tLogicalId",
res.get("logicalId"),
"\n\t\tPhysicalId",
res.get("physicalId"),
"\n\t\tType",
res.get("resourceType"),
)
)
s.delete_all(failed_stacks)
self.delete_autobucket()
|
def deep_cleanup(self, testdata_list):
"""
This function deletes the AWS resources which were not deleted
by deleting CloudFormation stacks.
:param testdata_list: List of TestData objects
"""
for test in testdata_list:
failed_stack_ids = []
for stack in test.get_test_stacks():
if str(stack["status"]) == "DELETE_FAILED":
failed_stack_ids.append(stack["StackId"])
if len(failed_stack_ids) == 0:
print(
PrintMsg.INFO
+ "All stacks deleted successfully. Deep clean-up not required."
)
continue
print(
PrintMsg.INFO
+ "Few stacks failed to delete. Collecting resources for deep clean-up."
)
# get test region from the stack id
stackdata = CommonTools(failed_stack_ids[0]).parse_stack_info()
region = stackdata["region"]
session = boto3.session.Session(region_name=region)
s = Reaper(session)
failed_stacks = CfnResourceTools.get_all_resources(failed_stack_ids, region)
# print all resources which failed to delete
if self.verbose:
print(PrintMsg.DEBUG + "Resources which failed to delete:\n")
for failed_stack in failed_stacks:
print(PrintMsg.DEBUG + "Stack Id: " + failed_stack["stackId"])
for res in failed_stack["resources"]:
print(
PrintMsg.DEBUG
+ "{0} = {1}, {2} = {3}, {4} = {5}".format(
"\n\t\tLogicalId",
res.get("logicalId"),
"\n\t\tPhysicalId",
res.get("physicalId"),
"\n\t\tType",
res.get("resourceType"),
)
)
s.delete_all(failed_stacks)
self.delete_autobucket()
|
https://github.com/aws-quickstart/taskcat/issues/194
|
22:49:35 [INFO ] :Few stacks failed to delete. Collecting resources for deep clean-up.
22:49:35 Traceback (most recent call last):
22:49:35 File "/var/lib/jenkins/.local/bin/taskcat", line 104, in <module>
22:49:35 main()
22:49:35 File "/var/lib/jenkins/.local/bin/taskcat", line 97, in main
22:49:35 tcat_instance.cleanup(testdata, 5)
22:49:35 File "/var/lib/jenkins/.local/lib/python3.6/site-packages/taskcat/stacker.py", line 1322, in cleanup
22:49:35 self.deep_cleanup(testdata_list)
22:49:35 File "/var/lib/jenkins/.local/lib/python3.6/site-packages/taskcat/stacker.py", line 1350, in deep_cleanup
22:49:35 failed_stacks = CfnResourceTools.get_all_resources(failed_stack_ids, region)
22:49:35 TypeError: get_all_resources() missing 1 required positional argument: 'region'
22:49:35 Build step 'Execute shell' marked build as failure
22:49:35 Checking console output
22:49:35 Build did not succeed and the project is configured to only push after a successful build, so no pushing will occur.
22:49:35 Finished: FAILURE
|
TypeError
|
def collect_resources(self, testdata_list, logpath):
"""
This function collects the AWS resources information created by the
CloudFormation stack for generating the report.
:param testdata_list: List of TestData object
:param logpath: Log file path
"""
resource = {}
print(PrintMsg.INFO + "(Collecting Resources)")
for test in testdata_list:
for stack in test.get_test_stacks():
stackinfo = CommonTools(stack["StackId"]).parse_stack_info()
# Get stack resources
resource[stackinfo["region"]] = CfnResourceTools(
self._boto_client
).get_resources(str(stackinfo["stack_name"]), str(stackinfo["region"]))
extension = ".txt"
test_logpath = "{}/{}-{}-{}{}".format(
logpath,
stackinfo["stack_name"],
stackinfo["region"],
"resources",
extension,
)
# Write resource logs
file = open(test_logpath, "w")
file.write(str(json.dumps(resource, indent=4, separators=(",", ": "))))
file.close()
|
def collect_resources(self, testdata_list, logpath):
"""
This function collects the AWS resources information created by the
CloudFormation stack for generating the report.
:param testdata_list: List of TestData object
:param logpath: Log file path
"""
resource = {}
print(PrintMsg.INFO + "(Collecting Resources)")
for test in testdata_list:
for stack in test.get_test_stacks():
stackinfo = CommonTools(stack["StackId"]).parse_stack_info()
# Get stack resources
resource[stackinfo["region"]] = self.get_resources(
str(stackinfo["stack_name"]), str(stackinfo["region"])
)
extension = ".txt"
test_logpath = "{}/{}-{}-{}{}".format(
logpath,
stackinfo["stack_name"],
stackinfo["region"],
"resources",
extension,
)
# Write resource logs
file = open(test_logpath, "w")
file.write(str(json.dumps(resource, indent=4, separators=(",", ": "))))
file.close()
|
https://github.com/aws-quickstart/taskcat/issues/194
|
22:49:35 [INFO ] :Few stacks failed to delete. Collecting resources for deep clean-up.
22:49:35 Traceback (most recent call last):
22:49:35 File "/var/lib/jenkins/.local/bin/taskcat", line 104, in <module>
22:49:35 main()
22:49:35 File "/var/lib/jenkins/.local/bin/taskcat", line 97, in main
22:49:35 tcat_instance.cleanup(testdata, 5)
22:49:35 File "/var/lib/jenkins/.local/lib/python3.6/site-packages/taskcat/stacker.py", line 1322, in cleanup
22:49:35 self.deep_cleanup(testdata_list)
22:49:35 File "/var/lib/jenkins/.local/lib/python3.6/site-packages/taskcat/stacker.py", line 1350, in deep_cleanup
22:49:35 failed_stacks = CfnResourceTools.get_all_resources(failed_stack_ids, region)
22:49:35 TypeError: get_all_resources() missing 1 required positional argument: 'region'
22:49:35 Build step 'Execute shell' marked build as failure
22:49:35 Checking console output
22:49:35 Build did not succeed and the project is configured to only push after a successful build, so no pushing will occur.
22:49:35 Finished: FAILURE
|
TypeError
|
def __init__(self, nametag="[taskcat]"):
self.nametag = "{1}{0}{2}".format(nametag, PrintMsg.name_color, PrintMsg.rst_color)
self.project = None
self.owner = None
self.banner = None
self.capabilities = []
self.verbose = False
self.config = "config.yml"
self.test_region = []
self.s3bucket = None
self.s3bucket_type = None
self.template_path = None
self.parameter_path = None
self.default_region = None
self._template_file = None
self._template_type = None
self._parameter_file = None
self._parameter_path = None
self.ddb_table = None
self._enable_dynamodb = False
self._termsize = 110
self._strict_syntax_json = True
self._banner = ""
self._auth_mode = None
self._report = False
self._use_global = False
self._parameters = {}
self.run_cleanup = True
self.public_s3_bucket = False
self._aws_access_key = None
self._aws_secret_key = None
self._boto_profile = None
self._boto_client = ClientFactory(logger=logger)
self._key_url_map = {}
self.retain_if_failed = False
self.tags = []
self.stack_prefix = ""
self.template_data = None
self.version = get_installed_version()
self.s3_url_prefix = ""
self.upload_only = False
self._max_bucket_name_length = 63
|
def __init__(self, nametag="[taskcat]"):
self.nametag = "{1}{0}{2}".format(nametag, PrintMsg.name_color, PrintMsg.rst_color)
self.project = None
self.owner = None
self.banner = None
self.capabilities = []
self.verbose = False
self.config = "config.yml"
self.test_region = []
self.s3bucket = None
self.s3bucket_type = None
self.template_path = None
self.parameter_path = None
self.default_region = None
self._template_file = None
self._template_type = None
self._parameter_file = None
self._parameter_path = None
self.ddb_table = None
self._enable_dynamodb = False
self._termsize = 110
self._strict_syntax_json = True
self._banner = ""
self._auth_mode = None
self._report = False
self._use_global = False
self._parameters = {}
self.run_cleanup = True
self.public_s3_bucket = False
self._aws_access_key = None
self._aws_secret_key = None
self._boto_profile = None
self._boto_client = ClientFactory(logger=logger)
self._key_url_map = {}
self.retain_if_failed = False
self.tags = []
self.stack_prefix = ""
self.template_data = None
self.version = get_installed_version()
self.s3_url_prefix = ""
self.upload_only = False
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def stage_in_s3(self, taskcat_cfg):
"""
Upload templates and other artifacts to s3.
This function creates the s3 bucket with name provided in the config yml file. If
no bucket name provided, it creates the s3 bucket using project name provided in
config yml file. And uploads the templates and other artifacts to the s3 bucket.
:param taskcat_cfg: Taskcat configuration provided in yml file
"""
if self.public_s3_bucket:
bucket_or_object_acl = "public-read"
else:
bucket_or_object_acl = "bucket-owner-read"
s3_client = self._boto_client.get("s3", region=self.get_default_region(), s3v4=True)
self.set_project(taskcat_cfg["global"]["qsname"])
if "s3bucket" in taskcat_cfg["global"].keys():
self.set_s3bucket(taskcat_cfg["global"]["s3bucket"])
self.set_s3bucket_type("defined")
print(PrintMsg.INFO + "Staging Bucket => " + self.get_s3bucket())
if len(self.get_s3bucket()) > self._max_bucket_name_length:
raise TaskCatException(
"The bucket name you provided is greater than 63 characters."
)
else:
auto_bucket = (
"taskcat-" + self.stack_prefix + "-" + self.get_project() + "-" + jobid[:8]
)
auto_bucket = auto_bucket.lower()
if len(auto_bucket) > self._max_bucket_name_length:
auto_bucket = auto_bucket[: self._max_bucket_name_length]
if self.get_default_region():
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
if self.get_default_region() == "us-east-1":
response = s3_client.create_bucket(
ACL=bucket_or_object_acl, Bucket=auto_bucket
)
else:
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
self.set_s3bucket_type("auto")
else:
raise TaskCatException("Default_region = " + self.get_default_region())
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
else:
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
if self.tags:
s3_client.put_bucket_tagging(
Bucket=auto_bucket, Tagging={"TagSet": self.tags}
)
if os.path.isdir(self.get_project()):
start_location = "{}/{}".format(".", self.get_project())
else:
print(
"""\t\t Hint: The name specfied as value of qsname ({})
must match the root directory of your project""".format(
self.get_project()
)
)
print(
"{0}!Cannot find directory [{1}] in {2}".format(
PrintMsg.ERROR, self.get_project(), os.getcwd()
)
)
raise TaskCatException("Please cd to where you project is located")
S3Sync(
s3_client,
self.get_s3bucket(),
self.get_project(),
start_location,
bucket_or_object_acl,
)
self.s3_url_prefix = "https://" + self.get_s3_hostname() + "/" + self.get_project()
if self.upload_only:
exit0("Upload completed successfully")
|
def stage_in_s3(self, taskcat_cfg):
"""
Upload templates and other artifacts to s3.
This function creates the s3 bucket with name provided in the config yml file. If
no bucket name provided, it creates the s3 bucket using project name provided in
config yml file. And uploads the templates and other artifacts to the s3 bucket.
:param taskcat_cfg: Taskcat configuration provided in yml file
"""
if self.public_s3_bucket:
bucket_or_object_acl = "public-read"
else:
bucket_or_object_acl = "bucket-owner-read"
s3_client = self._boto_client.get("s3", region=self.get_default_region(), s3v4=True)
self.set_project(taskcat_cfg["global"]["qsname"])
if "s3bucket" in taskcat_cfg["global"].keys():
self.set_s3bucket(taskcat_cfg["global"]["s3bucket"])
self.set_s3bucket_type("defined")
print(PrintMsg.INFO + "Staging Bucket => " + self.get_s3bucket())
else:
auto_bucket = (
"taskcat-" + self.stack_prefix + "-" + self.get_project() + "-" + jobid[:8]
)
if self.get_default_region():
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
if self.get_default_region() == "us-east-1":
response = s3_client.create_bucket(
ACL=bucket_or_object_acl, Bucket=auto_bucket
)
else:
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
self.set_s3bucket_type("auto")
else:
raise TaskCatException("Default_region = " + self.get_default_region())
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
else:
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
if self.tags:
s3_client.put_bucket_tagging(
Bucket=auto_bucket, Tagging={"TagSet": self.tags}
)
if os.path.isdir(self.get_project()):
start_location = "{}/{}".format(".", self.get_project())
else:
print(
"""\t\t Hint: The name specfied as value of qsname ({})
must match the root directory of your project""".format(
self.get_project()
)
)
print(
"{0}!Cannot find directory [{1}] in {2}".format(
PrintMsg.ERROR, self.get_project(), os.getcwd()
)
)
raise TaskCatException("Please cd to where you project is located")
S3Sync(
s3_client,
self.get_s3bucket(),
self.get_project(),
start_location,
bucket_or_object_acl,
)
self.s3_url_prefix = "https://" + self.get_s3_hostname() + "/" + self.get_project()
if self.upload_only:
exit0("Upload completed successfully")
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def __init__(self, nametag="[taskcat]"):
self.nametag = "{1}{0}{2}".format(nametag, PrintMsg.name_color, PrintMsg.rst_color)
self.project = None
self.owner = None
self.banner = None
self.capabilities = []
self.verbose = False
self.config = "config.yml"
self.test_region = []
self.s3bucket = None
self.s3bucket_type = None
self.template_path = None
self.parameter_path = None
self.default_region = None
self._template_file = None
self._template_type = None
self._parameter_file = None
self._parameter_path = None
self.ddb_table = None
self._enable_dynamodb = False
self._termsize = 110
self._strict_syntax_json = True
self._banner = ""
self._auth_mode = None
self._report = False
self._use_global = False
self._parameters = {}
self.run_cleanup = True
self.public_s3_bucket = False
self._aws_access_key = None
self._aws_secret_key = None
self._boto_profile = None
self._boto_client = ClientFactory(logger=logger)
self._key_url_map = {}
self.retain_if_failed = False
self.tags = []
self.stack_prefix = ""
self.template_data = None
self.version = get_installed_version()
self.s3_url_prefix = ""
self.upload_only = False
self._max_bucket_name_length = 63
|
def __init__(self, nametag="[taskcat]"):
self.nametag = "{1}{0}{2}".format(nametag, PrintMsg.name_color, PrintMsg.rst_color)
self.project = None
self.owner = None
self.banner = None
self.capabilities = []
self.verbose = False
self.config = "config.yml"
self.test_region = []
self.s3bucket = None
self.s3bucket_type = None
self.template_path = None
self.parameter_path = None
self.default_region = None
self._template_file = None
self._template_type = None
self._parameter_file = None
self._parameter_path = None
self.ddb_table = None
self._enable_dynamodb = False
self._termsize = 110
self._strict_syntax_json = True
self._banner = ""
self._auth_mode = None
self._report = False
self._use_global = False
self._parameters = {}
self.run_cleanup = True
self.public_s3_bucket = False
self._aws_access_key = None
self._aws_secret_key = None
self._boto_profile = None
self._boto_client = ClientFactory(logger=logger)
self._key_url_map = {}
self.multithread_upload = False
self.retain_if_failed = False
self.tags = []
self.stack_prefix = ""
self.template_data = None
self.version = get_installed_version()
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def stage_in_s3(self, taskcat_cfg):
"""
Upload templates and other artifacts to s3.
This function creates the s3 bucket with name provided in the config yml file. If
no bucket name provided, it creates the s3 bucket using project name provided in
config yml file. And uploads the templates and other artifacts to the s3 bucket.
:param taskcat_cfg: Taskcat configuration provided in yml file
"""
if self.public_s3_bucket:
bucket_or_object_acl = "public-read"
else:
bucket_or_object_acl = "bucket-owner-read"
s3_client = self._boto_client.get("s3", region=self.get_default_region(), s3v4=True)
self.set_project(taskcat_cfg["global"]["qsname"])
if "s3bucket" in taskcat_cfg["global"].keys():
self.set_s3bucket(taskcat_cfg["global"]["s3bucket"])
self.set_s3bucket_type("defined")
print(PrintMsg.INFO + "Staging Bucket => " + self.get_s3bucket())
if len(self.get_s3bucket()) > self._max_bucket_name_length:
raise TaskCatException(
"The bucket name you provided is greater than 63 characters."
)
try:
_ = s3_client.list_objects(Bucket=self.get_s3bucket())
except s3_client.exceptions.NoSuchBucket:
raise TaskCatException(
"The bucket you provided [{}] does not exist. Exiting.".format(
self.get_s3bucket()
)
)
except Exception:
raise
else:
auto_bucket = (
"taskcat-" + self.stack_prefix + "-" + self.get_project() + "-" + jobid[:8]
)
auto_bucket = auto_bucket.lower()
if len(auto_bucket) > self._max_bucket_name_length:
auto_bucket = auto_bucket[: self._max_bucket_name_length]
if self.get_default_region():
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
if self.get_default_region() == "us-east-1":
response = s3_client.create_bucket(
ACL=bucket_or_object_acl, Bucket=auto_bucket
)
else:
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
self.set_s3bucket_type("auto")
else:
raise TaskCatException("Default_region = " + self.get_default_region())
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
else:
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
if self.tags:
s3_client.put_bucket_tagging(
Bucket=auto_bucket, Tagging={"TagSet": self.tags}
)
if os.path.isdir(self.get_project()):
start_location = "{}/{}".format(".", self.get_project())
else:
print(
"""\t\t Hint: The name specfied as value of qsname ({})
must match the root directory of your project""".format(
self.get_project()
)
)
print(
"{0}!Cannot find directory [{1}] in {2}".format(
PrintMsg.ERROR, self.get_project(), os.getcwd()
)
)
raise TaskCatException("Please cd to where you project is located")
S3Sync(
s3_client,
self.get_s3bucket(),
self.get_project(),
start_location,
bucket_or_object_acl,
)
self.s3_url_prefix = "https://" + self.get_s3_hostname() + "/" + self.get_project()
if self.upload_only:
exit0("Upload completed successfully")
|
def stage_in_s3(self, taskcat_cfg):
"""
Upload templates and other artifacts to s3.
This function creates the s3 bucket with name provided in the config yml file. If
no bucket name provided, it creates the s3 bucket using project name provided in
config yml file. And uploads the templates and other artifacts to the s3 bucket.
:param taskcat_cfg: Taskcat configuration provided in yml file
"""
if self.public_s3_bucket:
bucket_or_object_acl = "public-read"
else:
bucket_or_object_acl = "bucket-owner-read"
s3_client = self._boto_client.get("s3", region=self.get_default_region(), s3v4=True)
self.set_project(taskcat_cfg["global"]["qsname"])
# TODO Update to alchemist upload
if "s3bucket" in taskcat_cfg["global"].keys():
self.set_s3bucket(taskcat_cfg["global"]["s3bucket"])
self.set_s3bucket_type("defined")
print(PrintMsg.INFO + "Staging Bucket => " + self.get_s3bucket())
else:
auto_bucket = (
"taskcat-" + self.stack_prefix + "-" + self.get_project() + "-" + jobid[:8]
)
if self.get_default_region():
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
if self.get_default_region() == "us-east-1":
response = s3_client.create_bucket(
ACL=bucket_or_object_acl, Bucket=auto_bucket
)
else:
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
self.set_s3bucket_type("auto")
else:
raise TaskCatException("Default_region = " + self.get_default_region())
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
else:
print(
"{0}Creating bucket {1} in {2}".format(
PrintMsg.INFO, auto_bucket, self.get_default_region()
)
)
response = s3_client.create_bucket(
ACL=bucket_or_object_acl,
Bucket=auto_bucket,
CreateBucketConfiguration={
"LocationConstraint": self.get_default_region()
},
)
if response["ResponseMetadata"]["HTTPStatusCode"] is 200:
print(PrintMsg.INFO + "Staging Bucket => [%s]" % auto_bucket)
self.set_s3bucket(auto_bucket)
if self.tags:
s3_client.put_bucket_tagging(
Bucket=auto_bucket, Tagging={"TagSet": self.tags}
)
# TODO Remove after alchemist is implemented
if os.path.isdir(self.get_project()):
current_dir = "."
start_location = "{}/{}".format(".", self.get_project())
fsmap = buildmap(current_dir, start_location, partial_match=False)
else:
print(
"""\t\t Hint: The name specfied as value of qsname ({})
must match the root directory of your project""".format(
self.get_project()
)
)
print(
"{0}!Cannot find directory [{1}] in {2}".format(
PrintMsg.ERROR, self.get_project(), os.getcwd()
)
)
raise TaskCatException("Please cd to where you project is located")
if self.multithread_upload:
threads = 16
print(
PrintMsg.INFO + "Multithread upload enabled, spawning %s threads" % threads
)
pool = ThreadPool(threads)
func = partial(
self._s3_upload_file,
s3_client=s3_client,
bucket_or_object_acl=bucket_or_object_acl,
)
pool.map(func, fsmap)
pool.close()
pool.join()
else:
for filename in fsmap:
self._s3_upload_file(
filename, s3_client=s3_client, bucket_or_object_acl=bucket_or_object_acl
)
paginator = s3_client.get_paginator("list_objects")
operation_parameters = {"Bucket": self.get_s3bucket(), "Prefix": self.get_project()}
s3_pages = paginator.paginate(**operation_parameters)
for s3keys in s3_pages.search("Contents"):
print(
"{}[S3: -> ]{} s3://{}/{}".format(
PrintMsg.white,
PrintMsg.rst_color,
self.get_s3bucket(),
s3keys.get("Key"),
)
)
print(
"{} |Contents of S3 Bucket {} {}".format(
self.nametag, PrintMsg.header, PrintMsg.rst_color
)
)
print("\n")
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def get_global_region(self, yamlcfg):
"""
Returns a list of regions defined under global region in the yml config file.
:param yamlcfg: Content of the yml config file
:return: List of regions
"""
g_regions = []
for keys in yamlcfg["global"].keys():
if "region" in keys:
namespace = "global"
try:
iter(yamlcfg["global"]["regions"])
for region in yamlcfg["global"]["regions"]:
g_regions.append(region)
self._use_global = True
except TypeError as e:
print(PrintMsg.ERROR + "No regions defined in [%s]:" % namespace)
print(PrintMsg.ERROR + "Please correct region defs[%s]:" % namespace)
return g_regions
|
def get_global_region(self, yamlcfg):
"""
Returns a list of regions defined under global region in the yml config file.
:param yamlcfg: Content of the yml config file
:return: List of regions
"""
g_regions = []
for keys in yamlcfg["global"].keys():
if "region" in keys:
try:
iter(yamlcfg["global"]["regions"])
namespace = "global"
for region in yamlcfg["global"]["regions"]:
# print("found region %s" % region)
g_regions.append(region)
self._use_global = True
except TypeError:
print("No regions defined in [%s]:" % namespace)
print("Please correct region defs[%s]:" % namespace)
return g_regions
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def validate_template(self, taskcat_cfg, test_list):
"""
Returns TRUE if all the template files are valid, otherwise FALSE.
:param taskcat_cfg: TaskCat config object
:param test_list: List of tests
:return: TRUE if templates are valid, else FALSE
"""
# Load global regions
self.set_test_region(self.get_global_region(taskcat_cfg))
for test in test_list:
print(self.nametag + " :Validate Template in test[%s]" % test)
self.define_tests(taskcat_cfg, test)
try:
if self.verbose:
print(
PrintMsg.DEBUG + "Default region [%s]" % self.get_default_region()
)
cfn = self._boto_client.get(
"cloudformation", region=self.get_default_region()
)
result = cfn.validate_template(
TemplateURL=self.s3_url_prefix
+ "/templates/"
+ self.get_template_file()
)
print(PrintMsg.PASS + "Validated [%s]" % self.get_template_file())
if "Description" in result:
cfn_result = result["Description"]
print(PrintMsg.INFO + "Description [%s]" % textwrap.fill(cfn_result))
else:
print(
PrintMsg.INFO
+ "Please include a top-level description for template: [%s]"
% self.get_template_file()
)
if self.verbose:
cfn_params = json.dumps(
result["Parameters"], indent=11, separators=(",", ": ")
)
print(PrintMsg.DEBUG + "Parameters:")
print(cfn_params)
except TaskCatException:
raise
except Exception as e:
if self.verbose:
print(PrintMsg.DEBUG + str(e))
print(PrintMsg.FAIL + "Cannot validate %s" % self.get_template_file())
print(PrintMsg.INFO + "Deleting any automatically-created buckets...")
self.delete_autobucket()
raise TaskCatException("Cannot validate %s" % self.get_template_file())
print("\n")
return True
|
def validate_template(self, taskcat_cfg, test_list):
"""
Returns TRUE if all the template files are valid, otherwise FALSE.
:param taskcat_cfg: TaskCat config object
:param test_list: List of tests
:return: TRUE if templates are valid, else FALSE
"""
# Load global regions
self.set_test_region(self.get_global_region(taskcat_cfg))
for test in test_list:
print(self.nametag + " :Validate Template in test[%s]" % test)
self.define_tests(taskcat_cfg, test)
try:
if self.verbose:
print(
PrintMsg.DEBUG + "Default region [%s]" % self.get_default_region()
)
cfn = self._boto_client.get(
"cloudformation", region=self.get_default_region()
)
cfn.validate_template(TemplateURL=self.get_s3_url(self.get_template_file()))
result = cfn.validate_template(
TemplateURL=self.get_s3_url(self.get_template_file())
)
print(PrintMsg.PASS + "Validated [%s]" % self.get_template_file())
if "Description" in result:
cfn_result = result["Description"]
print(PrintMsg.INFO + "Description [%s]" % textwrap.fill(cfn_result))
else:
print(
PrintMsg.INFO
+ "Please include a top-level description for template: [%s]"
% self.get_template_file()
)
if self.verbose:
cfn_params = json.dumps(
result["Parameters"], indent=11, separators=(",", ": ")
)
print(PrintMsg.DEBUG + "Parameters:")
print(cfn_params)
except TaskCatException:
raise
except Exception as e:
if self.verbose:
print(PrintMsg.DEBUG + str(e))
print(PrintMsg.FAIL + "Cannot validate %s" % self.get_template_file())
print(PrintMsg.INFO + "Deleting any automatically-created buckets...")
self.delete_autobucket()
raise TaskCatException("Cannot validate %s" % self.get_template_file())
print("\n")
return True
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def stackcreate(self, taskcat_cfg, test_list, sprefix):
"""
This function creates CloudFormation stack for the given tests.
:param taskcat_cfg: TaskCat config as yaml object
:param test_list: List of tests
:param sprefix: Special prefix as string. Purpose of this param is to use it for tagging
the stack.
:return: List of TestData objects
"""
testdata_list = []
self.set_capabilities("CAPABILITY_NAMED_IAM")
for test in test_list:
testdata = TestData()
testdata.set_test_name(test)
print(
"{0}{1}|PREPARING TO LAUNCH => {2}{3}".format(
PrintMsg.INFO, PrintMsg.header, test, PrintMsg.rst_color
)
)
sname = str(sig)
stackname = sname + "-" + sprefix + "-" + test + "-" + jobid[:8]
self.define_tests(taskcat_cfg, test)
for region in self.get_test_region():
print(PrintMsg.INFO + "Preparing to launch in region [%s] " % region)
try:
cfn = self._boto_client.get("cloudformation", region=region)
s_parmsdata = self.get_contents(
"./" + self.get_project() + "/ci/" + self.get_parameter_file()
)
s_parms = json.loads(s_parmsdata)
s_include_params = self.get_param_includes(s_parms)
if s_include_params:
s_parms = s_include_params
j_params = self.generate_input_param_values(s_parms, region)
if self.verbose:
print(
PrintMsg.DEBUG + "Creating Boto Connection region=%s" % region
)
print(PrintMsg.DEBUG + "StackName=" + stackname)
print(PrintMsg.DEBUG + "DisableRollback=True")
print(PrintMsg.DEBUG + "TemplateURL=%s" % self.get_template_path())
print(PrintMsg.DEBUG + "Capabilities=%s" % self.get_capabilities())
print(PrintMsg.DEBUG + "Parameters:")
print(PrintMsg.DEBUG + "Tags:%s" % str(self.tags))
if self.get_template_type() == "json":
print(
json.dumps(
j_params,
sort_keys=True,
indent=11,
separators=(",", ": "),
)
)
try:
stackdata = cfn.create_stack(
StackName=stackname,
DisableRollback=True,
TemplateURL=self.get_template_path(),
Parameters=j_params,
Capabilities=self.get_capabilities(),
Tags=self.tags,
)
print(PrintMsg.INFO + "|CFN Execution mode [create_stack]")
except cfn.exceptions.ClientError as e:
if not str(e).endswith(
"cannot be used with templates containing Transforms."
):
raise
print(PrintMsg.INFO + "|CFN Execution mode [change_set]")
stack_cs_data = cfn.create_change_set(
StackName=stackname,
TemplateURL=self.get_template_path(),
Parameters=j_params,
Capabilities=self.get_capabilities(),
ChangeSetType="CREATE",
ChangeSetName=stackname + "-cs",
)
change_set_name = stack_cs_data["Id"]
# wait for change set
waiter = cfn.get_waiter("change_set_create_complete")
waiter.wait(
ChangeSetName=change_set_name,
WaiterConfig={
"Delay": 10,
"MaxAttempts": 26, # max lambda execute is 5 minutes
},
)
cfn.execute_change_set(ChangeSetName=change_set_name)
stackdata = {"StackId": stack_cs_data["StackId"]}
testdata.add_test_stack(stackdata)
except TaskCatException:
raise
except Exception as e:
if self.verbose:
print(PrintMsg.ERROR + str(e))
raise TaskCatException("Cannot launch %s" % self.get_template_file())
testdata_list.append(testdata)
print("\n")
for test in testdata_list:
for stack in test.get_test_stacks():
print(
"{} |{}LAUNCHING STACKS{}".format(
self.nametag, PrintMsg.header, PrintMsg.rst_color
)
)
print(
"{} {}{} {} {}".format(
PrintMsg.INFO,
PrintMsg.header,
test.get_test_name(),
str(stack["StackId"]).split(":stack", 1),
PrintMsg.rst_color,
)
)
return testdata_list
|
def stackcreate(self, taskcat_cfg, test_list, sprefix):
"""
This function creates CloudFormation stack for the given tests.
:param taskcat_cfg: TaskCat config as yaml object
:param test_list: List of tests
:param sprefix: Special prefix as string. Purpose of this param is to use it for tagging
the stack.
:return: List of TestData objects
"""
testdata_list = []
self.set_capabilities("CAPABILITY_NAMED_IAM")
for test in test_list:
testdata = TestData()
testdata.set_test_name(test)
print(
"{0}{1}|PREPARING TO LAUNCH => {2}{3}".format(
PrintMsg.INFO, PrintMsg.header, test, PrintMsg.rst_color
)
)
sname = str(sig)
stackname = sname + "-" + sprefix + "-" + test + "-" + jobid[:8]
self.define_tests(taskcat_cfg, test)
for region in self.get_test_region():
print(PrintMsg.INFO + "Preparing to launch in region [%s] " % region)
try:
cfn = self._boto_client.get("cloudformation", region=region)
s_parmsdata = self.get_s3contents(self.get_parameter_path())
s_parms = json.loads(s_parmsdata)
s_include_params = self.get_param_includes(s_parms)
if s_include_params:
s_parms = s_include_params
j_params = self.generate_input_param_values(s_parms, region)
if self.verbose:
print(
PrintMsg.DEBUG + "Creating Boto Connection region=%s" % region
)
print(PrintMsg.DEBUG + "StackName=" + stackname)
print(PrintMsg.DEBUG + "DisableRollback=True")
print(PrintMsg.DEBUG + "TemplateURL=%s" % self.get_template_path())
print(PrintMsg.DEBUG + "Capabilities=%s" % self.get_capabilities())
print(PrintMsg.DEBUG + "Parameters:")
print(PrintMsg.DEBUG + "Tags:%s" % str(self.tags))
if self.get_template_type() == "json":
print(
json.dumps(
j_params,
sort_keys=True,
indent=11,
separators=(",", ": "),
)
)
try:
stackdata = cfn.create_stack(
StackName=stackname,
DisableRollback=True,
TemplateURL=self.get_template_path(),
Parameters=j_params,
Capabilities=self.get_capabilities(),
Tags=self.tags,
)
print(PrintMsg.INFO + "|CFN Execution mode [create_stack]")
except cfn.ecxeptions.ClientError as e:
if not str(e).endswith(
"cannot be used with templates containing Transforms."
):
raise
print(PrintMsg.INFO + "|CFN Execution mode [change_set]")
stack_cs_data = cfn.create_change_set(
StackName=stackname,
TemplateURL=self.get_template_path(),
Parameters=j_params,
Capabilities=self.get_capabilities(),
ChangeSetType="CREATE",
ChangeSetName=stackname + "-cs",
)
change_set_name = stack_cs_data["Id"]
# wait for change set
waiter = cfn.get_waiter("change_set_create_complete")
waiter.wait(
ChangeSetName=change_set_name,
WaiterConfig={
"Delay": 10,
"MaxAttempts": 26, # max lambda execute is 5 minutes
},
)
cfn.execute_change_set(ChangeSetName=change_set_name)
stackdata = {"StackId": stack_cs_data["StackId"]}
testdata.add_test_stack(stackdata)
except TaskCatException:
raise
except Exception as e:
if self.verbose:
print(PrintMsg.ERROR + str(e))
raise TaskCatException("Cannot launch %s" % self.get_template_file())
testdata_list.append(testdata)
print("\n")
for test in testdata_list:
for stack in test.get_test_stacks():
print(
"{} |{}LAUNCHING STACKS{}".format(
self.nametag, PrintMsg.header, PrintMsg.rst_color
)
)
print(
"{} {}{} {} {}".format(
PrintMsg.INFO,
PrintMsg.header,
test.get_test_name(),
str(stack["StackId"]).split(":stack", 1),
PrintMsg.rst_color,
)
)
return testdata_list
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def validate_parameters(self, taskcat_cfg, test_list):
"""
This function validates the parameters file of the CloudFormation template.
:param taskcat_cfg: TaskCat config yaml object
:param test_list: List of tests
:return: TRUPrintMsg.ERROR if the parameters file is valid, else FALSE
"""
for test in test_list:
self.define_tests(taskcat_cfg, test)
print(self.nametag + " |Validate JSON input in test[%s]" % test)
if self.verbose:
print(PrintMsg.DEBUG + "parameter_path = %s" % self.get_parameter_path())
inputparms = self.get_contents(
"./" + self.get_project() + "/ci/" + self.get_parameter_file()
)
jsonstatus = self.check_json(inputparms)
if self.verbose:
print(PrintMsg.DEBUG + "jsonstatus = %s" % jsonstatus)
if jsonstatus:
print(PrintMsg.PASS + "Validated [%s]" % self.get_parameter_file())
else:
print(PrintMsg.DEBUG + "parameter_file = %s" % self.get_parameter_file())
raise TaskCatException("Cannot validate %s" % self.get_parameter_file())
return True
|
def validate_parameters(self, taskcat_cfg, test_list):
"""
This function validates the parameters file of the CloudFormation template.
:param taskcat_cfg: TaskCat config yaml object
:param test_list: List of tests
:return: TRUPrintMsg.ERROR if the parameters file is valid, else FALSE
"""
for test in test_list:
self.define_tests(taskcat_cfg, test)
print(self.nametag + " |Validate JSON input in test[%s]" % test)
if self.verbose:
print(PrintMsg.DEBUG + "parameter_path = %s" % self.get_parameter_path())
inputparms = self.get_s3contents(self.get_parameter_path())
jsonstatus = self.check_json(inputparms)
if self.verbose:
print(PrintMsg.DEBUG + "jsonstatus = %s" % jsonstatus)
if jsonstatus:
print(PrintMsg.PASS + "Validated [%s]" % self.get_parameter_file())
else:
print(PrintMsg.DEBUG + "parameter_file = %s" % self.get_parameter_file())
raise TaskCatException("Cannot validate %s" % self.get_parameter_file())
return True
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def interface(self):
parser = argparse.ArgumentParser(
description="""
Multi-Region CloudFormation Test Deployment Tool)
For more info see: http://taskcat.io
""",
prog="taskcat",
prefix_chars="-",
formatter_class=RawTextHelpFormatter,
)
parser.add_argument(
"-c",
"--config_yml",
type=str,
help=" (Config File Required!) \n "
"example here: https://raw.githubusercontent.com/aws-quickstart/"
"taskcat/master/examples/sample-taskcat-project/ci/taskcat.yml",
)
parser.add_argument(
"-P", "--boto_profile", type=str, help="Authenticate using boto profile"
)
parser.add_argument("-A", "--aws_access_key", type=str, help="AWS Access Key")
parser.add_argument("-S", "--aws_secret_key", type=str, help="AWS Secret Key")
parser.add_argument(
"-n",
"--no_cleanup",
action="store_true",
help="Sets cleanup to false (Does not teardown stacks)",
)
parser.add_argument(
"-N",
"--no_cleanup_failed",
action="store_true",
help="Sets cleaup to false if the stack launch fails (Does not teardown stacks if it experiences a failure)",
)
parser.add_argument(
"-p",
"--public_s3_bucket",
action="store_true",
help="Sets public_s3_bucket to True. (Accesses objects via public HTTP, not S3 API calls)",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enables verbosity"
)
parser.add_argument(
"-t",
"--tag",
action=AppendTag,
help="add tag to cloudformation stack, must be in the format TagKey=TagValue, multiple -t can be specified",
)
parser.add_argument(
"-s",
"--stack-prefix",
type=str,
default="tag",
help="set prefix for cloudformation stack name. only accepts lowercase letters, numbers and '-'",
)
parser.add_argument(
"-l",
"--lint",
type=str,
default="warn",
help="set linting 'strict' - will fail on errors and warnings, 'error' will fail on errors or 'warn' will "
"log errors to the console, but not fail",
)
parser.add_argument("-V", "--version", action="store_true", help="Prints Version")
parser.add_argument(
"-u",
"--upload-only",
action="store_true",
help="Sync local files with s3 and exit",
)
args = parser.parse_args()
if len(sys.argv) == 1:
self.welcome()
print(parser.print_help())
exit0()
if args.version:
print(get_installed_version())
exit0()
if args.upload_only:
self.upload_only = True
if not args.config_yml:
parser.error("-c (--config_yml) not passed (Config File Required!)")
print(parser.print_help())
raise TaskCatException("-c (--config_yml) not passed (Config File Required!)")
try:
self.tags = args.tags
except AttributeError:
pass
if not re.compile("^[a-z0-9\-]+$").match(args.stack_prefix):
raise TaskCatException(
"--stack-prefix only accepts lowercase letters, numbers and '-'"
)
self.stack_prefix = args.stack_prefix
if args.verbose:
self.verbose = True
# Overrides Defaults for cleanup but does not overwrite config.yml
if args.no_cleanup:
self.run_cleanup = False
if args.boto_profile is not None:
if args.aws_access_key is not None or args.aws_secret_key is not None:
parser.error(
"Cannot use boto profile -P (--boto_profile)"
+ "with --aws_access_key or --aws_secret_key"
)
print(parser.print_help())
raise TaskCatException(
"Cannot use boto profile -P (--boto_profile)"
+ "with --aws_access_key or --aws_secret_key"
)
if args.public_s3_bucket:
self.public_s3_bucket = True
if args.no_cleanup_failed:
if args.no_cleanup:
parser.error("Cannot use -n (--no_cleanup) with -N (--no_cleanup_failed)")
print(parser.print_help())
raise TaskCatException(
"Cannot use -n (--no_cleanup) with -N (--no_cleanup_failed)"
)
self.retain_if_failed = True
return args
|
def interface(self):
parser = argparse.ArgumentParser(
description="""
Multi-Region CloudFormation Test Deployment Tool)
For more info see: http://taskcat.io
""",
prog="taskcat",
prefix_chars="-",
formatter_class=RawTextHelpFormatter,
)
parser.add_argument(
"-c",
"--config_yml",
type=str,
help=" (Config File Required!) \n "
"example here: https://raw.githubusercontent.com/aws-quickstart/"
"taskcat/master/examples/sample-taskcat-project/ci/taskcat.yml",
)
parser.add_argument(
"-P", "--boto_profile", type=str, help="Authenticate using boto profile"
)
parser.add_argument("-A", "--aws_access_key", type=str, help="AWS Access Key")
parser.add_argument("-S", "--aws_secret_key", type=str, help="AWS Secret Key")
parser.add_argument(
"-n",
"--no_cleanup",
action="store_true",
help="Sets cleanup to false (Does not teardown stacks)",
)
parser.add_argument(
"-N",
"--no_cleanup_failed",
action="store_true",
help="Sets cleaup to false if the stack launch fails (Does not teardown stacks if it experiences a failure)",
)
parser.add_argument(
"-p",
"--public_s3_bucket",
action="store_true",
help="Sets public_s3_bucket to True. (Accesses objects via public HTTP, not S3 API calls)",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enables verbosity"
)
parser.add_argument(
"-m",
"--multithread_upload",
action="store_true",
help="Enables multithreaded upload to S3",
)
parser.add_argument(
"-t",
"--tag",
action=AppendTag,
help="add tag to cloudformation stack, must be in the format TagKey=TagValue, multiple -t can be specified",
)
parser.add_argument(
"-s",
"--stack-prefix",
type=str,
default="tag",
help="set prefix for cloudformation stack name. only accepts lowercase letters, numbers and '-'",
)
parser.add_argument(
"-l",
"--lint",
type=str,
default="warn",
help="set linting 'strict' - will fail on errors and warnings, 'error' will fail on errors or 'warn' will "
"log errors to the console, but not fail",
)
parser.add_argument("-V", "--version", action="store_true", help="Prints Version")
args = parser.parse_args()
if len(sys.argv) == 1:
self.welcome()
print(parser.print_help())
exit0()
if args.version:
print(get_installed_version())
exit0()
if not args.config_yml:
parser.error("-c (--config_yml) not passed (Config File Required!)")
print(parser.print_help())
raise TaskCatException("-c (--config_yml) not passed (Config File Required!)")
if args.multithread_upload:
self.multithread_upload = True
try:
self.tags = args.tags
except AttributeError:
pass
if not re.compile("^[a-z0-9\-]+$").match(args.stack_prefix):
raise TaskCatException(
"--stack-prefix only accepts lowercase letters, numbers and '-'"
)
self.stack_prefix = args.stack_prefix
if args.verbose:
self.verbose = True
# Overrides Defaults for cleanup but does not overwrite config.yml
if args.no_cleanup:
self.run_cleanup = False
if args.boto_profile is not None:
if args.aws_access_key is not None or args.aws_secret_key is not None:
parser.error(
"Cannot use boto profile -P (--boto_profile)"
+ "with --aws_access_key or --aws_secret_key"
)
print(parser.print_help())
raise TaskCatException(
"Cannot use boto profile -P (--boto_profile)"
+ "with --aws_access_key or --aws_secret_key"
)
if args.public_s3_bucket:
self.public_s3_bucket = True
if args.no_cleanup_failed:
if args.no_cleanup:
parser.error("Cannot use -n (--no_cleanup) with -N (--no_cleanup_failed)")
print(parser.print_help())
raise TaskCatException(
"Cannot use -n (--no_cleanup) with -N (--no_cleanup_failed)"
)
self.retain_if_failed = True
return args
|
https://github.com/aws-quickstart/taskcat/issues/155
|
·[0;30;43m[INFO ]·[0m :Creating bucket taskcat-tag-CfnLintDemo-0a074b3f in us-east-2
Traceback (most recent call last):
File "/usr/local/bin/taskcat", line 79, in <module>
main()
File "/usr/local/bin/taskcat", line 65, in main
tcat_instance.stage_in_s3(taskcat_cfg)
File "/usr/local/lib/python3.6/site-packages/taskcat/stacker.py", line 428, in stage_in_s3
'LocationConstraint': self.get_default_region()
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 320, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 623, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidBucketName) when calling the CreateBucket operation: The specified bucket is not valid.
[Container] 2018/10/31 04:40:24 Command did not exit successfully taskcat -c $PROJECTNAME/ci/taskcat.yml exit status 1
|
botocore.exceptions.ClientError
|
def __init__(self, boundaries, ncolors, clip=False, *, extend="neither"):
"""
Parameters
----------
boundaries : array-like
Monotonically increasing sequence of at least 2 boundaries.
ncolors : int
Number of colors in the colormap to be used.
clip : bool, optional
If clip is ``True``, out of range values are mapped to 0 if they
are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they
are above ``boundaries[-1]``.
If clip is ``False``, out of range values are mapped to -1 if
they are below ``boundaries[0]`` or mapped to *ncolors* if they are
above ``boundaries[-1]``. These are then converted to valid indices
by `Colormap.__call__`.
extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
Extend the number of bins to include one or both of the
regions beyond the boundaries. For example, if ``extend``
is 'min', then the color to which the region between the first
pair of boundaries is mapped will be distinct from the first
color in the colormap, and by default a
`~matplotlib.colorbar.Colorbar` will be drawn with
the triangle extension on the left or lower end.
Returns
-------
int16 scalar or array
Notes
-----
*boundaries* defines the edges of bins, and data falling within a bin
is mapped to the color with the same index.
If the number of bins, including any extensions, is less than
*ncolors*, the color index is chosen by linear interpolation, mapping
the ``[0, nbins - 1]`` range onto the ``[0, ncolors - 1]`` range.
"""
if clip and extend != "neither":
raise ValueError("'clip=True' is not compatible with 'extend'")
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
if self.N < 2:
raise ValueError(
"You must provide at least 2 boundaries "
f"(1 region) but you passed in {boundaries!r}"
)
self.Ncmap = ncolors
self.extend = extend
self._n_regions = self.N - 1 # number of colors needed
self._offset = 0
if extend in ("min", "both"):
self._n_regions += 1
self._offset = 1
if extend in ("max", "both"):
self._n_regions += 1
if self._n_regions > self.Ncmap:
raise ValueError(
f"There are {self._n_regions} color bins "
"including extensions, but ncolors = "
f"{ncolors}; ncolors must equal or exceed the "
"number of bins"
)
|
def __init__(self, boundaries, ncolors, clip=False, *, extend="neither"):
"""
Parameters
----------
boundaries : array-like
Monotonically increasing sequence of boundaries
ncolors : int
Number of colors in the colormap to be used
clip : bool, optional
If clip is ``True``, out of range values are mapped to 0 if they
are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they
are above ``boundaries[-1]``.
If clip is ``False``, out of range values are mapped to -1 if
they are below ``boundaries[0]`` or mapped to *ncolors* if they are
above ``boundaries[-1]``. These are then converted to valid indices
by `Colormap.__call__`.
extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
Extend the number of bins to include one or both of the
regions beyond the boundaries. For example, if ``extend``
is 'min', then the color to which the region between the first
pair of boundaries is mapped will be distinct from the first
color in the colormap, and by default a
`~matplotlib.colorbar.Colorbar` will be drawn with
the triangle extension on the left or lower end.
Returns
-------
int16 scalar or array
Notes
-----
*boundaries* defines the edges of bins, and data falling within a bin
is mapped to the color with the same index.
If the number of bins, including any extensions, is less than
*ncolors*, the color index is chosen by linear interpolation, mapping
the ``[0, nbins - 1]`` range onto the ``[0, ncolors - 1]`` range.
"""
if clip and extend != "neither":
raise ValueError("'clip=True' is not compatible with 'extend'")
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
self.extend = extend
self._N = self.N - 1 # number of colors needed
self._offset = 0
if extend in ("min", "both"):
self._N += 1
self._offset = 1
if extend in ("max", "both"):
self._N += 1
if self._N > self.Ncmap:
raise ValueError(
f"There are {self._N} color bins including "
f"extensions, but ncolors = {ncolors}; "
"ncolors must equal or exceed the number of "
"bins"
)
|
https://github.com/matplotlib/matplotlib/issues/17579
|
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-160-8a8135eaeb3c> in <module>
2 bounds = [0, 1]
3 norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
----> 4 norm(0.5)
/usr/local/lib/python3.6/dist-packages/matplotlib/colors.py in __call__(self, value, clip)
1442 iret[xx >= b] = i
1443 if self._interp:
-> 1444 scalefac = (self.Ncmap - 1) / (self.N - 2)
1445 iret = (iret * scalefac).astype(np.int16)
1446 iret[xx < self.vmin] = -1
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = np.ma.getmaskarray(xx)
# Fill masked values a value above the upper boundary
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
# this gives us the bins in the lookup table in the range
# [0, _n_regions - 1] (the offset is baked in in the init)
iret = np.digitize(xx, self.boundaries) - 1 + self._offset
# if we have more colors than regions, stretch the region
# index computed above to full range of the color bins. This
# will make use of the full range (but skip some of the colors
# in the middle) such that the first region is mapped to the
# first color and the last region is mapped to the last color.
if self.Ncmap > self._n_regions:
if self._n_regions == 1:
# special case the 1 region case, pick the middle color
iret[iret == 0] = (self.Ncmap - 1) // 2
else:
# otherwise linearly remap the values from the region index
# to the color index spaces
iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret
# cast to 16bit integers in all cases
iret = iret.astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = np.ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
|
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = np.ma.getmaskarray(xx)
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
iret = np.digitize(xx, self.boundaries) - 1 + self._offset
if self.Ncmap > self._N:
scalefac = (self.Ncmap - 1) / (self._N - 1)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = np.ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
|
https://github.com/matplotlib/matplotlib/issues/17579
|
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-160-8a8135eaeb3c> in <module>
2 bounds = [0, 1]
3 norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
----> 4 norm(0.5)
/usr/local/lib/python3.6/dist-packages/matplotlib/colors.py in __call__(self, value, clip)
1442 iret[xx >= b] = i
1443 if self._interp:
-> 1444 scalefac = (self.Ncmap - 1) / (self.N - 2)
1445 iret = (iret * scalefac).astype(np.int16)
1446 iret[xx < self.vmin] = -1
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def to_rgba(c, alpha=None):
"""
Convert *c* to an RGBA color.
Parameters
----------
c : Matplotlib color or ``np.ma.masked``
alpha : scalar, optional
If *alpha* is not ``None``, it forces the alpha value, except if *c* is
``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
Returns
-------
tuple
Tuple of ``(r, g, b, a)`` scalars.
"""
# Special-case nth color syntax because it should not be cached.
if _is_nth_color(c):
from matplotlib import rcParams
prop_cycler = rcParams["axes.prop_cycle"]
colors = prop_cycler.by_key().get("color", ["k"])
c = colors[int(c[1:]) % len(colors)]
try:
rgba = _colors_full_map.cache[c, alpha]
except (KeyError, TypeError): # Not in cache, or unhashable.
rgba = None
if rgba is None: # Suppress exception chaining of cache lookup failure.
rgba = _to_rgba_no_colorcycle(c, alpha)
try:
_colors_full_map.cache[c, alpha] = rgba
except TypeError:
pass
return rgba
|
def to_rgba(c, alpha=None):
"""
Convert *c* to an RGBA color.
Parameters
----------
c : Matplotlib color
alpha : scalar, optional
If *alpha* is not ``None``, it forces the alpha value, except if *c* is
``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
Returns
-------
tuple
Tuple of ``(r, g, b, a)`` scalars.
"""
# Special-case nth color syntax because it should not be cached.
if _is_nth_color(c):
from matplotlib import rcParams
prop_cycler = rcParams["axes.prop_cycle"]
colors = prop_cycler.by_key().get("color", ["k"])
c = colors[int(c[1:]) % len(colors)]
try:
rgba = _colors_full_map.cache[c, alpha]
except (KeyError, TypeError): # Not in cache, or unhashable.
rgba = None
if rgba is None: # Suppress exception chaining of cache lookup failure.
rgba = _to_rgba_no_colorcycle(c, alpha)
try:
_colors_full_map.cache[c, alpha] = rgba
except TypeError:
pass
return rgba
|
https://github.com/matplotlib/matplotlib/issues/14301
|
Traceback (most recent call last):
File "D:\test.py", line 9, in <module>
ax.scatter(x, y, edgecolor=c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\__init__.py", line 1589, in inner
return func(ax, *map(sanitize_sequence, args), **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\axes\_axes.py", line 4490, in scatter
alpha=alpha
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 883, in __init__
Collection.__init__(self, **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 128, in __init__
self.set_edgecolor(edgecolors)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 728, in set_edgecolor
self._set_edgecolor(c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 712, in _set_edgecolor
self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 286, in to_rgba_array
result[i] = to_rgba(cc, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 177, in to_rgba
rgba = _to_rgba_no_colorcycle(c, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 238, in _to_rgba_no_colorcycle
raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
ValueError: Invalid RGBA argument: masked
|
ValueError
|
def _to_rgba_no_colorcycle(c, alpha=None):
"""Convert *c* to an RGBA color, with no support for color-cycle syntax.
If *alpha* is not ``None``, it forces the alpha value, except if *c* is
``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
"""
orig_c = c
if c is np.ma.masked:
return (0.0, 0.0, 0.0, 0.0)
if isinstance(c, str):
if c.lower() == "none":
return (0.0, 0.0, 0.0, 0.0)
# Named color.
try:
# This may turn c into a non-string, so we check again below.
c = _colors_full_map[c]
except KeyError:
try:
c = _colors_full_map[c.lower()]
except KeyError:
pass
else:
if len(orig_c) == 1:
cbook.warn_deprecated(
"3.1",
message="Support for uppercase "
"single-letter colors is deprecated since Matplotlib "
"%(since)s and will be removed %(removal)s; please "
"use lowercase instead.",
)
if isinstance(c, str):
# hex color with no alpha.
match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
if match:
return tuple(int(n, 16) / 255 for n in [c[1:3], c[3:5], c[5:7]]) + (
alpha if alpha is not None else 1.0,
)
# hex color with alpha.
match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
if match:
color = [int(n, 16) / 255 for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
if alpha is not None:
color[-1] = alpha
return tuple(color)
# string gray.
try:
c = float(c)
except ValueError:
pass
else:
if not (0 <= c <= 1):
raise ValueError(
f"Invalid string grayscale value {orig_c!r}. "
f"Value must be within 0-1 range"
)
return c, c, c, alpha if alpha is not None else 1.0
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# tuple color.
c = np.array(c)
if not np.can_cast(c.dtype, float, "same_kind") or c.ndim != 1:
# Test the dtype explicitly as `map(float, ...)`, `np.array(...,
# float)` and `np.array(...).astype(float)` all convert "0.5" to 0.5.
# Test dimensionality to reject single floats.
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# Return a tuple to prevent the cached value from being modified.
c = tuple(c.astype(float))
if len(c) not in [3, 4]:
raise ValueError("RGBA sequence should have length 3 or 4")
if len(c) == 3 and alpha is None:
alpha = 1
if alpha is not None:
c = c[:3] + (alpha,)
if any(elem < 0 or elem > 1 for elem in c):
raise ValueError("RGBA values should be within 0-1 range")
return c
|
def _to_rgba_no_colorcycle(c, alpha=None):
"""Convert *c* to an RGBA color, with no support for color-cycle syntax.
If *alpha* is not ``None``, it forces the alpha value, except if *c* is
``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
"""
orig_c = c
if isinstance(c, str):
if c.lower() == "none":
return (0.0, 0.0, 0.0, 0.0)
# Named color.
try:
# This may turn c into a non-string, so we check again below.
c = _colors_full_map[c]
except KeyError:
try:
c = _colors_full_map[c.lower()]
except KeyError:
pass
else:
if len(orig_c) == 1:
cbook.warn_deprecated(
"3.1",
message="Support for uppercase "
"single-letter colors is deprecated since Matplotlib "
"%(since)s and will be removed %(removal)s; please "
"use lowercase instead.",
)
if isinstance(c, str):
# hex color with no alpha.
match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
if match:
return tuple(int(n, 16) / 255 for n in [c[1:3], c[3:5], c[5:7]]) + (
alpha if alpha is not None else 1.0,
)
# hex color with alpha.
match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
if match:
color = [int(n, 16) / 255 for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
if alpha is not None:
color[-1] = alpha
return tuple(color)
# string gray.
try:
c = float(c)
except ValueError:
pass
else:
if not (0 <= c <= 1):
raise ValueError(
f"Invalid string grayscale value {orig_c!r}. "
f"Value must be within 0-1 range"
)
return c, c, c, alpha if alpha is not None else 1.0
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# tuple color.
c = np.array(c)
if not np.can_cast(c.dtype, float, "same_kind") or c.ndim != 1:
# Test the dtype explicitly as `map(float, ...)`, `np.array(...,
# float)` and `np.array(...).astype(float)` all convert "0.5" to 0.5.
# Test dimensionality to reject single floats.
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# Return a tuple to prevent the cached value from being modified.
c = tuple(c.astype(float))
if len(c) not in [3, 4]:
raise ValueError("RGBA sequence should have length 3 or 4")
if len(c) == 3 and alpha is None:
alpha = 1
if alpha is not None:
c = c[:3] + (alpha,)
if any(elem < 0 or elem > 1 for elem in c):
raise ValueError("RGBA values should be within 0-1 range")
return c
|
https://github.com/matplotlib/matplotlib/issues/14301
|
Traceback (most recent call last):
File "D:\test.py", line 9, in <module>
ax.scatter(x, y, edgecolor=c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\__init__.py", line 1589, in inner
return func(ax, *map(sanitize_sequence, args), **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\axes\_axes.py", line 4490, in scatter
alpha=alpha
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 883, in __init__
Collection.__init__(self, **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 128, in __init__
self.set_edgecolor(edgecolors)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 728, in set_edgecolor
self._set_edgecolor(c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 712, in _set_edgecolor
self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 286, in to_rgba_array
result[i] = to_rgba(cc, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 177, in to_rgba
rgba = _to_rgba_no_colorcycle(c, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 238, in _to_rgba_no_colorcycle
raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
ValueError: Invalid RGBA argument: masked
|
ValueError
|
def to_rgba_array(c, alpha=None):
"""Convert *c* to a (n, 4) array of RGBA colors.
If *alpha* is not ``None``, it forces the alpha value. If *c* is
``"none"`` (case-insensitive) or an empty list, an empty array is returned.
If *c* is a masked array, an ndarray is returned with a (0, 0, 0, 0)
row for each masked value or row in *c*.
"""
# Special-case inputs that are already arrays, for performance. (If the
# array has the wrong kind or shape, raise the error during one-at-a-time
# conversion.)
if (
isinstance(c, np.ndarray)
and c.dtype.kind in "if"
and c.ndim == 2
and c.shape[1] in [3, 4]
):
mask = c.mask.any(axis=1) if np.ma.is_masked(c) else None
c = np.ma.getdata(c)
if c.shape[1] == 3:
result = np.column_stack([c, np.zeros(len(c))])
result[:, -1] = alpha if alpha is not None else 1.0
elif c.shape[1] == 4:
result = c.copy()
if alpha is not None:
result[:, -1] = alpha
if mask is not None:
result[mask] = 0
if np.any((result < 0) | (result > 1)):
raise ValueError("RGBA values should be within 0-1 range")
return result
# Handle single values.
# Note that this occurs *after* handling inputs that are already arrays, as
# `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
# to format the array in the ValueError message(!).
if cbook._str_lower_equal(c, "none"):
return np.zeros((0, 4), float)
try:
return np.array([to_rgba(c, alpha)], float)
except (ValueError, TypeError):
pass
# Convert one at a time.
if isinstance(c, str):
# Single string as color sequence.
# This is deprecated and will be removed in the future.
try:
result = np.array([to_rgba(cc, alpha) for cc in c])
except ValueError:
raise ValueError(
"'%s' is neither a valid single color nor a color sequence "
"consisting of single character color specifiers such as "
"'rgb'. Note also that the latter is deprecated." % c
)
else:
cbook.warn_deprecated(
"3.2",
message="Using a string of single "
"character colors as a color sequence is "
"deprecated. Use an explicit list instead.",
)
else:
result = np.array([to_rgba(cc, alpha) for cc in c])
return result
|
def to_rgba_array(c, alpha=None):
"""Convert *c* to a (n, 4) array of RGBA colors.
If *alpha* is not ``None``, it forces the alpha value. If *c* is
``"none"`` (case-insensitive) or an empty list, an empty array is returned.
"""
# Special-case inputs that are already arrays, for performance. (If the
# array has the wrong kind or shape, raise the error during one-at-a-time
# conversion.)
if (
isinstance(c, np.ndarray)
and c.dtype.kind in "if"
and c.ndim == 2
and c.shape[1] in [3, 4]
):
if c.shape[1] == 3:
result = np.column_stack([c, np.zeros(len(c))])
result[:, -1] = alpha if alpha is not None else 1.0
elif c.shape[1] == 4:
result = c.copy()
if alpha is not None:
result[:, -1] = alpha
if np.any((result < 0) | (result > 1)):
raise ValueError("RGBA values should be within 0-1 range")
return result
# Handle single values.
# Note that this occurs *after* handling inputs that are already arrays, as
# `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
# to format the array in the ValueError message(!).
if cbook._str_lower_equal(c, "none"):
return np.zeros((0, 4), float)
try:
return np.array([to_rgba(c, alpha)], float)
except (ValueError, TypeError):
pass
# Convert one at a time.
if isinstance(c, str):
# Single string as color sequence.
# This is deprecated and will be removed in the future.
try:
result = np.array([to_rgba(cc, alpha) for cc in c])
except ValueError:
raise ValueError(
"'%s' is neither a valid single color nor a color sequence "
"consisting of single character color specifiers such as "
"'rgb'. Note also that the latter is deprecated." % c
)
else:
cbook.warn_deprecated(
"3.2",
message="Using a string of single "
"character colors as a color sequence is "
"deprecated. Use an explicit list instead.",
)
else:
result = np.array([to_rgba(cc, alpha) for cc in c])
return result
|
https://github.com/matplotlib/matplotlib/issues/14301
|
Traceback (most recent call last):
File "D:\test.py", line 9, in <module>
ax.scatter(x, y, edgecolor=c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\__init__.py", line 1589, in inner
return func(ax, *map(sanitize_sequence, args), **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\axes\_axes.py", line 4490, in scatter
alpha=alpha
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 883, in __init__
Collection.__init__(self, **kwargs)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 128, in __init__
self.set_edgecolor(edgecolors)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 728, in set_edgecolor
self._set_edgecolor(c)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\collections.py", line 712, in _set_edgecolor
self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 286, in to_rgba_array
result[i] = to_rgba(cc, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 177, in to_rgba
rgba = _to_rgba_no_colorcycle(c, alpha)
File "C:\ProgramData\Miniconda3\lib\site-packages\matplotlib\colors.py", line 238, in _to_rgba_no_colorcycle
raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
ValueError: Invalid RGBA argument: masked
|
ValueError
|
def draw(self, renderer):
if not self.get_visible():
return
self._update(renderer) # update the tick
size = self._ticksize
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self.get_markeredgewidth())
gc.set_alpha(self._alpha)
offset = renderer.points_to_pixels(size)
marker_scale = Affine2D().scale(offset, offset)
if self.get_tick_out():
add_angle = 180
else:
add_angle = 0
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
for loc, angle in self.locs_angles:
marker_rotation.clear().rotate_deg(angle + add_angle)
locs = path_trans.transform_non_affine(np.array([loc]))
if self.axes and not self.axes.viewLim.contains(*locs[0]):
continue
renderer.draw_markers(
gc,
self._tickvert_path,
marker_transform,
Path(locs),
path_trans.get_affine(),
)
gc.restore()
|
def draw(self, renderer):
if not self.get_visible():
return
self._update(renderer) # update the tick
size = self._ticksize
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self.get_markeredgewidth())
gc.set_alpha(self._alpha)
offset = renderer.points_to_pixels(size)
marker_scale = Affine2D().scale(offset, offset)
if self.get_tick_out():
add_angle = 180
else:
add_angle = 0
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
for loc, angle in self.locs_angles:
marker_rotation.clear().rotate_deg(angle + add_angle)
locs = path_trans.transform_non_affine([loc])
if self.axes and not self.axes.viewLim.contains(*locs[0]):
continue
renderer.draw_markers(
gc,
self._tickvert_path,
marker_transform,
Path(locs),
path_trans.get_affine(),
)
gc.restore()
|
https://github.com/matplotlib/matplotlib/issues/12208
|
Traceback (most recent call last):
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 519, in _draw_idle
self.draw()
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 433, in draw
self.figure.draw(self.renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/artist.py", line 55, in draw_wrapper
return draw(artist, renderer, *args, **kwargs)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/figure.py", line 1475, in draw
renderer, self, artists, self.suppressComposite)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/image.py", line 141, in _draw_list_compositing_images
a.draw(renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/parasite_axes.py", line 286, in draw
self._get_base_axes_attr("draw")(self, renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/mpl_toolkits/axisartist/axislines.py", line 756, in draw
super(Axes, self).draw(renderer, inframe)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/artist.py", line 55, in draw_wrapper
return draw(artist, renderer, *args, **kwargs)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/axes/_base.py", line 2607, in draw
mimage._draw_list_compositing_images(renderer, self, artists)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/image.py", line 141, in _draw_list_compositing_images
a.draw(renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/artist.py", line 55, in draw_wrapper
return draw(artist, renderer, *args, **kwargs)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py", line 1479, in draw
self._draw_ticks(renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py", line 1211, in _draw_ticks
self.major_ticks.draw(renderer)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py", line 330, in draw
locs = path_trans.transform_non_affine([loc])
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/transforms.py", line 2489, in transform_non_affine
return self._a.transform_non_affine(points)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/transforms.py", line 2263, in transform_non_affine
x_points = x.transform_non_affine(points)[:, 0:1]
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/transforms.py", line 2489, in transform_non_affine
return self._a.transform_non_affine(points)
File "/opt/anaconda2/envs/py2_conda52/lib/python2.7/site-packages/matplotlib/transforms.py", line 2265, in transform_non_affine
x_points = x.transform_non_affine(points[:, 0])
TypeError: list indices must be integers, not tuple
|
TypeError
|
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
if self:
evt.Skip()
|
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
|
https://github.com/matplotlib/matplotlib/issues/3690
|
[michael@localhost play]$ gdb --args python segfault-gui.py
GNU gdb (GDB) Fedora 7.7.1-19.fc20
Copyright (C) 2014 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-redhat-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...Reading symbols from /usr/lib/debug/usr/bin/python2.7.debug...done.
done.
(gdb) r
Starting program: /usr/bin/python segfault-gui.py
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib64/libthread_db.so.1".
Traceback (most recent call last):
File "/usr/share/gdb/auto-load/usr/lib64/libgobject-2.0.so.0.3800.2-gdb.py", line 9, in <module>
from gobject import register
File "/usr/share/glib-2.0/gdb/gobject.py", line 3, in <module>
import gdb.backtrace
ImportError: No module named backtrace
Missing separate debuginfo for /lib64/libgraphite2.so.3
Try: yum --enablerepo='*debug*' install /usr/lib/debug/.build-id/90/82e46860379c3dff9004eb8c9834e50afbb528.debug
[New Thread 0x7fffee6b1700 (LWP 3733)]
[New Thread 0x7fffedeb0700 (LWP 3734)]
[New Thread 0x7fffed6af700 (LWP 3735)]
[New Thread 0x7fffeceae700 (LWP 3736)]
[Thread 0x7fffeceae700 (LWP 3736) exited]
Program received signal SIGSEGV, Segmentation fault.
wxEvtHandler::SearchDynamicEventTable (this=this@entry=0x23e08d0, event=...)
at src/common/event.cpp:1412
1412 if ((event.GetEventType() == entry->m_eventType) && (entry->m_fn != 0))
(gdb) bt
#0 wxEvtHandler::SearchDynamicEventTable (this=this@entry=0x23e08d0,
event=...) at src/common/event.cpp:1412
#1 0x0000003a91ef09f2 in wxEvtHandler::ProcessEvent (this=0x23e08d0,
event=...) at src/common/event.cpp:1297
#2 0x0000003a924079f7 in gtk_window_key_press_callback (
widget=widget@entry=0x10f4170, gdk_event=0x23e66b0,
win=win@entry=0x23e08d0) at src/gtk/window.cpp:1034
#3 0x0000003a94b484ec in _gtk_marshal_BOOLEAN__BOXED (closure=0x1fdd550,
return_value=0x7fffffffc970, n_param_values=<optimized out>,
param_values=0x7fffffffca20, invocation_hint=<optimized out>,
marshal_data=<optimized out>) at gtkmarshalers.c:86
#4 0x0000003a81e10298 in g_closure_invoke (closure=0x1fdd550,
return_value=return_value@entry=0x7fffffffc970, n_param_values=2,
param_values=param_values@entry=0x7fffffffca20,
invocation_hint=invocation_hint@entry=0x7fffffffc9c0) at gclosure.c:777
#5 0x0000003a81e2235d in signal_emit_unlocked_R (node=node@entry=0x102b7b0,
detail=detail@entry=0, instance=instance@entry=0x10f4170,
emission_return=emission_return@entry=0x7fffffffcad0,
instance_and_params=instance_and_params@entry=0x7fffffffca20)
at gsignal.c:3586
#6 0x0000003a81e29ddd in g_signal_emit_valist (instance=<optimized out>,
signal_id=<optimized out>, detail=<optimized out>,
var_args=var_args@entry=0x7fffffffcbb0) at gsignal.c:3340
#7 0x0000003a81e2a3af in g_signal_emit (instance=instance@entry=0x10f4170,
signal_id=<optimized out>, detail=detail@entry=0) at gsignal.c:3386
#8 0x0000003a94c777a4 in gtk_widget_event_internal (
widget=widget@entry=0x10f4170, event=event@entry=0x23e66b0)
at gtkwidget.c:5017
#9 0x0000003a94c77a79 in IA__gtk_widget_event (
widget=widget@entry=0x10f4170, event=event@entry=0x23e66b0)
at gtkwidget.c:4814
#10 0x0000003a94c8d2bb in IA__gtk_window_propagate_key_event (
window=window@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwindow.c:5199
#11 0x0000003a94c8fe13 in gtk_window_key_press_event (widget=0x10645b0,
event=0x23e66b0) at gtkwindow.c:5229
#12 0x0000003a94b484ec in _gtk_marshal_BOOLEAN__BOXED (closure=0x102b760,
return_value=0x7fffffffce40, n_param_values=<optimized out>,
param_values=0x7fffffffcef0, invocation_hint=<optimized out>,
marshal_data=<optimized out>) at gtkmarshalers.c:86
#13 0x0000003a81e10298 in g_closure_invoke (closure=closure@entry=0x102b760,
return_value=return_value@entry=0x7fffffffce40, n_param_values=2,
param_values=param_values@entry=0x7fffffffcef0,
invocation_hint=invocation_hint@entry=0x7fffffffce90) at gclosure.c:777
#14 0x0000003a81e2211b in signal_emit_unlocked_R (node=node@entry=0x102b7b0,
detail=detail@entry=0, instance=instance@entry=0x10645b0,
emission_return=emission_return@entry=0x7fffffffcfa0,
instance_and_params=instance_and_params@entry=0x7fffffffcef0)
at gsignal.c:3624
---Type <return> to continue, or q <return> to quit---
#15 0x0000003a81e29ddd in g_signal_emit_valist (instance=<optimized out>,
signal_id=<optimized out>, detail=<optimized out>,
var_args=var_args@entry=0x7fffffffd080) at gsignal.c:3340
#16 0x0000003a81e2a3af in g_signal_emit (instance=instance@entry=0x10645b0,
signal_id=<optimized out>, detail=detail@entry=0) at gsignal.c:3386
#17 0x0000003a94c777a4 in gtk_widget_event_internal (
widget=widget@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwidget.c:5017
#18 0x0000003a94c77a79 in IA__gtk_widget_event (
widget=widget@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwidget.c:4814
#19 0x0000003a94b467e7 in IA__gtk_propagate_event (widget=0x10645b0,
event=0x23e66b0) at gtkmain.c:2464
#20 0x0000003a94b46b0b in IA__gtk_main_do_event (event=0x23e66b0)
at gtkmain.c:1685
#21 0x0000003a9526040c in gdk_event_dispatch (source=source@entry=0x867810,
callback=<optimized out>, user_data=<optimized out>)
at gdkevents-x11.c:2403
#22 0x0000003a80a492a6 in g_main_dispatch (context=0xc1f0b0) at gmain.c:3066
#23 g_main_context_dispatch (context=context@entry=0xc1f0b0) at gmain.c:3642
#24 0x0000003a80a49628 in g_main_context_iterate (context=0xc1f0b0,
block=block@entry=1, dispatch=dispatch@entry=1, self=<optimized out>)
at gmain.c:3713
#25 0x0000003a80a49a3a in g_main_loop_run (loop=0x10e9ae0) at gmain.c:3907
#26 0x0000003a94b45b57 in IA__gtk_main () at gtkmain.c:1257
#27 0x0000003a923f718a in wxEventLoop::Run (this=0x10f6310)
at src/gtk/evtloop.cpp:76
#28 0x0000003a92476a3b in wxAppBase::MainLoop (this=this@entry=0xa93950)
at src/common/appcmn.cpp:312
#29 0x00007ffff14293af in wxPyApp::MainLoop (this=0xa93950)
at src/helpers.cpp:215
#30 0x00007ffff146a81c in _wrap_PyApp_MainLoop (args=<optimized out>)
at src/gtk/_core_wrap.cpp:31691
#31 0x00000039a7ce16f2 in ext_do_call (nk=<optimized out>,
na=<optimized out>, flags=<optimized out>, pp_stack=0x7fffffffd5a0,
func=<built-in function PyApp_MainLoop>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4408
#32 PyEval_EvalFrameEx (
f=f@entry=Frame 0x10f8670, for file /usr/lib64/python2.7/site-packages/wx-2.8-gtk2-unicode/wx/_core.py, line 7306, in MainLoop (args=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kwargs={}), throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2779
#33 0x00000039a7ce21dd in PyEval_EvalCodeEx (co=<optimized out>,
globals=<optimized out>, locals=locals@entry=0x0,
args=args@entry=0x7ffff1751728, argcount=1, kws=kws@entry=0x0,
kwcount=kwcount@entry=0, defs=defs@entry=0x0, defcount=defcount@entry=0,
closure=0x0) at /usr/src/debug/Python-2.7.5/Python/ceval.c:3330
---Type <return> to continue, or q <return> to quit---
#34 0x00000039a7c6f0d8 in function_call (func=<function at remote 0x827500>,
arg=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=0x0)
at /usr/src/debug/Python-2.7.5/Objects/funcobject.c:526
#35 0x00000039a7c4a0d3 in PyObject_Call (
func=func@entry=<function at remote 0x827500>,
arg=arg@entry=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=kw@entry=0x0)
at /usr/src/debug/Python-2.7.5/Objects/abstract.c:2529
#36 0x00000039a7c590c5 in instancemethod_call (
func=<function at remote 0x827500>,
arg=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=0x0)
at /usr/src/debug/Python-2.7.5/Objects/classobject.c:2602
#37 0x00000039a7c4a0d3 in PyObject_Call (
func=func@entry=<instancemethod at remote 0x8b6280>,
arg=arg@entry=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=kw@entry=0x0)
at /usr/src/debug/Python-2.7.5/Objects/abstract.c:2529
#38 0x00000039a7cde37c in do_call (nk=<optimized out>, na=1,
pp_stack=0x7fffffffdaf0, func=<instancemethod at remote 0x8b6280>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4316
#39 call_function (oparg=<optimized out>, pp_stack=0x7fffffffdaf0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4121
#40 PyEval_EvalFrameEx (
f=f@entry=Frame 0x10f84b0, for file /usr/lib64/python2.7/site-packages/wx-2.8-gtk2-unicode/wx/_core.py, line 8010, in MainLoop (self=<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>),
throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2740
#41 0x00000039a7ce0980 in fast_function (nk=<optimized out>, na=1, n=1,
pp_stack=0x7fffffffdc50, func=<function at remote 0x8298c0>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4184
#42 call_function (oparg=<optimized out>, pp_stack=0x7fffffffdc50)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4119
#43 PyEval_EvalFrameEx (
f=f@entry=Frame 0x6cc750, for file segfault-gui.py, line 23, in <module> (), throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2740
#44 0x00000039a7ce21dd in PyEval_EvalCodeEx (co=co@entry=0x7ffff7ed79b0,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject a---Type <return> to continue, or q <return> to quit---
t remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, args=args@entry=0x0, argcount=argcount@entry=0,
kws=kws@entry=0x0, kwcount=kwcount@entry=0, defs=defs@entry=0x0,
defcount=defcount@entry=0, closure=closure@entry=0x0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:3330
#45 0x00000039a7ce22e2 in PyEval_EvalCode (co=co@entry=0x7ffff7ed79b0,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}) at /usr/src/debug/Python-2.7.5/Python/ceval.c:689
#46 0x00000039a7cfb71f in run_mod (mod=<optimized out>,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, ---Type <return> to continue, or q <return> to quit---
y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, flags=flags@entry=0x7fffffffdeb0,
arena=arena@entry=0x6b9570)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:1373
#47 0x00000039a7cfc8de in PyRun_FileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
start=start@entry=257,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, closeit=closeit@entry=1,
flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:1359
#48 0x00000039a7cfdb69 in PyRun_SimpleFileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
closeit=closeit@entry=1, flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:951
#49 0x00000039a7cfe093 in PyRun_AnyFileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
closeit=closeit@entry=1, flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:755
#50 0x00000039a7d0eb7f in Py_Main (argc=<optimized out>, argv=<optimized out>)
at /usr/src/debug/Python-2.7.5/Modules/main.c:640
#51 0x0000003a7ee21d65 in __libc_start_main (main=0x4006f0 <main>, argc=2,
argv=0x7fffffffe078, init=<optimized out>, fini=<optimized out>,
rtld_fini=<optimized out>, stack_end=0x7fffffffe068) at libc-start.c:285
#52 0x0000000000400721 in _start ()
|
ImportError
|
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
if self:
evt.Skip()
|
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
|
https://github.com/matplotlib/matplotlib/issues/3690
|
[michael@localhost play]$ gdb --args python segfault-gui.py
GNU gdb (GDB) Fedora 7.7.1-19.fc20
Copyright (C) 2014 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-redhat-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...Reading symbols from /usr/lib/debug/usr/bin/python2.7.debug...done.
done.
(gdb) r
Starting program: /usr/bin/python segfault-gui.py
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib64/libthread_db.so.1".
Traceback (most recent call last):
File "/usr/share/gdb/auto-load/usr/lib64/libgobject-2.0.so.0.3800.2-gdb.py", line 9, in <module>
from gobject import register
File "/usr/share/glib-2.0/gdb/gobject.py", line 3, in <module>
import gdb.backtrace
ImportError: No module named backtrace
Missing separate debuginfo for /lib64/libgraphite2.so.3
Try: yum --enablerepo='*debug*' install /usr/lib/debug/.build-id/90/82e46860379c3dff9004eb8c9834e50afbb528.debug
[New Thread 0x7fffee6b1700 (LWP 3733)]
[New Thread 0x7fffedeb0700 (LWP 3734)]
[New Thread 0x7fffed6af700 (LWP 3735)]
[New Thread 0x7fffeceae700 (LWP 3736)]
[Thread 0x7fffeceae700 (LWP 3736) exited]
Program received signal SIGSEGV, Segmentation fault.
wxEvtHandler::SearchDynamicEventTable (this=this@entry=0x23e08d0, event=...)
at src/common/event.cpp:1412
1412 if ((event.GetEventType() == entry->m_eventType) && (entry->m_fn != 0))
(gdb) bt
#0 wxEvtHandler::SearchDynamicEventTable (this=this@entry=0x23e08d0,
event=...) at src/common/event.cpp:1412
#1 0x0000003a91ef09f2 in wxEvtHandler::ProcessEvent (this=0x23e08d0,
event=...) at src/common/event.cpp:1297
#2 0x0000003a924079f7 in gtk_window_key_press_callback (
widget=widget@entry=0x10f4170, gdk_event=0x23e66b0,
win=win@entry=0x23e08d0) at src/gtk/window.cpp:1034
#3 0x0000003a94b484ec in _gtk_marshal_BOOLEAN__BOXED (closure=0x1fdd550,
return_value=0x7fffffffc970, n_param_values=<optimized out>,
param_values=0x7fffffffca20, invocation_hint=<optimized out>,
marshal_data=<optimized out>) at gtkmarshalers.c:86
#4 0x0000003a81e10298 in g_closure_invoke (closure=0x1fdd550,
return_value=return_value@entry=0x7fffffffc970, n_param_values=2,
param_values=param_values@entry=0x7fffffffca20,
invocation_hint=invocation_hint@entry=0x7fffffffc9c0) at gclosure.c:777
#5 0x0000003a81e2235d in signal_emit_unlocked_R (node=node@entry=0x102b7b0,
detail=detail@entry=0, instance=instance@entry=0x10f4170,
emission_return=emission_return@entry=0x7fffffffcad0,
instance_and_params=instance_and_params@entry=0x7fffffffca20)
at gsignal.c:3586
#6 0x0000003a81e29ddd in g_signal_emit_valist (instance=<optimized out>,
signal_id=<optimized out>, detail=<optimized out>,
var_args=var_args@entry=0x7fffffffcbb0) at gsignal.c:3340
#7 0x0000003a81e2a3af in g_signal_emit (instance=instance@entry=0x10f4170,
signal_id=<optimized out>, detail=detail@entry=0) at gsignal.c:3386
#8 0x0000003a94c777a4 in gtk_widget_event_internal (
widget=widget@entry=0x10f4170, event=event@entry=0x23e66b0)
at gtkwidget.c:5017
#9 0x0000003a94c77a79 in IA__gtk_widget_event (
widget=widget@entry=0x10f4170, event=event@entry=0x23e66b0)
at gtkwidget.c:4814
#10 0x0000003a94c8d2bb in IA__gtk_window_propagate_key_event (
window=window@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwindow.c:5199
#11 0x0000003a94c8fe13 in gtk_window_key_press_event (widget=0x10645b0,
event=0x23e66b0) at gtkwindow.c:5229
#12 0x0000003a94b484ec in _gtk_marshal_BOOLEAN__BOXED (closure=0x102b760,
return_value=0x7fffffffce40, n_param_values=<optimized out>,
param_values=0x7fffffffcef0, invocation_hint=<optimized out>,
marshal_data=<optimized out>) at gtkmarshalers.c:86
#13 0x0000003a81e10298 in g_closure_invoke (closure=closure@entry=0x102b760,
return_value=return_value@entry=0x7fffffffce40, n_param_values=2,
param_values=param_values@entry=0x7fffffffcef0,
invocation_hint=invocation_hint@entry=0x7fffffffce90) at gclosure.c:777
#14 0x0000003a81e2211b in signal_emit_unlocked_R (node=node@entry=0x102b7b0,
detail=detail@entry=0, instance=instance@entry=0x10645b0,
emission_return=emission_return@entry=0x7fffffffcfa0,
instance_and_params=instance_and_params@entry=0x7fffffffcef0)
at gsignal.c:3624
---Type <return> to continue, or q <return> to quit---
#15 0x0000003a81e29ddd in g_signal_emit_valist (instance=<optimized out>,
signal_id=<optimized out>, detail=<optimized out>,
var_args=var_args@entry=0x7fffffffd080) at gsignal.c:3340
#16 0x0000003a81e2a3af in g_signal_emit (instance=instance@entry=0x10645b0,
signal_id=<optimized out>, detail=detail@entry=0) at gsignal.c:3386
#17 0x0000003a94c777a4 in gtk_widget_event_internal (
widget=widget@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwidget.c:5017
#18 0x0000003a94c77a79 in IA__gtk_widget_event (
widget=widget@entry=0x10645b0, event=event@entry=0x23e66b0)
at gtkwidget.c:4814
#19 0x0000003a94b467e7 in IA__gtk_propagate_event (widget=0x10645b0,
event=0x23e66b0) at gtkmain.c:2464
#20 0x0000003a94b46b0b in IA__gtk_main_do_event (event=0x23e66b0)
at gtkmain.c:1685
#21 0x0000003a9526040c in gdk_event_dispatch (source=source@entry=0x867810,
callback=<optimized out>, user_data=<optimized out>)
at gdkevents-x11.c:2403
#22 0x0000003a80a492a6 in g_main_dispatch (context=0xc1f0b0) at gmain.c:3066
#23 g_main_context_dispatch (context=context@entry=0xc1f0b0) at gmain.c:3642
#24 0x0000003a80a49628 in g_main_context_iterate (context=0xc1f0b0,
block=block@entry=1, dispatch=dispatch@entry=1, self=<optimized out>)
at gmain.c:3713
#25 0x0000003a80a49a3a in g_main_loop_run (loop=0x10e9ae0) at gmain.c:3907
#26 0x0000003a94b45b57 in IA__gtk_main () at gtkmain.c:1257
#27 0x0000003a923f718a in wxEventLoop::Run (this=0x10f6310)
at src/gtk/evtloop.cpp:76
#28 0x0000003a92476a3b in wxAppBase::MainLoop (this=this@entry=0xa93950)
at src/common/appcmn.cpp:312
#29 0x00007ffff14293af in wxPyApp::MainLoop (this=0xa93950)
at src/helpers.cpp:215
#30 0x00007ffff146a81c in _wrap_PyApp_MainLoop (args=<optimized out>)
at src/gtk/_core_wrap.cpp:31691
#31 0x00000039a7ce16f2 in ext_do_call (nk=<optimized out>,
na=<optimized out>, flags=<optimized out>, pp_stack=0x7fffffffd5a0,
func=<built-in function PyApp_MainLoop>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4408
#32 PyEval_EvalFrameEx (
f=f@entry=Frame 0x10f8670, for file /usr/lib64/python2.7/site-packages/wx-2.8-gtk2-unicode/wx/_core.py, line 7306, in MainLoop (args=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kwargs={}), throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2779
#33 0x00000039a7ce21dd in PyEval_EvalCodeEx (co=<optimized out>,
globals=<optimized out>, locals=locals@entry=0x0,
args=args@entry=0x7ffff1751728, argcount=1, kws=kws@entry=0x0,
kwcount=kwcount@entry=0, defs=defs@entry=0x0, defcount=defcount@entry=0,
closure=0x0) at /usr/src/debug/Python-2.7.5/Python/ceval.c:3330
---Type <return> to continue, or q <return> to quit---
#34 0x00000039a7c6f0d8 in function_call (func=<function at remote 0x827500>,
arg=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=0x0)
at /usr/src/debug/Python-2.7.5/Objects/funcobject.c:526
#35 0x00000039a7c4a0d3 in PyObject_Call (
func=func@entry=<function at remote 0x827500>,
arg=arg@entry=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=kw@entry=0x0)
at /usr/src/debug/Python-2.7.5/Objects/abstract.c:2529
#36 0x00000039a7c590c5 in instancemethod_call (
func=<function at remote 0x827500>,
arg=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=0x0)
at /usr/src/debug/Python-2.7.5/Objects/classobject.c:2602
#37 0x00000039a7c4a0d3 in PyObject_Call (
func=func@entry=<instancemethod at remote 0x8b6280>,
arg=arg@entry=(<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>,), kw=kw@entry=0x0)
at /usr/src/debug/Python-2.7.5/Objects/abstract.c:2529
#38 0x00000039a7cde37c in do_call (nk=<optimized out>, na=1,
pp_stack=0x7fffffffdaf0, func=<instancemethod at remote 0x8b6280>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4316
#39 call_function (oparg=<optimized out>, pp_stack=0x7fffffffdaf0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4121
#40 PyEval_EvalFrameEx (
f=f@entry=Frame 0x10f84b0, for file /usr/lib64/python2.7/site-packages/wx-2.8-gtk2-unicode/wx/_core.py, line 8010, in MainLoop (self=<App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>),
throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2740
#41 0x00000039a7ce0980 in fast_function (nk=<optimized out>, na=1, n=1,
pp_stack=0x7fffffffdc50, func=<function at remote 0x8298c0>)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4184
#42 call_function (oparg=<optimized out>, pp_stack=0x7fffffffdc50)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:4119
#43 PyEval_EvalFrameEx (
f=f@entry=Frame 0x6cc750, for file segfault-gui.py, line 23, in <module> (), throwflag=throwflag@entry=0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:2740
#44 0x00000039a7ce21dd in PyEval_EvalCodeEx (co=co@entry=0x7ffff7ed79b0,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject a---Type <return> to continue, or q <return> to quit---
t remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, args=args@entry=0x0, argcount=argcount@entry=0,
kws=kws@entry=0x0, kwcount=kwcount@entry=0, defs=defs@entry=0x0,
defcount=defcount@entry=0, closure=closure@entry=0x0)
at /usr/src/debug/Python-2.7.5/Python/ceval.c:3330
#45 0x00000039a7ce22e2 in PyEval_EvalCode (co=co@entry=0x7ffff7ed79b0,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}) at /usr/src/debug/Python-2.7.5/Python/ceval.c:689
#46 0x00000039a7cfb71f in run_mod (mod=<optimized out>,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, ---Type <return> to continue, or q <return> to quit---
y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, flags=flags@entry=0x7fffffffdeb0,
arena=arena@entry=0x6b9570)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:1373
#47 0x00000039a7cfc8de in PyRun_FileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
start=start@entry=257,
globals=globals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>},
locals=locals@entry={'script': '\nimport matplotlib.pyplot as pl\nimport numpy as np\n\nx = np.linspace(0, 1, 100)\ny = x ** 1.4\npl.figure()\npl.plot(x, y)\npl.show()\n', '__builtins__': <module at remote 0x7ffff7f7eb08>, '__file__': 'segfault-gui.py', '__package__': None, 'frame': <Main(this=<PySwigObject at remote 0xe86cf0>) at remote 0x7ffff1928d50>, '__name__': '__main__', 'app': <App(this=<PySwigObject at remote 0x7ffff7ef0360>, stdioWin=None, saveStdio=(<file at remote 0x7ffff7f99150>, <file at remote 0x7ffff7f991e0>)) at remote 0x7ffff1977490>, 'Main': <type at remote 0xc86090>, '__doc__': None, 'wx': <module at remote 0x7ffff7ee8910>}, closeit=closeit@entry=1,
flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:1359
#48 0x00000039a7cfdb69 in PyRun_SimpleFileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
closeit=closeit@entry=1, flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:951
#49 0x00000039a7cfe093 in PyRun_AnyFileExFlags (fp=fp@entry=0x6b7bc0,
filename=filename@entry=0x7fffffffe3aa "segfault-gui.py",
closeit=closeit@entry=1, flags=flags@entry=0x7fffffffdeb0)
at /usr/src/debug/Python-2.7.5/Python/pythonrun.c:755
#50 0x00000039a7d0eb7f in Py_Main (argc=<optimized out>, argv=<optimized out>)
at /usr/src/debug/Python-2.7.5/Modules/main.c:640
#51 0x0000003a7ee21d65 in __libc_start_main (main=0x4006f0 <main>, argc=2,
argv=0x7fffffffe078, init=<optimized out>, fini=<optimized out>,
rtld_fini=<optimized out>, stack_end=0x7fffffffe068) at libc-start.c:285
#52 0x0000000000400721 in _start ()
|
ImportError
|
def errorbar(
self,
x,
y,
yerr=None,
xerr=None,
fmt="",
ecolor=None,
elinewidth=None,
capsize=None,
barsabove=False,
lolims=False,
uplims=False,
xlolims=False,
xuplims=False,
errorevery=1,
capthick=None,
**kwargs,
):
"""
Plot an errorbar graph.
Plot x versus y with error deltas in yerr and xerr.
Vertical errorbars are plotted if yerr is not None.
Horizontal errorbars are plotted if xerr is not None.
x, y, xerr, and yerr can all be scalars, which plots a
single error bar at x, y.
Parameters
----------
x : scalar or array-like
y : scalar or array-like
xerr/yerr : scalar or array-like, shape(N,) or shape(2,N), optional
If a scalar number, len(N) array-like object, or a N-element
array-like object, errorbars are drawn at +/-value relative
to the data. Default is None.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
fmt : plot format string, optional, default: None
The plot format symbol. If fmt is 'none' (case-insensitive),
only the errorbars are plotted. This is used for adding
errorbars to a bar plot, for example. Default is '',
an empty plot format string; properties are
then identical to the defaults for :meth:`plot`.
ecolor : mpl color, optional, default: None
A matplotlib color arg which gives the color the errorbar lines;
if None, use the color of the line connecting the markers.
elinewidth : scalar, optional, default: None
The linewidth of the errorbar lines. If None, use the linewidth.
capsize : scalar, optional, default: None
The length of the error bar caps in points; if None, it will
take the value from ``errorbar.capsize``
:data:`rcParam<matplotlib.rcParams>`.
capthick : scalar, optional, default: None
An alias kwarg to markeredgewidth (a.k.a. - mew). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if mew or markeredgewidth are given,
then they will over-ride capthick. This may change in future
releases.
barsabove : bool, optional, default: False
if True , will plot the errorbars above the plot
symbols. Default is below.
lolims / uplims / xlolims / xuplims : bool, optional, default:None
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*. To use limits with inverted
axes, :meth:`set_xlim` or :meth:`set_ylim` must be called
before :meth:`errorbar`.
errorevery : positive integer, optional, default:1
subsamples the errorbars. e.g., if errorevery=5, errorbars for
every 5-th datapoint will be plotted. The data plot itself still
shows all data points.
Returns
-------
plotline : :class:`~matplotlib.lines.Line2D` instance
x, y plot markers and/or line
caplines : list of :class:`~matplotlib.lines.Line2D` instances
error bar cap
barlinecols : list of :class:`~matplotlib.collections.LineCollection`
horizontal and vertical error ranges.
Other Parameters
----------------
**kwargs :
All other keyword arguments are passed on to the plot
command for the markers. For example, this code makes big red
squares with thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s', mfc='red',
mec='green', ms=20, mew=4)
where mfc, mec, ms and mew are aliases for the longer
property names, markerfacecolor, markeredgecolor, markersize
and markeredgewidth.
Valid kwargs for the marker properties are
%(Line2D)s
"""
kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
# anything that comes in as 'None', drop so the default thing
# happens down stream
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs.setdefault("zorder", 2)
if errorevery < 1:
raise ValueError("errorevery has to be a strictly positive integer")
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold:
self.cla()
holdstate = self._hold
self._hold = True
if fmt is None:
fmt = "none"
msg = (
"Use of None object as fmt keyword argument to "
+ "suppress plotting of data values is deprecated "
+ 'since 1.4; use the string "none" instead.'
)
warnings.warn(msg, mplDeprecation, stacklevel=1)
plot_line = fmt.lower() != "none"
label = kwargs.pop("label", None)
fmt_style_kwargs = {
k: v
for k, v in zip(("linestyle", "marker", "color"), _process_plot_format(fmt))
if v is not None
}
if fmt == "none":
# Remove alpha=0 color that _process_plot_format returns
fmt_style_kwargs.pop("color")
if "color" in kwargs or "color" in fmt_style_kwargs or ecolor is not None:
base_style = {}
if "color" in kwargs:
base_style["color"] = kwargs.pop("color")
else:
base_style = six.next(self._get_lines.prop_cycler)
base_style["label"] = "_nolegend_"
base_style.update(fmt_style_kwargs)
if "color" not in base_style:
base_style["color"] = "C0"
if ecolor is None:
ecolor = base_style["color"]
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr] * len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr] * len(y)
# make the style dict for the 'normal' plot line
plot_line_style = dict(base_style)
plot_line_style.update(**kwargs)
if barsabove:
plot_line_style["zorder"] = kwargs["zorder"] - 0.1
else:
plot_line_style["zorder"] = kwargs["zorder"] + 0.1
# make the style dict for the line collections (the bars)
eb_lines_style = dict(base_style)
eb_lines_style.pop("marker", None)
eb_lines_style.pop("linestyle", None)
eb_lines_style["color"] = ecolor
if elinewidth:
eb_lines_style["linewidth"] = elinewidth
elif "linewidth" in kwargs:
eb_lines_style["linewidth"] = kwargs["linewidth"]
for key in ("transform", "alpha", "zorder", "rasterized"):
if key in kwargs:
eb_lines_style[key] = kwargs[key]
# set up cap style dictionary
eb_cap_style = dict(base_style)
# eject any marker information from format string
eb_cap_style.pop("marker", None)
eb_cap_style.pop("ls", None)
eb_cap_style["linestyle"] = "none"
if capsize is None:
capsize = rcParams["errorbar.capsize"]
if capsize > 0:
eb_cap_style["markersize"] = 2.0 * capsize
if capthick is not None:
eb_cap_style["markeredgewidth"] = capthick
# For backwards-compat, allow explicit setting of
# 'markeredgewidth' to over-ride capthick.
for key in ("markeredgewidth", "transform", "alpha", "zorder", "rasterized"):
if key in kwargs:
eb_cap_style[key] = kwargs[key]
eb_cap_style["color"] = ecolor
data_line = None
if plot_line:
data_line = mlines.Line2D(x, y, **plot_line_style)
self.add_line(data_line)
barcols = []
caplines = []
# arrays fine here, they are booleans and hence not units
def _bool_asarray_helper(d, expected):
if not iterable(d):
return np.asarray([d] * expected, bool)
else:
return np.asarray(d, bool)
lolims = _bool_asarray_helper(lolims, len(x))
uplims = _bool_asarray_helper(uplims, len(x))
xlolims = _bool_asarray_helper(xlolims, len(x))
xuplims = _bool_asarray_helper(xuplims, len(x))
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs) == len(ys)
assert len(xs) == len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
def extract_err(err, data):
"""private function to compute error bars
Parameters
----------
err : iterable
xerr or yerr from errorbar
data : iterable
x or y from errorbar
"""
try:
a, b = err
except (TypeError, ValueError):
pass
else:
if iterable(a) and iterable(b):
# using list comps rather than arrays to preserve units
low = [thisx - thiserr for (thisx, thiserr) in cbook.safezip(data, a)]
high = [thisx + thiserr for (thisx, thiserr) in cbook.safezip(data, b)]
return low, high
# Check if xerr is scalar or symmetric. Asymmetric is handled
# above. This prevents Nx2 arrays from accidentally
# being accepted, when the user meant the 2xN transpose.
# special case for empty lists
if len(err) > 1:
fe = safe_first_element(err)
if len(err) != len(data) or np.size(fe) > 1:
raise ValueError("err must be [ scalar | N, Nx1 or 2xN array-like ]")
# using list comps rather than arrays to preserve units
low = [thisx - thiserr for (thisx, thiserr) in cbook.safezip(data, err)]
high = [thisx + thiserr for (thisx, thiserr) in cbook.safezip(data, err)]
return low, high
if xerr is not None:
left, right = extract_err(xerr, x)
# select points without upper/lower limits in x and
# draw normal errorbars for these points
noxlims = ~(xlolims | xuplims)
if noxlims.any() or len(noxlims) == 0:
yo, _ = xywhere(y, right, noxlims & everymask)
lo, ro = xywhere(left, right, noxlims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(lo, yo, marker="|", **eb_cap_style))
caplines.append(mlines.Line2D(ro, yo, marker="|", **eb_cap_style))
if xlolims.any():
yo, _ = xywhere(y, right, xlolims & everymask)
lo, ro = xywhere(x, right, xlolims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
rightup, yup = xywhere(right, y, xlolims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETLEFTBASE
else:
marker = mlines.CARETRIGHTBASE
caplines.append(
mlines.Line2D(rightup, yup, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xlo, ylo = xywhere(x, y, xlolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker="|", **eb_cap_style))
if xuplims.any():
yo, _ = xywhere(y, right, xuplims & everymask)
lo, ro = xywhere(left, x, xuplims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
leftlo, ylo = xywhere(left, y, xuplims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETRIGHTBASE
else:
marker = mlines.CARETLEFTBASE
caplines.append(
mlines.Line2D(leftlo, ylo, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xup, yup = xywhere(x, y, xuplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker="|", **eb_cap_style))
if yerr is not None:
lower, upper = extract_err(yerr, y)
# select points without upper/lower limits in y and
# draw normal errorbars for these points
noylims = ~(lolims | uplims)
if noylims.any() or len(noylims) == 0:
xo, _ = xywhere(x, lower, noylims & everymask)
lo, uo = xywhere(lower, upper, noylims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(xo, lo, marker="_", **eb_cap_style))
caplines.append(mlines.Line2D(xo, uo, marker="_", **eb_cap_style))
if lolims.any():
xo, _ = xywhere(x, lower, lolims & everymask)
lo, uo = xywhere(y, upper, lolims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xup, upperup = xywhere(x, upper, lolims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETDOWNBASE
else:
marker = mlines.CARETUPBASE
caplines.append(
mlines.Line2D(xup, upperup, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xlo, ylo = xywhere(x, y, lolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker="_", **eb_cap_style))
if uplims.any():
xo, _ = xywhere(x, lower, uplims & everymask)
lo, uo = xywhere(lower, y, uplims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xlo, lowerlo = xywhere(x, lower, uplims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETUPBASE
else:
marker = mlines.CARETDOWNBASE
caplines.append(
mlines.Line2D(xlo, lowerlo, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xup, yup = xywhere(x, y, uplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker="_", **eb_cap_style))
for l in caplines:
self.add_line(l)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer(
(data_line, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label,
)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
|
def errorbar(
self,
x,
y,
yerr=None,
xerr=None,
fmt="",
ecolor=None,
elinewidth=None,
capsize=None,
barsabove=False,
lolims=False,
uplims=False,
xlolims=False,
xuplims=False,
errorevery=1,
capthick=None,
**kwargs,
):
"""
Plot an errorbar graph.
Plot x versus y with error deltas in yerr and xerr.
Vertical errorbars are plotted if yerr is not None.
Horizontal errorbars are plotted if xerr is not None.
x, y, xerr, and yerr can all be scalars, which plots a
single error bar at x, y.
Parameters
----------
x : scalar or array-like
y : scalar or array-like
xerr/yerr : scalar or array-like, shape(N,) or shape(2,N), optional
If a scalar number, len(N) array-like object, or a N-element
array-like object, errorbars are drawn at +/-value relative
to the data. Default is None.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
fmt : plot format string, optional, default: None
The plot format symbol. If fmt is 'none' (case-insensitive),
only the errorbars are plotted. This is used for adding
errorbars to a bar plot, for example. Default is '',
an empty plot format string; properties are
then identical to the defaults for :meth:`plot`.
ecolor : mpl color, optional, default: None
A matplotlib color arg which gives the color the errorbar lines;
if None, use the color of the line connecting the markers.
elinewidth : scalar, optional, default: None
The linewidth of the errorbar lines. If None, use the linewidth.
capsize : scalar, optional, default: None
The length of the error bar caps in points; if None, it will
take the value from ``errorbar.capsize``
:data:`rcParam<matplotlib.rcParams>`.
capthick : scalar, optional, default: None
An alias kwarg to markeredgewidth (a.k.a. - mew). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if mew or markeredgewidth are given,
then they will over-ride capthick. This may change in future
releases.
barsabove : bool, optional, default: False
if True , will plot the errorbars above the plot
symbols. Default is below.
lolims / uplims / xlolims / xuplims : bool, optional, default:None
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*. To use limits with inverted
axes, :meth:`set_xlim` or :meth:`set_ylim` must be called
before :meth:`errorbar`.
errorevery : positive integer, optional, default:1
subsamples the errorbars. e.g., if errorevery=5, errorbars for
every 5-th datapoint will be plotted. The data plot itself still
shows all data points.
Returns
-------
plotline : :class:`~matplotlib.lines.Line2D` instance
x, y plot markers and/or line
caplines : list of :class:`~matplotlib.lines.Line2D` instances
error bar cap
barlinecols : list of :class:`~matplotlib.collections.LineCollection`
horizontal and vertical error ranges.
Other Parameters
----------------
**kwargs :
All other keyword arguments are passed on to the plot
command for the markers. For example, this code makes big red
squares with thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s', mfc='red',
mec='green', ms=20, mew=4)
where mfc, mec, ms and mew are aliases for the longer
property names, markerfacecolor, markeredgecolor, markersize
and markeredgewidth.
Valid kwargs for the marker properties are
%(Line2D)s
"""
kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
# anything that comes in as 'None', drop so the default thing
# happens down stream
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs.setdefault("zorder", 2)
if errorevery < 1:
raise ValueError("errorevery has to be a strictly positive integer")
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold:
self.cla()
holdstate = self._hold
self._hold = True
if fmt is None:
fmt = "none"
msg = (
"Use of None object as fmt keyword argument to "
+ "suppress plotting of data values is deprecated "
+ 'since 1.4; use the string "none" instead.'
)
warnings.warn(msg, mplDeprecation, stacklevel=1)
plot_line = fmt.lower() != "none"
label = kwargs.pop("label", None)
fmt_style_kwargs = {
k: v
for k, v in zip(("linestyle", "marker", "color"), _process_plot_format(fmt))
if v is not None
}
if fmt == "none":
# Remove alpha=0 color that _process_plot_format returns
fmt_style_kwargs.pop("color")
if "color" in kwargs or "color" in fmt_style_kwargs or ecolor is not None:
base_style = {}
if "color" in kwargs:
base_style["color"] = kwargs.pop("color")
else:
base_style = six.next(self._get_lines.prop_cycler)
base_style["label"] = "_nolegend_"
base_style.update(fmt_style_kwargs)
if "color" not in base_style:
base_style["color"] = "C0"
if ecolor is None:
ecolor = base_style["color"]
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr] * len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr] * len(y)
# make the style dict for the 'normal' plot line
plot_line_style = dict(base_style)
plot_line_style.update(**kwargs)
if barsabove:
plot_line_style["zorder"] = kwargs["zorder"] - 0.1
else:
plot_line_style["zorder"] = kwargs["zorder"] + 0.1
# make the style dict for the line collections (the bars)
eb_lines_style = dict(base_style)
eb_lines_style.pop("marker", None)
eb_lines_style.pop("linestyle", None)
eb_lines_style["color"] = ecolor
if elinewidth:
eb_lines_style["linewidth"] = elinewidth
elif "linewidth" in kwargs:
eb_lines_style["linewidth"] = kwargs["linewidth"]
for key in ("transform", "alpha", "zorder", "rasterized"):
if key in kwargs:
eb_lines_style[key] = kwargs[key]
# set up cap style dictionary
eb_cap_style = dict(base_style)
# eject any marker information from format string
eb_cap_style.pop("marker", None)
eb_cap_style.pop("ls", None)
eb_cap_style["linestyle"] = "none"
if capsize is None:
capsize = rcParams["errorbar.capsize"]
if capsize > 0:
eb_cap_style["markersize"] = 2.0 * capsize
if capthick is not None:
eb_cap_style["markeredgewidth"] = capthick
# For backwards-compat, allow explicit setting of
# 'markeredgewidth' to over-ride capthick.
for key in ("markeredgewidth", "transform", "alpha", "zorder", "rasterized"):
if key in kwargs:
eb_cap_style[key] = kwargs[key]
eb_cap_style["color"] = ecolor
data_line = None
if plot_line:
data_line = mlines.Line2D(x, y, **plot_line_style)
self.add_line(data_line)
barcols = []
caplines = []
# arrays fine here, they are booleans and hence not units
def _bool_asarray_helper(d, expected):
if not iterable(d):
return np.asarray([d] * expected, bool)
else:
return np.asarray(d, bool)
lolims = _bool_asarray_helper(lolims, len(x))
uplims = _bool_asarray_helper(uplims, len(x))
xlolims = _bool_asarray_helper(xlolims, len(x))
xuplims = _bool_asarray_helper(xuplims, len(x))
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs) == len(ys)
assert len(xs) == len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
def extract_err(err, data):
"""private function to compute error bars
Parameters
----------
err : iterable
xerr or yerr from errorbar
data : iterable
x or y from errorbar
"""
try:
a, b = err
except (TypeError, ValueError):
pass
else:
if iterable(a) and iterable(b):
# using list comps rather than arrays to preserve units
low = [thisx - thiserr for (thisx, thiserr) in cbook.safezip(data, a)]
high = [thisx + thiserr for (thisx, thiserr) in cbook.safezip(data, b)]
return low, high
# Check if xerr is scalar or symmetric. Asymmetric is handled
# above. This prevents Nx2 arrays from accidentally
# being accepted, when the user meant the 2xN transpose.
# special case for empty lists
if len(err) > 1:
fe = safe_first_element(err)
if len(err) != len(data) or np.size(fe) > 1:
raise ValueError("err must be [ scalar | N, Nx1 or 2xN array-like ]")
# using list comps rather than arrays to preserve units
low = [thisx - thiserr for (thisx, thiserr) in cbook.safezip(data, err)]
high = [thisx + thiserr for (thisx, thiserr) in cbook.safezip(data, err)]
return low, high
if xerr is not None:
left, right = extract_err(xerr, x)
# select points without upper/lower limits in x and
# draw normal errorbars for these points
noxlims = ~(xlolims | xuplims)
if noxlims.any():
yo, _ = xywhere(y, right, noxlims & everymask)
lo, ro = xywhere(left, right, noxlims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(lo, yo, marker="|", **eb_cap_style))
caplines.append(mlines.Line2D(ro, yo, marker="|", **eb_cap_style))
if xlolims.any():
yo, _ = xywhere(y, right, xlolims & everymask)
lo, ro = xywhere(x, right, xlolims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
rightup, yup = xywhere(right, y, xlolims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETLEFTBASE
else:
marker = mlines.CARETRIGHTBASE
caplines.append(
mlines.Line2D(rightup, yup, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xlo, ylo = xywhere(x, y, xlolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker="|", **eb_cap_style))
if xuplims.any():
yo, _ = xywhere(y, right, xuplims & everymask)
lo, ro = xywhere(left, x, xuplims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
leftlo, ylo = xywhere(left, y, xuplims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETRIGHTBASE
else:
marker = mlines.CARETLEFTBASE
caplines.append(
mlines.Line2D(leftlo, ylo, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xup, yup = xywhere(x, y, xuplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker="|", **eb_cap_style))
if yerr is not None:
lower, upper = extract_err(yerr, y)
# select points without upper/lower limits in y and
# draw normal errorbars for these points
noylims = ~(lolims | uplims)
if noylims.any():
xo, _ = xywhere(x, lower, noylims & everymask)
lo, uo = xywhere(lower, upper, noylims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(xo, lo, marker="_", **eb_cap_style))
caplines.append(mlines.Line2D(xo, uo, marker="_", **eb_cap_style))
if lolims.any():
xo, _ = xywhere(x, lower, lolims & everymask)
lo, uo = xywhere(y, upper, lolims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xup, upperup = xywhere(x, upper, lolims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETDOWNBASE
else:
marker = mlines.CARETUPBASE
caplines.append(
mlines.Line2D(xup, upperup, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xlo, ylo = xywhere(x, y, lolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker="_", **eb_cap_style))
if uplims.any():
xo, _ = xywhere(x, lower, uplims & everymask)
lo, uo = xywhere(lower, y, uplims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xlo, lowerlo = xywhere(x, lower, uplims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETUPBASE
else:
marker = mlines.CARETDOWNBASE
caplines.append(
mlines.Line2D(xlo, lowerlo, ls="None", marker=marker, **eb_cap_style)
)
if capsize > 0:
xup, yup = xywhere(x, y, uplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker="_", **eb_cap_style))
for l in caplines:
self.add_line(l)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer(
(data_line, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label,
)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
|
https://github.com/matplotlib/matplotlib/issues/9699
|
Traceback (most recent call last):
File "legend_test.py", line 6, in <module>
plt.legend()
File "/usr/lib/python3/dist-packages/matplotlib/pyplot.py", line 3553, in legend
ret = gca().legend(*args, **kwargs)
File "/usr/lib/python3/dist-packages/matplotlib/axes/_axes.py", line 538, in legend
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
File "/usr/lib/python3/dist-packages/matplotlib/legend.py", line 385, in __init__
self._init_legend_box(handles, labels, markerfirst)
File "/usr/lib/python3/dist-packages/matplotlib/legend.py", line 654, in _init_legend_box
fontsize, handlebox))
File "/usr/lib/python3/dist-packages/matplotlib/legend_handler.py", line 119, in legend_artist
fontsize, handlebox.get_transform())
File "/usr/lib/python3/dist-packages/matplotlib/legend_handler.py", line 477, in create_artists
self.update_prop(coll, barlinecols[0], legend)
IndexError: tuple index out of range
|
IndexError
|
def _autolev(self, N):
"""
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N + 1)
zmax = self.zmax
zmin = self.zmin
lev = self.locator.tick_values(zmin, zmax)
self._auto = True
if self.filled:
return lev
# For line contours, drop levels outside the data range.
return lev[(lev > zmin) & (lev < zmax)]
|
def _autolev(self, z, N):
"""
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N + 1)
zmax = self.zmax
zmin = self.zmin
lev = self.locator.tick_values(zmin, zmax)
self._auto = True
if self.filled:
return lev
# For line contours, drop levels outside the data range.
return lev[(lev > zmin) & (lev < zmax)]
|
https://github.com/matplotlib/matplotlib/issues/6270
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-37a1d06c84a1> in <module>()
30 # the label
31 plt.figure()
---> 32 CS = plt.contour(X, Y, Z,locator=matplotlib.ticker.LinearLocator(10))
33 plt.clabel(CS, inline=1, fontsize=10)
34 plt.title('Simplest default with labels')
/home/w2naf/code/matplotlib/lib/matplotlib/pyplot.pyc in contour(*args, **kwargs)
2764 ax.hold(hold)
2765 try:
-> 2766 ret = ax.contour(*args, **kwargs)
2767 finally:
2768 ax.hold(washold)
/home/w2naf/code/matplotlib/lib/matplotlib/__init__.pyc in inner(ax, *args, **kwargs)
1810 warnings.warn(msg % (label_namer, func.__name__),
1811 RuntimeWarning, stacklevel=2)
-> 1812 return func(ax, *args, **kwargs)
1813 pre_doc = inner.__doc__
1814 if pre_doc is None:
/home/w2naf/code/matplotlib/lib/matplotlib/axes/_axes.pyc in contour(self, *args, **kwargs)
5642 self.cla()
5643 kwargs['filled'] = False
-> 5644 return mcontour.QuadContourSet(self, *args, **kwargs)
5645 contour.__doc__ = mcontour.QuadContourSet.contour_doc
5646
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
1422 are described in QuadContourSet.contour_doc.
1423 """
-> 1424 ContourSet.__init__(self, ax, *args, **kwargs)
1425
1426 def _process_args(self, *args, **kwargs):
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
861 self._transform = kwargs.get('transform', None)
862
--> 863 self._process_args(*args, **kwargs)
864 self._process_levels()
865
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _process_args(self, *args, **kwargs)
1443 self._corner_mask = mpl.rcParams['contour.corner_mask']
1444
-> 1445 x, y, z = self._contour_args(args, kwargs)
1446
1447 _mask = ma.getmask(z)
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_args(self, args, kwargs)
1538 warnings.warn('Log scale: values of z <= 0 have been masked')
1539 self.zmin = z.min()
-> 1540 self._contour_level_args(z, args)
1541 return (x, y, z)
1542
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_level_args(self, z, args)
1167 if self.levels is None:
1168 if len(args) == 0:
-> 1169 lev = self._autolev(z, 7)
1170 else:
1171 level_arg = args[0]
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _autolev(self, z, N)
1149 zmax = self.zmax
1150 zmin = self.zmin
-> 1151 lev = self.locator.tick_values(zmin, zmax)
1152 self._auto = True
1153 if self.filled:
/home/w2naf/code/matplotlib/lib/matplotlib/ticker.pyc in tick_values(self, vmin, vmax)
1173 vmin, vmax = vmax, vmin
1174
-> 1175 if (vmin, vmax) in self.presets:
1176 return self.presets[(vmin, vmax)]
1177
TypeError: unhashable type: 'MaskedArray'
|
TypeError
|
def _contour_level_args(self, z, args):
"""
Determine the contour levels and store in self.levels.
"""
if self.filled:
fn = "contourf"
else:
fn = "contour"
self._auto = False
if self.levels is None:
if len(args) == 0:
lev = self._autolev(7)
else:
level_arg = args[0]
try:
if type(level_arg) == int:
lev = self._autolev(level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except:
raise TypeError("Last %s arg must give levels; see help(%s)" % (fn, fn))
self.levels = lev
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.amin(np.diff(self.levels)) <= 0.0:
if hasattr(self, "_corner_mask") and self._corner_mask == "legacy":
warnings.warn("Contour levels are not increasing")
else:
raise ValueError("Contour levels must be increasing")
|
def _contour_level_args(self, z, args):
"""
Determine the contour levels and store in self.levels.
"""
if self.filled:
fn = "contourf"
else:
fn = "contour"
self._auto = False
if self.levels is None:
if len(args) == 0:
lev = self._autolev(z, 7)
else:
level_arg = args[0]
try:
if type(level_arg) == int:
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except:
raise TypeError("Last %s arg must give levels; see help(%s)" % (fn, fn))
self.levels = lev
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.amin(np.diff(self.levels)) <= 0.0:
if hasattr(self, "_corner_mask") and self._corner_mask == "legacy":
warnings.warn("Contour levels are not increasing")
else:
raise ValueError("Contour levels must be increasing")
|
https://github.com/matplotlib/matplotlib/issues/6270
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-37a1d06c84a1> in <module>()
30 # the label
31 plt.figure()
---> 32 CS = plt.contour(X, Y, Z,locator=matplotlib.ticker.LinearLocator(10))
33 plt.clabel(CS, inline=1, fontsize=10)
34 plt.title('Simplest default with labels')
/home/w2naf/code/matplotlib/lib/matplotlib/pyplot.pyc in contour(*args, **kwargs)
2764 ax.hold(hold)
2765 try:
-> 2766 ret = ax.contour(*args, **kwargs)
2767 finally:
2768 ax.hold(washold)
/home/w2naf/code/matplotlib/lib/matplotlib/__init__.pyc in inner(ax, *args, **kwargs)
1810 warnings.warn(msg % (label_namer, func.__name__),
1811 RuntimeWarning, stacklevel=2)
-> 1812 return func(ax, *args, **kwargs)
1813 pre_doc = inner.__doc__
1814 if pre_doc is None:
/home/w2naf/code/matplotlib/lib/matplotlib/axes/_axes.pyc in contour(self, *args, **kwargs)
5642 self.cla()
5643 kwargs['filled'] = False
-> 5644 return mcontour.QuadContourSet(self, *args, **kwargs)
5645 contour.__doc__ = mcontour.QuadContourSet.contour_doc
5646
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
1422 are described in QuadContourSet.contour_doc.
1423 """
-> 1424 ContourSet.__init__(self, ax, *args, **kwargs)
1425
1426 def _process_args(self, *args, **kwargs):
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
861 self._transform = kwargs.get('transform', None)
862
--> 863 self._process_args(*args, **kwargs)
864 self._process_levels()
865
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _process_args(self, *args, **kwargs)
1443 self._corner_mask = mpl.rcParams['contour.corner_mask']
1444
-> 1445 x, y, z = self._contour_args(args, kwargs)
1446
1447 _mask = ma.getmask(z)
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_args(self, args, kwargs)
1538 warnings.warn('Log scale: values of z <= 0 have been masked')
1539 self.zmin = z.min()
-> 1540 self._contour_level_args(z, args)
1541 return (x, y, z)
1542
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_level_args(self, z, args)
1167 if self.levels is None:
1168 if len(args) == 0:
-> 1169 lev = self._autolev(z, 7)
1170 else:
1171 level_arg = args[0]
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _autolev(self, z, N)
1149 zmax = self.zmax
1150 zmin = self.zmin
-> 1151 lev = self.locator.tick_values(zmin, zmax)
1152 self._auto = True
1153 if self.filled:
/home/w2naf/code/matplotlib/lib/matplotlib/ticker.pyc in tick_values(self, vmin, vmax)
1173 vmin, vmax = vmax, vmin
1174
-> 1175 if (vmin, vmax) in self.presets:
1176 return self.presets[(vmin, vmax)]
1177
TypeError: unhashable type: 'MaskedArray'
|
TypeError
|
def _contour_args(self, args, kwargs):
if self.filled:
fn = "contourf"
else:
fn = "contour"
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
args = args[1:]
elif Nargs <= 4:
x, y, z = self._check_xyz(args[:3], kwargs)
args = args[3:]
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn, fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = float(z.max())
self.zmin = float(z.min())
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn("Log scale: values of z <= 0 have been masked")
self.zmin = float(z.min())
self._contour_level_args(z, args)
return (x, y, z)
|
def _contour_args(self, args, kwargs):
if self.filled:
fn = "contourf"
else:
fn = "contour"
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
args = args[1:]
elif Nargs <= 4:
x, y, z = self._check_xyz(args[:3], kwargs)
args = args[3:]
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn, fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn("Log scale: values of z <= 0 have been masked")
self.zmin = z.min()
self._contour_level_args(z, args)
return (x, y, z)
|
https://github.com/matplotlib/matplotlib/issues/6270
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-37a1d06c84a1> in <module>()
30 # the label
31 plt.figure()
---> 32 CS = plt.contour(X, Y, Z,locator=matplotlib.ticker.LinearLocator(10))
33 plt.clabel(CS, inline=1, fontsize=10)
34 plt.title('Simplest default with labels')
/home/w2naf/code/matplotlib/lib/matplotlib/pyplot.pyc in contour(*args, **kwargs)
2764 ax.hold(hold)
2765 try:
-> 2766 ret = ax.contour(*args, **kwargs)
2767 finally:
2768 ax.hold(washold)
/home/w2naf/code/matplotlib/lib/matplotlib/__init__.pyc in inner(ax, *args, **kwargs)
1810 warnings.warn(msg % (label_namer, func.__name__),
1811 RuntimeWarning, stacklevel=2)
-> 1812 return func(ax, *args, **kwargs)
1813 pre_doc = inner.__doc__
1814 if pre_doc is None:
/home/w2naf/code/matplotlib/lib/matplotlib/axes/_axes.pyc in contour(self, *args, **kwargs)
5642 self.cla()
5643 kwargs['filled'] = False
-> 5644 return mcontour.QuadContourSet(self, *args, **kwargs)
5645 contour.__doc__ = mcontour.QuadContourSet.contour_doc
5646
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
1422 are described in QuadContourSet.contour_doc.
1423 """
-> 1424 ContourSet.__init__(self, ax, *args, **kwargs)
1425
1426 def _process_args(self, *args, **kwargs):
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in __init__(self, ax, *args, **kwargs)
861 self._transform = kwargs.get('transform', None)
862
--> 863 self._process_args(*args, **kwargs)
864 self._process_levels()
865
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _process_args(self, *args, **kwargs)
1443 self._corner_mask = mpl.rcParams['contour.corner_mask']
1444
-> 1445 x, y, z = self._contour_args(args, kwargs)
1446
1447 _mask = ma.getmask(z)
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_args(self, args, kwargs)
1538 warnings.warn('Log scale: values of z <= 0 have been masked')
1539 self.zmin = z.min()
-> 1540 self._contour_level_args(z, args)
1541 return (x, y, z)
1542
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _contour_level_args(self, z, args)
1167 if self.levels is None:
1168 if len(args) == 0:
-> 1169 lev = self._autolev(z, 7)
1170 else:
1171 level_arg = args[0]
/home/w2naf/code/matplotlib/lib/matplotlib/contour.pyc in _autolev(self, z, N)
1149 zmax = self.zmax
1150 zmin = self.zmin
-> 1151 lev = self.locator.tick_values(zmin, zmax)
1152 self._auto = True
1153 if self.filled:
/home/w2naf/code/matplotlib/lib/matplotlib/ticker.pyc in tick_values(self, vmin, vmax)
1173 vmin, vmax = vmax, vmin
1174
-> 1175 if (vmin, vmax) in self.presets:
1176 return self.presets[(vmin, vmax)]
1177
TypeError: unhashable type: 'MaskedArray'
|
TypeError
|
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s = s.replace(b"\\", b"\\\\")
s = s.replace(b"(", b"\\(")
s = s.replace(b")", b"\\)")
s = s.replace(b"'", b"\\251")
s = s.replace(b"`", b"\\301")
s = re.sub(rb"[^ -~\n]", lambda x: rb"\%03o" % ord(x.group()), s)
return s.decode("ascii")
|
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s = s.replace("\\", "\\\\")
s = s.replace("(", "\\(")
s = s.replace(")", "\\)")
s = s.replace("'", "\\251")
s = s.replace("`", "\\301")
s = re.sub(r"[^ -~\n]", lambda x: r"\%03o" % ord(x.group()), s)
return s
|
https://github.com/matplotlib/matplotlib/issues/6226
|
Traceback (most recent call last):
File "/home/tps/PyCharmProjects/test/test_PlotWindow.py", line 323, in saveFigRButtonClicked
savefig(fname)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/pyplot.py", line 688, in savefig
res = fig.savefig(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/figure.py", line 1565, in savefig
self.canvas.print_figure(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_qt5agg.py", line 196, in print_figure
FigureCanvasAgg.print_figure(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backend_bases.py", line 2232, in print_figure
**kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_ps.py", line 995, in print_eps
return self._print_ps(outfile, 'eps', *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_ps.py", line 1023, in _print_ps
**kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_ps.py", line 1113, in _print_figure
self.figure.draw(renderer)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/artist.py", line 61, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/figure.py", line 1159, in draw
func(*args)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/artist.py", line 61, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/axes/_base.py", line 2324, in draw
a.draw(renderer)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/artist.py", line 61, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/matplotlib/collections.py", line 1694, in draw
renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_ps.py", line 867, in draw_gouraud_triangles
stream = quote_ps_string(streamarr.tostring())
File "/usr/local/lib/python3.4/dist-packages/matplotlib/backends/backend_ps.py", line 170, in quote_ps_string
s=s.replace("\\", "\\\\")
TypeError: expected bytes, bytearray or buffer compatible object
|
TypeError
|
def scatter(
self,
x,
y,
s=None,
c=None,
marker="o",
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=None,
linewidths=None,
verts=None,
edgecolors=None,
**kwargs,
):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same length.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional
size in points^2. Default is `rcParams['lines.markersize'] ** 2`.
c : color, sequence, or sequence of color, optional, default: 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however, including the case of a single
row to specify the same color for all points.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports. `marker` can be either
an instance of the class or the text shorthand for a particular
marker.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,).
edgecolors : color or sequence of color, optional, default: None
If None, defaults to 'face'
If 'face', the edge color will always be the same as
the face color.
If it is 'none', the patch boundary will not
be drawn.
For non-filled markers, the `edgecolors` kwarg
is ignored and forced to 'face' internally.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
------
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Fundamentally, scatter works with 1-D arrays; `x`, `y`, `s`,
and `c` may be input as 2-D arrays, but within scatter
they will be flattened. The exception is `c`, which
will be flattened only if its size matches the size of `x`
and `y`.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
"""
if not self._hold:
self.cla()
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
facecolors = None
edgecolors = kwargs.pop("edgecolor", edgecolors)
fc = kwargs.pop("facecolors", None)
fc = kwargs.pop("facecolor", fc)
if fc is not None:
facecolors = fc
co = kwargs.pop("color", None)
if co is not None:
try:
mcolors.colorConverter.to_rgba_array(co)
except ValueError:
raise ValueError(
"'color' kwarg must be an mpl color"
" spec or sequence of color specs.\n"
"For a sequence of values to be"
" color-mapped, use the 'c' kwarg instead."
)
if edgecolors is None:
edgecolors = co
if facecolors is None:
facecolors = co
if c is None:
if facecolors is not None:
c = facecolors
else:
c = "b" # The original default
if edgecolors is None and not rcParams["_internal.classic_mode"]:
edgecolors = "face"
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
if s is None:
if rcParams["_internal.classic_mode"]:
s = 20
else:
s = rcParams["lines.markersize"] ** 2.0
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
# After this block, c_array will be None unless
# c is an array for mapping. The potential ambiguity
# with a sequence of 3 or 4 numbers is resolved in
# favor of mapping, not rgb or rgba.
try:
c_array = np.asanyarray(c, dtype=float)
if c_array.size == x.size:
c = np.ma.ravel(c_array)
else:
# Wrong size; it must not be intended for mapping.
c_array = None
except ValueError:
# Failed to make a floating-point array; c must be color specs.
c_array = None
if c_array is None:
colors = c # must be acceptable as PathCollection facecolors
else:
colors = None # use cmap, norm after collection is created
# c will be unchanged unless it is the same length as x:
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = "face"
linewidths = rcParams["lines.linewidth"]
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,),
scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop("transform", self.transData),
alpha=alpha,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.update(kwargs)
if colors is None:
if norm is not None and not isinstance(norm, mcolors.Normalize):
msg = "'norm' must be an instance of 'mcolors.Normalize'"
raise ValueError(msg)
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
|
def scatter(
self,
x,
y,
s=None,
c=None,
marker="o",
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=None,
linewidths=None,
verts=None,
edgecolors=None,
**kwargs,
):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same length.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional
size in points^2. Default is `rcParams['lines.markersize'] ** 2`.
c : color, sequence, or sequence of color, optional, default: 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however, including the case of a single
row to specify the same color for all points.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports. `marker` can be either
an instance of the class or the text shorthand for a particular
marker.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,).
edgecolors : color or sequence of color, optional, default: None
If None, defaults to 'face'
If 'face', the edge color will always be the same as
the face color.
If it is 'none', the patch boundary will not
be drawn.
For non-filled markers, the `edgecolors` kwarg
is ignored and forced to 'face' internally.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
------
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Fundamentally, scatter works with 1-D arrays; `x`, `y`, `s`,
and `c` may be input as 2-D arrays, but within scatter
they will be flattened. The exception is `c`, which
will be flattened only if its size matches the size of `x`
and `y`.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
"""
if not self._hold:
self.cla()
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
facecolors = None
ec = kwargs.pop("edgecolor", None)
if ec is not None:
edgecolors = ec
fc = kwargs.pop("facecolor", None)
if fc is not None:
facecolors = fc
fc = kwargs.pop("facecolors", None)
if fc is not None:
facecolors = fc
# 'color' should be deprecated in scatter, or clearly defined;
# since it isn't, I am giving it low priority.
co = kwargs.pop("color", None)
if co is not None:
if edgecolors is None:
edgecolors = co
if facecolors is None:
facecolors = co
if c is None:
if facecolors is not None:
c = facecolors
else:
c = "b" # The original default
if edgecolors is None and not rcParams["_internal.classic_mode"]:
edgecolors = "face"
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
if s is None:
if rcParams["_internal.classic_mode"]:
s = 20
else:
s = rcParams["lines.markersize"] ** 2.0
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
# After this block, c_array will be None unless
# c is an array for mapping. The potential ambiguity
# with a sequence of 3 or 4 numbers is resolved in
# favor of mapping, not rgb or rgba.
try:
c_array = np.asanyarray(c, dtype=float)
if c_array.size == x.size:
c = np.ma.ravel(c_array)
else:
# Wrong size; it must not be intended for mapping.
c_array = None
except ValueError:
# Failed to make a floating-point array; c must be color specs.
c_array = None
if c_array is None:
colors = c # must be acceptable as PathCollection facecolors
else:
colors = None # use cmap, norm after collection is created
# c will be unchanged unless it is the same length as x:
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = "face"
linewidths = rcParams["lines.linewidth"]
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,),
scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop("transform", self.transData),
alpha=alpha,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.update(kwargs)
if colors is None:
if norm is not None and not isinstance(norm, mcolors.Normalize):
msg = "'norm' must be an instance of 'mcolors.Normalize'"
raise ValueError(msg)
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
|
https://github.com/matplotlib/matplotlib/issues/6266
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
367 raise ValueError(
--> 368 'length of rgba sequence should be either 3 or 4')
369 else:
ValueError: length of rgba sequence should be either 3 or 4
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba_array(self, c, alpha)
398 # Single value? Put it in an array with a single row.
--> 399 return np.array([self.to_rgba(c, alpha)], dtype=np.float)
400 except ValueError:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
375 raise ValueError(
--> 376 'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
377
ValueError: to_rgba: Invalid rgba arg "6574 0.000165
6674 0.000167
6774 0.000168
6873 0.000165
6874 0.000170
6973 0.000166
6974 0.000171
7073 0.000167
7074 0.000173
7172 0.000163
7173 0.000168
7174 0.000174
7272 0.000164
7273 0.000169
7274 0.000176
7372 0.000165
7373 0.000170
7374 0.000177
7472 0.000166
7473 0.000172
7474 0.000179
7571 0.000162
7572 0.000167
7573 0.000173
7574 0.000181
7671 0.000163
7672 0.000168
7673 0.000175
7674 0.000183
7771 0.000163
...
9670 0.000193
9671 0.000207
9672 0.000221
9673 0.000236
9765 0.000138
9766 0.000148
9767 0.000158
9768 0.000170
9769 0.000183
9770 0.000196
9771 0.000211
9772 0.000226
9864 0.000130
9865 0.000140
9866 0.000150
9867 0.000161
9868 0.000173
9869 0.000186
9870 0.000200
9871 0.000215
9872 0.000230
9964 0.000131
9965 0.000142
9966 0.000152
9967 0.000163
9968 0.000176
9969 0.000189
9970 0.000204
9971 0.000219
9972 0.000234
Name: sum_prob, dtype: float64"
length of rgba sequence should be either 3 or 4
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgb(self, arg)
321 raise ValueError(
--> 322 'cannot convert argument to rgb sequence')
323
ValueError: cannot convert argument to rgb sequence
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
369 else:
--> 370 r, g, b = self.to_rgb(arg)
371 if alpha is None:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgb(self, arg)
327 raise ValueError(
--> 328 'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
329 # Error messages could be improved by handling TypeError
ValueError: to_rgb: Invalid rgb arg "0.000165462141918"
cannot convert argument to rgb sequence
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-40-c01684ee4af6> in <module>()
22 #plt.show()
23
---> 24 ax.scatter("lon", "lat", color=good_data["sum_prob"], lw = 0, data=good_data, transform=ll_proj)
25 #ax.scatter("lon", "lat", color="sum_prob", lw = 0, data=best, transform=ll_proj)
C:\portabel\miniconda\envs\zalando\lib\site-packages\cartopy\mpl\geoaxes.py in scatter(self, *args, **kwargs)
1179 '(PlateCarree or RotatedPole).')
1180
-> 1181 result = matplotlib.axes.Axes.scatter(self, *args, **kwargs)
1182 self.autoscale_view()
1183 return result
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\__init__.py in inner(ax, *args, **kwargs)
1809 warnings.warn(msg % (label_namer, func.__name__),
1810 RuntimeWarning, stacklevel=2)
-> 1811 return func(ax, *args, **kwargs)
1812 pre_doc = inner.__doc__
1813 if pre_doc is None:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\axes\_axes.py in scatter(self, x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, edgecolors, **kwargs)
3891 offsets=offsets,
3892 transOffset=kwargs.pop('transform', self.transData),
-> 3893 alpha=alpha
3894 )
3895 collection.set_transform(mtransforms.IdentityTransform())
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in __init__(self, paths, sizes, **kwargs)
829 """
830
--> 831 Collection.__init__(self, **kwargs)
832 self.set_paths(paths)
833 self.set_sizes(sizes)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in __init__(self, edgecolors, facecolors, linewidths, linestyles, antialiaseds, offsets, transOffset, norm, cmap, pickradius, hatch, urls, offset_position, zorder, **kwargs)
114 cm.ScalarMappable.__init__(self, norm, cmap)
115
--> 116 self.set_edgecolor(edgecolors)
117 self.set_facecolor(facecolors)
118 self.set_linewidth(linewidths)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in set_edgecolor(self, c)
658 c = mpl.rcParams['patch.edgecolor']
659 self._edgecolors_original = c
--> 660 self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
661 self.stale = True
662
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba_array(self, c, alpha)
420 result = np.zeros((nc, 4), dtype=np.float)
421 for i, cc in enumerate(c):
--> 422 result[i] = self.to_rgba(cc, alpha)
423 return result
424
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
374 except (TypeError, ValueError) as exc:
375 raise ValueError(
--> 376 'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
377
378 def to_rgba_array(self, c, alpha=None):
ValueError: to_rgba: Invalid rgba arg "0.000165462141918"
to_rgb: Invalid rgb arg "0.000165462141918"
cannot convert argument to rgb sequence
|
ValueError
|
def scatter(
self,
x,
y,
s=20,
c=None,
marker="o",
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=None,
linewidths=None,
verts=None,
edgecolors=None,
**kwargs,
):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same length.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional, default: 20
size in points^2.
c : color, sequence, or sequence of color, optional, default: 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however, including the case of a single
row to specify the same color for all points.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports. `marker` can be either
an instance of the class or the text shorthand for a particular
marker.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,).
edgecolors : color or sequence of color, optional, default: None
If None, defaults to (patch.edgecolor).
If 'face', the edge color will always be the same as
the face color. If it is 'none', the patch boundary will not
be drawn. For non-filled markers, the `edgecolors` kwarg
is ignored; color is determined by `c`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
------
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Fundamentally, scatter works with 1-D arrays; `x`, `y`, `s`,
and `c` may be input as 2-D arrays, but within scatter
they will be flattened. The exception is `c`, which
will be flattened only if its size matches the size of `x`
and `y`.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
"""
if not self._hold:
self.cla()
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
facecolors = None
edgecolors = kwargs.pop("edgecolor", edgecolors)
fc = kwargs.pop("facecolors", None)
fc = kwargs.pop("facecolor", fc)
if fc is not None:
facecolors = fc
co = kwargs.pop("color", None)
if co is not None:
try:
mcolors.colorConverter.to_rgba_array(co)
except ValueError:
raise ValueError(
"'color' kwarg must be an mpl color"
" spec or sequence of color specs.\n"
"For a sequence of values to be"
" color-mapped, use the 'c' kwarg instead."
)
if edgecolors is None:
edgecolors = co
if facecolors is None:
facecolors = co
if c is None:
if facecolors is not None:
c = facecolors
else:
c = "b" # The original default
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
# After this block, c_array will be None unless
# c is an array for mapping. The potential ambiguity
# with a sequence of 3 or 4 numbers is resolved in
# favor of mapping, not rgb or rgba.
try:
c_array = np.asanyarray(c, dtype=float)
if c_array.size == x.size:
c = np.ma.ravel(c_array)
else:
# Wrong size; it must not be intended for mapping.
c_array = None
except ValueError:
# Failed to make a floating-point array; c must be color specs.
c_array = None
if c_array is None:
colors = c # must be acceptable as PathCollection facecolors
else:
colors = None # use cmap, norm after collection is created
# c will be unchanged unless it is the same length as x:
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = "face"
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,),
scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop("transform", self.transData),
alpha=alpha,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.update(kwargs)
if colors is None:
if norm is not None and not isinstance(norm, mcolors.Normalize):
msg = "'norm' must be an instance of 'mcolors.Normalize'"
raise ValueError(msg)
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
|
def scatter(
self,
x,
y,
s=20,
c=None,
marker="o",
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=None,
linewidths=None,
verts=None,
edgecolors=None,
**kwargs,
):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same length.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional, default: 20
size in points^2.
c : color, sequence, or sequence of color, optional, default: 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however, including the case of a single
row to specify the same color for all points.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports. `marker` can be either
an instance of the class or the text shorthand for a particular
marker.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,).
edgecolors : color or sequence of color, optional, default: None
If None, defaults to (patch.edgecolor).
If 'face', the edge color will always be the same as
the face color. If it is 'none', the patch boundary will not
be drawn. For non-filled markers, the `edgecolors` kwarg
is ignored; color is determined by `c`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
------
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Fundamentally, scatter works with 1-D arrays; `x`, `y`, `s`,
and `c` may be input as 2-D arrays, but within scatter
they will be flattened. The exception is `c`, which
will be flattened only if its size matches the size of `x`
and `y`.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
"""
if not self._hold:
self.cla()
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
facecolors = None
ec = kwargs.pop("edgecolor", None)
if ec is not None:
edgecolors = ec
fc = kwargs.pop("facecolor", None)
if fc is not None:
facecolors = fc
fc = kwargs.pop("facecolors", None)
if fc is not None:
facecolors = fc
# 'color' should be deprecated in scatter, or clearly defined;
# since it isn't, I am giving it low priority.
co = kwargs.pop("color", None)
if co is not None:
if edgecolors is None:
edgecolors = co
if facecolors is None:
facecolors = co
if c is None:
if facecolors is not None:
c = facecolors
else:
c = "b" # The original default
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
# After this block, c_array will be None unless
# c is an array for mapping. The potential ambiguity
# with a sequence of 3 or 4 numbers is resolved in
# favor of mapping, not rgb or rgba.
try:
c_array = np.asanyarray(c, dtype=float)
if c_array.size == x.size:
c = np.ma.ravel(c_array)
else:
# Wrong size; it must not be intended for mapping.
c_array = None
except ValueError:
# Failed to make a floating-point array; c must be color specs.
c_array = None
if c_array is None:
colors = c # must be acceptable as PathCollection facecolors
else:
colors = None # use cmap, norm after collection is created
# c will be unchanged unless it is the same length as x:
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = "face"
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,),
scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop("transform", self.transData),
alpha=alpha,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.update(kwargs)
if colors is None:
if norm is not None and not isinstance(norm, mcolors.Normalize):
msg = "'norm' must be an instance of 'mcolors.Normalize'"
raise ValueError(msg)
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
|
https://github.com/matplotlib/matplotlib/issues/6266
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
367 raise ValueError(
--> 368 'length of rgba sequence should be either 3 or 4')
369 else:
ValueError: length of rgba sequence should be either 3 or 4
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba_array(self, c, alpha)
398 # Single value? Put it in an array with a single row.
--> 399 return np.array([self.to_rgba(c, alpha)], dtype=np.float)
400 except ValueError:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
375 raise ValueError(
--> 376 'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
377
ValueError: to_rgba: Invalid rgba arg "6574 0.000165
6674 0.000167
6774 0.000168
6873 0.000165
6874 0.000170
6973 0.000166
6974 0.000171
7073 0.000167
7074 0.000173
7172 0.000163
7173 0.000168
7174 0.000174
7272 0.000164
7273 0.000169
7274 0.000176
7372 0.000165
7373 0.000170
7374 0.000177
7472 0.000166
7473 0.000172
7474 0.000179
7571 0.000162
7572 0.000167
7573 0.000173
7574 0.000181
7671 0.000163
7672 0.000168
7673 0.000175
7674 0.000183
7771 0.000163
...
9670 0.000193
9671 0.000207
9672 0.000221
9673 0.000236
9765 0.000138
9766 0.000148
9767 0.000158
9768 0.000170
9769 0.000183
9770 0.000196
9771 0.000211
9772 0.000226
9864 0.000130
9865 0.000140
9866 0.000150
9867 0.000161
9868 0.000173
9869 0.000186
9870 0.000200
9871 0.000215
9872 0.000230
9964 0.000131
9965 0.000142
9966 0.000152
9967 0.000163
9968 0.000176
9969 0.000189
9970 0.000204
9971 0.000219
9972 0.000234
Name: sum_prob, dtype: float64"
length of rgba sequence should be either 3 or 4
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgb(self, arg)
321 raise ValueError(
--> 322 'cannot convert argument to rgb sequence')
323
ValueError: cannot convert argument to rgb sequence
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
369 else:
--> 370 r, g, b = self.to_rgb(arg)
371 if alpha is None:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgb(self, arg)
327 raise ValueError(
--> 328 'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
329 # Error messages could be improved by handling TypeError
ValueError: to_rgb: Invalid rgb arg "0.000165462141918"
cannot convert argument to rgb sequence
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-40-c01684ee4af6> in <module>()
22 #plt.show()
23
---> 24 ax.scatter("lon", "lat", color=good_data["sum_prob"], lw = 0, data=good_data, transform=ll_proj)
25 #ax.scatter("lon", "lat", color="sum_prob", lw = 0, data=best, transform=ll_proj)
C:\portabel\miniconda\envs\zalando\lib\site-packages\cartopy\mpl\geoaxes.py in scatter(self, *args, **kwargs)
1179 '(PlateCarree or RotatedPole).')
1180
-> 1181 result = matplotlib.axes.Axes.scatter(self, *args, **kwargs)
1182 self.autoscale_view()
1183 return result
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\__init__.py in inner(ax, *args, **kwargs)
1809 warnings.warn(msg % (label_namer, func.__name__),
1810 RuntimeWarning, stacklevel=2)
-> 1811 return func(ax, *args, **kwargs)
1812 pre_doc = inner.__doc__
1813 if pre_doc is None:
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\axes\_axes.py in scatter(self, x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, edgecolors, **kwargs)
3891 offsets=offsets,
3892 transOffset=kwargs.pop('transform', self.transData),
-> 3893 alpha=alpha
3894 )
3895 collection.set_transform(mtransforms.IdentityTransform())
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in __init__(self, paths, sizes, **kwargs)
829 """
830
--> 831 Collection.__init__(self, **kwargs)
832 self.set_paths(paths)
833 self.set_sizes(sizes)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in __init__(self, edgecolors, facecolors, linewidths, linestyles, antialiaseds, offsets, transOffset, norm, cmap, pickradius, hatch, urls, offset_position, zorder, **kwargs)
114 cm.ScalarMappable.__init__(self, norm, cmap)
115
--> 116 self.set_edgecolor(edgecolors)
117 self.set_facecolor(facecolors)
118 self.set_linewidth(linewidths)
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\collections.py in set_edgecolor(self, c)
658 c = mpl.rcParams['patch.edgecolor']
659 self._edgecolors_original = c
--> 660 self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
661 self.stale = True
662
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba_array(self, c, alpha)
420 result = np.zeros((nc, 4), dtype=np.float)
421 for i, cc in enumerate(c):
--> 422 result[i] = self.to_rgba(cc, alpha)
423 return result
424
C:\portabel\miniconda\envs\zalando\lib\site-packages\matplotlib\colors.py in to_rgba(self, arg, alpha)
374 except (TypeError, ValueError) as exc:
375 raise ValueError(
--> 376 'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
377
378 def to_rgba_array(self, c, alpha=None):
ValueError: to_rgba: Invalid rgba arg "0.000165462141918"
to_rgb: Invalid rgb arg "0.000165462141918"
cannot convert argument to rgb sequence
|
ValueError
|
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or iterable of :class:`Artists`.
If an iterable is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o):
# Wrapped in list instead of doing try-except around next(iter(o))
o = list(o)
if len(o):
o = o[0]
self.oorig = o
if not inspect.isclass(o):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
|
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of :class:`Artists`.
If a sequence is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o) and len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
|
https://github.com/matplotlib/matplotlib/issues/6212
|
import matplotlib.pyplot as plt
import itertools
lines1 = plt.plot(range(3), range(3), range(5), range(5))
lines2 = plt.plot(range(4), range(4), range(6), range(6))
plt.setp(itertools.chain(lines1, lines2), color='red')
Traceback (most recent call last):
File "<ipython-input-6-2f274dd0d4c1>", line 1, in <module>
plt.setp(itertools.chain(lines1, lines2), color='red')
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/pyplot.py", line 351, in setp
return _setp(*args, **kwargs)
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/artist.py", line 1437, in setp
insp = ArtistInspector(obj)
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/artist.py", line 1032, in __init__
if cbook.iterable(o) and len(o):
TypeError: object of type 'itertools.chain' has no len()
|
TypeError
|
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide
the name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a iterable of
instances. If you are in query mode introspecting the possible
values, only the first instance in the sequence is used. When
actually setting values, all the instances will be set. e.g.,
suppose you have a list of two lines, the following will make both
lines thicker and red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
if not cbook.iterable(obj):
objs = [obj]
else:
objs = list(cbook.flatten(obj))
insp = ArtistInspector(objs[0])
if len(kwargs) == 0 and len(args) == 0:
print("\n".join(insp.pprint_setters()))
return
if len(kwargs) == 0 and len(args) == 1:
print(insp.pprint_setters(prop=args[0]))
return
if len(args) % 2:
raise ValueError("The set args must be string, value pairs")
# put args into ordereddict to maintain order
funcvals = OrderedDict()
for i in range(0, len(args) - 1, 2):
funcvals[args[i]] = args[i + 1]
ret = [o.update(funcvals) for o in objs]
ret.extend([o.set(**kwargs) for o in objs])
return [x for x in cbook.flatten(ret)]
|
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. e.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(obj)
if len(kwargs) == 0 and len(args) == 0:
print("\n".join(insp.pprint_setters()))
return
if len(kwargs) == 0 and len(args) == 1:
print(insp.pprint_setters(prop=args[0]))
return
if not cbook.iterable(obj):
objs = [obj]
else:
objs = list(cbook.flatten(obj))
if len(args) % 2:
raise ValueError("The set args must be string, value pairs")
# put args into ordereddict to maintain order
funcvals = OrderedDict()
for i in range(0, len(args) - 1, 2):
funcvals[args[i]] = args[i + 1]
ret = [o.update(funcvals) for o in objs]
ret.extend([o.set(**kwargs) for o in objs])
return [x for x in cbook.flatten(ret)]
|
https://github.com/matplotlib/matplotlib/issues/6212
|
import matplotlib.pyplot as plt
import itertools
lines1 = plt.plot(range(3), range(3), range(5), range(5))
lines2 = plt.plot(range(4), range(4), range(6), range(6))
plt.setp(itertools.chain(lines1, lines2), color='red')
Traceback (most recent call last):
File "<ipython-input-6-2f274dd0d4c1>", line 1, in <module>
plt.setp(itertools.chain(lines1, lines2), color='red')
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/pyplot.py", line 351, in setp
return _setp(*args, **kwargs)
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/artist.py", line 1437, in setp
insp = ArtistInspector(obj)
File "/home/jfoxrabi/miniconda3/lib/python3.5/site-packages/matplotlib/artist.py", line 1032, in __init__
if cbook.iterable(o) and len(o):
TypeError: object of type 'itertools.chain' has no len()
|
TypeError
|
def remove(self):
for c in cbook.flatten(self, scalarp=lambda x: isinstance(x, martist.Artist)):
c.remove()
if self._remove_method:
self._remove_method(self)
|
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method(self)
|
https://github.com/matplotlib/matplotlib/issues/5692
|
In [7]: c = ax.stem([1,2],[2,1])
In [8]: c.remove()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-dbc7beefa92d> in <module>()
----> 1 c.remove()
python2.7/site-packages/matplotlib/container.pyc in remove(self)
33 def remove(self):
34 for c in self:
---> 35 c.remove()
36
37 if self._remove_method:
TypeError: remove() takes exactly one argument (0 given)
|
TypeError
|
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
has_curve = len(axes.get_lines()) > 0
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [
("Title", axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
("Min", xmin),
("Max", xmax),
("Label", axes.get_xlabel()),
("Scale", [axes.get_xscale(), "linear", "log"]),
sep,
(None, "<b>Y-Axis</b>"),
("Min", ymin),
("Max", ymax),
("Label", axes.get_ylabel()),
("Scale", [axes.get_yscale(), "linear", "log"]),
sep,
("(Re-)Generate automatic legend", False),
]
if has_curve:
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == "_nolegend_":
continue
linedict[label] = line
curves = []
linestyles = list(six.iteritems(LINESTYLES))
markers = list(six.iteritems(MARKERS))
curvelabels = sorted(linedict.keys())
for label in curvelabels:
line = linedict[label]
curvedata = [
("Label", label),
sep,
(None, "<b>Line</b>"),
("Style", [line.get_linestyle()] + linestyles),
("Width", line.get_linewidth()),
("Color", line.get_color()),
sep,
(None, "<b>Marker</b>"),
("Style", [line.get_marker()] + markers),
("Size", line.get_markersize()),
("Facecolor", line.get_markerfacecolor()),
("Edgecolor", line.get_markeredgecolor()),
]
curves.append([curvedata, label, ""])
# make sure that there is at least one displayed curve
has_curve = bool(curves)
datalist = [(general, "Axes", "")]
if has_curve:
datalist.append((curves, "Curves", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
if has_curve:
general, curves = data
else:
(general,) = data
# Set / General
(
title,
xmin,
xmax,
xlabel,
xscale,
ymin,
ymax,
ylabel,
yscale,
generate_legend,
) = general
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
if has_curve:
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
(
label,
linestyle,
linewidth,
color,
marker,
markersize,
markerfacecolor,
markeredgecolor,
) = curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_linewidth(linewidth)
line.set_color(color)
if marker is not "none":
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# re-generate legend, if checkbox is checked
if generate_legend:
if axes.legend_ is not None:
old_legend = axes.get_legend()
new_legend = axes.legend(ncol=old_legend._ncol)
new_legend.draggable(old_legend._draggable is not None)
else:
new_legend = axes.legend()
new_legend.draggable(True)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(
datalist,
title="Figure options",
parent=parent,
icon=get_icon("qt4_editor_options.svg"),
apply=apply_callback,
)
if data is not None:
apply_callback(data)
|
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
has_curve = len(axes.get_lines()) > 0
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [
("Title", axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
("Min", xmin),
("Max", xmax),
("Label", axes.get_xlabel()),
("Scale", [axes.get_xscale(), "linear", "log"]),
sep,
(None, "<b>Y-Axis</b>"),
("Min", ymin),
("Max", ymax),
("Label", axes.get_ylabel()),
("Scale", [axes.get_yscale(), "linear", "log"]),
sep,
("(Re-)Generate automatic legend", False),
]
if has_curve:
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == "_nolegend_":
continue
linedict[label] = line
curves = []
linestyles = list(six.iteritems(LINESTYLES))
markers = list(six.iteritems(MARKERS))
curvelabels = sorted(linedict.keys())
for label in curvelabels:
line = linedict[label]
curvedata = [
("Label", label),
sep,
(None, "<b>Line</b>"),
("Style", [line.get_linestyle()] + linestyles),
("Width", line.get_linewidth()),
("Color", line.get_color()),
sep,
(None, "<b>Marker</b>"),
("Style", [line.get_marker()] + markers),
("Size", line.get_markersize()),
("Facecolor", line.get_markerfacecolor()),
("Edgecolor", line.get_markeredgecolor()),
]
curves.append([curvedata, label, ""])
datalist = [(general, "Axes", "")]
if has_curve:
datalist.append((curves, "Curves", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
if has_curve:
general, curves = data
else:
(general,) = data
# Set / General
(
title,
xmin,
xmax,
xlabel,
xscale,
ymin,
ymax,
ylabel,
yscale,
generate_legend,
) = general
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
if has_curve:
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
(
label,
linestyle,
linewidth,
color,
marker,
markersize,
markerfacecolor,
markeredgecolor,
) = curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_linewidth(linewidth)
line.set_color(color)
if marker is not "none":
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# re-generate legend, if checkbox is checked
if generate_legend:
if axes.legend_ is not None:
old_legend = axes.get_legend()
new_legend = axes.legend(ncol=old_legend._ncol)
new_legend.draggable(old_legend._draggable is not None)
else:
new_legend = axes.legend()
new_legend.draggable(True)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(
datalist,
title="Figure options",
parent=parent,
icon=get_icon("qt4_editor_options.svg"),
apply=apply_callback,
)
if data is not None:
apply_callback(data)
|
https://github.com/matplotlib/matplotlib/issues/4323
|
$ ipython --pylab
Python 3.4.3 (default, Mar 25 2015, 17:13:50)
Type "copyright", "credits" or "license" for more information.
IPython 3.0.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
romUsing matplotlib backend: Qt5Agg
Using matplotlib backend: Qt5Agg
se
In [1]: from seaborn import *; tsplot([1, 2])
Out[1]: <matplotlib.axes._subplots.AxesSubplot at 0x7f0262043d30>
# <------ click on green tick
In [2]: Traceback (most recent call last):
File "/usr/lib/python3.4/site-packages/matplotlib/backends/backend_qt5.py", line 653, in edit_parameters
figureoptions.figure_edit(axes, self)
File "/usr/lib/python3.4/site-packages/matplotlib/backends/qt_editor/figureoptions.py", line 146, in figure_edit
apply=apply_callback)
File "/usr/lib/python3.4/site-packages/matplotlib/backends/qt_editor/formlayout.py", line 510, in fedit
dialog = FormDialog(data, title, comment, icon, parent, apply)
File "/usr/lib/python3.4/site-packages/matplotlib/backends/qt_editor/formlayout.py", line 416, in __init__
parent=self)
File "/usr/lib/python3.4/site-packages/matplotlib/backends/qt_editor/formlayout.py", line 389, in __init__
if len(data[0]) == 3:
IndexError: list index out of range
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@scipy.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
|
IndexError
|
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError("You must first set the image array")
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
bg = (np.array(bg) * 255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
# The extra cast-to-int is only needed for python2
width = int(round(width * magnification))
height = int(round(height * magnification))
if self._rgbacache is None:
A = self.to_rgba(self._A, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self.is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(
self._Ax, self._Ay, A, height, width, (vl.x0, vl.x1, vl.y0, vl.y1), bg
)
im.is_grayscale = self.is_grayscale
return im
|
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError("You must first set the image array")
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
bg = (np.array(bg) * 255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width = width * magnification
height = height * magnification
if self._rgbacache is None:
A = self.to_rgba(self._A, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self.is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(
self._Ax, self._Ay, A, height, width, (vl.x0, vl.x1, vl.y0, vl.y1), bg
)
im.is_grayscale = self.is_grayscale
return im
|
https://github.com/matplotlib/matplotlib/issues/4227
|
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 341, in resizeEvent
self.draw()
File "/usr/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 143, in draw
FigureCanvasAgg.draw(self)
File "/usr/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 475, in draw
self.figure.draw(self.renderer)
File "/usr/lib/python2.7/site-packages/matplotlib/artist.py", line 60, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/matplotlib/figure.py", line 1094, in draw
func(*args)
File "/usr/lib/python2.7/site-packages/matplotlib/artist.py", line 60, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/matplotlib/axes/_base.py", line 2096, in draw
a.draw(renderer)
File "/usr/lib/python2.7/site-packages/matplotlib/artist.py", line 60, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/matplotlib/image.py", line 881, in draw
im = self.make_image(renderer.get_image_magnification())
File "/usr/lib/python2.7/site-packages/matplotlib/image.py", line 869, in make_image
bg)
TypeError: integer argument expected, got float
|
TypeError
|
def eventplot(
self,
positions,
orientation="horizontal",
lineoffsets=1,
linelengths=1,
linewidths=None,
colors=None,
linestyles="solid",
**kwargs,
):
"""
Plot identical parallel lines at specific positions.
Call signature::
eventplot(positions, orientation='horizontal', lineoffsets=0,
linelengths=1, linewidths=None, color =None,
linestyles='solid'
Plot parallel lines at the given positions. positions should be a 1D
or 2D array-like object, with each row corresponding to a row or column
of lines.
This type of plot is commonly used in neuroscience for representing
neural events, where it is commonly called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
*orientation* : [ 'horizonal' | 'vertical' ]
'horizonal' : the lines will be vertical and arranged in rows
"vertical' : lines will be horizontal and arranged in columns
*lineoffsets* :
A float or array-like containing floats.
*linelengths* :
A float or array-like containing floats.
*linewidths* :
A float or array-like containing floats.
*colors*
must be a sequence of RGBA tuples (eg arbitrary color
strings, etc, not allowed) or a list of such sequences
*linestyles* :
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] or an array of these
values
For linelengths, linewidths, colors, and linestyles, if only a single
value is given, that value is applied to all lines. If an array-like
is given, it must have the same length as positions, and each value
will be applied to the corresponding row or column in positions.
Returns a list of :class:`matplotlib.collections.EventCollection`
objects that were added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
**Example:**
.. plot:: mpl_examples/pylab_examples/eventplot_demo.py
"""
self._process_unit_info(
xdata=positions, ydata=[lineoffsets, linelengths], kwargs=kwargs
)
# We do the conversion first since not all unitized data is uniform
positions = self.convert_xunits(positions)
lineoffsets = self.convert_yunits(lineoffsets)
linelengths = self.convert_yunits(linelengths)
if not iterable(positions):
positions = [positions]
elif any(iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
if len(positions) == 0:
return []
if not iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not iterable(linelengths):
linelengths = [linelengths]
if not iterable(linewidths):
linewidths = [linewidths]
if not iterable(colors):
colors = [colors]
if hasattr(linestyles, "lower") or not iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
lineoffsets = [None]
if len(linelengths) == 0:
linelengths = [None]
if len(linewidths) == 0:
lineoffsets = [None]
if len(linewidths) == 0:
lineoffsets = [None]
if len(colors) == 0:
colors = [None]
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
if colors[0] is None:
colors = colors * len(positions)
else:
colors = np.asanyarray(colors)
colors = np.tile(colors, [len(positions), 1])
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError("lineoffsets and positions are unequal sized sequences")
if len(linelengths) != len(positions):
raise ValueError("linelengths and positions are unequal sized sequences")
if len(linewidths) != len(positions):
raise ValueError("linewidths and positions are unequal sized sequences")
if len(colors) != len(positions):
raise ValueError("colors and positions are unequal sized sequences")
if len(linestyles) != len(positions):
raise ValueError("linestyles and positions are unequal sized sequences")
colls = []
for position, lineoffset, linelength, linewidth, color, linestyle in zip(
positions, lineoffsets, linelengths, linewidths, colors, linestyles
):
coll = mcoll.EventCollection(
position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if colls[0].is_horizontal():
corners = (minpos, minline), (maxpos, maxline)
else:
corners = (minline, minpos), (maxline, maxpos)
self.update_datalim(corners)
self.autoscale_view()
return colls
|
def eventplot(
self,
positions,
orientation="horizontal",
lineoffsets=1,
linelengths=1,
linewidths=None,
colors=None,
linestyles="solid",
**kwargs,
):
"""
Plot identical parallel lines at specific positions.
Call signature::
eventplot(positions, orientation='horizontal', lineoffsets=0,
linelengths=1, linewidths=None, color =None,
linestyles='solid'
Plot parallel lines at the given positions. positions should be a 1D
or 2D array-like object, with each row corresponding to a row or column
of lines.
This type of plot is commonly used in neuroscience for representing
neural events, where it is commonly called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
*orientation* : [ 'horizonal' | 'vertical' ]
'horizonal' : the lines will be vertical and arranged in rows
"vertical' : lines will be horizontal and arranged in columns
*lineoffsets* :
A float or array-like containing floats.
*linelengths* :
A float or array-like containing floats.
*linewidths* :
A float or array-like containing floats.
*colors*
must be a sequence of RGBA tuples (eg arbitrary color
strings, etc, not allowed) or a list of such sequences
*linestyles* :
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] or an array of these
values
For linelengths, linewidths, colors, and linestyles, if only a single
value is given, that value is applied to all lines. If an array-like
is given, it must have the same length as positions, and each value
will be applied to the corresponding row or column in positions.
Returns a list of :class:`matplotlib.collections.EventCollection`
objects that were added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
**Example:**
.. plot:: mpl_examples/pylab_examples/eventplot_demo.py
"""
self._process_unit_info(
xdata=positions, ydata=[lineoffsets, linelengths], kwargs=kwargs
)
# We do the conversion first since not all unitized data is uniform
positions = self.convert_xunits(positions)
lineoffsets = self.convert_yunits(lineoffsets)
linelengths = self.convert_yunits(linelengths)
if not iterable(positions):
positions = [positions]
elif any(iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
if len(positions) == 0:
return []
if not iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not iterable(linelengths):
linelengths = [linelengths]
if not iterable(linewidths):
linewidths = [linewidths]
if not iterable(colors):
colors = [colors]
if hasattr(linestyles, "lower") or not iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
lineoffsets = [None]
if len(linelengths) == 0:
linelengths = [None]
if len(linewidths) == 0:
lineoffsets = [None]
if len(linewidths) == 0:
lineoffsets = [None]
if len(colors) == 0:
colors = [None]
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
colors = np.asanyarray(colors)
colors = np.tile(colors, [len(positions), 1])
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError("lineoffsets and positions are unequal sized sequences")
if len(linelengths) != len(positions):
raise ValueError("linelengths and positions are unequal sized sequences")
if len(linewidths) != len(positions):
raise ValueError("linewidths and positions are unequal sized sequences")
if len(colors) != len(positions):
raise ValueError("colors and positions are unequal sized sequences")
if len(linestyles) != len(positions):
raise ValueError("linestyles and positions are unequal sized sequences")
colls = []
for position, lineoffset, linelength, linewidth, color, linestyle in zip(
positions, lineoffsets, linelengths, linewidths, colors, linestyles
):
coll = mcoll.EventCollection(
position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if colls[0].is_horizontal():
corners = (minpos, minline), (maxpos, maxline)
else:
corners = (minline, minpos), (maxline, maxpos)
self.update_datalim(corners)
self.autoscale_view()
return colls
|
https://github.com/matplotlib/matplotlib/issues/3728
|
In [1]: events = np.random.exponential(0.5, size=100)
In [2]: plt.eventplot(events)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-51aef4c82a54> in <module>()
----> 1 plt.eventplot(events)
/usr/lib/pymodules/python2.7/matplotlib/pyplot.pyc in eventplot(positions, orientation, lineoffsets, linelengths, linewidths, colors, linestyles, hold, **kwargs)
2718 lineoffsets=lineoffsets, linelengths=linelengths,
2719 linewidths=linewidths, colors=colors,
-> 2720 linestyles=linestyles, **kwargs)
2721 draw_if_interactive()
2722 finally:
/usr/lib/pymodules/python2.7/matplotlib/axes.pyc in eventplot(self, positions, orientation, lineoffsets, linelengths, linewidths, colors, linestyles, **kwargs)
3981 linewidth=linewidth,
3982 color=color,
-> 3983 linestyle=linestyle)
3984 self.add_collection(coll)
3985 coll.update(kwargs)
/usr/lib/pymodules/python2.7/matplotlib/collections.pyc in __init__(self, positions, orientation, lineoffset, linelength, linewidth, color, linestyle, antialiased, **kwargs)
1181 antialiaseds=antialiased,
1182 linestyles=linestyle,
-> 1183 **kwargs)
1184
1185 self._linelength = linelength
/usr/lib/pymodules/python2.7/matplotlib/collections.pyc in __init__(self, segments, linewidths, colors, antialiaseds, linestyles, offsets, transOffset, norm, cmap, pickradius, zorder, **kwargs)
997 self.set_linestyles(linestyles)
998
--> 999 colors = mcolors.colorConverter.to_rgba_array(colors)
1000
1001 Collection.__init__(
/usr/lib/pymodules/python2.7/matplotlib/colors.pyc in to_rgba_array(self, c, alpha)
390 if isinstance(c, np.ndarray):
391 if c.ndim != 2 and c.dtype.kind not in 'SU':
--> 392 raise ValueError("Color array must be two-dimensional")
393 if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
394 if (c.ravel() > 1).any() or (c.ravel() < 0).any():
ValueError: Color array must be two-dimensional
|
ValueError
|
def _init(self):
if True: # not self._initialized:
if not self.Q._initialized:
self.Q._init()
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
# Hack: save and restore the Umask
_mask = self.Q.Umask
self.Q.Umask = ma.nomask
self.verts = self.Q._make_verts(np.array([self.U]), np.zeros((1,)))
self.Q.Umask = _mask
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = mcollections.PolyCollection(
self.verts,
offsets=[(self.X, self.Y)],
transOffset=self.get_transform(),
**kw,
)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self.vector.set_figure(self.get_figure())
self._initialized = True
|
def _init(self):
if True: # not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
# Hack: save and restore the Umask
_mask = self.Q.Umask
self.Q.Umask = ma.nomask
self.verts = self.Q._make_verts(np.array([self.U]), np.zeros((1,)))
self.Q.Umask = _mask
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = mcollections.PolyCollection(
self.verts,
offsets=[(self.X, self.Y)],
transOffset=self.get_transform(),
**kw,
)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self.vector.set_figure(self.get_figure())
self._initialized = True
|
https://github.com/matplotlib/matplotlib/issues/2616
|
Exception in Tkinter callback
Traceback (most recent call last):
File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 1437, in __call__
return self.func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 236, in resize
self.show()
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 239, in draw
FigureCanvasAgg.draw(self)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_agg.py", line 421, in draw
self.figure.draw(self.renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/figure.py", line 898, in draw
func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 1997, in draw
a.draw(renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 304, in draw
self._init()
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 272, in _init
np.zeros((1,)))
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 571, in _make_verts
length = a * (widthu_per_lenu / (self.scale * self.width))
TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
|
TypeError
|
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point((ax.bbox.width, ax.bbox.height))
self.span = sx
if self.width is None:
sn = max(8, min(25, math.sqrt(self.N)))
self.width = 0.06 * self.span / sn
# _make_verts sets self.scale if not already specified
if not self._initialized and self.scale is None:
self._make_verts(self.U, self.V)
self._initialized = True
|
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point((ax.bbox.width, ax.bbox.height))
self.span = sx
if self.width is None:
sn = max(8, min(25, math.sqrt(self.N)))
self.width = 0.06 * self.span / sn
|
https://github.com/matplotlib/matplotlib/issues/2616
|
Exception in Tkinter callback
Traceback (most recent call last):
File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 1437, in __call__
return self.func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 236, in resize
self.show()
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 239, in draw
FigureCanvasAgg.draw(self)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_agg.py", line 421, in draw
self.figure.draw(self.renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/figure.py", line 898, in draw
func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 1997, in draw
a.draw(renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 304, in draw
self._init()
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 272, in _init
np.zeros((1,)))
File "/usr/lib/pymodules/python2.7/matplotlib/quiver.py", line 571, in _make_verts
length = a * (widthu_per_lenu / (self.scale * self.width))
TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
|
TypeError
|
def figimage(
self,
X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs,
):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, eg
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
|
def figimage(
self,
X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs,
):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, eg
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
return im
|
https://github.com/matplotlib/matplotlib/issues/1747
|
Traceback (most recent call last):
File "<pyshell#6>", line 1, in <module>
caption.remove()
File "C:\Programme\Python27\lib\site-packages\matplotlib\artist.py",
line 134, in remove
raise NotImplementedError('cannot remove artist')
NotImplementedError: cannot remove artist
|
NotImplementedError
|
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
return t
|
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
return t
|
https://github.com/matplotlib/matplotlib/issues/1747
|
Traceback (most recent call last):
File "<pyshell#6>", line 1, in <module>
caption.remove()
File "C:\Programme\Python27\lib\site-packages\matplotlib\artist.py",
line 134, in remove
raise NotImplementedError('cannot remove artist')
NotImplementedError: cannot remove artist
|
NotImplementedError
|
def find_previous_method(
self, base_method, top_method, pre_method_list, visited_methods=None
):
"""
Find the previous method based on base method before top method.
This will append the method into pre_method_list.
:param base_method: the base function which needs to be searched.
:param top_method: the top-level function which calls the basic function.
:param pre_method_list: list is used to track each function.
:param visited_methods: set with tested method.
:return: None
"""
if visited_methods is None:
visited_methods = set()
class_name, method_name = base_method
method_set = self.apkinfo.upperfunc(class_name, method_name)
visited_methods.add(base_method)
if method_set is not None:
if top_method in method_set:
pre_method_list.append(base_method)
else:
for item in method_set:
# prevent to test the tested methods.
if item in visited_methods:
continue
self.find_previous_method(
item, top_method, pre_method_list, visited_methods
)
|
def find_previous_method(self, base_method, top_method, pre_method_list):
"""
Find the previous method based on base method before top method.
This will append the method into pre_method_list.
:param base_method: the base function which needs to be searched.
:param top_method: the top-level function which calls the basic function.
:param pre_method_list: list is used to track each function.
:return: None
"""
class_name, method_name = base_method
method_set = self.apkinfo.upperfunc(class_name, method_name)
if method_set is not None:
if top_method in method_set:
pre_method_list.append(base_method)
else:
for item in method_set:
# prevent some functions from looking for themselves.
if item == base_method:
continue
self.find_previous_method(item, top_method, pre_method_list)
|
https://github.com/quark-engine/quark-engine/issues/46
|
Traceback (most recent call last):
File "/Users/nick/Desktop/quark-engine/quark/Objects/xrule.py", line 71, in find_previous_method
item, top_method, pre_method_list)
File "/Users/nick/Desktop/quark-engine/quark/Objects/xrule.py", line 71, in find_previous_method
item, top_method, pre_method_list)
File "/Users/nick/Desktop/quark-engine/quark/Objects/xrule.py", line 71, in find_previous_method
item, top_method, pre_method_list)
[Previous line repeated 989 more times]
File "/Users/nick/Desktop/quark-engine/quark/Objects/xrule.py", line 59, in find_previous_method
method_set = self.apkinfo.upperfunc(class_name, method_name)
File "/Users/nick/Desktop/quark-engine/quark/Objects/apkinfo.py", line 60, in upperfunc
method_set = self.find_method(class_name, method_name)
File "/Users/nick/Desktop/quark-engine/quark/Objects/apkinfo.py", line 44, in find_method
if len(list(result)) > 0:
File "/Users/nick/.local/share/virtualenvs/quark-engine-NJwlz3ey/src/androguard/androguard/core/analysis/analysis.py", line 1776, in find_methods
classname = bytes(mutf8.MUTF8String.from_str(classname))
File "/Users/nick/.local/share/virtualenvs/quark-engine-NJwlz3ey/src/androguard/androguard/core/mutf8.py", line 102, in from_str
c = cls(encode(s))
File "/Users/nick/.local/share/virtualenvs/quark-engine-NJwlz3ey/src/androguard/androguard/core/mutf8.py", line 64, in encode
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-NJwlz3ey/src/androguard/androguard/core/mutf8.py", line 64, in <listcomp>
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-NJwlz3ey/src/androguard/androguard/core/mutf8.py", line 64, in <lambda>
ord_array = [i for i in map(lambda x: ord(x), s)]
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def find_f_previous_method(self, base, top):
"""
Find the previous method based on base method
before top method.
This will append the method into self.pre_method0
:param base:
:param top:
:return: None
"""
method_set = self.upperFunc(base[0], base[1])
if method_set is not None:
if top in method_set:
self.pre_method0.append(base)
else:
for item in method_set:
# prevent some functions from looking for themselves.
if item == base:
continue
self.find_f_previous_method(item, top)
|
def find_f_previous_method(self, base, top):
"""
Find the previous method based on base method
before top method.
This will append the method into self.pre_method0
:param base:
:param top:
:return: None
"""
method_set = self.upperFunc(base[0], base[1])
if method_set is not None:
if top in method_set:
self.pre_method0.append(base)
else:
for item in method_set:
self.find_f_previous_method(item, top)
|
https://github.com/quark-engine/quark-engine/issues/18
|
Traceback (most recent call last):
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
[Previous line repeated 989 more times]
File "main.py", line 164, in find_f_previous_method
method_set = self.upperFunc(base[0], base[1])
File "main.py", line 89, in upperFunc
method_set = self.find_method(class_name, method_name)
File "main.py", line 72, in find_method
if len(list(result)) > 0:
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/analysis/analysis.py", line 1776, in find_methods
classname = bytes(mutf8.MUTF8String.from_str(classname))
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 102, in from_str
c = cls(encode(s))
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in encode
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in <listcomp>
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in <lambda>
ord_array = [i for i in map(lambda x: ord(x), s)]
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def find_s_previous_method(self, base, top):
"""
Find the previous method based on base method
before top method.
This will append the method into self.pre_method1
:param base:
:param top:
:return: None
"""
method_set = self.upperFunc(base[0], base[1])
if method_set is not None:
if top in method_set:
self.pre_method1.append(base)
else:
for item in method_set:
# prevent some functions from looking for themselves.
if item == base:
continue
self.find_s_previous_method(item, top)
|
def find_s_previous_method(self, base, top):
"""
Find the previous method based on base method
before top method.
This will append the method into self.pre_method1
:param base:
:param top:
:return: None
"""
method_set = self.upperFunc(base[0], base[1])
if method_set is not None:
if top in method_set:
self.pre_method1.append(base)
else:
for item in method_set:
self.find_s_previous_method(item, top)
|
https://github.com/quark-engine/quark-engine/issues/18
|
Traceback (most recent call last):
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
File "main.py", line 172, in find_f_previous_method
self.find_f_previous_method(item, top)
[Previous line repeated 989 more times]
File "main.py", line 164, in find_f_previous_method
method_set = self.upperFunc(base[0], base[1])
File "main.py", line 89, in upperFunc
method_set = self.find_method(class_name, method_name)
File "main.py", line 72, in find_method
if len(list(result)) > 0:
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/analysis/analysis.py", line 1776, in find_methods
classname = bytes(mutf8.MUTF8String.from_str(classname))
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 102, in from_str
c = cls(encode(s))
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in encode
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in <listcomp>
ord_array = [i for i in map(lambda x: ord(x), s)]
File "/Users/nick/.local/share/virtualenvs/quark-engine-PkXmWEj6/src/androguard/androguard/core/mutf8.py", line 64, in <lambda>
ord_array = [i for i in map(lambda x: ord(x), s)]
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def compare(self, action: int, checksum: bytes) -> bool:
if self.action != action or self.checksum != checksum:
return False
if utime.ticks_ms() >= self.deadline:
if self.workflow is not None:
# We crossed the deadline, kill the running confirmation
# workflow. `self.workflow` is reset in the finally
# handler in `confirm_workflow`.
loop.close(self.workflow)
return False
return True
|
def compare(self, action: int, checksum: bytes) -> bool:
if self.action != action or self.checksum != checksum:
return False
if utime.ticks_ms() >= self.deadline:
if self.workflow is not None:
loop.close(self.workflow)
return False
return True
|
https://github.com/trezor/trezor-firmware/issues/448
|
Traceback (most recent call last):
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 329, in handle_reports
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 468, in dispatch_cmd
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 541, in msg_register
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 363, in setup
AttributeError: 'module' object has no attribute 'workflows'
|
AttributeError
|
def setup(self, action: int, checksum: bytes, app_id: bytes) -> bool:
if workflow.tasks or self.workflow:
# If any other workflow is running, we bail out.
return False
self.action = action
self.checksum = checksum
self.app_id = app_id
self.confirmed = None
self.workflow = self.confirm_workflow()
loop.schedule(self.workflow)
return True
|
def setup(self, action: int, checksum: bytes, app_id: bytes) -> bool:
if workflow.workflows:
return False
self.action = action
self.checksum = checksum
self.app_id = app_id
self.confirmed = None
self.workflow = self.confirm_workflow()
loop.schedule(self.workflow)
return True
|
https://github.com/trezor/trezor-firmware/issues/448
|
Traceback (most recent call last):
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 329, in handle_reports
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 468, in dispatch_cmd
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 541, in msg_register
File "/home/andrew/firmware/core/src/apps/webauthn/__init__.py", line 363, in setup
AttributeError: 'module' object has no attribute 'workflows'
|
AttributeError
|
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
|
def get(self, request, *args, **kwargs):
order = request.GET.get("order", "")
if not (
(not order.startswith("-") or order.count("-") == 1)
and (order.lstrip("-") in self.all_sorts)
):
order = self.get_default_sort_order(request)
self.order = order
return super(QueryStringSortMixin, self).get(request, *args, **kwargs)
|
https://github.com/DMOJ/online-judge/issues/1006
|
Traceback (most recent call last):
File "/code/site/siteenv/lib/python3.6/site-packages/django/db/backends/utils.py", line 64, in execute
return self.cursor.execute(sql, params)
File "/code/site/siteenv/lib/python3.6/site-packages/django/db/backends/mysql/base.py", line 101, in execute
return self.cursor.execute(query, args)
File "/code/site/siteenv/lib/python3.6/site-packages/MySQLdb/cursors.py", line 206, in execute
res = self._query(query)
File "/code/site/siteenv/lib/python3.6/site-packages/MySQLdb/cursors.py", line 312, in _query
db.query(q)
File "/code/site/siteenv/lib/python3.6/site-packages/MySQLdb/connections.py", line 224, in query
_mysql.connection.query(self, query)
MySQLdb._exceptions.DataError: (1406, "Data too long for column 'code' at row 1")
|
MySQLdb._exceptions.DataError
|
def render(self, name, value, attrs=None, renderer=None):
text = super(TextInput, self).render(name, value, attrs)
return mark_safe(
text
+ format_html(
"""\
<a href="#" onclick="return false;" class="button" id="id_{0}_regen">Regenerate</a>
<script type="text/javascript">
(function ($) {{
$(document).ready(function () {{
$('#id_{0}_regen').click(function () {{
var length = 100,
charset = "abcdefghijklnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()_+-=|[]{{}};:,<>./?",
key = "";
for (var i = 0, n = charset.length; i < length; ++i) {{
key += charset.charAt(Math.floor(Math.random() * n));
}}
$('#id_{0}').val(key);
}});
}});
}})(django.jQuery);
</script>
""",
name,
)
)
|
def render(self, name, value, attrs=None):
text = super(TextInput, self).render(name, value, attrs)
return mark_safe(
text
+ format_html(
"""\
<a href="#" onclick="return false;" class="button" id="id_{0}_regen">Regenerate</a>
<script type="text/javascript">
(function ($) {{
$(document).ready(function () {{
$('#id_{0}_regen').click(function () {{
var length = 100,
charset = "abcdefghijklnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()_+-=|[]{{}};:,<>./?",
key = "";
for (var i = 0, n = charset.length; i < length; ++i) {{
key += charset.charAt(Math.floor(Math.random() * n));
}}
$('#id_{0}').val(key);
}});
}});
}})(django.jQuery);
</script>
""",
name,
)
)
|
https://github.com/DMOJ/online-judge/issues/1042
|
Traceback (most recent call last):
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/core/handlers/base.py", line 156, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/core/handlers/base.py", line 154, in _get_response
response = response.render()
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/response.py", line 106, in render
self.content = self.rendered_content
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/response.py", line 83, in rendered_content
content = template.render(context, self._request)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/backends/django.py", line 61, in render
return self.template.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 171, in render
return self._render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 163, in _render
return self.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 163, in _render
return self.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 163, in _render
return self.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 163, in _render
return self.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/defaulttags.py", line 209, in render
nodelist.append(node.render_annotated(context))
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/loader_tags.py", line 188, in render
return template.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 173, in render
return self._render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 163, in _render
return self.nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/defaulttags.py", line 209, in render
nodelist.append(node.render_annotated(context))
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/defaulttags.py", line 209, in render
nodelist.append(node.render_annotated(context))
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/defaulttags.py", line 309, in render
return nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/defaulttags.py", line 309, in render
return nodelist.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 937, in render
bit = node.render_annotated(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 904, in render_annotated
return self.render(context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 993, in render
return render_value_in_context(output, context)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/template/base.py", line 972, in render_value_in_context
value = str(value)
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/utils/html.py", line 397, in <lambda>
klass.__str__ = lambda self: mark_safe(klass_str(self))
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/forms/boundfield.py", line 33, in __str__
return self.as_widget()
File "/code/dmoj-virtenv3/lib/python3.5/site-packages/django/forms/boundfield.py", line 93, in as_widget
renderer=self.form.renderer,
TypeError: render() got an unexpected keyword argument 'renderer'
|
TypeError
|
def _preheat(schema_name: str) -> "GraphQLSchema":
"""
Loads the SDL and converts it to a GraphQLSchema instance before baking
each registered objects of this schema.
:param schema_name: name of the schema to treat
:type schema_name: str
:return: a pre-baked GraphQLSchema instance
:rtype: GraphQLSchema
"""
schema_info = SchemaRegistry.find_schema_info(schema_name)
sdl = schema_info["sdl"]
schema = schema_from_sdl(sdl, schema_name=schema_name)
schema_info["inst"] = schema
return schema
|
def _preheat(schema_name: str) -> "GraphQLSchema":
"""
Loads the SDL and converts it to a GraphQLSchema instance before baking
each registered objects of this schema.
:param schema_name: name of the schema to treat
:type schema_name: str
:return: a pre-baked GraphQLSchema instance
:rtype: GraphQLSchema
"""
schema_info = SchemaRegistry.find_schema_info(schema_name)
sdl = schema_info["sdl"]
schema = schema_from_sdl(sdl, schema_name=schema_name)
for object_ids in _SCHEMA_OBJECT_IDS:
for obj in schema_info.get(object_ids, {}).values():
obj.bake(schema)
schema_info["inst"] = schema
return schema
|
https://github.com/tartiflette/tartiflette/issues/292
|
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 348, in get_field_by_name
return self.type_definitions[parent_name].find_field(field_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/types/object.py", line 125, in find_field
return self.implemented_fields[name]
KeyError: 'world'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 54, in <module>
run()
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 52, in run
web.run_app(app, port=8090,)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 415, in run_app
reuse_port=reuse_port))
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 287, in _run_app
await runner.setup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 203, in setup
self._server = await self._make_server()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 302, in _make_server
await self._app.startup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_app.py", line 389, in startup
await self.on_startup.send(self)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/signals.py", line 34, in send
await receiver(*args, **kwargs) # type: ignore
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette_aiohttp/__init__.py", line 97, in _cook_on_startup
sdl=sdl, schema_name=schema_name, modules=modules
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/engine.py", line 250, in cook
schema_name, custom_default_resolver, custom_default_type_resolver
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 65, in bake
schema = SchemaBakery._preheat(schema_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 39, in _preheat
obj.bake(schema)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/resolver/resolver.py", line 67, in bake
field = schema.get_field_by_name(self.name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 351, in get_field_by_name
f"field `{name}` was not found in GraphQL schema."
tartiflette.types.exceptions.tartiflette.UnknownSchemaFieldResolver: field `Query.world` was not found in GraphQL schema.
|
KeyError
|
def _validate_non_empty_object(self) -> List[str]:
"""
Validates that object types implement at least one fields.
:return: a list of errors
:rtype: List[str]
"""
errors = []
for type_name, gql_type in self.type_definitions.items():
if isinstance(gql_type, GraphQLObjectType) and not [
field_name
for field_name in gql_type.implemented_fields
if not field_name.startswith("__")
]:
errors.append(f"Type < {type_name} > has no fields.")
return errors
|
def _validate_non_empty_object(self) -> List[str]:
"""
Validates that object types implement at least one fields.
:return: a list of errors
:rtype: List[str]
"""
errors = []
for type_name, gql_type in self.type_definitions.items():
if (
isinstance(gql_type, GraphQLObjectType)
and not gql_type.implemented_fields.values()
):
errors.append(f"Type < {type_name} > has no fields.")
return errors
|
https://github.com/tartiflette/tartiflette/issues/292
|
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 348, in get_field_by_name
return self.type_definitions[parent_name].find_field(field_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/types/object.py", line 125, in find_field
return self.implemented_fields[name]
KeyError: 'world'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 54, in <module>
run()
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 52, in run
web.run_app(app, port=8090,)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 415, in run_app
reuse_port=reuse_port))
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 287, in _run_app
await runner.setup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 203, in setup
self._server = await self._make_server()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 302, in _make_server
await self._app.startup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_app.py", line 389, in startup
await self.on_startup.send(self)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/signals.py", line 34, in send
await receiver(*args, **kwargs) # type: ignore
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette_aiohttp/__init__.py", line 97, in _cook_on_startup
sdl=sdl, schema_name=schema_name, modules=modules
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/engine.py", line 250, in cook
schema_name, custom_default_resolver, custom_default_type_resolver
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 65, in bake
schema = SchemaBakery._preheat(schema_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 39, in _preheat
obj.bake(schema)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/resolver/resolver.py", line 67, in bake
field = schema.get_field_by_name(self.name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 351, in get_field_by_name
f"field `{name}` was not found in GraphQL schema."
tartiflette.types.exceptions.tartiflette.UnknownSchemaFieldResolver: field `Query.world` was not found in GraphQL schema.
|
KeyError
|
async def bake(
self,
custom_default_resolver: Optional[Callable] = None,
custom_default_type_resolver: Optional[Callable] = None,
) -> None:
"""
Bake the final schema (it should not change after this) used for
execution.
:param custom_default_resolver: callable that will replace the builtin
default_resolver
:param custom_default_type_resolver: callable that will replace the
tartiflette `default_type_resolver` (will be called on abstract types
to deduct the type of a result)
:type custom_default_resolver: Optional[Callable]
:type custom_default_type_resolver: Optional[Callable]
"""
self.default_type_resolver = custom_default_type_resolver or default_type_resolver
self._inject_introspection_fields()
self._validate_extensions() # Validate this before bake
# TODO maybe a pre_bake/post_bake thing
try:
self._bake_extensions()
except Exception: # pylint: disable=broad-except
# Exceptions should be collected at validation time
pass
SchemaRegistry.bake_registered_objects(self)
try:
await self._bake_types(custom_default_resolver)
except Exception: # pylint: disable=broad-except
# Exceptions should be collected at validation time
pass
self._validate()
# Bake introspection attributes
self._operation_types = {
"query": self.type_definitions.get(self.query_operation_name),
"mutation": self.type_definitions.get(self.mutation_operation_name),
"subscription": self.type_definitions.get(self.subscription_operation_name),
}
self.queryType = self._operation_types["query"]
self.mutationType = self._operation_types["mutation"]
self.subscriptionType = self._operation_types["subscription"]
self.directives = list(self._directive_definitions.values())
for type_name, type_definition in self.type_definitions.items():
if not type_name.startswith("__"):
self.types.append(type_definition)
|
async def bake(
self,
custom_default_resolver: Optional[Callable] = None,
custom_default_type_resolver: Optional[Callable] = None,
) -> None:
"""
Bake the final schema (it should not change after this) used for
execution.
:param custom_default_resolver: callable that will replace the builtin
default_resolver
:param custom_default_type_resolver: callable that will replace the
tartiflette `default_type_resolver` (will be called on abstract types
to deduct the type of a result)
:type custom_default_resolver: Optional[Callable]
:type custom_default_type_resolver: Optional[Callable]
"""
self.default_type_resolver = custom_default_type_resolver or default_type_resolver
self._inject_introspection_fields()
self._validate_extensions() # Validate this before bake
# TODO maybe a pre_bake/post_bake thing
try:
self._bake_extensions()
await self._bake_types(custom_default_resolver)
except Exception: # pylint: disable=broad-except
# Exceptions should be collected at validation time
pass
self._validate()
# Bake introspection attributes
self._operation_types = {
"query": self.type_definitions.get(self.query_operation_name),
"mutation": self.type_definitions.get(self.mutation_operation_name),
"subscription": self.type_definitions.get(self.subscription_operation_name),
}
self.queryType = self._operation_types["query"]
self.mutationType = self._operation_types["mutation"]
self.subscriptionType = self._operation_types["subscription"]
self.directives = list(self._directive_definitions.values())
for type_name, type_definition in self.type_definitions.items():
if not type_name.startswith("__"):
self.types.append(type_definition)
|
https://github.com/tartiflette/tartiflette/issues/292
|
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 348, in get_field_by_name
return self.type_definitions[parent_name].find_field(field_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/types/object.py", line 125, in find_field
return self.implemented_fields[name]
KeyError: 'world'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 54, in <module>
run()
File "/Users/morse/Documents/GitHub/play-tartiflette/src/__main__.py", line 52, in run
web.run_app(app, port=8090,)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 415, in run_app
reuse_port=reuse_port))
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web.py", line 287, in _run_app
await runner.setup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 203, in setup
self._server = await self._make_server()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_runner.py", line 302, in _make_server
await self._app.startup()
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/web_app.py", line 389, in startup
await self.on_startup.send(self)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/aiohttp/signals.py", line 34, in send
await receiver(*args, **kwargs) # type: ignore
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette_aiohttp/__init__.py", line 97, in _cook_on_startup
sdl=sdl, schema_name=schema_name, modules=modules
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/engine.py", line 250, in cook
schema_name, custom_default_resolver, custom_default_type_resolver
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 65, in bake
schema = SchemaBakery._preheat(schema_name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 39, in _preheat
obj.bake(schema)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/resolver/resolver.py", line 67, in bake
field = schema.get_field_by_name(self.name)
File "/Users/morse/.pyenv/versions/3.7.2/lib/python3.7/site-packages/tartiflette/schema/schema.py", line 351, in get_field_by_name
f"field `{name}` was not found in GraphQL schema."
tartiflette.types.exceptions.tartiflette.UnknownSchemaFieldResolver: field `Query.world` was not found in GraphQL schema.
|
KeyError
|
def register_sdl(
schema_name: str,
sdl: Union[str, List[str], GraphQLSchema],
exclude_builtins_scalars: Optional[List[str]] = None,
) -> None:
SchemaRegistry._schemas.setdefault(schema_name, {})
# Maybe read them one and use them a lot :p
sdl_files_list = _get_builtins_sdl_files(exclude_builtins_scalars)
full_sdl = ""
if isinstance(sdl, list):
sdl_files_list += sdl
elif os.path.isfile(sdl):
sdl_files_list.append(sdl)
elif os.path.isdir(sdl):
sdl_files_list += glob(os.path.join(sdl, "**/*.sdl"), recursive=True) + glob(
os.path.join(sdl, "**/*.graphql"), recursive=True
)
else:
full_sdl = sdl
# Convert SDL files into big schema and parse it
for filepath in sdl_files_list:
with open(filepath, "r") as sdl_file:
full_sdl += "\n" + sdl_file.read()
SchemaRegistry._schemas[schema_name]["sdl"] = full_sdl
|
def register_sdl(
schema_name: str,
sdl: Union[str, List[str], GraphQLSchema],
exclude_builtins_scalars: Optional[List[str]] = None,
) -> None:
SchemaRegistry._schemas.setdefault(schema_name, {})
# Maybe read them one and use them a lot :p
sdl_files_list = _get_builtins_sdl_files(exclude_builtins_scalars)
full_sdl = ""
if isinstance(sdl, list):
sdl_files_list += sdl
elif os.path.isfile(sdl):
sdl_files_list.append(sdl)
elif os.path.isdir(sdl):
sdl_files_list += glob(os.path.join(sdl, "**/*.sdl"), recursive=True) + glob(
os.path.join(sdl, "**/*.graphql"), recursive=True
)
else:
full_sdl = sdl
# Convert SDL files into big schema and parse it
for filepath in sdl_files_list:
with open(filepath, "r") as sdl_file:
full_sdl += " " + sdl_file.read().replace("\n", " ")
SchemaRegistry._schemas[schema_name]["sdl"] = full_sdl
|
https://github.com/tartiflette/tartiflette/issues/201
|
Traceback (most recent call last):
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/bin/manifest-graphql", line 11, in <module>
load_entry_point('entry-wizard', 'console_scripts', 'manifest-graphql')()
File "/home/bkc/src/SFI/SFI.Projects.EntryWizard/entry_wizard/manifest_system/scripts/graphql_server.py", line 12, in main
result = run()
File "/home/bkc/src/SFI/SFI.Projects.EntryWizard/entry_wizard/graphql/server/app.py", line 59, in run
engine = get_engine(schema_folder=schema_folder)
File "/home/bkc/src/SFI/SFI.Projects.EntryWizard/entry_wizard/graphql/server/app.py", line 27, in get_engine
"entry_wizard.graphql.resolvers.vendor",
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/tartiflette/engine.py", line 57, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 53, in parse_graphql_sdl_to_ast
return gqlsdl(sdl)
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/lark/lark.py", line 223, in parse
return self.parser.parse(text)
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/lark/parser_frontends.py", line 38, in parse
return self.parser.parse(token_stream, *[sps] if sps is not NotImplemented else [])
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/lark/parsers/lalr_parser.py", line 82, in parse
_action, arg = get_action('$END')
File "/home/bkc/PythonEnvironments/SFI.Projects.EntryWizard/lib/python3.7/site-packages/lark/parsers/lalr_parser.py", line 49, in get_action
raise UnexpectedToken(token, expected, state=state) # TODO filter out rules from expected
lark.exceptions.UnexpectedToken: Unexpected token Token(BANG, '!') at line 1, column 3985.
Expected: SCHEMA, IDENT, TYPE, UNION, SCALAR, INTERFACE, MUTATION, RPAR, ENUM, STRING, QUERY, INPUT, LONG_STRING, TYPE_SYSTEM_DIRECTIVE_LOCATION, IMPLEMENTS, AT, EQUAL, DIRECTIVE, SUBSCRIPTION, RBRACE, EXTEND, ON, RSQB
|
lark.exceptions.UnexpectedToken
|
def __call__(self, implementation):
if not iscoroutinefunction(implementation.on_field_execution):
raise NonAwaitableDirective("%s is not awaitable" % repr(implementation))
SchemaRegistry.register_directive(self._schema_name, self)
self._implementation = implementation
return implementation
|
def __call__(self, implementation):
if not iscoroutinefunction(implementation.on_execution):
raise NonAwaitableDirective("%s is not awaitable" % repr(implementation))
SchemaRegistry.register_directive(self._schema_name, self)
self._implementation = implementation
return implementation
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
async def create_source_event_stream(
self,
execution_ctx: ExecutionContext,
request_ctx: Optional[Dict[str, Any]],
parent_result: Optional[Any] = None,
):
if not self.subscribe:
raise GraphQLError(
"Can't execute a subscription query on a field which doesn't "
"provide a source event stream with < @Subscription >."
)
info = Info(
query_field=self,
schema_field=self.field_executor.schema_field,
schema=self.schema,
path=self.path,
location=self.location,
execution_ctx=execution_ctx,
)
return self.subscribe(
parent_result,
await coerce_arguments(
self.field_executor.schema_field.arguments,
self.arguments,
request_ctx,
info,
),
request_ctx,
info,
)
|
async def create_source_event_stream(
self,
execution_ctx: ExecutionContext,
request_ctx: Optional[Dict[str, Any]],
parent_result: Optional[Any] = None,
):
if not self.subscribe:
raise GraphQLError(
"Can't execute a subscription query on a field which doesn't "
"provide a source event stream with < @Subscription >."
)
# TODO: refactor this to have re-usable code with `_ResolverExecutor`
arguments = self.field_executor.schema_field.get_arguments_default_values()
arguments.update(
{argument.name: argument.value for argument in self.arguments.values()}
)
return self.subscribe(
parent_result,
arguments,
request_ctx,
Info(
query_field=self,
schema_field=self.field_executor.schema_field,
schema=self.schema,
path=self.path,
location=self.location,
execution_ctx=execution_ctx,
),
)
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.