after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def orpca(
X,
rank,
fast=False,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None,
):
"""
This function performs Online Robust PCA
with missing or corrupted data.
Parameters
----------
X : {numpy array, iterator}
[nfeatures x nsamples] matrix of observations
or an iterator that yields samples, each with nfeatures elements.
rank : int
The model dimensionality.
lambda1 : {None, float}
Nuclear norm regularization parameter.
If None, set to 1 / sqrt(nsamples)
lambda2 : {None, float}
Sparse error regularization parameter.
If None, set to 1 / sqrt(nsamples)
method : {None, 'CF', 'BCD', 'SGD', 'MomentumSGD'}
'CF' - Closed-form solver
'BCD' - Block-coordinate descent
'SGD' - Stochastic gradient descent
'MomentumSGD' - Stochastic gradient descent with momentum
If None, set to 'CF'
learning_rate : {None, float}
Learning rate for the stochastic gradient
descent algorithm
If None, set to 1
init : {None, 'qr', 'rand', np.ndarray}
'qr' - QR-based initialization
'rand' - Random initialization
np.ndarray if the shape [nfeatures x rank].
If None, set to 'qr'
training_samples : {None, integer}
Specifies the number of training samples to use in
the 'qr' initialization
If None, set to 10
momentum : {None, float}
Momentum parameter for 'MomentumSGD' method, should be
a float between 0 and 1.
If None, set to 0.5
Returns
-------
Xhat : numpy array
is the [nfeatures x nsamples] low-rank matrix
Ehat : numpy array
is the [nfeatures x nsamples] sparse error matrix
U, S, V : numpy arrays
are the results of an SVD on Xhat
Notes
-----
The ORPCA code is based on a transcription of MATLAB code obtained from
the following research paper:
Jiashi Feng, Huan Xu and Shuicheng Yuan, "Online Robust PCA via
Stochastic Optimization", Advances in Neural Information Processing
Systems 26, (2013), pp. 404-412.
It has been updated to include a new initialization method based
on a QR decomposition of the first n "training" samples of the data.
A stochastic gradient descent (SGD) solver is also implemented,
along with a MomentumSGD solver for improved convergence and robustness
with respect to local minima. More information about the gradient descent
methods and choosing appropriate parameters can be found here:
Sebastian Ruder, "An overview of gradient descent optimization
algorithms", arXiv:1609.04747, (2016), http://arxiv.org/abs/1609.04747.
"""
X = X.T
_orpca = ORPCA(
rank,
fast=fast,
lambda1=lambda1,
lambda2=lambda2,
method=method,
learning_rate=learning_rate,
init=init,
training_samples=training_samples,
momentum=momentum,
)
_orpca._setup(X, normalize=True)
_orpca.fit(X)
Xhat, Ehat, U, S, V = _orpca.finish()
return Xhat.T, Ehat, U, S, V
|
def orpca(
X,
rank,
fast=False,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None,
):
"""
This function performs Online Robust PCA
with missing or corrupted data.
Parameters
----------
X : {numpy array, iterator}
[nfeatures x nsamples] matrix of observations
or an iterator that yields samples, each with nfeatures elements.
rank : int
The model dimensionality.
lambda1 : {None, float}
Nuclear norm regularization parameter.
If None, set to 1 / sqrt(nsamples)
lambda2 : {None, float}
Sparse error regularization parameter.
If None, set to 1 / sqrt(nsamples)
method : {None, 'CF', 'BCD', 'SGD', 'MomentumSGD'}
'CF' - Closed-form solver
'BCD' - Block-coordinate descent
'SGD' - Stochastic gradient descent
'MomentumSGD' - Stochastic gradient descent with momentum
If None, set to 'CF'
learning_rate : {None, float}
Learning rate for the stochastic gradient
descent algorithm
If None, set to 1
init : {None, 'qr', 'rand', np.ndarray}
'qr' - QR-based initialization
'rand' - Random initialization
np.ndarray if the shape [nfeatures x rank].
If None, set to 'qr'
training_samples : {None, integer}
Specifies the number of training samples to use in
the 'qr' initialization
If None, set to 10
momentum : {None, float}
Momentum parameter for 'MomentumSGD' method, should be
a float between 0 and 1.
If None, set to 0.5
Returns
-------
Xhat : numpy array
is the [nfeatures x nsamples] low-rank matrix
Ehat : numpy array
is the [nfeatures x nsamples] sparse error matrix
U, S, V : numpy arrays
are the results of an SVD on Xhat
Notes
-----
The ORPCA code is based on a transcription of MATLAB code obtained from
the following research paper:
Jiashi Feng, Huan Xu and Shuicheng Yuan, "Online Robust PCA via
Stochastic Optimization", Advances in Neural Information Processing
Systems 26, (2013), pp. 404-412.
It has been updated to include a new initialization method based
on a QR decomposition of the first n "training" samples of the data.
A stochastic gradient descent (SGD) solver is also implemented,
along with a MomentumSGD solver for improved convergence and robustness
with respect to local minima. More information about the gradient descent
methods and choosing appropriate parameters can be found here:
Sebastian Ruder, "An overview of gradient descent optimization
algorithms", arXiv:1609.04747, (2016), http://arxiv.org/abs/1609.04747.
"""
_orpca = ORPCA(
rank,
fast=fast,
lambda1=lambda1,
lambda2=lambda2,
method=method,
learning_rate=learning_rate,
init=init,
training_samples=training_samples,
momentum=momentum,
)
_orpca._setup(X, normalize=True)
_orpca.fit(X)
return _orpca.finish()
|
https://github.com/hyperspy/hyperspy/issues/1557
|
import hyperspy.api as hs
from hyperspy.tests.mva.test_rpca import TestORPCA
test = TestORPCA()
test.setup_method("ml") # why the setup_methods all take a "method" argument, I don't know
test.A.shape
(256, 1024)
s = hs.signals.Signal1D(test.A)
s.data[s.data < 0] = 0
s.decomposition(True, algorithm="ORPCA", output_dimension=5)
WARNING:hyperspy.learn.rpca:No method specified. Defaulting to 'CF' (closed-form solver)
WARNING:hyperspy.learn.rpca:No initialization specified. Defaulting to 'qr' initialization
WARNING:hyperspy.learn.rpca:Number of training samples for 'qr' method not specified. Defaulting to 10 samples
WARNING:hyperspy.learn.rpca:Nuclear norm regularization parameter is set to default: 1 / sqrt(nfeatures)
WARNING:hyperspy.learn.rpca:Sparse regularization parameter is set to default: 1 / sqrt(nfeatures)
/Users/thomas/miniconda3/envs/hs/lib/python3.5/site-packages/tqdm/_tqdm.py:65: DeprecationWarning: sys.getcheckinterval() and sys.setcheckinterval() are deprecated. Use sys.setswitchinterval() instead.
sys.setcheckinterval(100)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-38-1e32f72a728b> in <module>()
6 s = hs.signals.Signal1D(A.A)
7 s.data[s.data < 0] = 0
----> 8 s.decomposition(True, algorithm="ORPCA", output_dimension=5)
/Users/thomas/Dropbox/0_Git/hyperspy/hyperspy/learn/mva.py in decomposition(self, normalize_poissonian_noise, algorithm, output_dimension, centre, auto_transpose, navigation_mask, signal_mask, var_array, var_func, polyfit, reproject, return_info, **kwargs)
434 # Rescale the results if the noise was normalized
435 if normalize_poissonian_noise is True:
--> 436 target.factors[:] *= self._root_bH.T
437 target.loadings[:] *= self._root_aG
438
ValueError: operands could not be broadcast together with shapes (256,5) (1024,1) (256,5)
|
ValueError
|
def fft_correlation(in1, in2, normalize=False):
"""Correlation of two N-dimensional arrays using FFT.
Adapted from scipy's fftconvolve.
Parameters
----------
in1, in2 : array
normalize: bool
If True performs phase correlation
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
size = s1 + s2 - 1
# Use 2**n-sized FFT
fsize = (2 ** np.ceil(np.log2(size))).astype("int")
IN1 = fftn(in1, fsize)
IN1 *= fftn(in2, fsize).conjugate()
if normalize is True:
ret = ifftn(np.nan_to_num(IN1 / np.absolute(IN1))).real.copy()
else:
ret = ifftn(IN1).real.copy()
del IN1
return ret
|
def fft_correlation(in1, in2, normalize=False):
"""Correlation of two N-dimensional arrays using FFT.
Adapted from scipy's fftconvolve.
Parameters
----------
in1, in2 : array
normalize: bool
If True performs phase correlation
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
size = s1 + s2 - 1
# Use 2**n-sized FFT
fsize = 2 ** np.ceil(np.log2(size))
IN1 = fftn(in1, fsize)
IN1 *= fftn(in2, fsize).conjugate()
if normalize is True:
ret = ifftn(np.nan_to_num(IN1 / np.absolute(IN1))).real.copy()
else:
ret = ifftn(IN1).real.copy()
del IN1
return ret
|
https://github.com/hyperspy/hyperspy/issues/1411
|
Traceback (most recent call last):
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-1876e2251fb7>", line 1, in <module>
roi(dat)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 1138, in __call__
order=order)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 1074, in profile_line
linewidth=linewidth)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 998, in _line_profile_coordinates
data[0, :, :] = np.tile(line_col, [linewidth, 1]).T
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/numpy/lib/shape_base.py", line 881, in tile
return c.reshape(shape_out)
TypeError: 'numpy.float64' object cannot be interpreted as an integer
|
TypeError
|
def _line_profile_coordinates(src, dst, linewidth=1):
"""Return the coordinates of the profile of an image along a scan line.
Parameters
----------
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
Returns
-------
coords : array, shape (2, N, C), float
The coordinates of the profile along the scan line. The length of
the profile is the ceil of the computed length of the scan line.
Notes
-----
This is a utility method meant to be used internally by skimage
functions. The destination point is included in the profile, in
contrast to standard numpy indexing.
"""
src_row, src_col = src = np.asarray(src, dtype=float)
dst_row, dst_col = dst = np.asarray(dst, dtype=float)
d_row, d_col = dst - src
theta = np.arctan2(d_row, d_col)
length = np.ceil(np.hypot(d_row, d_col) + 1).astype(int)
# we add one above because we include the last point in the profile
# (in contrast to standard numpy indexing)
line_col = np.linspace(src_col, dst_col, length)
line_row = np.linspace(src_row, dst_row, length)
data = np.zeros((2, length, int(linewidth)))
data[0, :, :] = np.tile(line_col, [int(linewidth), 1]).T
data[1, :, :] = np.tile(line_row, [int(linewidth), 1]).T
if linewidth != 1:
# we subtract 1 from linewidth to change from pixel-counting
# (make this line 3 pixels wide) to point distances (the
# distance between pixel centers)
col_width = (linewidth - 1) * np.sin(-theta) / 2
row_width = (linewidth - 1) * np.cos(theta) / 2
row_off = np.linspace(-row_width, row_width, linewidth)
col_off = np.linspace(-col_width, col_width, linewidth)
data[0, :, :] += np.tile(col_off, [length, 1])
data[1, :, :] += np.tile(row_off, [length, 1])
return data
|
def _line_profile_coordinates(src, dst, linewidth=1):
"""Return the coordinates of the profile of an image along a scan line.
Parameters
----------
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
Returns
-------
coords : array, shape (2, N, C), float
The coordinates of the profile along the scan line. The length of
the profile is the ceil of the computed length of the scan line.
Notes
-----
This is a utility method meant to be used internally by skimage
functions. The destination point is included in the profile, in
contrast to standard numpy indexing.
"""
src_row, src_col = src = np.asarray(src, dtype=float)
dst_row, dst_col = dst = np.asarray(dst, dtype=float)
d_row, d_col = dst - src
theta = np.arctan2(d_row, d_col)
length = np.ceil(np.hypot(d_row, d_col) + 1).astype(int)
# we add one above because we include the last point in the profile
# (in contrast to standard numpy indexing)
line_col = np.linspace(src_col, dst_col, length)
line_row = np.linspace(src_row, dst_row, length)
data = np.zeros((2, length, int(linewidth)))
data[0, :, :] = np.tile(line_col, [linewidth, 1]).T
data[1, :, :] = np.tile(line_row, [linewidth, 1]).T
if linewidth != 1:
# we subtract 1 from linewidth to change from pixel-counting
# (make this line 3 pixels wide) to point distances (the
# distance between pixel centers)
col_width = (linewidth - 1) * np.sin(-theta) / 2
row_width = (linewidth - 1) * np.cos(theta) / 2
row_off = np.linspace(-row_width, row_width, linewidth)
col_off = np.linspace(-col_width, col_width, linewidth)
data[0, :, :] += np.tile(col_off, [length, 1])
data[1, :, :] += np.tile(row_off, [length, 1])
return data
|
https://github.com/hyperspy/hyperspy/issues/1411
|
Traceback (most recent call last):
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-1876e2251fb7>", line 1, in <module>
roi(dat)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 1138, in __call__
order=order)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 1074, in profile_line
linewidth=linewidth)
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/hyperspy/roi.py", line 998, in _line_profile_coordinates
data[0, :, :] = np.tile(line_col, [linewidth, 1]).T
File "/home/bm424/Documents/phd/dev/test/venv/lib/python3.5/site-packages/numpy/lib/shape_base.py", line 881, in tile
return c.reshape(shape_out)
TypeError: 'numpy.float64' object cannot be interpreted as an integer
|
TypeError
|
def __init__(
self,
signal1D,
auto_background=True,
auto_add_edges=True,
ll=None,
GOS=None,
dictionary=None,
):
Model1D.__init__(self, signal1D)
self.signal1D = signal1D
self._suspend_auto_fine_structure_width = False
self.convolved = False
self.low_loss = ll
self.GOS = GOS
self.edges = []
self._background_components = []
if dictionary is not None:
auto_background = False
auto_add_edges = False
self._load_dictionary(dictionary)
if auto_background is True:
background = PowerLaw()
self.append(background)
if self.signal.subshells and auto_add_edges is True:
self._add_edges_from_subshells_names()
|
def __init__(
self,
signal1D,
auto_background=True,
auto_add_edges=True,
ll=None,
GOS=None,
dictionary=None,
):
Model1D.__init__(self, signal1D)
self._suspend_auto_fine_structure_width = False
self.convolved = False
self.low_loss = ll
self.GOS = GOS
self.edges = []
self._background_components = []
if dictionary is not None:
auto_background = False
auto_add_edges = False
self._load_dictionary(dictionary)
if auto_background is True:
background = PowerLaw()
self.append(background)
if self.signal.subshells and auto_add_edges is True:
self._add_edges_from_subshells_names()
|
https://github.com/hyperspy/hyperspy/issues/1427
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-21-23a6348573b8> in <module>()
----> 1 m = s.create_model()
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/_signals/eels.py in create_model(self, ll, auto_background, auto_add_edges, GOS, dictionary)
1252 auto_background=auto_background,
1253 auto_add_edges=auto_add_edges,
-> 1254 GOS=GOS,
1255 dictionary=dictionary)
1256 return model
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in __init__(self, signal1D, auto_background, auto_add_edges, ll, GOS, dictionary)
91
92 if self.signal.subshells and auto_add_edges is True:
---> 93 self._add_edges_from_subshells_names()
94
95 @property
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in _add_edges_from_subshells_names(self, e_shells)
190 # we reassing the value of self.GOS
191 self.GOS = master_edge.GOS._name
--> 192 self.append(master_edge)
193 element = master_edge.element
194 while len(e_shells) > 0:
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in append(self, component)
111 super(EELSModel, self).append(component)
112 if isinstance(component, EELSCLEdge):
--> 113 tem = self.signal.metadata.Acquisition_instrument.TEM
114 component.set_microscope_parameters(
115 E0=tem.beam_energy,
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/misc/utils.py in __getattribute__(self, name)
333 name = name.decode()
334 name = slugify(name, valid_variable_name=True)
--> 335 item = super(DictionaryTreeBrowser, self).__getattribute__(name)
336 if isinstance(item, dict) and '_dtb_value_' in item and "key" in item:
337 return item['_dtb_value_']
AttributeError: 'DictionaryTreeBrowser' object has no attribute 'Acquisition_instrument'
|
AttributeError
|
def signal1D(self, value):
if isinstance(value, EELSSpectrum):
self._signal = value
if self.signal._are_microscope_parameters_missing():
raise ValueError(
"The required microscope parameters are not defined in "
"the EELS spectrum signal metadata. Use "
"``set_microscope_parameters`` to set them."
)
else:
raise ValueError(
"This attribute can only contain an EELSSpectrum "
"but an object of type %s was provided" % str(type(value))
)
|
def signal1D(self, value):
if isinstance(value, EELSSpectrum):
self._signal = value
self.signal._are_microscope_parameters_missing()
else:
raise ValueError(
"This attribute can only contain an EELSSpectrum "
"but an object of type %s was provided" % str(type(value))
)
|
https://github.com/hyperspy/hyperspy/issues/1427
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-21-23a6348573b8> in <module>()
----> 1 m = s.create_model()
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/_signals/eels.py in create_model(self, ll, auto_background, auto_add_edges, GOS, dictionary)
1252 auto_background=auto_background,
1253 auto_add_edges=auto_add_edges,
-> 1254 GOS=GOS,
1255 dictionary=dictionary)
1256 return model
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in __init__(self, signal1D, auto_background, auto_add_edges, ll, GOS, dictionary)
91
92 if self.signal.subshells and auto_add_edges is True:
---> 93 self._add_edges_from_subshells_names()
94
95 @property
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in _add_edges_from_subshells_names(self, e_shells)
190 # we reassing the value of self.GOS
191 self.GOS = master_edge.GOS._name
--> 192 self.append(master_edge)
193 element = master_edge.element
194 while len(e_shells) > 0:
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/models/eelsmodel.py in append(self, component)
111 super(EELSModel, self).append(component)
112 if isinstance(component, EELSCLEdge):
--> 113 tem = self.signal.metadata.Acquisition_instrument.TEM
114 component.set_microscope_parameters(
115 E0=tem.beam_energy,
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/misc/utils.py in __getattribute__(self, name)
333 name = name.decode()
334 name = slugify(name, valid_variable_name=True)
--> 335 item = super(DictionaryTreeBrowser, self).__getattribute__(name)
336 if isinstance(item, dict) and '_dtb_value_' in item and "key" in item:
337 return item['_dtb_value_']
AttributeError: 'DictionaryTreeBrowser' object has no attribute 'Acquisition_instrument'
|
AttributeError
|
def _get_microscope_name(self, ImageTags):
try:
if ImageTags.Session_Info.Microscope != "[]":
return ImageTags.Session_Info.Microscope
except AttributeError:
if "Name" in ImageTags["Microscope_Info"].keys():
return ImageTags.Microscope_Info.Name
|
def _get_microscope_name(self, ImageTags):
try:
if ImageTags.Session_Info.Microscope != "[]":
return ImageTags.Session_Info.Microscope
except AttributeError:
return ImageTags.Microscope_Info.Name
|
https://github.com/hyperspy/hyperspy/issues/1293
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-98fa7e0685f8> in <module>()
----> 1 wedge = hs.load()
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, mmap, mmap_dir, **kwds)
219 objects = [load_single_file(filename,
220 **kwds)
--> 221 for filename in filenames]
222
223 if hyperspy.defaults_parser.preferences.Plot.plot_on_load:
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in <listcomp>(.0)
219 objects = [load_single_file(filename,
220 **kwds)
--> 221 for filename in filenames]
222
223 if hyperspy.defaults_parser.preferences.Plot.plot_on_load:
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
263 reader=reader,
264 signal_type=signal_type,
--> 265 **kwds)
266
267
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
271 **kwds):
272 file_data_list = reader.file_reader(filename,
--> 273 **kwds)
274 objects = []
275
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order)
991 'original_metadata': dm.tags_dict,
992 'post_process': post_process,
--> 993 'mapping': image.get_mapping(),
994 })
995
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io_plugins\digital_micrograph.py in get_mapping(self)
837 if "Microscope_Info" in self.imdict.ImageTags.keys():
838 is_TEM = (
--> 839 'TEM' == self.imdict.ImageTags.Microscope_Info.Illumination_Mode)
840 is_diffraction = (
841 'DIFFRACTION' == self.imdict.ImageTags.Microscope_Info.Imaging_Mode)
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\misc\utils.py in __getattribute__(self, name)
333 name = name.decode()
334 name = slugify(name, valid_variable_name=True)
--> 335 item = super(DictionaryTreeBrowser, self).__getattribute__(name)
336 if isinstance(item, dict) and '_dtb_value_' in item and "key" in item:
337 return item['_dtb_value_']
AttributeError: 'DictionaryTreeBrowser' object has no attribute 'Illumination_Mode'
|
AttributeError
|
def get_mapping(self):
is_scanning = "DigiScan" in self.imdict.ImageTags.keys()
mapping = {
"ImageList.TagGroup0.ImageTags.DataBar.Acquisition Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.DataBar.Acquisition Time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Voltage": (
"Acquisition_instrument.TEM.beam_energy",
lambda x: x / 1e3,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Stage Position.Stage Alpha": (
"Acquisition_instrument.TEM.tilt_stage",
None,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Illumination Mode": (
"Acquisition_instrument.TEM.acquisition_mode",
self._get_mode,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Probe Current (nA)": (
"Acquisition_instrument.TEM.beam_current",
None,
),
"ImageList.TagGroup0.ImageTags.Session Info.Operator": (
"General.authors",
self._parse_string,
),
"ImageList.TagGroup0.ImageTags.Session Info.Specimen": (
"Sample.description",
self._parse_string,
),
}
if "Microscope_Info" in self.imdict.ImageTags.keys():
is_TEM = is_diffraction = None
if "Illumination_Mode" in self.imdict.ImageTags["Microscope_Info"].keys():
is_TEM = "TEM" == self.imdict.ImageTags.Microscope_Info.Illumination_Mode
if "Imaging_Mode" in self.imdict.ImageTags["Microscope_Info"].keys():
is_diffraction = (
"DIFFRACTION" == self.imdict.ImageTags.Microscope_Info.Imaging_Mode
)
if is_TEM:
if is_diffraction:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.camera_length",
None,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.magnification",
None,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.STEM Camera Length": (
"Acquisition_instrument.TEM.camera_length",
None,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.magnification",
None,
),
}
)
mapping.update(
{
"ImageList.TagGroup0.ImageTags": (
"Acquisition_instrument.TEM.microscope",
self._get_microscope_name,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Units": (
"Signal.quantity",
self._get_quantity,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Scale": (
"Signal.Noise_properties.Variance_linear_model.gain_factor",
None,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Origin": (
"Signal.Noise_properties.Variance_linear_model.gain_offset",
None,
),
}
)
if self.signal_type == "EELS":
if is_scanning:
mapped_attribute = "dwell_time"
else:
mapped_attribute = "exposure"
mapping.update(
{
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Start time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.EELS.Experimental Conditions."
+ "Collection semi-angle (mrad)": (
"Acquisition_instrument.TEM.Detector.EELS.collection_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Experimental Conditions."
+ "Convergence semi-angle (mrad)": (
"Acquisition_instrument.TEM.convergence_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Integration time (s)": (
"Acquisition_instrument.TEM.Detector.EELS.%s" % mapped_attribute,
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Number_of_frames": (
"Acquisition_instrument.TEM.Detector.EELS.frame_number",
None,
),
"ImageList.TagGroup0.ImageTags.EELS_Spectrometer.Aperture_label": (
"Acquisition_instrument.TEM.Detector.EELS.aperture_size",
lambda string: float(string.replace(" mm", "")),
),
"ImageList.TagGroup0.ImageTags.EELS Spectrometer.Instrument name": (
"Acquisition_instrument.TEM.Detector.EELS.spectrometer",
None,
),
}
)
elif self.signal_type == "EDS_TEM":
mapping.update(
{
"ImageList.TagGroup0.ImageTags.EDS.Acquisition.Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.EDS.Acquisition.Start time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.EDS.Detector_Info.Azimuthal_angle": (
"Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Detector_Info.Elevation_angle": (
"Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Solid_angle": (
"Acquisition_instrument.TEM.Detector.EDS.solid_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Live_time": (
"Acquisition_instrument.TEM.Detector.EDS.live_time",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Real_time": (
"Acquisition_instrument.TEM.Detector.EDS.real_time",
None,
),
}
)
elif "DigiScan" in self.imdict.ImageTags.keys():
mapping.update(
{
"ImageList.TagGroup0.ImageTags.DigiScan.Sample Time": (
"Acquisition_instrument.TEM.dwell_time",
lambda x: x / 1e6,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Acquisition.Parameters.Detector."
+ "exposure_s": ("Acquisition_instrument.TEM.exposure_time", None),
}
)
return mapping
|
def get_mapping(self):
is_scanning = "DigiScan" in self.imdict.ImageTags.keys()
mapping = {
"ImageList.TagGroup0.ImageTags.DataBar.Acquisition Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.DataBar.Acquisition Time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Voltage": (
"Acquisition_instrument.TEM.beam_energy",
lambda x: x / 1e3,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Stage Position.Stage Alpha": (
"Acquisition_instrument.TEM.tilt_stage",
None,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Illumination Mode": (
"Acquisition_instrument.TEM.acquisition_mode",
self._get_mode,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Probe Current (nA)": (
"Acquisition_instrument.TEM.beam_current",
None,
),
"ImageList.TagGroup0.ImageTags.Session Info.Operator": (
"General.authors",
self._parse_string,
),
"ImageList.TagGroup0.ImageTags.Session Info.Specimen": (
"Sample.description",
self._parse_string,
),
}
if "Microscope_Info" in self.imdict.ImageTags.keys():
is_TEM = "TEM" == self.imdict.ImageTags.Microscope_Info.Illumination_Mode
is_diffraction = (
"DIFFRACTION" == self.imdict.ImageTags.Microscope_Info.Imaging_Mode
)
if is_TEM:
if is_diffraction:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.camera_length",
None,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.magnification",
None,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Microscope Info.STEM Camera Length": (
"Acquisition_instrument.TEM.camera_length",
None,
),
"ImageList.TagGroup0.ImageTags.Microscope Info.Indicated Magnification": (
"Acquisition_instrument.TEM.magnification",
None,
),
}
)
mapping.update(
{
"ImageList.TagGroup0.ImageTags": (
"Acquisition_instrument.TEM.microscope",
self._get_microscope_name,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Units": (
"Signal.quantity",
self._get_quantity,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Scale": (
"Signal.Noise_properties.Variance_linear_model.gain_factor",
None,
),
"ImageList.TagGroup0.ImageData.Calibrations.Brightness.Origin": (
"Signal.Noise_properties.Variance_linear_model.gain_offset",
None,
),
}
)
if self.signal_type == "EELS":
if is_scanning:
mapped_attribute = "dwell_time"
else:
mapped_attribute = "exposure"
mapping.update(
{
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Start time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.EELS.Experimental Conditions."
+ "Collection semi-angle (mrad)": (
"Acquisition_instrument.TEM.Detector.EELS.collection_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Experimental Conditions."
+ "Convergence semi-angle (mrad)": (
"Acquisition_instrument.TEM.convergence_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Integration time (s)": (
"Acquisition_instrument.TEM.Detector.EELS.%s" % mapped_attribute,
None,
),
"ImageList.TagGroup0.ImageTags.EELS.Acquisition.Number_of_frames": (
"Acquisition_instrument.TEM.Detector.EELS.frame_number",
None,
),
"ImageList.TagGroup0.ImageTags.EELS_Spectrometer.Aperture_label": (
"Acquisition_instrument.TEM.Detector.EELS.aperture_size",
lambda string: float(string.replace(" mm", "")),
),
"ImageList.TagGroup0.ImageTags.EELS Spectrometer.Instrument name": (
"Acquisition_instrument.TEM.Detector.EELS.spectrometer",
None,
),
}
)
elif self.signal_type == "EDS_TEM":
mapping.update(
{
"ImageList.TagGroup0.ImageTags.EDS.Acquisition.Date": (
"General.date",
self._get_date,
),
"ImageList.TagGroup0.ImageTags.EDS.Acquisition.Start time": (
"General.time",
self._get_time,
),
"ImageList.TagGroup0.ImageTags.EDS.Detector_Info.Azimuthal_angle": (
"Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Detector_Info.Elevation_angle": (
"Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Solid_angle": (
"Acquisition_instrument.TEM.Detector.EDS.solid_angle",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Live_time": (
"Acquisition_instrument.TEM.Detector.EDS.live_time",
None,
),
"ImageList.TagGroup0.ImageTags.EDS.Real_time": (
"Acquisition_instrument.TEM.Detector.EDS.real_time",
None,
),
}
)
elif "DigiScan" in self.imdict.ImageTags.keys():
mapping.update(
{
"ImageList.TagGroup0.ImageTags.DigiScan.Sample Time": (
"Acquisition_instrument.TEM.dwell_time",
lambda x: x / 1e6,
),
}
)
else:
mapping.update(
{
"ImageList.TagGroup0.ImageTags.Acquisition.Parameters.Detector."
+ "exposure_s": ("Acquisition_instrument.TEM.exposure_time", None),
}
)
return mapping
|
https://github.com/hyperspy/hyperspy/issues/1293
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-98fa7e0685f8> in <module>()
----> 1 wedge = hs.load()
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, mmap, mmap_dir, **kwds)
219 objects = [load_single_file(filename,
220 **kwds)
--> 221 for filename in filenames]
222
223 if hyperspy.defaults_parser.preferences.Plot.plot_on_load:
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in <listcomp>(.0)
219 objects = [load_single_file(filename,
220 **kwds)
--> 221 for filename in filenames]
222
223 if hyperspy.defaults_parser.preferences.Plot.plot_on_load:
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
263 reader=reader,
264 signal_type=signal_type,
--> 265 **kwds)
266
267
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
271 **kwds):
272 file_data_list = reader.file_reader(filename,
--> 273 **kwds)
274 objects = []
275
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order)
991 'original_metadata': dm.tags_dict,
992 'post_process': post_process,
--> 993 'mapping': image.get_mapping(),
994 })
995
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\io_plugins\digital_micrograph.py in get_mapping(self)
837 if "Microscope_Info" in self.imdict.ImageTags.keys():
838 is_TEM = (
--> 839 'TEM' == self.imdict.ImageTags.Microscope_Info.Illumination_Mode)
840 is_diffraction = (
841 'DIFFRACTION' == self.imdict.ImageTags.Microscope_Info.Imaging_Mode)
C:\Anaconda3\lib\site-packages\hyperspy-1.2+dev-py3.5.egg\hyperspy\misc\utils.py in __getattribute__(self, name)
333 name = name.decode()
334 name = slugify(name, valid_variable_name=True)
--> 335 item = super(DictionaryTreeBrowser, self).__getattribute__(name)
336 if isinstance(item, dict) and '_dtb_value_' in item and "key" in item:
337 return item['_dtb_value_']
AttributeError: 'DictionaryTreeBrowser' object has no attribute 'Illumination_Mode'
|
AttributeError
|
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
signal_range=None,
show_progressbar=None,
**kwargs,
):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically correct if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the aligment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal1D of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
signal_range : tuple of integers, tuple of floats. Optional
Will only search for the ZLP within the signal_range. If given
in integers, the range will be in index values. If given floats,
the range will be in spectrum values. Useful if there are features
in the spectrum which are more intense than the ZLP.
Default is searching in the whole signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Examples
--------
>>>> s_ll.align_zero_loss_peak()
Aligning both the lowloss signal and another signal
>>>> s_ll.align_zero_loss_peak(also_align=[s])
Aligning within a narrow range of the lowloss signal
>>>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
"""
def substract_from_offset(value, signals):
for signal in signals:
signal.axes_manager[-1].offset -= value
def estimate_zero_loss_peak_centre(s, mask, signal_range):
if signal_range:
zlpc = s.isig[
signal_range[0] : signal_range[1]
].estimate_zero_loss_peak_centre(mask=mask)
else:
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
return zlpc
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
mean_ = without_nans(zlpc.data).mean()
if print_stats is True:
print()
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
signal.shift1D(-zlpc.data + mean_, show_progressbar=show_progressbar)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
if subpixel is False:
return
left, right = -3.0, 3.0
if calibrate is False:
mean_ = without_nans(
estimate_zero_loss_peak_centre(self, mask, signal_range).data
).mean()
left += mean_
right += mean_
left = (
left if left > self.axes_manager[-1].axis[0] else self.axes_manager[-1].axis[0]
)
right = (
right
if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1]
)
if self.axes_manager.navigation_size > 1:
self.align1D(
left,
right,
also_align=also_align,
show_progressbar=show_progressbar,
**kwargs,
)
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
if calibrate is True:
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
|
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
signal_range=None,
show_progressbar=None,
**kwargs,
):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically correct if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the aligment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal1D of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
signal_range : tuple of integers, tuple of floats. Optional
Will only search for the ZLP within the signal_range. If given
in integers, the range will be in index values. If given floats,
the range will be in spectrum values. Useful if there are features
in the spectrum which are more intense than the ZLP.
Default is searching in the whole signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Examples
--------
>>>> s_ll.align_zero_loss_peak()
Aligning both the lowloss signal and another signal
>>>> s_ll.align_zero_loss_peak(also_align=[s])
Aligning within a narrow range of the lowloss signal
>>>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
"""
def substract_from_offset(value, signals):
for signal in signals:
signal.axes_manager[-1].offset -= value
def estimate_zero_loss_peak_centre(s, mask, signal_range):
if signal_range:
zlpc = s.isig[
signal_range[0] : signal_range[1]
].estimate_zero_loss_peak_centre(mask=mask)
else:
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
return zlpc
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
mean_ = without_nans(zlpc.data).mean()
if print_stats is True:
print()
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
signal.shift1D(-zlpc.data + mean_, show_progressbar=show_progressbar)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
if subpixel is False:
return
left, right = -3.0, 3.0
if calibrate is False:
mean_ = without_nans(
estimate_zero_loss_peak_centre(self, mask, signal_range).data
).mean()
left += mean_
right += mean_
left = (
left if left > self.axes_manager[-1].axis[0] else self.axes_manager[-1].axis[0]
)
right = (
right
if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1]
)
self.align1D(
left, right, also_align=also_align, show_progressbar=show_progressbar, **kwargs
)
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
if calibrate is True:
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
|
https://github.com/hyperspy/hyperspy/issues/1301
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-20-f890f17f75fd> in <module>()
----> 1 s.align_zero_loss_peak()
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/_signals/eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
293 also_align=also_align,
294 show_progressbar=show_progressbar,
--> 295 **kwargs)
296 zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
297 if calibrate is True:
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/_signals/signal1d.py in align1D(self, start, end, reference_indices, max_shift, interpolate, number_of_interpolation_points, interpolation_method, crop, expand, fill_value, also_align, mask, show_progressbar)
650 number_of_interpolation_points=number_of_interpolation_points,
651 mask=mask,
--> 652 show_progressbar=show_progressbar)
653 for signal in also_align + [self]:
654 signal.shift1D(shift_array=shift_array,
/home/magnunor/Documents/HyperSpy/hyperspy/hyperspy/_signals/signal1d.py in estimate_shift1D(self, start, end, reference_indices, max_shift, interpolate, number_of_interpolation_points, mask, show_progressbar)
551 dat = interpolate1D(ip, dat)
552 shift_array[indices] = np.argmax(
--> 553 np.correlate(ref, dat, 'full')) - len(ref) + 1
554 pbar.update(1)
555
IndexError: too many indices for array
|
IndexError
|
def get_lines_intensity(
self,
xray_lines=None,
integration_windows=2.0,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs,
):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
suming the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
Backgrounds average in provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, "best", list of string}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
integration_windows: Float or array
If float, the width of the integration windows is the
'integration_windows_width' times the calculated FWHM of the line.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
If None, no background subtraction. Else, the backgrounds average
in the windows are subtracted from the return intensities.
'background_windows' provides the position of the windows in
energy. Each line corresponds to a X-ray line. In a line, the two
first values correspond to the limits of the left window and the
two last values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
only_lines = utils_eds._parse_only_lines(only_lines)
xray_lines = self._get_xray_lines(
xray_lines, only_one=only_one, only_lines=only_lines
)
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)
for xray in xray_not_here:
warnings.warn(
"%s is not in the data energy range." % xray
+ "You can remove it with"
+ "s.metadata.Sample.xray_lines.remove('%s')" % xray
)
if hasattr(integration_windows, "__iter__") is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines
)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):
line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa="auto")
element, line = utils_eds._get_element_and_line(Xray_line)
img = self.isig[window[0] : window[1]].integrate1D(-1)
if np.issubdtype(img.data.dtype, np.integer):
# The operations below require a float dtype with the default
# numpy casting rule ('same_kind')
img.change_dtype("float")
if background_windows is not None:
bw = background_windows[i]
# TODO: test to prevent slicing bug. To be reomved when fixed
indexes = [float(ax.value2index(de)) for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2])
)
img -= (bck1 + bck2) * corr_factor
img.metadata.General.title = "X-ray line intensity of %s: %s at %.2f %s" % (
self.metadata.General.title,
Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
)
if img.axes_manager.navigation_dimension >= 2:
img = img.as_signal2D([0, 1])
elif img.axes_manager.navigation_dimension == 1:
img.axes_manager.set_signal_dimension(1)
if plot_result and img.axes_manager.signal_size == 1:
print(
"%s at %s %s : Intensity = %.2f"
% (Xray_line, line_energy, ax.units, img.data)
)
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.signal_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
|
def get_lines_intensity(
self,
xray_lines=None,
integration_windows=2.0,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs,
):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
suming the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
Backgrounds average in provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, "best", list of string}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
integration_windows: Float or array
If float, the width of the integration windows is the
'integration_windows_width' times the calculated FWHM of the line.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
If None, no background subtraction. Else, the backgrounds average
in the windows are subtracted from the return intensities.
'background_windows' provides the position of the windows in
energy. Each line corresponds to a X-ray line. In a line, the two
first values correspond to the limits of the left window and the
two last values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
only_lines = utils_eds._parse_only_lines(only_lines)
xray_lines = self._get_xray_lines(
xray_lines, only_one=only_one, only_lines=only_lines
)
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)
for xray in xray_not_here:
warnings.warn(
"%s is not in the data energy range." % xray
+ "You can remove it with"
+ "s.metadata.Sample.xray_lines.remove('%s')" % xray
)
if hasattr(integration_windows, "__iter__") is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines
)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):
line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa="auto")
element, line = utils_eds._get_element_and_line(Xray_line)
img = self.isig[window[0] : window[1]].integrate1D(-1)
if background_windows is not None:
bw = background_windows[i]
# TODO: test to prevent slicing bug. To be reomved when fixed
indexes = [float(ax.value2index(de)) for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2])
)
img -= (bck1 + bck2) * corr_factor
img.metadata.General.title = "X-ray line intensity of %s: %s at %.2f %s" % (
self.metadata.General.title,
Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
)
if img.axes_manager.navigation_dimension >= 2:
img = img.as_signal2D([0, 1])
elif img.axes_manager.navigation_dimension == 1:
img.axes_manager.set_signal_dimension(1)
if plot_result and img.axes_manager.signal_size == 1:
print(
"%s at %s %s : Intensity = %.2f"
% (Xray_line, line_energy, ax.units, img.data)
)
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.signal_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
|
https://github.com/hyperspy/hyperspy/issues/1175
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-18-0c6100594c5b> in <module>()
----> 1 inten = Pt_wedge.get_lines_intensity(integration_windows=iw_Pt, background_windows=bw_Pt)
2 inten[0].plot()
/Users/macark/Documents/hyperspy/hyperspy/_signals/eds.py in get_lines_intensity(self, xray_lines, integration_windows, background_windows, plot_result, only_one, only_lines, **kwargs)
660 corr_factor = (indexes[5] - indexes[4]) / (
661 (indexes[1] - indexes[0]) + (indexes[3] - indexes[2]))
--> 662 img -= (bck1 + bck2) * corr_factor
663 img.metadata.General.title = (
664 'X-ray line intensity of %s: %s at %.2f %s' %
/Users/macark/Documents/hyperspy/hyperspy/signal.py in __isub__(self, other)
/Users/macark/Documents/hyperspy/hyperspy/signal.py in _binary_operator_ruler(self, other, op_name)
1554 odata = other._data_aligned_with_axes
1555 if op_name in INPLACE_OPERATORS:
-> 1556 self.data = getattr(sdata, op_name)(odata)
1557 self.axes_manager._sort_axes()
1558 return self
TypeError: Cannot cast ufunc subtract output from dtype('float64') to dtype('uint64') with casting rule 'same_kind'
|
TypeError
|
def load(self, filename):
"""Load the results of a previous decomposition and
demixing analysis from a file.
Parameters
----------
filename : string
"""
decomposition = np.load(filename)
for key, value in decomposition.items():
if value.dtype == np.dtype("object"):
value = None
setattr(self, key, value)
_logger.info("\n%s loaded correctly" % filename)
# For compatibility with old version ##################
if hasattr(self, "algorithm"):
self.decomposition_algorithm = self.algorithm
del self.algorithm
if hasattr(self, "V"):
self.explained_variance = self.V
del self.V
if hasattr(self, "w"):
self.unmixing_matrix = self.w
del self.w
if hasattr(self, "variance2one"):
del self.variance2one
if hasattr(self, "centered"):
del self.centered
if hasattr(self, "pca_algorithm"):
self.decomposition_algorithm = self.pca_algorithm
del self.pca_algorithm
if hasattr(self, "ica_algorithm"):
self.bss_algorithm = self.ica_algorithm
del self.ica_algorithm
if hasattr(self, "v"):
self.loadings = self.v
del self.v
if hasattr(self, "scores"):
self.loadings = self.scores
del self.scores
if hasattr(self, "pc"):
self.loadings = self.pc
del self.pc
if hasattr(self, "ica_scores"):
self.bss_loadings = self.ica_scores
del self.ica_scores
if hasattr(self, "ica_factors"):
self.bss_factors = self.ica_factors
del self.ica_factors
#
# Output_dimension is an array after loading, convert it to int
if hasattr(self, "output_dimension") and self.output_dimension is not None:
self.output_dimension = int(self.output_dimension)
_logger.info(self._summary())
|
def load(self, filename):
"""Load the results of a previous decomposition and
demixing analysis from a file.
Parameters
----------
filename : string
"""
decomposition = np.load(filename)
for key, value in decomposition.items():
if value.dtype == np.dtype("object"):
value = None
setattr(self, key, value)
_logger.info("\n%s loaded correctly" % filename)
# For compatibility with old version ##################
if hasattr(self, "algorithm"):
self.decomposition_algorithm = self.algorithm
del self.algorithm
if hasattr(self, "V"):
self.explained_variance = self.V
del self.V
if hasattr(self, "w"):
self.unmixing_matrix = self.w
del self.w
if hasattr(self, "variance2one"):
del self.variance2one
if hasattr(self, "centered"):
del self.centered
if hasattr(self, "pca_algorithm"):
self.decomposition_algorithm = self.pca_algorithm
del self.pca_algorithm
if hasattr(self, "ica_algorithm"):
self.bss_algorithm = self.ica_algorithm
del self.ica_algorithm
if hasattr(self, "v"):
self.loadings = self.v
del self.v
if hasattr(self, "scores"):
self.loadings = self.scores
del self.scores
if hasattr(self, "pc"):
self.loadings = self.pc
del self.pc
if hasattr(self, "ica_scores"):
self.bss_loadings = self.ica_scores
del self.ica_scores
if hasattr(self, "ica_factors"):
self.bss_factors = self.ica_factors
del self.ica_factors
#
# Output_dimension is an array after loading, convert it to int
if hasattr(self, "output_dimension") and self.output_dimension is not None:
self.output_dimension = int(self.output_dimension)
self.summary()
|
https://github.com/hyperspy/hyperspy/issues/1145
|
s = hs.signals.Signal1D(np.random.rand(10,15, 1024))
s.decomposition(True)
s2 = s.get_decomposition_model(9)
s.blind_source_separation(9)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-34-ef9c7adc7bb5> in <module>()
----> 1 s.blind_source_separation(9)
/home/eric/Python_prog/hyperspy/hyperspy/learn/mva.py in blind_source_separation(self, number_of_components, algorithm, diff_order, diff_axes, factors, comp_list, mask, on_loadings, pretreatment, **kwargs)
528 if not hasattr(lr, 'factors') or lr.factors is None:
529 raise AttributeError(
--> 530 'A decomposition must be performed before blind '
531 'source seperation or factors must be provided.')
532
AttributeError: A decomposition must be performed before blind source seperation or factors must be provided.
|
AttributeError
|
def summary(self):
"""Prints a summary of the decomposition and demixing parameters
to the stdout
"""
print(self._summary())
|
def summary(self):
"""Prints a summary of the decomposition and demixing parameters
to the stdout
"""
summary_str = (
"Decomposition parameters:\n"
"-------------------------\n\n"
+ ("Decomposition algorithm : \t%s\n" % self.decomposition_algorithm)
+ ("Poissonian noise normalization : %s\n" % self.poissonian_noise_normalized)
+ ("Output dimension : %s\n" % self.output_dimension)
+ ("Centre : %s" % self.centre)
)
if self.bss_algorithm is not None:
summary_str += (
"\n\nDemixing parameters:\n"
"------------------------\n"
+ ("BSS algorithm : %s" % self.bss_algorithm)
+ ("Number of components : %i" % len(self.unmixing_matrix))
)
_logger.info(summary_str)
|
https://github.com/hyperspy/hyperspy/issues/1145
|
s = hs.signals.Signal1D(np.random.rand(10,15, 1024))
s.decomposition(True)
s2 = s.get_decomposition_model(9)
s.blind_source_separation(9)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-34-ef9c7adc7bb5> in <module>()
----> 1 s.blind_source_separation(9)
/home/eric/Python_prog/hyperspy/hyperspy/learn/mva.py in blind_source_separation(self, number_of_components, algorithm, diff_order, diff_axes, factors, comp_list, mask, on_loadings, pretreatment, **kwargs)
528 if not hasattr(lr, 'factors') or lr.factors is None:
529 raise AttributeError(
--> 530 'A decomposition must be performed before blind '
531 'source seperation or factors must be provided.')
532
AttributeError: A decomposition must be performed before blind source seperation or factors must be provided.
|
AttributeError
|
def _load_dictionary(self, file_data_dict):
"""Load data from dictionary.
Parameters
----------
file_data_dict : dictionary
A dictionary containing at least a 'data' keyword with an array of
arbitrary dimensions. Additionally the dictionary can contain the
following items:
data : numpy array
The signal data. It can be an array of any dimensions.
axes : dictionary (optional)
Dictionary to define the axes (see the
documentation of the AxesManager class for more details).
attributes : dictionary (optional)
A dictionary whose items are stored as attributes.
metadata : dictionary (optional)
A dictionary containing a set of parameters
that will to stores in the `metadata` attribute.
Some parameters might be mandatory in some cases.
original_metadata : dictionary (optional)
A dictionary containing a set of parameters
that will to stores in the `original_metadata` attribute. It
typically contains all the parameters that has been
imported from the original data file.
"""
self.data = file_data_dict["data"]
if "models" in file_data_dict:
self.models._add_dictionary(file_data_dict["models"])
if "axes" not in file_data_dict:
file_data_dict["axes"] = self._get_undefined_axes_list()
self.axes_manager = AxesManager(file_data_dict["axes"])
if "metadata" not in file_data_dict:
file_data_dict["metadata"] = {}
if "original_metadata" not in file_data_dict:
file_data_dict["original_metadata"] = {}
if "attributes" in file_data_dict:
for key, value in file_data_dict["attributes"].items():
if hasattr(self, key):
if isinstance(value, dict):
for k, v in value.items():
eval("self.%s.__setattr__(k,v)" % key)
else:
self.__setattr__(key, value)
self.original_metadata.add_dictionary(file_data_dict["original_metadata"])
self.metadata.add_dictionary(file_data_dict["metadata"])
if "title" not in self.metadata.General:
self.metadata.General.title = ""
if self._signal_type or not self.metadata.has_item("Signal.signal_type"):
self.metadata.Signal.signal_type = self._signal_type
if "learning_results" in file_data_dict:
self.learning_results.__dict__.update(file_data_dict["learning_results"])
|
def _load_dictionary(self, file_data_dict):
"""Load data from dictionary.
Parameters
----------
file_data_dict : dictionary
A dictionary containing at least a 'data' keyword with an array of
arbitrary dimensions. Additionally the dictionary can contain the
following items:
data : numpy array
The signal data. It can be an array of any dimensions.
axes : dictionary (optional)
Dictionary to define the axes (see the
documentation of the AxesManager class for more details).
attributes : dictionary (optional)
A dictionary whose items are stored as attributes.
metadata : dictionary (optional)
A dictionary containing a set of parameters
that will to stores in the `metadata` attribute.
Some parameters might be mandatory in some cases.
original_metadata : dictionary (optional)
A dictionary containing a set of parameters
that will to stores in the `original_metadata` attribute. It
typically contains all the parameters that has been
imported from the original data file.
"""
self.data = file_data_dict["data"]
if "models" in file_data_dict:
self.models._add_dictionary(file_data_dict["models"])
if "axes" not in file_data_dict:
file_data_dict["axes"] = self._get_undefined_axes_list()
self.axes_manager = AxesManager(file_data_dict["axes"])
if "metadata" not in file_data_dict:
file_data_dict["metadata"] = {}
if "original_metadata" not in file_data_dict:
file_data_dict["original_metadata"] = {}
if "attributes" in file_data_dict:
for key, value in file_data_dict["attributes"].items():
if hasattr(self, key):
if isinstance(value, dict):
for k, v in value.items():
eval("self.%s.__setattr__(k,v)" % key)
else:
self.__setattr__(key, value)
self.original_metadata.add_dictionary(file_data_dict["original_metadata"])
self.metadata.add_dictionary(file_data_dict["metadata"])
if "title" not in self.metadata.General:
self.metadata.General.title = ""
if self._signal_type or not self.metadata.has_item("Signal.signal_type"):
self.metadata.Signal.signal_type = self._signal_type
|
https://github.com/hyperspy/hyperspy/issues/1145
|
s = hs.signals.Signal1D(np.random.rand(10,15, 1024))
s.decomposition(True)
s2 = s.get_decomposition_model(9)
s.blind_source_separation(9)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-34-ef9c7adc7bb5> in <module>()
----> 1 s.blind_source_separation(9)
/home/eric/Python_prog/hyperspy/hyperspy/learn/mva.py in blind_source_separation(self, number_of_components, algorithm, diff_order, diff_axes, factors, comp_list, mask, on_loadings, pretreatment, **kwargs)
528 if not hasattr(lr, 'factors') or lr.factors is None:
529 raise AttributeError(
--> 530 'A decomposition must be performed before blind '
531 'source seperation or factors must be provided.')
532
AttributeError: A decomposition must be performed before blind source seperation or factors must be provided.
|
AttributeError
|
def ensure_parameters_in_bounds(self):
"""For all active components, snaps their free parameter values to
be within their boundaries (if bounded). Does not touch the array of
values.
"""
for component in self:
if component.active:
for param in component.free_parameters:
bmin = -np.inf if param.bmin is None else param.bmin
bmax = np.inf if param.bmax is None else param.bmax
if param._number_of_elements == 1:
if not bmin <= param.value <= bmax:
min_d = np.abs(param.value - bmin)
max_d = np.abs(param.value - bmax)
if min_d < max_d:
param.value = bmin
else:
param.value = bmax
else:
values = np.array(param.value)
if param.bmin is not None:
minmask = values < bmin
values[minmask] = bmin
if param.bmax is not None:
maxmask = values > bmax
values[maxmask] = bmax
param.value = tuple(values)
|
def ensure_parameters_in_bounds(self):
"""For all active components, snaps their free parameter values to
be within their boundaries (if bounded). Does not touch the array of
values.
"""
for component in self:
if component.active:
for param in component.free_parameters:
bmin = -np.inf if param.bmin is None else param.bmin
bmax = np.inf if param.bmax is None else param.bmax
if not bmin <= param.value <= bmax:
min_d = np.abs(param.value - bmin)
max_d = np.abs(param.value - bmax)
if min_d < max_d:
param.value = bmin
else:
param.value = bmax
|
https://github.com/hyperspy/hyperspy/issues/1062
|
TypeError Traceback (most recent call last)
<ipython-input-9-b81665222e1f> in <module>()
----> 1 m1.multifit(fitter='mpfit', bounded=True, kind='smart')
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/model.py in multifit(self, mask, fetch_only_fixed, autosave, autosave_every, show_progressbar, **kwargs)
1446 if mask is None or not mask[index[::-1]]:
1447 self.fetch_stored_values(only_fixed=fetch_only_fixed)
-> 1448 self.fit(**kwargs)
1449 i += 1
1450 if maxval > 0:
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/models/eelsmodel.py in fit(self, fitter, method, grad, bounded, ext_bounding, update_plot, kind, **kwargs)
344 ext_bounding=ext_bounding,
345 update_plot=update_plot,
--> 346 **kwargs)
347 elif kind == 'std':
348 Model.fit(self,
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/models/eelsmodel.py in smart_fit(self, start_energy, **kwargs)
383 # Fit the edges
384 for i in range(0, len(self._active_edges)):
--> 385 self._fit_edge(i, start_energy, **kwargs)
386
387 def _get_first_ionization_edge_energy(self, start_energy=None):
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/models/eelsmodel.py in _fit_edge(self, edgenumber, start_energy, **kwargs)
558 self.set_signal_range(start_energy, nextedgeenergy)
559 self.enable_fine_structure(to_activate_fs)
--> 560 self.fit(**kwargs)
561
562 self.enable_edges(edges_to_activate)
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/models/eelsmodel.py in fit(self, fitter, method, grad, bounded, ext_bounding, update_plot, kind, **kwargs)
353 ext_bounding=ext_bounding,
354 update_plot=update_plot,
--> 355 **kwargs)
356 else:
357 raise ValueError('kind must be either \'std\' or \'smart\'.'
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/model.py in fit(self, fitter, method, grad, bounded, ext_bounding, update_plot, **kwargs)
1185 # this has to be done before setting the p0, so moved things
1186 # around
-> 1187 self.ensure_parameters_in_bounds()
1188
1189 self.p_std = None
/home/magnunor/Documents/HyperSpy_project/HyperSpy/hyperspy/model.py in ensure_parameters_in_bounds(self)
602 bmin = -np.inf if param.bmin is None else param.bmin
603 bmax = np.inf if param.bmax is None else param.bmax
--> 604 if not bmin <= param.value <= bmax:
605 min_d = np.abs(param.value - bmin)
606 max_d = np.abs(param.value - bmax)
TypeError: unorderable types: float() <= tuple()
|
TypeError
|
def _export_factors(
self,
factors,
folder=None,
comp_ids=None,
multiple_files=None,
save_figures=False,
save_figures_format="png",
factor_prefix=None,
factor_format=None,
comp_label=None,
cmap=plt.cm.gray,
plot_shifts=True,
plot_char=4,
img_data=None,
same_window=False,
calibrate=True,
quiver_color="white",
vector_scale=1,
no_nans=True,
per_row=3,
):
from hyperspy.signals import Spectrum, Image
if multiple_files is None:
multiple_files = preferences.MachineLearning.multiple_files
if factor_format is None:
factor_format = preferences.MachineLearning.export_factors_default_file_format
# Select the desired factors
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, "__iter__"):
comp_ids = range(comp_ids)
mask = np.zeros(factors.shape[1], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
factors = factors[:, mask]
if save_figures is True:
plt.ioff()
fac_plots = self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
same_window=same_window,
comp_label=comp_label,
img_data=img_data,
plot_shifts=plot_shifts,
plot_char=plot_char,
cmap=cmap,
per_row=per_row,
quiver_color=quiver_color,
vector_scale=vector_scale,
)
for idx in range(len(comp_ids)):
filename = "%s_%02i.%s" % (
factor_prefix,
comp_ids[idx],
save_figures_format,
)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {"dpi": 600, "format": save_figures_format}
fac_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.signal_dimension == 2:
# factor images
axes_dicts = []
axes = self.axes_manager.signal_axes[::-1]
shape = (axes[1].size, axes[0].size)
factor_data = np.rollaxis(factors.reshape((shape[0], shape[1], -1)), 2)
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts.append(
{
"name": "factor_index",
"scale": 1.0,
"offset": 0.0,
"size": int(factors.shape[1]),
"units": "factor",
"index_in_array": 0,
}
)
s = Image(
factor_data,
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
elif self.axes_manager.signal_dimension == 1:
axes = [
self.axes_manager.signal_axes[0].get_axis_dictionary(),
{
"name": "factor_index",
"scale": 1.0,
"offset": 0.0,
"size": int(factors.shape[1]),
"units": "factor",
"index_in_array": 0,
},
]
axes[0]["index_in_array"] = 1
s = Spectrum(
factors.T,
axes=axes,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%ss.%s" % (factor_prefix, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.signal_dimension == 1:
axis_dict = self.axes_manager.signal_axes[0].get_axis_dictionary()
axis_dict["index_in_array"] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Spectrum(
factors[:, index],
axes=[
axis_dict,
],
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (factor_prefix, dim, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
if self.axes_manager.signal_dimension == 2:
axes = self.axes_manager.signal_axes
axes_dicts = [axes[0].get_axis_dictionary(), axes[1].get_axis_dictionary()]
axes_dicts[0]["index_in_array"] = 0
axes_dicts[1]["index_in_array"] = 1
factor_data = factors.reshape(
self.axes_manager._signal_shape_in_array
+ [
-1,
]
)
for dim, index in zip(comp_ids, range(len(comp_ids))):
im = Image(
factor_data[..., index],
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (factor_prefix, dim, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
im.save(filename)
|
def _export_factors(
self,
factors,
folder=None,
comp_ids=None,
multiple_files=None,
save_figures=False,
save_figures_format="png",
factor_prefix=None,
factor_format=None,
comp_label=None,
cmap=plt.cm.gray,
plot_shifts=True,
plot_char=4,
img_data=None,
same_window=False,
calibrate=True,
quiver_color="white",
vector_scale=1,
no_nans=True,
per_row=3,
):
from hyperspy._signals.image import Image
from hyperspy._signals.spectrum import Spectrum
if multiple_files is None:
multiple_files = preferences.MachineLearning.multiple_files
if factor_format is None:
factor_format = preferences.MachineLearning.export_factors_default_file_format
# Select the desired factors
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, "__iter__"):
comp_ids = range(comp_ids)
mask = np.zeros(factors.shape[1], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
factors = factors[:, mask]
if save_figures is True:
plt.ioff()
fac_plots = self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
same_window=same_window,
comp_label=comp_label,
img_data=img_data,
plot_shifts=plot_shifts,
plot_char=plot_char,
cmap=cmap,
per_row=per_row,
quiver_color=quiver_color,
vector_scale=vector_scale,
)
for idx in range(len(comp_ids)):
filename = "%s_%02i.%s" % (
factor_prefix,
comp_ids[idx],
save_figures_format,
)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {"dpi": 600, "format": save_figures_format}
fac_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.signal_dimension == 2:
# factor images
axes_dicts = []
axes = self.axes_manager.signal_axes[::-1]
shape = (axes[1].size, axes[0].size)
factor_data = np.rollaxis(factors.reshape((shape[0], shape[1], -1)), 2)
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts.append(
{
"name": "factor_index",
"scale": 1.0,
"offset": 0.0,
"size": int(factors.shape[1]),
"units": "factor",
"index_in_array": 0,
}
)
s = Image(
factor_data,
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
elif self.axes_manager.signal_dimension == 1:
axes = [
self.axes_manager.signal_axes[0].get_axis_dictionary(),
{
"name": "factor_index",
"scale": 1.0,
"offset": 0.0,
"size": int(factors.shape[1]),
"units": "factor",
"index_in_array": 0,
},
]
axes[0]["index_in_array"] = 1
s = Spectrum(
factors.T,
axes=axes,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%ss.%s" % (factor_prefix, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.signal_dimension == 1:
axis_dict = self.axes_manager.signal_axes[0].get_axis_dictionary()
axis_dict["index_in_array"] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Spectrum(
factors[:, index],
axes=[
axis_dict,
],
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (factor_prefix, dim, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
if self.axes_manager.signal_dimension == 2:
axes = self.axes_manager.signal_axes
axes_dicts = [axes[0].get_axis_dictionary(), axes[1].get_axis_dictionary()]
axes_dicts[0]["index_in_array"] = 0
axes_dicts[1]["index_in_array"] = 1
factor_data = factors.reshape(
self.axes_manager._signal_shape_in_array
+ [
-1,
]
)
for dim, index in zip(comp_ids, range(len(comp_ids))):
im = Image(
factor_data[..., index],
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (factor_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (factor_prefix, dim, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
im.save(filename)
|
https://github.com/hyperspy/hyperspy/issues/1095
|
ImportErrorTraceback (most recent call last)
<ipython-input-8-b1971df6874d> in <module>()
----> 1 sW.export_decomposition_results(factor_format='msa',loading_format='tif')
/home/smc204/anaconda2/envs/hyperspy/lib/python3.5/site-packages/hyperspy/signal.py in export_decomposition_results(self, comp_ids, folder, calibrate, factor_prefix, factor_format, loading_prefix, loading_format, comp_label, cmap, same_window, multiple_files, no_nans, per_row, save_figures, save_figures_format)
951 same_window=same_window,
952 no_nans=no_nans,
--> 953 per_row=per_row)
954
955 def export_bss_results(self,
/home/smc204/anaconda2/envs/hyperspy/lib/python3.5/site-packages/hyperspy/signal.py in _export_loadings(self, loadings, folder, comp_ids, multiple_files, loading_prefix, loading_format, save_figures_format, comp_label, cmap, save_figures, same_window, calibrate, no_nans, per_row)
424 per_row=3):
425
--> 426 from hyperspy._signals.image import Image
427 from hyperspy._signals.spectrum import Spectrum
428
ImportError: No module named 'hyperspy._signals.image'
|
ImportError
|
def _export_loadings(
self,
loadings,
folder=None,
comp_ids=None,
multiple_files=None,
loading_prefix=None,
loading_format=None,
save_figures_format="png",
comp_label=None,
cmap=plt.cm.gray,
save_figures=False,
same_window=False,
calibrate=True,
no_nans=True,
per_row=3,
):
from hyperspy.signals import Image, Spectrum
if multiple_files is None:
multiple_files = preferences.MachineLearning.multiple_files
if loading_format is None:
loading_format = preferences.MachineLearning.export_loadings_default_file_format
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, "__iter__"):
comp_ids = range(comp_ids)
mask = np.zeros(loadings.shape[0], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
loadings = loadings[mask]
if save_figures is True:
plt.ioff()
sc_plots = self._plot_loadings(
loadings,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
)
for idx in range(len(comp_ids)):
filename = "%s_%02i.%s" % (
loading_prefix,
comp_ids[idx],
save_figures_format,
)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {"dpi": 600, "format": save_figures_format}
sc_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[1].size, axes[0].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]["index_in_array"] = 1
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]["index_in_array"] = 2
axes_dicts.append(
{
"name": "loading_index",
"scale": 1.0,
"offset": 0.0,
"size": int(loadings.shape[0]),
"units": "factor",
"index_in_array": 0,
}
)
s = Image(
loading_data,
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
elif self.axes_manager.navigation_dimension == 1:
cal_axis = self.axes_manager.navigation_axes[0].get_axis_dictionary()
cal_axis["index_in_array"] = 1
axes = [
{
"name": "loading_index",
"scale": 1.0,
"offset": 0.0,
"size": int(loadings.shape[0]),
"units": "comp_id",
"index_in_array": 0,
},
cal_axis,
]
s = Image(
loadings,
axes=axes,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
filename = "%ss.%s" % (loading_prefix, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.navigation_dimension == 1:
axis_dict = self.axes_manager.navigation_axes[0].get_axis_dictionary()
axis_dict["index_in_array"] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Spectrum(
loadings[index],
axes=[
axis_dict,
],
)
filename = "%s-%i.%s" % (loading_prefix, dim, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
elif self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[0].size, axes[1].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]["index_in_array"] = 0
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]["index_in_array"] = 1
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Image(
loading_data[index, ...],
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (loading_prefix, dim, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
|
def _export_loadings(
self,
loadings,
folder=None,
comp_ids=None,
multiple_files=None,
loading_prefix=None,
loading_format=None,
save_figures_format="png",
comp_label=None,
cmap=plt.cm.gray,
save_figures=False,
same_window=False,
calibrate=True,
no_nans=True,
per_row=3,
):
from hyperspy._signals.image import Image
from hyperspy._signals.spectrum import Spectrum
if multiple_files is None:
multiple_files = preferences.MachineLearning.multiple_files
if loading_format is None:
loading_format = preferences.MachineLearning.export_loadings_default_file_format
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, "__iter__"):
comp_ids = range(comp_ids)
mask = np.zeros(loadings.shape[0], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
loadings = loadings[mask]
if save_figures is True:
plt.ioff()
sc_plots = self._plot_loadings(
loadings,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
)
for idx in range(len(comp_ids)):
filename = "%s_%02i.%s" % (
loading_prefix,
comp_ids[idx],
save_figures_format,
)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {"dpi": 600, "format": save_figures_format}
sc_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[1].size, axes[0].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]["index_in_array"] = 1
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]["index_in_array"] = 2
axes_dicts.append(
{
"name": "loading_index",
"scale": 1.0,
"offset": 0.0,
"size": int(loadings.shape[0]),
"units": "factor",
"index_in_array": 0,
}
)
s = Image(
loading_data,
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
elif self.axes_manager.navigation_dimension == 1:
cal_axis = self.axes_manager.navigation_axes[0].get_axis_dictionary()
cal_axis["index_in_array"] = 1
axes = [
{
"name": "loading_index",
"scale": 1.0,
"offset": 0.0,
"size": int(loadings.shape[0]),
"units": "comp_id",
"index_in_array": 0,
},
cal_axis,
]
s = Image(
loadings,
axes=axes,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
filename = "%ss.%s" % (loading_prefix, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.navigation_dimension == 1:
axis_dict = self.axes_manager.navigation_axes[0].get_axis_dictionary()
axis_dict["index_in_array"] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Spectrum(
loadings[index],
axes=[
axis_dict,
],
)
filename = "%s-%i.%s" % (loading_prefix, dim, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
elif self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[0].size, axes[1].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]["index_in_array"] = 0
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]["index_in_array"] = 1
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Image(
loading_data[index, ...],
axes=axes_dicts,
metadata={
"General": {
"title": "%s from %s"
% (loading_prefix, self.metadata.General.title),
}
},
)
filename = "%s-%i.%s" % (loading_prefix, dim, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
|
https://github.com/hyperspy/hyperspy/issues/1095
|
ImportErrorTraceback (most recent call last)
<ipython-input-8-b1971df6874d> in <module>()
----> 1 sW.export_decomposition_results(factor_format='msa',loading_format='tif')
/home/smc204/anaconda2/envs/hyperspy/lib/python3.5/site-packages/hyperspy/signal.py in export_decomposition_results(self, comp_ids, folder, calibrate, factor_prefix, factor_format, loading_prefix, loading_format, comp_label, cmap, same_window, multiple_files, no_nans, per_row, save_figures, save_figures_format)
951 same_window=same_window,
952 no_nans=no_nans,
--> 953 per_row=per_row)
954
955 def export_bss_results(self,
/home/smc204/anaconda2/envs/hyperspy/lib/python3.5/site-packages/hyperspy/signal.py in _export_loadings(self, loadings, folder, comp_ids, multiple_files, loading_prefix, loading_format, save_figures_format, comp_label, cmap, save_figures, same_window, calibrate, no_nans, per_row)
424 per_row=3):
425
--> 426 from hyperspy._signals.image import Image
427 from hyperspy._signals.spectrum import Spectrum
428
ImportError: No module named 'hyperspy._signals.image'
|
ImportError
|
def export_to_dictionary(target, whitelist, dic, fullcopy=True):
"""Exports attributes of target from whitelist.keys() to dictionary dic
All values are references only by default.
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
whitelist : dictionary
A dictionary, keys of which are used as attributes for exporting.
Key 'self' is only available with tag 'id', when the id of the
target is saved. The values are either None, or a tuple, where:
- the first item a string, which containts flags, separated by
commas.
- the second item is None if no 'init' flag is given, otherwise
the object required for the initialization.
The flag conventions are as follows:
* 'init':
object used for initialization of the target. The object is
saved in the tuple in whitelist
* 'fn':
the targeted attribute is a function, and may be pickled. A
tuple of (thing, value) will be exported to the dictionary,
where thing is None if function is passed as-is, and True if
dill package is used to pickle the function, with the value as
the result of the pickle.
* 'id':
the id of the targeted attribute is exported (e.g.
id(target.name))
* 'sig':
The targeted attribute is a signal, and will be converted to a
dictionary if fullcopy=True
dic : dictionary
A dictionary where the object will be exported
fullcopy : bool
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
"""
whitelist_flags = {}
for key, value in whitelist.items():
if value is None:
# No flags and/or values are given, just save the target
thing = attrgetter(key)(target)
if fullcopy:
thing = deepcopy(thing)
dic[key] = thing
whitelist_flags[key] = ""
continue
flags_str, value = value
flags = parse_flag_string(flags_str)
check_that_flags_make_sense(flags)
if key is "self":
if "id" not in flags:
raise ValueError('Key "self" is only available with flag "id" given')
value = id(target)
else:
if "id" in flags:
value = id(attrgetter(key)(target))
# here value is either id(thing), or None (all others except 'init'),
# or something for init
if "init" not in flags and value is None:
value = attrgetter(key)(target)
# here value either id(thing), or an actual target to export
if "sig" in flags:
if fullcopy:
from hyperspy.signal import Signal
if isinstance(value, Signal):
value = value._to_dictionary()
value["data"] = deepcopy(value["data"])
elif "fn" in flags:
if fullcopy:
value = (True, dill.dumps(value))
else:
value = (None, value)
elif fullcopy:
value = deepcopy(value)
dic[key] = value
whitelist_flags[key] = flags_str
if "_whitelist" not in dic:
dic["_whitelist"] = {}
# the saved whitelist does not have any values, as they are saved in the
# original dictionary. Have to restore then when loading from dictionary,
# most notably all with 'init' flags!!
dic["_whitelist"].update(whitelist_flags)
|
def export_to_dictionary(target, whitelist, dic, fullcopy=True):
"""Exports attributes of target from whitelist.keys() to dictionary dic
All values are references only by default.
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
whitelist : dictionary
A dictionary, keys of which are used as attributes for exporting.
Key 'self' is only available with tag 'id', when the id of the
target is saved. The values are either None, or a tuple, where:
- the first item a string, which containts flags, separated by
commas.
- the second item is None if no 'init' flag is given, otherwise
the object required for the initialization.
The flag conventions are as follows:
* 'init':
object used for initialization of the target. The object is
saved in the tuple in whitelist
* 'fn':
the targeted attribute is a function, and may be pickled
(preferably with dill package). A tuple of (thing, value) will
be exported to the dictionary, where thing is None if function
is passed as-is, and bool if dill package is used to pickle the
function, and value is the result.
* 'id':
the id of the targeted attribute is exported (e.g.
id(target.name))
* 'sig':
The targeted attribute is a signal, and will be converted to a
dictionary if fullcopy=True
dic : dictionary
A dictionary where the object will be exported
fullcopy : bool
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
"""
whitelist_flags = {}
for key, value in whitelist.items():
if value is None:
# No flags and/or values are given, just save the target
thing = attrgetter(key)(target)
if fullcopy:
thing = deepcopy(thing)
dic[key] = thing
whitelist_flags[key] = ""
continue
flags_str, value = value
flags = parse_flag_string(flags_str)
check_that_flags_make_sense(flags)
if key is "self":
if "id" not in flags:
raise ValueError('Key "self" is only available with flag "id" given')
value = id(target)
else:
if "id" in flags:
value = id(attrgetter(key)(target))
# here value is either id(thing), or None (all others except 'init'),
# or something for init
if "init" not in flags and value is None:
value = attrgetter(key)(target)
# here value either id(thing), or an actual target to export
if "sig" in flags:
if fullcopy:
from hyperspy.signal import Signal
if isinstance(value, Signal):
value = value._to_dictionary()
value["data"] = deepcopy(value["data"])
elif "fn" in flags:
if fullcopy:
if dill_avail:
value = (True, dill.dumps(value))
else:
# Apparently this fails because Python does not guarantee backwards-compatibility for marshal, and pickle does
# not work for our lambda functions. Hence drop marshal
# support and only work with dill package
value = (False, marshal.dumps(value.__code__))
else:
value = (None, value)
elif fullcopy:
value = deepcopy(value)
dic[key] = value
whitelist_flags[key] = flags_str
if "_whitelist" not in dic:
dic["_whitelist"] = {}
# the saved whitelist does not have any values, as they are saved in the
# original dictionary. Have to restore then when loading from dictionary,
# most notably all with 'init' flags!!
dic["_whitelist"].update(whitelist_flags)
|
https://github.com/hyperspy/hyperspy/issues/997
|
m = tmp.models.restore('BK_w_fine_structure')
---------------------------------------------------------------------------
EOFError Traceback (most recent call last)
<ipython-input-349-668408e20346> in <module>()
----> 1 tmp.models.restore('BK_w_fine_structure')
/home/josh/git_repos/hyperspy/hyperspy/signal.py in restore(self, name)
253 name = self._check_name(name, True)
254 d = self._models.get_item(name + '._dict').as_dictionary()
--> 255 return self._signal.create_model(dictionary=copy.deepcopy(d))
256
257 def __repr__(self):
/home/josh/git_repos/hyperspy/hyperspy/_signals/eels.py in create_model(self, ll, auto_background, auto_add_edges, GOS, dictionary)
1254 auto_add_edges=auto_add_edges,
1255 GOS=GOS,
-> 1256 dictionary=dictionary)
1257 return model
1258
/home/josh/git_repos/hyperspy/hyperspy/models/eelsmodel.py in __init__(self, spectrum, auto_background, auto_add_edges, ll, GOS, dictionary)
84 auto_background = False
85 auto_add_edges = False
---> 86 self._load_dictionary(dictionary)
87
88 if auto_background is True:
/home/josh/git_repos/hyperspy/hyperspy/model.py in _load_dictionary(self, dic)
255
256 self.append(getattr(components, comp['_id_name'])(**init_args))
--> 257 id_dict.update(self[-1]._load_dictionary(comp))
258 # deal with twins:
259 for comp in dic['components']:
/home/josh/git_repos/hyperspy/hyperspy/component.py in _load_dictionary(self, dic)
1079 if hasattr(self, idname):
1080 par = getattr(self, idname)
-> 1081 t_id = par._load_dictionary(p)
1082 id_dict[t_id] = par
1083 else:
/home/josh/git_repos/hyperspy/hyperspy/component.py in _load_dictionary(self, dictionary)
183 """
184 if dictionary['_id_name'] == self._id_name:
--> 185 load_from_dictionary(self, dictionary)
186 return dictionary['self']
187 else:
/home/josh/git_repos/hyperspy/hyperspy/misc/export_dictionary.py in load_from_dictionary(target, dic)
182 flags = parse_flag_string(flags_str)
183 if 'id' not in flags:
--> 184 value = reconstruct_object(flags, value)
185 if 'init' in flags:
186 new_whitelist[key] = (flags_str, value)
/home/josh/git_repos/hyperspy/hyperspy/misc/export_dictionary.py in reconstruct_object(flags, value)
215 return thing
216 if ifdill in [False, 'False', b'False']:
--> 217 return types.FunctionType(marshal.loads(thing), globals())
218 if ifdill in [True, 'True', b'True']:
219 if not dill_avail:
EOFError: marshal data too short
|
EOFError
|
def reconstruct_object(flags, value):
"""Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
if not isinstance(flags, list):
flags = parse_flag_string(flags)
if "sig" in flags:
if isinstance(value, dict):
from hyperspy.signal import Signal
value = Signal(**value)
value._assign_subclass()
return value
if "fn" in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [True, "True", b"True"]:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
return value
|
def reconstruct_object(flags, value):
"""Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
if not isinstance(flags, list):
flags = parse_flag_string(flags)
if "sig" in flags:
if isinstance(value, dict):
from hyperspy.signal import Signal
value = Signal(**value)
value._assign_subclass()
return value
if "fn" in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [False, "False", b"False"]:
return types.FunctionType(marshal.loads(thing), globals())
if ifdill in [True, "True", b"True"]:
if not dill_avail:
raise ValueError(
"the dictionary was constructed using "
'"dill" package, which is not available on the system'
)
else:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
return value
|
https://github.com/hyperspy/hyperspy/issues/997
|
m = tmp.models.restore('BK_w_fine_structure')
---------------------------------------------------------------------------
EOFError Traceback (most recent call last)
<ipython-input-349-668408e20346> in <module>()
----> 1 tmp.models.restore('BK_w_fine_structure')
/home/josh/git_repos/hyperspy/hyperspy/signal.py in restore(self, name)
253 name = self._check_name(name, True)
254 d = self._models.get_item(name + '._dict').as_dictionary()
--> 255 return self._signal.create_model(dictionary=copy.deepcopy(d))
256
257 def __repr__(self):
/home/josh/git_repos/hyperspy/hyperspy/_signals/eels.py in create_model(self, ll, auto_background, auto_add_edges, GOS, dictionary)
1254 auto_add_edges=auto_add_edges,
1255 GOS=GOS,
-> 1256 dictionary=dictionary)
1257 return model
1258
/home/josh/git_repos/hyperspy/hyperspy/models/eelsmodel.py in __init__(self, spectrum, auto_background, auto_add_edges, ll, GOS, dictionary)
84 auto_background = False
85 auto_add_edges = False
---> 86 self._load_dictionary(dictionary)
87
88 if auto_background is True:
/home/josh/git_repos/hyperspy/hyperspy/model.py in _load_dictionary(self, dic)
255
256 self.append(getattr(components, comp['_id_name'])(**init_args))
--> 257 id_dict.update(self[-1]._load_dictionary(comp))
258 # deal with twins:
259 for comp in dic['components']:
/home/josh/git_repos/hyperspy/hyperspy/component.py in _load_dictionary(self, dic)
1079 if hasattr(self, idname):
1080 par = getattr(self, idname)
-> 1081 t_id = par._load_dictionary(p)
1082 id_dict[t_id] = par
1083 else:
/home/josh/git_repos/hyperspy/hyperspy/component.py in _load_dictionary(self, dictionary)
183 """
184 if dictionary['_id_name'] == self._id_name:
--> 185 load_from_dictionary(self, dictionary)
186 return dictionary['self']
187 else:
/home/josh/git_repos/hyperspy/hyperspy/misc/export_dictionary.py in load_from_dictionary(target, dic)
182 flags = parse_flag_string(flags_str)
183 if 'id' not in flags:
--> 184 value = reconstruct_object(flags, value)
185 if 'init' in flags:
186 new_whitelist[key] = (flags_str, value)
/home/josh/git_repos/hyperspy/hyperspy/misc/export_dictionary.py in reconstruct_object(flags, value)
215 return thing
216 if ifdill in [False, 'False', b'False']:
--> 217 return types.FunctionType(marshal.loads(thing), globals())
218 if ifdill in [True, 'True', b'True']:
219 if not dill_avail:
EOFError: marshal data too short
|
EOFError
|
def fit(
self,
fitter=None,
method="ls",
grad=False,
bounded=False,
ext_bounding=False,
update_plot=False,
**kwargs,
):
"""Fits the model to the experimental data.
The chi-squared, reduced chi-squared and the degrees of freedom are
computed automatically when fitting. They are stored as signals, in the
`chisq`, `red_chisq` and `dof`. Note that,
unless ``metadata.Signal.Noise_properties.variance`` contains an
accurate estimation of the variance of the data, the chi-squared and
reduced chi-squared cannot be computed correctly. This is also true for
homocedastic noise.
Parameters
----------
fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
The optimizer to perform the fitting. If None the fitter
defined in `preferences.Model.default_fitter` is used.
"leastsq" performs least squares using the Levenberg–Marquardt
algorithm.
"mpfit" performs least squares using the Levenberg–Marquardt
algorithm and, unlike "leastsq", support bounded optimization.
"fmin" performs curve fitting using a downhill simplex algorithm.
It is less robust than the Levenberg-Marquardt based optimizers,
but, at present, it is the only one that support maximum likelihood
optimization for poissonian noise.
"odr" performs the optimization using the orthogonal distance
regression algorithm. It does not support bounds.
"leastsq", "odr" and "mpfit" can estimate the standard deviation of
the estimated value of the parameters if the
"metada.Signal.Noise_properties.variance" attribute is defined.
Note that if it is not defined the standard deviation is estimated
using variance equal 1, what, if the noise is heterocedatic, will
result in a biased estimation of the parameter values and errors.i
If `variance` is a `Signal` instance of the
same `navigation_dimension` as the spectrum, and `method` is "ls"
weighted least squares is performed.
method : {'ls', 'ml'}
Choose 'ls' (default) for least squares and 'ml' for poissonian
maximum-likelihood estimation. The latter is only available when
`fitter` is "fmin".
grad : bool
If True, the analytical gradient is used if defined to
speed up the optimization.
bounded : bool
If True performs bounded optimization if the fitter
supports it. Currently only "mpfit" support it.
update_plot : bool
If True, the plot is updated during the optimization
process. It slows down the optimization but it permits
to visualize the optimization progress.
ext_bounding : bool
If True, enforce bounding by keeping the value of the
parameters constant out of the defined bounding area.
**kwargs : key word arguments
Any extra key word argument will be passed to the chosen
fitter. For more information read the docstring of the optimizer
of your choice in `scipy.optimize`.
See Also
--------
multifit
"""
if fitter is None:
fitter = preferences.Model.default_fitter
switch_aap = update_plot != self._plot_active
if switch_aap is True and update_plot is False:
self._disconnect_parameters2update_plot()
if bounded is True:
if fitter not in ("mpfit", "tnc", "l_bfgs_b"):
raise NotImplementedError(
"Bounded optimization is onlyavailable for the mpfit optimizer."
)
else:
# this has to be done before setting the p0, so moved things
# around
self.ensure_parameters_in_bounds()
self.p_std = None
self._set_p0()
if ext_bounding:
self._enable_ext_bounding()
if grad is False:
approx_grad = True
jacobian = None
odr_jacobian = None
grad_ml = None
grad_ls = None
else:
approx_grad = False
jacobian = self._jacobian
odr_jacobian = self._jacobian4odr
grad_ml = self._gradient_ml
grad_ls = self._gradient_ls
if method == "ml":
weights = None
if fitter != "fmin":
raise NotImplementedError(
"Maximum likelihood estimation "
'is only implemented for the "fmin" '
"optimizer"
)
elif method == "ls":
if "Signal.Noise_properties.variance" not in self.spectrum.metadata:
variance = 1
else:
variance = self.spectrum.metadata.Signal.Noise_properties.variance
if isinstance(variance, Signal):
if (
variance.axes_manager.navigation_shape
== self.spectrum.axes_manager.navigation_shape
):
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple
)[self.channel_switches]
else:
raise AttributeError(
"The `navigation_shape` of the variance signals "
"is not equal to the variance shape of the "
"spectrum"
)
elif not isinstance(variance, numbers.Number):
raise AttributeError(
"Variance must be a number or a `Signal` instance but "
"currently it is a %s" % type(variance)
)
weights = 1.0 / np.sqrt(variance)
else:
raise ValueError('method must be "ls" or "ml" but %s given' % method)
args = (self.spectrum()[self.channel_switches], weights)
# Least squares "dedicated" fitters
if fitter == "leastsq":
output = leastsq(
self._errfunc,
self.p0[:],
Dfun=jacobian,
col_deriv=1,
args=args,
full_output=True,
**kwargs,
)
self.p0, pcov = output[0:2]
if (self.axis.size > len(self.p0)) and pcov is not None:
pcov *= (self._errfunc(self.p0, *args) ** 2).sum() / (
len(args[0]) - len(self.p0)
)
self.p_std = np.sqrt(np.diag(pcov))
self.fit_output = output
elif fitter == "odr":
modelo = odr.Model(fcn=self._function4odr, fjacb=odr_jacobian)
mydata = odr.RealData(
self.axis.axis[self.channel_switches],
self.spectrum()[self.channel_switches],
sx=None,
sy=(1 / weights if weights is not None else None),
)
myodr = odr.ODR(mydata, modelo, beta0=self.p0[:])
myoutput = myodr.run()
result = myoutput.beta
self.p_std = myoutput.sd_beta
self.p0 = result
self.fit_output = myoutput
elif fitter == "mpfit":
autoderivative = 1
if grad is True:
autoderivative = 0
if bounded is True:
self.set_mpfit_parameters_info()
elif bounded is False:
self.mpfit_parinfo = None
m = mpfit(
self._errfunc4mpfit,
self.p0[:],
parinfo=self.mpfit_parinfo,
functkw={"y": self.spectrum()[self.channel_switches], "weights": weights},
autoderivative=autoderivative,
quiet=1,
)
self.p0 = m.params
if (self.axis.size > len(self.p0)) and m.perror is not None:
self.p_std = m.perror * np.sqrt(
(self._errfunc(self.p0, *args) ** 2).sum()
/ (len(args[0]) - len(self.p0))
)
self.fit_output = m
else:
# General optimizers (incluiding constrained ones(tnc,l_bfgs_b)
# Least squares or maximum likelihood
if method == "ml":
tominimize = self._poisson_likelihood_function
fprime = grad_ml
elif method in ["ls", "wls"]:
tominimize = self._errfunc2
fprime = grad_ls
# OPTIMIZERS
# Simple (don't use gradient)
if fitter == "fmin":
self.p0 = fmin(tominimize, self.p0, args=args, **kwargs)
elif fitter == "powell":
self.p0 = fmin_powell(tominimize, self.p0, args=args, **kwargs)
# Make use of the gradient
elif fitter == "cg":
self.p0 = fmin_cg(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
elif fitter == "ncg":
self.p0 = fmin_ncg(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
elif fitter == "bfgs":
self.p0 = fmin_bfgs(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
# Constrainded optimizers
# Use gradient
elif fitter == "tnc":
if bounded is True:
self.set_boundaries()
elif bounded is False:
self.free_parameters_boundaries = None
self.p0 = fmin_tnc(
tominimize,
self.p0,
fprime=fprime,
args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad,
**kwargs,
)[0]
elif fitter == "l_bfgs_b":
if bounded is True:
self.set_boundaries()
elif bounded is False:
self.free_parameters_boundaries = None
self.p0 = fmin_l_bfgs_b(
tominimize,
self.p0,
fprime=fprime,
args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad,
**kwargs,
)[0]
else:
print(
"""
The %s optimizer is not available.
Available optimizers:
Unconstrained:
--------------
Only least Squares: leastsq and odr
General: fmin, powell, cg, ncg, bfgs
Cosntrained:
------------
tnc and l_bfgs_b
"""
% fitter
)
if np.iterable(self.p0) == 0:
self.p0 = (self.p0,)
self._fetch_values_from_p0(p_std=self.p_std)
self.store_current_values()
self._calculate_chisq()
self._set_current_degrees_of_freedom()
if ext_bounding is True:
self._disable_ext_bounding()
if switch_aap is True and update_plot is False:
self._connect_parameters2update_plot()
self.update_plot()
|
def fit(
self,
fitter=None,
method="ls",
grad=False,
bounded=False,
ext_bounding=False,
update_plot=False,
**kwargs,
):
"""Fits the model to the experimental data.
The chi-squared, reduced chi-squared and the degrees of freedom are
computed automatically when fitting. They are stored as signals, in the
`chisq`, `red_chisq` and `dof`. Note that,
unless ``metadata.Signal.Noise_properties.variance`` contains an
accurate estimation of the variance of the data, the chi-squared and
reduced chi-squared cannot be computed correctly. This is also true for
homocedastic noise.
Parameters
----------
fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
The optimizer to perform the fitting. If None the fitter
defined in `preferences.Model.default_fitter` is used.
"leastsq" performs least squares using the Levenberg–Marquardt
algorithm.
"mpfit" performs least squares using the Levenberg–Marquardt
algorithm and, unlike "leastsq", support bounded optimization.
"fmin" performs curve fitting using a downhill simplex algorithm.
It is less robust than the Levenberg-Marquardt based optimizers,
but, at present, it is the only one that support maximum likelihood
optimization for poissonian noise.
"odr" performs the optimization using the orthogonal distance
regression algorithm. It does not support bounds.
"leastsq", "odr" and "mpfit" can estimate the standard deviation of
the estimated value of the parameters if the
"metada.Signal.Noise_properties.variance" attribute is defined.
Note that if it is not defined the standard deviation is estimated
using variance equal 1, what, if the noise is heterocedatic, will
result in a biased estimation of the parameter values and errors.i
If `variance` is a `Signal` instance of the
same `navigation_dimension` as the spectrum, and `method` is "ls"
weighted least squares is performed.
method : {'ls', 'ml'}
Choose 'ls' (default) for least squares and 'ml' for poissonian
maximum-likelihood estimation. The latter is only available when
`fitter` is "fmin".
grad : bool
If True, the analytical gradient is used if defined to
speed up the optimization.
bounded : bool
If True performs bounded optimization if the fitter
supports it. Currently only "mpfit" support it.
update_plot : bool
If True, the plot is updated during the optimization
process. It slows down the optimization but it permits
to visualize the optimization progress.
ext_bounding : bool
If True, enforce bounding by keeping the value of the
parameters constant out of the defined bounding area.
**kwargs : key word arguments
Any extra key word argument will be passed to the chosen
fitter. For more information read the docstring of the optimizer
of your choice in `scipy.optimize`.
See Also
--------
multifit
"""
if fitter is None:
fitter = preferences.Model.default_fitter
switch_aap = update_plot != self._plot_active
if switch_aap is True and update_plot is False:
self._disconnect_parameters2update_plot()
self.p_std = None
self._set_p0()
if ext_bounding:
self._enable_ext_bounding()
if grad is False:
approx_grad = True
jacobian = None
odr_jacobian = None
grad_ml = None
grad_ls = None
else:
approx_grad = False
jacobian = self._jacobian
odr_jacobian = self._jacobian4odr
grad_ml = self._gradient_ml
grad_ls = self._gradient_ls
if bounded is True and fitter not in ("mpfit", "tnc", "l_bfgs_b"):
raise NotImplementedError(
"Bounded optimization is only available for the mpfit optimizer."
)
if method == "ml":
weights = None
if fitter != "fmin":
raise NotImplementedError(
"Maximum likelihood estimation "
'is only implemented for the "fmin" '
"optimizer"
)
elif method == "ls":
if "Signal.Noise_properties.variance" not in self.spectrum.metadata:
variance = 1
else:
variance = self.spectrum.metadata.Signal.Noise_properties.variance
if isinstance(variance, Signal):
if (
variance.axes_manager.navigation_shape
== self.spectrum.axes_manager.navigation_shape
):
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple
)[self.channel_switches]
else:
raise AttributeError(
"The `navigation_shape` of the variance signals "
"is not equal to the variance shape of the "
"spectrum"
)
elif not isinstance(variance, numbers.Number):
raise AttributeError(
"Variance must be a number or a `Signal` instance but "
"currently it is a %s" % type(variance)
)
weights = 1.0 / np.sqrt(variance)
else:
raise ValueError('method must be "ls" or "ml" but %s given' % method)
args = (self.spectrum()[self.channel_switches], weights)
# Least squares "dedicated" fitters
if fitter == "leastsq":
output = leastsq(
self._errfunc,
self.p0[:],
Dfun=jacobian,
col_deriv=1,
args=args,
full_output=True,
**kwargs,
)
self.p0, pcov = output[0:2]
if (self.axis.size > len(self.p0)) and pcov is not None:
pcov *= (self._errfunc(self.p0, *args) ** 2).sum() / (
len(args[0]) - len(self.p0)
)
self.p_std = np.sqrt(np.diag(pcov))
self.fit_output = output
elif fitter == "odr":
modelo = odr.Model(fcn=self._function4odr, fjacb=odr_jacobian)
mydata = odr.RealData(
self.axis.axis[self.channel_switches],
self.spectrum()[self.channel_switches],
sx=None,
sy=(1 / weights if weights is not None else None),
)
myodr = odr.ODR(mydata, modelo, beta0=self.p0[:])
myoutput = myodr.run()
result = myoutput.beta
self.p_std = myoutput.sd_beta
self.p0 = result
self.fit_output = myoutput
elif fitter == "mpfit":
autoderivative = 1
if grad is True:
autoderivative = 0
if bounded is True:
self.set_mpfit_parameters_info()
elif bounded is False:
self.mpfit_parinfo = None
m = mpfit(
self._errfunc4mpfit,
self.p0[:],
parinfo=self.mpfit_parinfo,
functkw={"y": self.spectrum()[self.channel_switches], "weights": weights},
autoderivative=autoderivative,
quiet=1,
)
self.p0 = m.params
if (self.axis.size > len(self.p0)) and m.perror is not None:
self.p_std = m.perror * np.sqrt(
(self._errfunc(self.p0, *args) ** 2).sum()
/ (len(args[0]) - len(self.p0))
)
self.fit_output = m
else:
# General optimizers (incluiding constrained ones(tnc,l_bfgs_b)
# Least squares or maximum likelihood
if method == "ml":
tominimize = self._poisson_likelihood_function
fprime = grad_ml
elif method in ["ls", "wls"]:
tominimize = self._errfunc2
fprime = grad_ls
# OPTIMIZERS
# Simple (don't use gradient)
if fitter == "fmin":
self.p0 = fmin(tominimize, self.p0, args=args, **kwargs)
elif fitter == "powell":
self.p0 = fmin_powell(tominimize, self.p0, args=args, **kwargs)
# Make use of the gradient
elif fitter == "cg":
self.p0 = fmin_cg(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
elif fitter == "ncg":
self.p0 = fmin_ncg(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
elif fitter == "bfgs":
self.p0 = fmin_bfgs(tominimize, self.p0, fprime=fprime, args=args, **kwargs)
# Constrainded optimizers
# Use gradient
elif fitter == "tnc":
if bounded is True:
self.set_boundaries()
elif bounded is False:
self.self.free_parameters_boundaries = None
self.p0 = fmin_tnc(
tominimize,
self.p0,
fprime=fprime,
args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad,
**kwargs,
)[0]
elif fitter == "l_bfgs_b":
if bounded is True:
self.set_boundaries()
elif bounded is False:
self.self.free_parameters_boundaries = None
self.p0 = fmin_l_bfgs_b(
tominimize,
self.p0,
fprime=fprime,
args=args,
bounds=self.free_parameters_boundaries,
approx_grad=approx_grad,
**kwargs,
)[0]
else:
print(
"""
The %s optimizer is not available.
Available optimizers:
Unconstrained:
--------------
Only least Squares: leastsq and odr
General: fmin, powell, cg, ncg, bfgs
Cosntrained:
------------
tnc and l_bfgs_b
"""
% fitter
)
if np.iterable(self.p0) == 0:
self.p0 = (self.p0,)
self._fetch_values_from_p0(p_std=self.p_std)
self.store_current_values()
self._calculate_chisq()
self._set_current_degrees_of_freedom()
if ext_bounding is True:
self._disable_ext_bounding()
if switch_aap is True and update_plot is False:
self._connect_parameters2update_plot()
self.update_plot()
|
https://github.com/hyperspy/hyperspy/issues/982
|
import hyperspy.api as hs
import numpy as np
s = hs.signals.EELSSpectrum(np.random.random((10, 1000)))
s.set_microscope_parameters(100, 1, 10)
s.add_elements(("C","O"))
m = s.create_model()
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:86: VisibleDeprecationWarning: Adding "background" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
Hartree-Slater GOS
Element: O
Subshell: K
Onset Energy = 532.0
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:188: VisibleDeprecationWarning: Adding "O_K" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:194: VisibleDeprecationWarning: Adding "O" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
Hartree-Slater GOS
Element: C
Subshell: K
Onset Energy = 284.0
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:188: VisibleDeprecationWarning: Adding "C_K" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:194: VisibleDeprecationWarning: Adding "C" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
m.multifit(fitter="mpfit", kind="smart", bounded=True)
calculating 0% | | ETA: --:--:-- Traceback (most recent call last):
File "<ipython-input-9-6d54be28b846>", line 1, in <module>
m.multifit(fitter="mpfit", kind="smart", bounded=True)
File "/home/fjd29/Python/hyperspy3/hyperspy/model.py", line 1428, in multifit
self.fit(**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 346, in fit
**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 381, in smart_fit
self.fit_background(start_energy, **kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 442, in fit_background
self.fit(**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 355, in fit
**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/model.py", line 1260, in fit
if (self.axis.size > len(self.p0)) and m.perror is not None:
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def multifit(
self,
mask=None,
fetch_only_fixed=False,
autosave=False,
autosave_every=10,
show_progressbar=None,
**kwargs,
):
"""Fit the data to the model at all the positions of the
navigation dimensions.
Parameters
----------
mask : {None, numpy.array}
To mask (do not fit) at certain position pass a numpy.array
of type bool where True indicates that the data will not be
fitted at the given position.
fetch_only_fixed : bool
If True, only the fixed parameters values will be updated
when changing the positon.
autosave : bool
If True, the result of the fit will be saved automatically
with a frequency defined by autosave_every.
autosave_every : int
Save the result of fitting every given number of spectra.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
**kwargs : key word arguments
Any extra key word argument will be passed to
the fit method. See the fit method documentation for
a list of valid arguments.
See Also
--------
fit
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if autosave is not False:
fd, autosave_fn = tempfile.mkstemp(
prefix="hyperspy_autosave-", dir=".", suffix=".npz"
)
os.close(fd)
autosave_fn = autosave_fn[:-4]
messages.information(
"Autosaving each %s pixels to %s.npz" % (autosave_every, autosave_fn)
)
messages.information("When multifit finishes its job the file will be deleted")
if mask is not None and (
mask.shape != tuple(self.axes_manager._navigation_shape_in_array)
):
messages.warning_exit(
"The mask must be a numpy array of boolen type with "
" shape: %s" + str(self.axes_manager._navigation_shape_in_array)
)
masked_elements = 0 if mask is None else mask.sum()
maxval = self.axes_manager.navigation_size - masked_elements
if maxval > 0:
pbar = progressbar.progressbar(maxval=maxval, disabled=not show_progressbar)
if "bounded" in kwargs and kwargs["bounded"] is True:
if kwargs["fitter"] not in ("tnc", "l_bfgs_b", "mpfit"):
messages.information(
"The chosen fitter does not suppport bounding."
"If you require bounding please select one of the "
"following fitters instead: mpfit, tnc, l_bfgs_b"
)
kwargs["bounded"] = False
i = 0
self.axes_manager.disconnect(self.fetch_stored_values)
for index in self.axes_manager:
if mask is None or not mask[index[::-1]]:
self.fetch_stored_values(only_fixed=fetch_only_fixed)
self.fit(**kwargs)
i += 1
if maxval > 0:
pbar.update(i)
if autosave is True and i % autosave_every == 0:
self.save_parameters2file(autosave_fn)
if maxval > 0:
pbar.finish()
self.axes_manager.connect(self.fetch_stored_values)
if autosave is True:
messages.information(
"Deleting the temporary file %s pixels" % (autosave_fn + "npz")
)
os.remove(autosave_fn + ".npz")
|
def multifit(
self,
mask=None,
fetch_only_fixed=False,
autosave=False,
autosave_every=10,
show_progressbar=None,
**kwargs,
):
"""Fit the data to the model at all the positions of the
navigation dimensions.
Parameters
----------
mask : {None, numpy.array}
To mask (do not fit) at certain position pass a numpy.array
of type bool where True indicates that the data will not be
fitted at the given position.
fetch_only_fixed : bool
If True, only the fixed parameters values will be updated
when changing the positon.
autosave : bool
If True, the result of the fit will be saved automatically
with a frequency defined by autosave_every.
autosave_every : int
Save the result of fitting every given number of spectra.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
**kwargs : key word arguments
Any extra key word argument will be passed to
the fit method. See the fit method documentation for
a list of valid arguments.
See Also
--------
fit
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if autosave is not False:
fd, autosave_fn = tempfile.mkstemp(
prefix="hyperspy_autosave-", dir=".", suffix=".npz"
)
os.close(fd)
autosave_fn = autosave_fn[:-4]
messages.information(
"Autosaving each %s pixels to %s.npz" % (autosave_every, autosave_fn)
)
messages.information("When multifit finishes its job the file will be deleted")
if mask is not None and (
mask.shape != tuple(self.axes_manager._navigation_shape_in_array)
):
messages.warning_exit(
"The mask must be a numpy array of boolen type with "
" shape: %s" + str(self.axes_manager._navigation_shape_in_array)
)
masked_elements = 0 if mask is None else mask.sum()
maxval = self.axes_manager.navigation_size - masked_elements
if maxval > 0:
pbar = progressbar.progressbar(maxval=maxval, disabled=not show_progressbar)
if "bounded" in kwargs and kwargs["bounded"] is True:
if kwargs["fitter"] == "mpfit":
self.set_mpfit_parameters_info()
kwargs["bounded"] = None
elif kwargs["fitter"] in ("tnc", "l_bfgs_b"):
self.set_boundaries()
kwargs["bounded"] = None
else:
messages.information(
"The chosen fitter does not suppport bounding."
"If you require bounding please select one of the "
"following fitters instead: mpfit, tnc, l_bfgs_b"
)
kwargs["bounded"] = False
i = 0
self.axes_manager.disconnect(self.fetch_stored_values)
for index in self.axes_manager:
if mask is None or not mask[index[::-1]]:
self.fetch_stored_values(only_fixed=fetch_only_fixed)
self.fit(**kwargs)
i += 1
if maxval > 0:
pbar.update(i)
if autosave is True and i % autosave_every == 0:
self.save_parameters2file(autosave_fn)
if maxval > 0:
pbar.finish()
self.axes_manager.connect(self.fetch_stored_values)
if autosave is True:
messages.information(
"Deleting the temporary file %s pixels" % (autosave_fn + "npz")
)
os.remove(autosave_fn + ".npz")
|
https://github.com/hyperspy/hyperspy/issues/982
|
import hyperspy.api as hs
import numpy as np
s = hs.signals.EELSSpectrum(np.random.random((10, 1000)))
s.set_microscope_parameters(100, 1, 10)
s.add_elements(("C","O"))
m = s.create_model()
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:86: VisibleDeprecationWarning: Adding "background" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
Hartree-Slater GOS
Element: O
Subshell: K
Onset Energy = 532.0
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:188: VisibleDeprecationWarning: Adding "O_K" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:194: VisibleDeprecationWarning: Adding "O" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
Hartree-Slater GOS
Element: C
Subshell: K
Onset Energy = 284.0
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:188: VisibleDeprecationWarning: Adding "C_K" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py:194: VisibleDeprecationWarning: Adding "C" to the user namespace. This feature will be removed in HyperSpy 0.9.
VisibleDeprecationWarning)
m.multifit(fitter="mpfit", kind="smart", bounded=True)
calculating 0% | | ETA: --:--:-- Traceback (most recent call last):
File "<ipython-input-9-6d54be28b846>", line 1, in <module>
m.multifit(fitter="mpfit", kind="smart", bounded=True)
File "/home/fjd29/Python/hyperspy3/hyperspy/model.py", line 1428, in multifit
self.fit(**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 346, in fit
**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 381, in smart_fit
self.fit_background(start_energy, **kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 442, in fit_background
self.fit(**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/models/eelsmodel.py", line 355, in fit
**kwargs)
File "/home/fjd29/Python/hyperspy3/hyperspy/model.py", line 1260, in fit
if (self.axis.size > len(self.p0)) and m.perror is not None:
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def _print_summary(self):
string = "\n\tTitle: "
string += self.metadata.General.title
if self.metadata.has_item("Signal.signal_type"):
string += "\n\tSignal type: "
string += self.metadata.Signal.signal_type
string += "\n\tData dimensions: "
string += str(self.axes_manager.shape)
if self.metadata.has_item("Signal.record_by"):
string += "\n\tData representation: "
string += self.metadata.Signal.record_by
string += "\n\tData type: "
string += str(self.data.dtype)
print(string)
|
def _print_summary(self):
string = "\n\tTitle: "
string += self.metadata.General.title.decode("utf8")
if self.metadata.has_item("Signal.signal_type"):
string += "\n\tSignal type: "
string += self.metadata.Signal.signal_type
string += "\n\tData dimensions: "
string += str(self.axes_manager.shape)
if self.metadata.has_item("Signal.record_by"):
string += "\n\tData representation: "
string += self.metadata.Signal.record_by
string += "\n\tData type: "
string += str(self.data.dtype)
print(string)
|
https://github.com/hyperspy/hyperspy/issues/924
|
s = hs.load("*.msa", stack=True)
Loading individual files
Individual files loaded correctly
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-5-23a60eda1ce6> in <module>()
----> 1 s = hs.load("*.msa", stack=True)
/home/fjd29/git/hyperspy/hyperspy/io.py in load(filenames, record_by, signal_type, signal_origin, stack, stack_axis, new_axis_name, mmap, mmap_dir, **kwds)
194 )[1]
195 messages.information('Individual files loaded correctly')
--> 196 signal._print_summary()
197 objects = [signal, ]
198 else:
/home/fjd29/git/hyperspy/hyperspy/signal.py in _print_summary(self)
2808 def _print_summary(self):
2809 string = "\n\tTitle: "
-> 2810 string += self.metadata.General.title.decode('utf8')
2811 if self.metadata.has_item("Signal.signal_type"):
2812 string += "\n\tSignal type: "
AttributeError: 'str' object has no attribute 'decode'
|
AttributeError
|
def _binary_operator_ruler(self, other, op_name):
exception_message = "Invalid dimensions for this operation"
if isinstance(other, Signal):
# Both objects are signals
oam = other.axes_manager
sam = self.axes_manager
if (
sam.navigation_shape == oam.navigation_shape
and sam.signal_shape == oam.signal_shape
):
# They have the same signal shape.
# The signal axes are aligned but there is
# no guarantee that data axes area aligned so we make sure that
# they are aligned for the operation.
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if op_name in INPLACE_OPERATORS:
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(getattr(sdata, op_name)(odata))
ns.axes_manager._sort_axes()
return ns
else:
# Different navigation and/or signal shapes
if not are_signals_aligned(self, other):
raise ValueError(exception_message)
else:
# They are broadcastable but have different number of axes
new_nav_axes = []
for saxis, oaxis in zip(sam.navigation_axes, oam.navigation_axes):
new_nav_axes.append(
saxis if saxis.size > 1 or oaxis.size == 1 else oaxis
)
bigger_am = None
if sam.navigation_dimension != oam.navigation_dimension:
bigger_am = (
sam
if sam.navigation_dimension > oam.navigation_dimension
else oam
)
new_nav_axes.extend(bigger_am.navigation_axes[len(new_nav_axes) :])
# Because they are broadcastable and navigation axes come
# first in the data array, we don't need to pad the data
# array.
new_sig_axes = []
for saxis, oaxis in zip(sam.signal_axes, oam.signal_axes):
new_sig_axes.append(
saxis if saxis.size > 1 or oaxis.size == 1 else oaxis
)
if sam.signal_dimension != oam.signal_dimension:
bigger_am = (
sam if sam.signal_dimension > oam.signal_dimension else oam
)
new_sig_axes.extend(bigger_am.signal_axes[len(new_sig_axes) :])
sdim_diff = abs(sam.signal_dimension - oam.signal_dimension)
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if len(new_nav_axes) and sdim_diff:
if bigger_am is sam:
# Pad odata
while sdim_diff:
odata = np.expand_dims(odata, oam.navigation_dimension)
sdim_diff -= 1
else:
# Pad sdata
while sdim_diff:
sdata = np.expand_dims(sdata, sam.navigation_dimension)
sdim_diff -= 1
if op_name in INPLACE_OPERATORS:
# This should raise a ValueError if the operation
# changes the shape of the object on the left.
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(getattr(sdata, op_name)(odata))
new_axes = new_nav_axes[::-1] + new_sig_axes[::-1]
ns.axes_manager._axes = [axis.copy() for axis in new_axes]
if bigger_am is oam:
ns.metadata.Signal.record_by = other.metadata.Signal.record_by
ns._assign_subclass()
return ns
else:
# Second object is not a Signal
if op_name in INPLACE_OPERATORS:
getattr(self.data, op_name)(other)
return self
else:
return self._deepcopy_with_new_data(getattr(self.data, op_name)(other))
|
def _binary_operator_ruler(self, other, op_name):
exception_message = "Invalid dimensions for this operation"
if isinstance(other, Signal):
# Both objects are signals
oam = other.axes_manager
sam = self.axes_manager
if (
sam.navigation_shape == oam.navigation_shape
and sam.signal_shape == oam.signal_shape
):
# They have the same signal shape.
# The signal axes are aligned but there is
# no guarantee that data axes area aligned so we make sure that
# they are aligned for the operation.
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if op_name in INPLACE_OPERATORS:
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(getattr(sdata, op_name)(odata))
ns.axes_manager._sort_axes()
return ns
else:
# Different navigation and/or signal shapes
if not are_signals_aligned(self, other):
raise ValueError(exception_message)
else:
# They are broadcastable but have different number of axes
new_nav_axes = []
for saxis, oaxis in zip(sam.navigation_axes, oam.navigation_axes):
new_nav_axes.append(
saxis if saxis.size > 1 or oaxis.size == 1 else oaxis
)
if sam.navigation_dimension != oam.navigation_dimension:
bigger_am = (
sam
if sam.navigation_dimension > oam.navigation_dimension
else oam
)
new_nav_axes.extend(bigger_am.navigation_axes[len(new_nav_axes) :])
# Because they are broadcastable and navigation axes come
# first in the data array, we don't need to pad the data
# array.
new_sig_axes = []
for saxis, oaxis in zip(sam.signal_axes, oam.signal_axes):
new_sig_axes.append(
saxis if saxis.size > 1 or oaxis.size == 1 else oaxis
)
if sam.signal_dimension != oam.signal_dimension:
bigger_am = (
sam if sam.signal_dimension > oam.signal_dimension else oam
)
new_sig_axes.extend(bigger_am.signal_axes[len(new_sig_axes) :])
sdim_diff = abs(sam.signal_dimension - oam.signal_dimension)
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if len(new_nav_axes) and sdim_diff:
if bigger_am is sam:
# Pad odata
while sdim_diff:
odata = np.expand_dims(odata, oam.navigation_dimension)
sdim_diff -= 1
else:
# Pad sdata
while sdim_diff:
sdata = np.expand_dims(sdata, sam.navigation_dimension)
sdim_diff -= 1
if op_name in INPLACE_OPERATORS:
# This should raise a ValueError if the operation
# changes the shape of the object on the left.
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(getattr(sdata, op_name)(odata))
new_axes = new_nav_axes[::-1] + new_sig_axes[::-1]
ns.axes_manager._axes = [axis.copy() for axis in new_axes]
if bigger_am is oam:
ns.metadata.Signal.record_by = other.metadata.Signal.record_by
ns._assign_subclass()
return ns
else:
# Second object is not a Signal
if op_name in INPLACE_OPERATORS:
getattr(self.data, op_name)(other)
return self
else:
return self._deepcopy_with_new_data(getattr(self.data, op_name)(other))
|
https://github.com/hyperspy/hyperspy/issues/911
|
In [23]: s = hs.signals.Signal(np.arange(100.))
In [24]: s1 = s / s.max(0)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-24-91a5f30cc65d> in <module>()
----> 1 s1 = s / s.max(0)
/home/to266/dev/hyperspy/hyperspy/signal.py in __truediv__(self, other)
/home/to266/dev/hyperspy/hyperspy/signal.py in _binary_operator_ruler(self, other, op_name)
2973 ns.axes_manager._axes = [axis.copy()
2974 for axis in new_axes]
-> 2975 if bigger_am is oam:
2976 ns.metadata.Signal.record_by = \
2977 other.metadata.Signal.record_by
UnboundLocalError: local variable 'bigger_am' referenced before assignment
|
UnboundLocalError
|
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the parameters by the two area method
Parameters
----------
signal : Signal instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
"""
axis = signal.axes_manager.signal_axes[0]
binned = signal.metadata.Signal.binned
i1, i2 = axis.value_range_to_indices(x1, x2)
if only_current is True:
estimation = np.polyfit(
axis.axis[i1:i2], signal()[i1:i2], self.get_polynomial_order()
)
if binned is True:
self.coefficients.value = estimation / axis.scale
else:
self.coefficients.value = estimation
return True
else:
if self.coefficients.map is None:
self._create_arrays()
nav_shape = signal.axes_manager._navigation_shape_in_array
unfolded = signal.unfold()
try:
dc = signal.data
# For polyfit the spectrum goes in the first axis
if axis.index_in_array > 0:
dc = dc.T # Unfolded, so simply transpose
cmaps = np.polyfit(
axis.axis[i1:i2], dc[i1:i2, :], self.get_polynomial_order()
)
if axis.index_in_array > 0:
cmaps = cmaps.T # Transpose back if needed
# Shape needed to fit coefficients.map:
cmap_shape = nav_shape + (self.get_polynomial_order() + 1,)
self.coefficients.map["values"][:] = cmaps.reshape(cmap_shape)
if binned is True:
self.coefficients.map["values"] /= axis.scale
self.coefficients.map["is_set"][:] = True
finally:
# Make sure we always attempt to refold
if unfolded:
signal.fold()
self.fetch_stored_values()
return True
|
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the parameters by the two area method
Parameters
----------
signal : Signal instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
"""
axis = signal.axes_manager.signal_axes[0]
binned = signal.metadata.Signal.binned
i1, i2 = axis.value_range_to_indices(x1, x2)
if only_current is True:
estimation = np.polyfit(
axis.axis[i1:i2], signal()[i1:i2], self.get_polynomial_order()
)
if binned is True:
self.coefficients.value = estimation / axis.scale
else:
self.coefficients.value = estimation
return True
else:
if self.coefficients.map is None:
self._create_arrays()
nav_shape = signal.axes_manager._navigation_shape_in_array
signal.unfold()
dc = signal.data
# For polyfit the spectrum goes in the first axis
if axis.index_in_array > 0:
dc = np.rollaxis(dc, axis.index_in_array, 0)
cmaps = np.polyfit(
axis.axis[i1:i2], dc[i1:i2, :], self.get_polynomial_order()
).reshape(
[
self.get_polynomial_order() + 1,
]
+ nav_shape
)
self.coefficients.map["values"][:] = np.rollaxis(cmaps, 0, axis.index_in_array)
if binned is True:
self.coefficients.map["values"] /= axis.scale
self.coefficients.map["is_set"][:] = True
signal.fold()
self.fetch_stored_values()
return True
|
https://github.com/hyperspy/hyperspy/issues/466
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-7c69fa23a4d3> in <module>()
----> 1 s.remove_background(signal_range=(0,100), background_type='Polynomial')
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/signal.pyc in remove_background(self, signal_range, background_type, polynomial_order)
996
997 spectra = self._remove_background_cli(
--> 998 signal_range, background_estimator)
999 return spectra
1000
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/signal.pyc in _remove_background_cli(self, signal_range, background_estimator)
942 signal_range[0],
943 signal_range[1],
--> 944 only_current=False)
945 return self - model.as_signal()
946
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/_components/polynomial.py in estimate_parameters(self, signal, x1, x2, only_current)
112 cmaps = np.polyfit(axis.axis[i1:i2],
113 dc[i1:i2, :], self.get_polynomial_order()).reshape([
--> 114 self.get_polynomial_order() + 1, ] + nav_shape)
115 self.coefficients.map['values'][:] = np.rollaxis(cmaps, 0,
116 axis.index_in_array)
TypeError: can only concatenate list (not "tuple") to list
|
TypeError
|
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the parameters by the two area method
Parameters
----------
signal : Signal instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
"""
axis = signal.axes_manager.signal_axes[0]
binned = signal.metadata.Signal.binned
i1, i2 = axis.value_range_to_indices(x1, x2)
if only_current is True:
estimation = np.polyfit(
axis.axis[i1:i2], signal()[i1:i2], self.get_polynomial_order()
)
if binned is True:
self.coefficients.value = estimation / axis.scale
else:
self.coefficients.value = estimation
return True
else:
if self.coefficients.map is None:
self._create_arrays()
nav_shape = signal.axes_manager._navigation_shape_in_array
unfolded = signal.unfold()
try:
dc = signal.data
# For polyfit the spectrum goes in the first axis
if axis.index_in_array > 0:
dc = np.rollaxis(dc, 1, 0) # Unfolded, so use 1
cmaps = np.polyfit(
axis.axis[i1:i2], dc[i1:i2, :], self.get_polynomial_order()
)
if axis.index_in_array > 0:
cmaps = np.rollaxis(cmaps, 0, 2)
cmap_shape = nav_shape + (self.get_polynomial_order() + 1,)
self.coefficients.map["values"][:] = cmaps.reshape(cmap_shape)
if binned is True:
self.coefficients.map["values"] /= axis.scale
self.coefficients.map["is_set"][:] = True
finally:
if unfolded:
signal.fold()
self.fetch_stored_values()
return True
|
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the parameters by the two area method
Parameters
----------
signal : Signal instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
"""
axis = signal.axes_manager.signal_axes[0]
binned = signal.metadata.Signal.binned
i1, i2 = axis.value_range_to_indices(x1, x2)
if only_current is True:
estimation = np.polyfit(
axis.axis[i1:i2], signal()[i1:i2], self.get_polynomial_order()
)
if binned is True:
self.coefficients.value = estimation / axis.scale
else:
self.coefficients.value = estimation
return True
else:
if self.coefficients.map is None:
self._create_arrays()
nav_shape = signal.axes_manager._navigation_shape_in_array
signal.unfold()
dc = signal.data
# For polyfit the spectrum goes in the first axis
if axis.index_in_array > 0:
dc = np.rollaxis(dc, axis.index_in_array, 0)
cmaps = np.polyfit(
axis.axis[i1:i2], dc[i1:i2, :], self.get_polynomial_order()
).reshape(
[
self.get_polynomial_order() + 1,
]
+ nav_shape
)
self.coefficients.map["values"][:] = np.rollaxis(cmaps, 0, axis.index_in_array)
if binned is True:
self.coefficients.map["values"] /= axis.scale
self.coefficients.map["is_set"][:] = True
signal.fold()
self.fetch_stored_values()
return True
|
https://github.com/hyperspy/hyperspy/issues/466
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-7c69fa23a4d3> in <module>()
----> 1 s.remove_background(signal_range=(0,100), background_type='Polynomial')
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/signal.pyc in remove_background(self, signal_range, background_type, polynomial_order)
996
997 spectra = self._remove_background_cli(
--> 998 signal_range, background_estimator)
999 return spectra
1000
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/signal.pyc in _remove_background_cli(self, signal_range, background_estimator)
942 signal_range[0],
943 signal_range[1],
--> 944 only_current=False)
945 return self - model.as_signal()
946
/media/storage/PhD/software/hyperspy/dev/hyperspy/hyperspy/hyperspy/_components/polynomial.py in estimate_parameters(self, signal, x1, x2, only_current)
112 cmaps = np.polyfit(axis.axis[i1:i2],
113 dc[i1:i2, :], self.get_polynomial_order()).reshape([
--> 114 self.get_polynomial_order() + 1, ] + nav_shape)
115 self.coefficients.map['values'][:] = np.rollaxis(cmaps, 0,
116 axis.index_in_array)
TypeError: can only concatenate list (not "tuple") to list
|
TypeError
|
def run(self):
"""Load skills and update periodically from disk and internet."""
self._remove_git_locks()
self._connected_event.wait()
if (
not self.skill_updater.defaults_installed()
and self.skills_config["auto_update"]
):
LOG.info("Not all default skills are installed, performing skill update...")
self.skill_updater.update_skills()
self._load_on_startup()
# Sync backend and skills.
if is_paired() and not self.upload_queue.started:
self._start_settings_update()
# Scan the file folder that contains Skills. If a Skill is updated,
# unload the existing version from memory and reload from the disk.
while not self._stop_event.is_set():
try:
self._unload_removed_skills()
self._reload_modified_skills()
self._load_new_skills()
self._update_skills()
if is_paired() and self.upload_queue.started and len(self.upload_queue) > 0:
self.msm.clear_cache()
self.skill_updater.post_manifest()
self.upload_queue.send()
self._watchdog()
sleep(2) # Pause briefly before beginning next scan
except Exception:
LOG.exception(
"Something really unexpected has occured "
"and the skill manager loop safety harness was "
"hit."
)
sleep(30)
|
def run(self):
"""Load skills and update periodically from disk and internet."""
self._remove_git_locks()
self._connected_event.wait()
if (
not self.skill_updater.defaults_installed()
and self.skills_config["auto_update"]
):
LOG.info("Not all default skills are installed, performing skill update...")
self.skill_updater.update_skills()
self._load_on_startup()
# Sync backend and skills.
if is_paired() and not self.upload_queue.started:
self._start_settings_update()
# Scan the file folder that contains Skills. If a Skill is updated,
# unload the existing version from memory and reload from the disk.
while not self._stop_event.is_set():
try:
self._reload_modified_skills()
self._load_new_skills()
self._unload_removed_skills()
self._update_skills()
if is_paired() and self.upload_queue.started and len(self.upload_queue) > 0:
self.msm.clear_cache()
self.skill_updater.post_manifest()
self.upload_queue.send()
self._watchdog()
sleep(2) # Pause briefly before beginning next scan
except Exception:
LOG.exception(
"Something really unexpected has occured "
"and the skill manager loop safety harness was "
"hit."
)
sleep(30)
|
https://github.com/MycroftAI/mycroft-core/issues/2822
|
2021-02-04 11:35:41.063 | ERROR | 20080 | mycroft.skills.skill_manager:run:260 | Something really unexpected has occured and the skill manager loop safety harness was hit.
Traceback (most recent call last):
File "/home/gez/mycroft-core/mycroft/skills/skill_manager.py", line 248, in run
self._load_new_skills()
File "/home/gez/mycroft-core/mycroft/skills/skill_manager.py", line 296, in _load_new_skills
for skill_dir in self._get_skill_directories():
File "/home/gez/mycroft-core/mycroft/skills/skill_manager.py", line 322, in _get_skill_directories
if SKILL_MAIN_MODULE in os.listdir(skill_dir):
FileNotFoundError: [Errno 2] No such file or directory: '/home/gez/.local/share/mycroft/skills/youtube-music-skill.forslund/'
2021-02-04 11:36:11.184 | INFO | 20080 | mycroft.skills.skill_manager:_unload_removed_skills:338 | removing youtube-music-skill.forslund
|
FileNotFoundError
|
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
super().__init__(key_phrase, config, lang)
keyword_file_paths = [
expanduser(x.strip())
for x in self.config.get("keyword_file_path", "hey_mycroft.ppn").split(",")
]
sensitivities = self.config.get("sensitivities", 0.5)
try:
from pvporcupine.porcupine import Porcupine
from pvporcupine.util import pv_library_path, pv_model_path
except ImportError as err:
raise Exception(
"Python bindings for Porcupine not found. "
'Please run "mycroft-pip install pvporcupine"'
) from err
library_path = pv_library_path("")
model_file_path = pv_model_path("")
if isinstance(sensitivities, float):
sensitivities = [sensitivities] * len(keyword_file_paths)
else:
sensitivities = [float(x) for x in sensitivities.split(",")]
self.audio_buffer = []
self.has_found = False
self.num_keywords = len(keyword_file_paths)
LOG.warning(
"The Porcupine wakeword engine shipped with "
"Mycroft-core is deprecated and will be removed in "
"mycroft-core 21.02. Use the mycroft-porcupine-plugin "
"instead."
)
LOG.info(
"Loading Porcupine using library path {} and keyword paths {}".format(
library_path, keyword_file_paths
)
)
self.porcupine = Porcupine(
library_path=library_path,
model_path=model_file_path,
keyword_paths=keyword_file_paths,
sensitivities=sensitivities,
)
LOG.info("Loaded Porcupine")
|
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
super(PorcupineHotWord, self).__init__(key_phrase, config, lang)
porcupine_path = expanduser(
self.config.get("porcupine_path", join("~", ".mycroft", "Porcupine"))
)
keyword_file_paths = [
expanduser(x.strip())
for x in self.config.get("keyword_file_path", "hey_mycroft.ppn").split(",")
]
sensitivities = self.config.get("sensitivities", 0.5)
bindings_path = join(porcupine_path, "binding/python")
LOG.info("Adding %s to Python path" % bindings_path)
sys.path.append(bindings_path)
try:
from porcupine import Porcupine
except ImportError:
raise Exception(
"Python bindings for Porcupine not found. "
"Please use --porcupine-path to set Porcupine base path"
)
system = platform.system()
machine = platform.machine()
library_path = join(porcupine_path, "lib/linux/%s/libpv_porcupine.so" % machine)
model_file_path = join(porcupine_path, "lib/common/porcupine_params.pv")
if isinstance(sensitivities, float):
sensitivities = [sensitivities] * len(keyword_file_paths)
else:
sensitivities = [float(x) for x in sensitivities.split(",")]
self.audio_buffer = []
self.has_found = False
self.num_keywords = len(keyword_file_paths)
LOG.info(
"Loading Porcupine using library path {} and keyword paths {}".format(
library_path, keyword_file_paths
)
)
self.porcupine = Porcupine(
library_path=library_path,
model_file_path=model_file_path,
keyword_file_paths=keyword_file_paths,
sensitivities=sensitivities,
)
LOG.info("Loaded Porcupine")
|
https://github.com/MycroftAI/mycroft-core/issues/2720
|
2020-10-13 18:03:22.296 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:328 | Creating wake word engine
2020-10-13 18:03:22.299 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:351 | Using hotword entry for blueberry
2020-10-13 18:03:22.302 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:353 | Phonemes are missing falling back to listeners configuration
2020-10-13 18:03:22.305 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:357 | Threshold is missing falling back to listeners configuration
2020-10-13 18:03:22.310 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via porcupine
2020-10-13 18:03:22.315 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:331 | Adding /home/pi/.mycroft/Porcupine/binding/python to Python path
2020-10-13 18:03:22.338 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:356 | Loading Porcupine using library path /home/pi/.mycroft/Porcupine/lib/linux/armv7l/libpv_porcupine.so and keyword paths ['/home/pi/.mycroft/Porcupine/resources/keyword_files/raspberry-pi/blueberry_raspberry-pi.ppn']
2020-10-13 18:03:22.341 | ERROR | 6577 | mycroft.client.speech.hotword_factory:initialize:423 | Could not create hotword. Falling back to default.
Traceback (most recent call last):
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 411, in initialize
instance = clazz(hotword, config, lang=lang)
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 361, in __init__
sensitivities=sensitivities)
TypeError: __init__() got an unexpected keyword argument 'model_file_path'
2020-10-13 18:03:22.345 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via pocketsphinx
2020-10-13 18:03:22.475 | INFO | 6577 | mycroft.client.speech.listener:create_wakeup_recognizer:365 | creating stand up word engine
2020-10-13 18:03:22.478 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "wake up" wake word via pocketsphinx
2020-10-13 18:03:22.602 | INFO | 6577 | __main__:on_ready:175 | Speech client is ready.
|
TypeError
|
def update(self, chunk):
"""Update detection state from a chunk of audio data.
Arguments:
chunk (bytes): Audio data to parse
"""
pcm = struct.unpack_from("h" * (len(chunk) // 2), chunk)
self.audio_buffer += pcm
while True:
if len(self.audio_buffer) >= self.porcupine.frame_length:
result = self.porcupine.process(
self.audio_buffer[0 : self.porcupine.frame_length]
)
# result will be the index of the found keyword or -1 if
# nothing has been found.
self.has_found |= result >= 0
self.audio_buffer = self.audio_buffer[self.porcupine.frame_length :]
else:
return
|
def update(self, chunk):
pcm = struct.unpack_from("h" * (len(chunk) // 2), chunk)
self.audio_buffer += pcm
while True:
if len(self.audio_buffer) >= self.porcupine.frame_length:
result = self.porcupine.process(
self.audio_buffer[0 : self.porcupine.frame_length]
)
# result could be boolean (if there is one keword)
# or int (if more than one keyword)
self.has_found |= (self.num_keywords == 1 and result) | (
self.num_keywords > 1 and result >= 0
)
self.audio_buffer = self.audio_buffer[self.porcupine.frame_length :]
else:
return
|
https://github.com/MycroftAI/mycroft-core/issues/2720
|
2020-10-13 18:03:22.296 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:328 | Creating wake word engine
2020-10-13 18:03:22.299 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:351 | Using hotword entry for blueberry
2020-10-13 18:03:22.302 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:353 | Phonemes are missing falling back to listeners configuration
2020-10-13 18:03:22.305 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:357 | Threshold is missing falling back to listeners configuration
2020-10-13 18:03:22.310 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via porcupine
2020-10-13 18:03:22.315 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:331 | Adding /home/pi/.mycroft/Porcupine/binding/python to Python path
2020-10-13 18:03:22.338 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:356 | Loading Porcupine using library path /home/pi/.mycroft/Porcupine/lib/linux/armv7l/libpv_porcupine.so and keyword paths ['/home/pi/.mycroft/Porcupine/resources/keyword_files/raspberry-pi/blueberry_raspberry-pi.ppn']
2020-10-13 18:03:22.341 | ERROR | 6577 | mycroft.client.speech.hotword_factory:initialize:423 | Could not create hotword. Falling back to default.
Traceback (most recent call last):
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 411, in initialize
instance = clazz(hotword, config, lang=lang)
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 361, in __init__
sensitivities=sensitivities)
TypeError: __init__() got an unexpected keyword argument 'model_file_path'
2020-10-13 18:03:22.345 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via pocketsphinx
2020-10-13 18:03:22.475 | INFO | 6577 | mycroft.client.speech.listener:create_wakeup_recognizer:365 | creating stand up word engine
2020-10-13 18:03:22.478 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "wake up" wake word via pocketsphinx
2020-10-13 18:03:22.602 | INFO | 6577 | __main__:on_ready:175 | Speech client is ready.
|
TypeError
|
def found_wake_word(self, frame_data):
"""Check if wakeword has been found.
Returns:
(bool) True if wakeword was found otherwise False.
"""
if self.has_found:
self.has_found = False
return True
return False
|
def found_wake_word(self, frame_data):
if self.has_found:
self.has_found = False
return True
return False
|
https://github.com/MycroftAI/mycroft-core/issues/2720
|
2020-10-13 18:03:22.296 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:328 | Creating wake word engine
2020-10-13 18:03:22.299 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:351 | Using hotword entry for blueberry
2020-10-13 18:03:22.302 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:353 | Phonemes are missing falling back to listeners configuration
2020-10-13 18:03:22.305 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:357 | Threshold is missing falling back to listeners configuration
2020-10-13 18:03:22.310 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via porcupine
2020-10-13 18:03:22.315 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:331 | Adding /home/pi/.mycroft/Porcupine/binding/python to Python path
2020-10-13 18:03:22.338 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:356 | Loading Porcupine using library path /home/pi/.mycroft/Porcupine/lib/linux/armv7l/libpv_porcupine.so and keyword paths ['/home/pi/.mycroft/Porcupine/resources/keyword_files/raspberry-pi/blueberry_raspberry-pi.ppn']
2020-10-13 18:03:22.341 | ERROR | 6577 | mycroft.client.speech.hotword_factory:initialize:423 | Could not create hotword. Falling back to default.
Traceback (most recent call last):
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 411, in initialize
instance = clazz(hotword, config, lang=lang)
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 361, in __init__
sensitivities=sensitivities)
TypeError: __init__() got an unexpected keyword argument 'model_file_path'
2020-10-13 18:03:22.345 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via pocketsphinx
2020-10-13 18:03:22.475 | INFO | 6577 | mycroft.client.speech.listener:create_wakeup_recognizer:365 | creating stand up word engine
2020-10-13 18:03:22.478 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "wake up" wake word via pocketsphinx
2020-10-13 18:03:22.602 | INFO | 6577 | __main__:on_ready:175 | Speech client is ready.
|
TypeError
|
def stop(self):
"""Stop the hotword engine.
Clean up Porcupine library.
"""
if self.porcupine is not None:
self.porcupine.delete()
|
def stop(self):
if self.porcupine is not None:
self.porcupine.delete()
|
https://github.com/MycroftAI/mycroft-core/issues/2720
|
2020-10-13 18:03:22.296 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:328 | Creating wake word engine
2020-10-13 18:03:22.299 | INFO | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:351 | Using hotword entry for blueberry
2020-10-13 18:03:22.302 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:353 | Phonemes are missing falling back to listeners configuration
2020-10-13 18:03:22.305 | WARNING | 6577 | mycroft.client.speech.listener:create_wake_word_recognizer:357 | Threshold is missing falling back to listeners configuration
2020-10-13 18:03:22.310 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via porcupine
2020-10-13 18:03:22.315 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:331 | Adding /home/pi/.mycroft/Porcupine/binding/python to Python path
2020-10-13 18:03:22.338 | INFO | 6577 | mycroft.client.speech.hotword_factory:__init__:356 | Loading Porcupine using library path /home/pi/.mycroft/Porcupine/lib/linux/armv7l/libpv_porcupine.so and keyword paths ['/home/pi/.mycroft/Porcupine/resources/keyword_files/raspberry-pi/blueberry_raspberry-pi.ppn']
2020-10-13 18:03:22.341 | ERROR | 6577 | mycroft.client.speech.hotword_factory:initialize:423 | Could not create hotword. Falling back to default.
Traceback (most recent call last):
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 411, in initialize
instance = clazz(hotword, config, lang=lang)
File "/home/pi/mycroft-core/mycroft/client/speech/hotword_factory.py", line 361, in __init__
sensitivities=sensitivities)
TypeError: __init__() got an unexpected keyword argument 'model_file_path'
2020-10-13 18:03:22.345 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "blueberry" wake word via pocketsphinx
2020-10-13 18:03:22.475 | INFO | 6577 | mycroft.client.speech.listener:create_wakeup_recognizer:365 | creating stand up word engine
2020-10-13 18:03:22.478 | INFO | 6577 | mycroft.client.speech.hotword_factory:load_module:403 | Loading "wake up" wake word via pocketsphinx
2020-10-13 18:03:22.602 | INFO | 6577 | __main__:on_ready:175 | Speech client is ready.
|
TypeError
|
def add(self, name, handler, once=False):
"""Create event handler for executing intent or other event.
Arguments:
name (string): IntentParser name
handler (func): Method to call
once (bool, optional): Event handler will be removed after it has
been run once.
"""
def once_wrapper(message):
# Remove registered one-time handler before invoking,
# allowing them to re-schedule themselves.
handler(message)
self.remove(name)
if handler:
if once:
self.bus.once(name, once_wrapper)
self.events.append((name, once_wrapper))
else:
self.bus.on(name, handler)
self.events.append((name, handler))
LOG.debug("Added event: {}".format(name))
|
def add(self, name, handler, once=False):
"""Create event handler for executing intent or other event.
Arguments:
name (string): IntentParser name
handler (func): Method to call
once (bool, optional): Event handler will be removed after it has
been run once.
"""
def once_wrapper(message):
# Remove registered one-time handler before invoking,
# allowing them to re-schedule themselves.
handler(message)
self.remove(name)
if handler:
if once:
self.bus.once(name, once_wrapper)
else:
self.bus.on(name, handler)
self.events.append((name, handler))
|
https://github.com/MycroftAI/mycroft-core/issues/2337
|
12:04:25.758 | INFO | 22386 | mycroft.skills.skill_loader:reload:109 | ATTEMPTING TO RELOAD SKILL: mycroft-alarm.mycroftai
12:04:25.760 | ERROR | 22386 | mycroft.skills.skill_loader:_execute_instance_shutdown:145 | An error occurred while shutting down AlarmSkill
Traceback (most recent call last):
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/skill_loader.py", line 142, in _execute_instance_shutdown
self.instance.default_shutdown()
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/mycroft_skill.py", line 1162, in default_shutdown
self.event_scheduler.shutdown()
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/event_scheduler.py", line 433, in shutdown
self.events.clear()
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/event_container.py", line 183, in clear
self.bus.remove(e, f)
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/client.py", line 172, in remove
self.emitter.remove_listener(event_name, func)
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/threaded_event_emitter.py", line 57, in remove_listener
return super().remove_listener(event_name, func)
File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/.venv/lib/python3.6/site-packages/pyee/__init__.py", line 205, in remove_listener
self._events[event].pop(f)
KeyError: <function create_basic_wrapper.<locals>.wrapper at 0x7f2a504781e0>
|
KeyError
|
def connect(
host="localhost",
user=None,
password="",
db=None,
port=3306,
unix_socket=None,
charset="",
sql_mode=None,
read_default_file=None,
conv=decoders,
use_unicode=None,
client_flag=0,
cursorclass=Cursor,
init_command=None,
connect_timeout=None,
read_default_group=None,
no_delay=None,
autocommit=False,
echo=False,
local_infile=False,
loop=None,
ssl=None,
auth_plugin="",
program_name="",
server_public_key=None,
):
"""See connections.Connection.__init__() for information about
defaults."""
coro = _connect(
host=host,
user=user,
password=password,
db=db,
port=port,
unix_socket=unix_socket,
charset=charset,
sql_mode=sql_mode,
read_default_file=read_default_file,
conv=conv,
use_unicode=use_unicode,
client_flag=client_flag,
cursorclass=cursorclass,
init_command=init_command,
connect_timeout=connect_timeout,
read_default_group=read_default_group,
no_delay=no_delay,
autocommit=autocommit,
echo=echo,
local_infile=local_infile,
loop=loop,
ssl=ssl,
auth_plugin=auth_plugin,
program_name=program_name,
)
return _ConnectionContextManager(coro)
|
def connect(
host="localhost",
user=None,
password="",
db=None,
port=3306,
unix_socket=None,
charset="",
sql_mode=None,
read_default_file=None,
conv=decoders,
use_unicode=None,
client_flag=0,
cursorclass=Cursor,
init_command=None,
connect_timeout=None,
read_default_group=None,
no_delay=None,
autocommit=False,
echo=False,
local_infile=False,
loop=None,
ssl=None,
auth_plugin="",
program_name="",
):
"""See connections.Connection.__init__() for information about
defaults."""
coro = _connect(
host=host,
user=user,
password=password,
db=db,
port=port,
unix_socket=unix_socket,
charset=charset,
sql_mode=sql_mode,
read_default_file=read_default_file,
conv=conv,
use_unicode=use_unicode,
client_flag=client_flag,
cursorclass=cursorclass,
init_command=init_command,
connect_timeout=connect_timeout,
read_default_group=read_default_group,
no_delay=no_delay,
autocommit=autocommit,
echo=echo,
local_infile=local_infile,
loop=loop,
ssl=ssl,
auth_plugin=auth_plugin,
program_name=program_name,
)
return _ConnectionContextManager(coro)
|
https://github.com/aio-libs/aiomysql/issues/297
|
mysql5 works
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 464, in _connect
await self._request_authentication()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 719, in _request_authentication
auth_packet = await self._read_packet()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 554, in _read_packet
packet.check_error()
File "/usr/local/lib/python3.6/site-packages/pymysql/connections.py", line 384, in check_error
err.raise_mysql_exception(self._data)
File "/usr/local/lib/python3.6/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
raise errorclass(errno, errval)
pymysql.err.OperationalError: (1045, "Access denied for user 'test'@'172.18.0.3' (using password: NO)")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "main.py", line 20, in <module>
asyncio.get_event_loop().run_until_complete(connect())
File "/usr/local/lib/python3.6/asyncio/base_events.py", line 468, in run_until_complete
return future.result()
File "main.py", line 17, in connect
await aiomysql.connect(host='mysql8', **args)
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 76, in _connect
await conn._connect()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 484, in _connect
self._host) from e
pymysql.err.OperationalError: (2003, "Can't connect to MySQL server on 'mysql8'")
|
pymysql.err.OperationalError
|
def __init__(
self,
host="localhost",
user=None,
password="",
db=None,
port=3306,
unix_socket=None,
charset="",
sql_mode=None,
read_default_file=None,
conv=decoders,
use_unicode=None,
client_flag=0,
cursorclass=Cursor,
init_command=None,
connect_timeout=None,
read_default_group=None,
no_delay=None,
autocommit=False,
echo=False,
local_infile=False,
loop=None,
ssl=None,
auth_plugin="",
program_name="",
server_public_key=None,
):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
:param host: Host where the database server is located
:param user: Username to log in as
:param password: Password to use.
:param db: Database to use, None to not use a particular one.
:param port: MySQL port to use, default is usually OK.
:param unix_socket: Optionally, you can use a unix socket rather
than TCP/IP.
:param charset: Charset you want to use.
:param sql_mode: Default SQL_MODE to use.
:param read_default_file: Specifies my.cnf file to read these
parameters from under the [client] section.
:param conv: Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types.
See converters.
:param use_unicode: Whether or not to default to unicode strings.
:param client_flag: Custom flags to send to MySQL. Find
potential values in constants.CLIENT.
:param cursorclass: Custom cursor class to use.
:param init_command: Initial SQL statement to run when connection is
established.
:param connect_timeout: Timeout before throwing an exception
when connecting.
:param read_default_group: Group to read from in the configuration
file.
:param no_delay: Disable Nagle's algorithm on the socket
:param autocommit: Autocommit mode. None means use server default.
(default: False)
:param local_infile: boolean to enable the use of LOAD DATA LOCAL
command. (default: False)
:param ssl: Optional SSL Context to force SSL
:param auth_plugin: String to manually specify the authentication
plugin to use, i.e you will want to use mysql_clear_password
when using IAM authentication with Amazon RDS.
(default: Server Default)
:param program_name: Program name string to provide when
handshaking with MySQL. (default: sys.argv[0])
:param server_public_key: SHA256 authentication plugin public
key value.
:param loop: asyncio loop
"""
self._loop = loop or asyncio.get_event_loop()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
_config = partial(cfg.get, read_default_group)
user = _config("user", fallback=user)
password = _config("password", fallback=password)
host = _config("host", fallback=host)
db = _config("database", fallback=db)
unix_socket = _config("socket", fallback=unix_socket)
port = int(_config("port", fallback=port))
charset = _config("default-character-set", fallback=charset)
# pymysql port
if no_delay is not None:
warnings.warn("no_delay option is deprecated", DeprecationWarning)
no_delay = bool(no_delay)
else:
no_delay = True
self._host = host
self._port = port
self._user = user or DEFAULT_USER
self._password = password or ""
self._db = db
self._no_delay = no_delay
self._echo = echo
self._last_usage = self._loop.time()
self._client_auth_plugin = auth_plugin
self._server_auth_plugin = ""
self._auth_plugin_used = ""
self.server_public_key = server_public_key
self.salt = None
# TODO somehow import version from __init__.py
self._connect_attrs = {
"_client_name": "aiomysql",
"_pid": str(os.getpid()),
"_client_version": "0.0.16",
}
if program_name:
self._connect_attrs["program_name"] = program_name
elif sys.argv:
self._connect_attrs["program_name"] = sys.argv[0]
self._unix_socket = unix_socket
if charset:
self._charset = charset
self.use_unicode = True
else:
self._charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self._ssl_context = ssl
if ssl:
client_flag |= CLIENT.SSL
self._encoding = charset_by_name(self._charset).encoding
if local_infile:
client_flag |= CLIENT.LOCAL_FILES
client_flag |= CLIENT.CAPABILITIES
client_flag |= CLIENT.MULTI_STATEMENTS
if self._db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
# asyncio StreamReader, StreamWriter
self._reader = None
self._writer = None
# If connection was closed for specific reason, we should show that to
# user
self._close_reason = None
|
def __init__(
self,
host="localhost",
user=None,
password="",
db=None,
port=3306,
unix_socket=None,
charset="",
sql_mode=None,
read_default_file=None,
conv=decoders,
use_unicode=None,
client_flag=0,
cursorclass=Cursor,
init_command=None,
connect_timeout=None,
read_default_group=None,
no_delay=None,
autocommit=False,
echo=False,
local_infile=False,
loop=None,
ssl=None,
auth_plugin="",
program_name="",
):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
:param host: Host where the database server is located
:param user: Username to log in as
:param password: Password to use.
:param db: Database to use, None to not use a particular one.
:param port: MySQL port to use, default is usually OK.
:param unix_socket: Optionally, you can use a unix socket rather
than TCP/IP.
:param charset: Charset you want to use.
:param sql_mode: Default SQL_MODE to use.
:param read_default_file: Specifies my.cnf file to read these
parameters from under the [client] section.
:param conv: Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types.
See converters.
:param use_unicode: Whether or not to default to unicode strings.
:param client_flag: Custom flags to send to MySQL. Find
potential values in constants.CLIENT.
:param cursorclass: Custom cursor class to use.
:param init_command: Initial SQL statement to run when connection is
established.
:param connect_timeout: Timeout before throwing an exception
when connecting.
:param read_default_group: Group to read from in the configuration
file.
:param no_delay: Disable Nagle's algorithm on the socket
:param autocommit: Autocommit mode. None means use server default.
(default: False)
:param local_infile: boolean to enable the use of LOAD DATA LOCAL
command. (default: False)
:param ssl: Optional SSL Context to force SSL
:param auth_plugin: String to manually specify the authentication
plugin to use, i.e you will want to use mysql_clear_password
when using IAM authentication with Amazon RDS.
(default: Server Default)
:param program_name: Program name string to provide when
handshaking with MySQL. (default: sys.argv[0])
:param loop: asyncio loop
"""
self._loop = loop or asyncio.get_event_loop()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
_config = partial(cfg.get, read_default_group)
user = _config("user", fallback=user)
password = _config("password", fallback=password)
host = _config("host", fallback=host)
db = _config("database", fallback=db)
unix_socket = _config("socket", fallback=unix_socket)
port = int(_config("port", fallback=port))
charset = _config("default-character-set", fallback=charset)
# pymysql port
if no_delay is not None:
warnings.warn("no_delay option is deprecated", DeprecationWarning)
no_delay = bool(no_delay)
else:
no_delay = True
self._host = host
self._port = port
self._user = user or DEFAULT_USER
self._password = password or ""
self._db = db
self._no_delay = no_delay
self._echo = echo
self._last_usage = self._loop.time()
self._client_auth_plugin = auth_plugin
self._server_auth_plugin = ""
self._auth_plugin_used = ""
# TODO somehow import version from __init__.py
self._connect_attrs = {
"_client_name": "aiomysql",
"_pid": str(os.getpid()),
"_client_version": "0.0.16",
}
if program_name:
self._connect_attrs["program_name"] = program_name
elif sys.argv:
self._connect_attrs["program_name"] = sys.argv[0]
self._unix_socket = unix_socket
if charset:
self._charset = charset
self.use_unicode = True
else:
self._charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self._ssl_context = ssl
if ssl:
client_flag |= CLIENT.SSL
self._encoding = charset_by_name(self._charset).encoding
if local_infile:
client_flag |= CLIENT.LOCAL_FILES
client_flag |= CLIENT.CAPABILITIES
client_flag |= CLIENT.MULTI_STATEMENTS
if self._db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
# asyncio StreamReader, StreamWriter
self._reader = None
self._writer = None
# If connection was closed for specific reason, we should show that to
# user
self._close_reason = None
|
https://github.com/aio-libs/aiomysql/issues/297
|
mysql5 works
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 464, in _connect
await self._request_authentication()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 719, in _request_authentication
auth_packet = await self._read_packet()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 554, in _read_packet
packet.check_error()
File "/usr/local/lib/python3.6/site-packages/pymysql/connections.py", line 384, in check_error
err.raise_mysql_exception(self._data)
File "/usr/local/lib/python3.6/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
raise errorclass(errno, errval)
pymysql.err.OperationalError: (1045, "Access denied for user 'test'@'172.18.0.3' (using password: NO)")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "main.py", line 20, in <module>
asyncio.get_event_loop().run_until_complete(connect())
File "/usr/local/lib/python3.6/asyncio/base_events.py", line 468, in run_until_complete
return future.result()
File "main.py", line 17, in connect
await aiomysql.connect(host='mysql8', **args)
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 76, in _connect
await conn._connect()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 484, in _connect
self._host) from e
pymysql.err.OperationalError: (2003, "Can't connect to MySQL server on 'mysql8'")
|
pymysql.err.OperationalError
|
async def _request_authentication(self):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
if int(self.server_version.split(".", 1)[0]) >= 5:
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
if self._ssl_context:
# capablities, max packet, charset
data = struct.pack("<IIB", self.client_flag, 16777216, 33)
data += b"\x00" * (32 - len(data))
self.write_packet(data)
# Stop sending events to data_received
self._writer.transport.pause_reading()
# Get the raw socket from the transport
raw_sock = self._writer.transport.get_extra_info("socket", default=None)
if raw_sock is None:
raise RuntimeError("Transport does not expose socket instance")
raw_sock = raw_sock.dup()
self._writer.transport.close()
# MySQL expects TLS negotiation to happen in the middle of a
# TCP connection not at start. Passing in a socket to
# open_connection will cause it to negotiate TLS on an existing
# connection not initiate a new one.
self._reader, self._writer = await asyncio.open_connection(
sock=raw_sock,
ssl=self._ssl_context,
loop=self._loop,
server_hostname=self._host,
)
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, str):
_user = self.user.encode(self.encoding)
else:
_user = self.user
data_init = struct.pack(
"<iIB23s", self.client_flag, MAX_PACKET_LEN, charset_id, b""
)
data = data_init + _user + b"\0"
authresp = b""
auth_plugin = self._client_auth_plugin
if not self._client_auth_plugin:
# Contains the auth plugin from handshake
auth_plugin = self._server_auth_plugin
if auth_plugin in ("", "mysql_native_password"):
authresp = _auth.scramble_native_password(
self._password.encode("latin1"), self.salt
)
elif auth_plugin == "caching_sha2_password":
if self._password:
authresp = _auth.scramble_caching_sha2(
self._password.encode("latin1"), self.salt
)
# Else: empty password
elif auth_plugin == "sha256_password":
if self._ssl_context and self.server_capabilities & CLIENT.SSL:
authresp = self._password.encode("latin1") + b"\0"
elif self._password:
authresp = b"\1" # request public key
else:
authresp = b"\0" # empty password
elif auth_plugin in ("", "mysql_clear_password"):
authresp = self._password.encode("latin1") + b"\0"
if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
data += lenenc_int(len(authresp)) + authresp
elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
data += struct.pack("B", len(authresp)) + authresp
else: # pragma: no cover
# not testing against servers without secure auth (>=5.0)
data += authresp + b"\0"
if self._db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
if isinstance(self._db, str):
db = self._db.encode(self.encoding)
else:
db = self._db
data += db + b"\0"
if self.server_capabilities & CLIENT.PLUGIN_AUTH:
name = auth_plugin
if isinstance(name, str):
name = name.encode("ascii")
data += name + b"\0"
self._auth_plugin_used = auth_plugin
# Sends the server a few pieces of client info
if self.server_capabilities & CLIENT.CONNECT_ATTRS:
connect_attrs = b""
for k, v in self._connect_attrs.items():
k, v = k.encode("utf8"), v.encode("utf8")
connect_attrs += struct.pack("B", len(k)) + k
connect_attrs += struct.pack("B", len(v)) + v
data += struct.pack("B", len(connect_attrs)) + connect_attrs
self.write_packet(data)
auth_packet = await self._read_packet()
# if authentication method isn't accepted the first byte
# will have the octet 254
if auth_packet.is_auth_switch_request():
# https://dev.mysql.com/doc/internals/en/
# connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
auth_packet.read_uint8() # 0xfe packet identifier
plugin_name = auth_packet.read_string()
if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
await self._process_auth(plugin_name, auth_packet)
else:
# send legacy handshake
data = (
_auth.scramble_old_password(
self._password.encode("latin1"), auth_packet.read_all()
)
+ b"\0"
)
self.write_packet(data)
await self._read_packet()
elif auth_packet.is_extra_auth_data():
if auth_plugin == "caching_sha2_password":
await self.caching_sha2_password_auth(auth_packet)
elif auth_plugin == "sha256_password":
await self.sha256_password_auth(auth_packet)
else:
raise OperationalError(
"Received extra packet for auth method %r", auth_plugin
)
|
async def _request_authentication(self):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
if int(self.server_version.split(".", 1)[0]) >= 5:
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
if self._ssl_context:
# capablities, max packet, charset
data = struct.pack("<IIB", self.client_flag, 16777216, 33)
data += b"\x00" * (32 - len(data))
self.write_packet(data)
# Stop sending events to data_received
self._writer.transport.pause_reading()
# Get the raw socket from the transport
raw_sock = self._writer.transport.get_extra_info("socket", default=None)
if raw_sock is None:
raise RuntimeError("Transport does not expose socket instance")
raw_sock = raw_sock.dup()
self._writer.transport.close()
# MySQL expects TLS negotiation to happen in the middle of a
# TCP connection not at start. Passing in a socket to
# open_connection will cause it to negotiate TLS on an existing
# connection not initiate a new one.
self._reader, self._writer = await asyncio.open_connection(
sock=raw_sock,
ssl=self._ssl_context,
loop=self._loop,
server_hostname=self._host,
)
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, str):
_user = self.user.encode(self.encoding)
else:
_user = self.user
data_init = struct.pack(
"<iIB23s", self.client_flag, MAX_PACKET_LEN, charset_id, b""
)
data = data_init + _user + b"\0"
authresp = b""
auth_plugin = self._client_auth_plugin
if not self._client_auth_plugin:
# Contains the auth plugin from handshake
auth_plugin = self._server_auth_plugin
if auth_plugin in ("", "mysql_native_password"):
authresp = _auth.scramble_native_password(
self._password.encode("latin1"), self.salt
)
elif auth_plugin in ("", "mysql_clear_password"):
authresp = self._password.encode("latin1") + b"\0"
if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
data += lenenc_int(len(authresp)) + authresp
elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
data += struct.pack("B", len(authresp)) + authresp
else: # pragma: no cover
# not testing against servers without secure auth (>=5.0)
data += authresp + b"\0"
if self._db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
if isinstance(self._db, str):
db = self._db.encode(self.encoding)
else:
db = self._db
data += db + b"\0"
if self.server_capabilities & CLIENT.PLUGIN_AUTH:
name = auth_plugin
if isinstance(name, str):
name = name.encode("ascii")
data += name + b"\0"
self._auth_plugin_used = auth_plugin
# Sends the server a few pieces of client info
if self.server_capabilities & CLIENT.CONNECT_ATTRS:
connect_attrs = b""
for k, v in self._connect_attrs.items():
k, v = k.encode("utf8"), v.encode("utf8")
connect_attrs += struct.pack("B", len(k)) + k
connect_attrs += struct.pack("B", len(v)) + v
data += struct.pack("B", len(connect_attrs)) + connect_attrs
self.write_packet(data)
auth_packet = await self._read_packet()
# if authentication method isn't accepted the first byte
# will have the octet 254
if auth_packet.is_auth_switch_request():
# https://dev.mysql.com/doc/internals/en/
# connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
auth_packet.read_uint8() # 0xfe packet identifier
plugin_name = auth_packet.read_string()
if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
await self._process_auth(plugin_name, auth_packet)
else:
# send legacy handshake
data = (
_auth.scramble_old_password(
self._password.encode("latin1"), auth_packet.read_all()
)
+ b"\0"
)
self.write_packet(data)
await self._read_packet()
|
https://github.com/aio-libs/aiomysql/issues/297
|
mysql5 works
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 464, in _connect
await self._request_authentication()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 719, in _request_authentication
auth_packet = await self._read_packet()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 554, in _read_packet
packet.check_error()
File "/usr/local/lib/python3.6/site-packages/pymysql/connections.py", line 384, in check_error
err.raise_mysql_exception(self._data)
File "/usr/local/lib/python3.6/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
raise errorclass(errno, errval)
pymysql.err.OperationalError: (1045, "Access denied for user 'test'@'172.18.0.3' (using password: NO)")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "main.py", line 20, in <module>
asyncio.get_event_loop().run_until_complete(connect())
File "/usr/local/lib/python3.6/asyncio/base_events.py", line 468, in run_until_complete
return future.result()
File "main.py", line 17, in connect
await aiomysql.connect(host='mysql8', **args)
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 76, in _connect
await conn._connect()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 484, in _connect
self._host) from e
pymysql.err.OperationalError: (2003, "Can't connect to MySQL server on 'mysql8'")
|
pymysql.err.OperationalError
|
async def _process_auth(self, plugin_name, auth_packet):
# These auth plugins do their own packet handling
if plugin_name == b"caching_sha2_password":
await self.caching_sha2_password_auth(auth_packet)
self._auth_plugin_used = plugin_name.decode()
elif plugin_name == b"sha256_password":
await self.sha256_password_auth(auth_packet)
self._auth_plugin_used = plugin_name.decode()
else:
if plugin_name == b"mysql_native_password":
# https://dev.mysql.com/doc/internals/en/
# secure-password-authentication.html#packet-Authentication::
# Native41
data = _auth.scramble_native_password(
self._password.encode("latin1"), auth_packet.read_all()
)
elif plugin_name == b"mysql_old_password":
# https://dev.mysql.com/doc/internals/en/
# old-password-authentication.html
data = (
_auth.scramble_old_password(
self._password.encode("latin1"), auth_packet.read_all()
)
+ b"\0"
)
elif plugin_name == b"mysql_clear_password":
# https://dev.mysql.com/doc/internals/en/
# clear-text-authentication.html
data = self._password.encode("latin1") + b"\0"
else:
raise OperationalError(
2059, "Authentication plugin '{0}' not configured".format(plugin_name)
)
self.write_packet(data)
pkt = await self._read_packet()
pkt.check_error()
self._auth_plugin_used = plugin_name.decode()
return pkt
|
async def _process_auth(self, plugin_name, auth_packet):
if plugin_name == b"mysql_native_password":
# https://dev.mysql.com/doc/internals/en/
# secure-password-authentication.html#packet-Authentication::
# Native41
data = _auth.scramble_native_password(
self._password.encode("latin1"), auth_packet.read_all()
)
elif plugin_name == b"mysql_old_password":
# https://dev.mysql.com/doc/internals/en/
# old-password-authentication.html
data = (
_auth.scramble_old_password(
self._password.encode("latin1"), auth_packet.read_all()
)
+ b"\0"
)
elif plugin_name == b"mysql_clear_password":
# https://dev.mysql.com/doc/internals/en/
# clear-text-authentication.html
data = self._password.encode("latin1") + b"\0"
else:
raise OperationalError(
2059, "Authentication plugin '%s' not configured" % plugin_name
)
self.write_packet(data)
pkt = await self._read_packet()
pkt.check_error()
self._auth_plugin_used = plugin_name
return pkt
|
https://github.com/aio-libs/aiomysql/issues/297
|
mysql5 works
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 464, in _connect
await self._request_authentication()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 719, in _request_authentication
auth_packet = await self._read_packet()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 554, in _read_packet
packet.check_error()
File "/usr/local/lib/python3.6/site-packages/pymysql/connections.py", line 384, in check_error
err.raise_mysql_exception(self._data)
File "/usr/local/lib/python3.6/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
raise errorclass(errno, errval)
pymysql.err.OperationalError: (1045, "Access denied for user 'test'@'172.18.0.3' (using password: NO)")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "main.py", line 20, in <module>
asyncio.get_event_loop().run_until_complete(connect())
File "/usr/local/lib/python3.6/asyncio/base_events.py", line 468, in run_until_complete
return future.result()
File "main.py", line 17, in connect
await aiomysql.connect(host='mysql8', **args)
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 76, in _connect
await conn._connect()
File "/usr/local/lib/python3.6/site-packages/aiomysql/connection.py", line 484, in _connect
self._host) from e
pymysql.err.OperationalError: (2003, "Can't connect to MySQL server on 'mysql8'")
|
pymysql.err.OperationalError
|
def _get_full_key(self, key: Optional[Union[DictKeyType, int]]) -> str: ...
|
def _get_full_key(self, key: Union[str, Enum, int, None]) -> str: ...
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def _resolve_with_default(
self,
key: Union[DictKeyType, int],
value: Any,
default_value: Any = DEFAULT_VALUE_MARKER,
) -> Any:
"""returns the value with the specified key, like obj.key and obj['key']"""
def is_mandatory_missing(val: Any) -> bool:
return bool(get_value_kind(val) == ValueKind.MANDATORY_MISSING)
val = _get_value(value)
has_default = default_value is not DEFAULT_VALUE_MARKER
if has_default and (val is None or is_mandatory_missing(val)):
return default_value
resolved = self._maybe_resolve_interpolation(
parent=self,
key=key,
value=value,
throw_on_missing=not has_default,
throw_on_resolution_failure=not has_default,
)
if resolved is None and has_default:
return default_value
if is_mandatory_missing(resolved):
if has_default:
return default_value
else:
raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
return _get_value(resolved)
|
def _resolve_with_default(
self,
key: Union[str, int, Enum],
value: Any,
default_value: Any = DEFAULT_VALUE_MARKER,
) -> Any:
"""returns the value with the specified key, like obj.key and obj['key']"""
def is_mandatory_missing(val: Any) -> bool:
return bool(get_value_kind(val) == ValueKind.MANDATORY_MISSING)
val = _get_value(value)
has_default = default_value is not DEFAULT_VALUE_MARKER
if has_default and (val is None or is_mandatory_missing(val)):
return default_value
resolved = self._maybe_resolve_interpolation(
parent=self,
key=key,
value=value,
throw_on_missing=not has_default,
throw_on_resolution_failure=not has_default,
)
if resolved is None and has_default:
return default_value
if is_mandatory_missing(resolved):
if has_default:
return default_value
else:
raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
return _get_value(resolved)
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str:
from .listconfig import ListConfig
from .omegaconf import _select_one
if not isinstance(key, (int, str, Enum, float, bool, slice, type(None))):
return ""
def _slice_to_str(x: slice) -> str:
if x.step is not None:
return f"{x.start}:{x.stop}:{x.step}"
else:
return f"{x.start}:{x.stop}"
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
elif isinstance(key, (int, float, bool)):
key = str(key)
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
if key is not None and key != "":
assert isinstance(self, Container)
cur, _ = _select_one(
c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
)
if cur is None:
cur = self
full_key = prepand("", type(cur), None, key)
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
else:
full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
else:
cur = self
if cur._key() is None:
return ""
full_key = self._key()
assert cur is not None
while cur._get_parent() is not None:
cur = cur._get_parent()
assert cur is not None
key = cur._key()
if key is not None:
full_key = prepand(full_key, type(cur._get_parent()), type(cur), cur._key())
return full_key
|
def _get_full_key(self, key: Union[str, Enum, int, slice, None]) -> str:
from .listconfig import ListConfig
from .omegaconf import _select_one
if not isinstance(key, (int, str, Enum, slice, type(None))):
return ""
def _slice_to_str(x: slice) -> str:
if x.step is not None:
return f"{x.start}:{x.stop}:{x.step}"
else:
return f"{x.start}:{x.stop}"
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
if key is not None and key != "":
assert isinstance(self, Container)
cur, _ = _select_one(
c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
)
if cur is None:
cur = self
full_key = prepand("", type(cur), None, key)
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
else:
full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
else:
cur = self
if cur._key() is None:
return ""
full_key = self._key()
assert cur is not None
while cur._get_parent() is not None:
cur = cur._get_parent()
assert cur is not None
key = cur._key()
if key is not None:
full_key = prepand(full_key, type(cur._get_parent()), type(cur), cur._key())
return full_key
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
elif isinstance(key, (int, float, bool)):
key = str(key)
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
|
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def _s_validate_and_normalize_key(self, key_type: Any, key: Any) -> DictKeyType:
if key_type is Any:
for t in DictKeyType.__args__: # type: ignore
if isinstance(key, t):
return key # type: ignore
raise KeyValidationError("Incompatible key type '$KEY_TYPE'")
elif key_type is bool and key in [0, 1]:
# Python treats True as 1 and False as 0 when used as dict keys
# assert hash(0) == hash(False)
# assert hash(1) == hash(True)
return bool(key)
elif key_type in (str, int, float, bool): # primitive type
if not isinstance(key, key_type):
raise KeyValidationError(
f"Key $KEY ($KEY_TYPE) is incompatible with ({key_type.__name__})"
)
return key # type: ignore
elif issubclass(key_type, Enum):
try:
ret = EnumNode.validate_and_convert_to_enum(key_type, key, allow_none=False)
assert ret is not None
return ret
except ValidationError:
valid = ", ".join([x for x in key_type.__members__.keys()])
raise KeyValidationError(
f"Key '$KEY' is incompatible with the enum type '{key_type.__name__}', valid: [{valid}]"
)
else:
assert False, f"Unsupported key type {key_type}"
|
def _s_validate_and_normalize_key(self, key_type: Any, key: Any) -> DictKeyType:
if key_type is Any:
for t in DictKeyType.__args__: # type: ignore
try:
return self._s_validate_and_normalize_key(key_type=t, key=key)
except KeyValidationError:
pass
raise KeyValidationError("Incompatible key type '$KEY_TYPE'")
elif key_type == str:
if not isinstance(key, str):
raise KeyValidationError(
f"Key $KEY ($KEY_TYPE) is incompatible with ({key_type.__name__})"
)
return key
elif key_type == int:
if not isinstance(key, int):
raise KeyValidationError(
f"Key $KEY ($KEY_TYPE) is incompatible with ({key_type.__name__})"
)
return key
elif issubclass(key_type, Enum):
try:
ret = EnumNode.validate_and_convert_to_enum(key_type, key)
assert ret is not None
return ret
except ValidationError:
valid = ", ".join([x for x in key_type.__members__.keys()])
raise KeyValidationError(
f"Key '$KEY' is incompatible with the enum type '{key_type.__name__}', valid: [{valid}]"
)
else:
assert False, f"Unsupported key type {key_type}"
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def __delitem__(self, key: DictKeyType) -> None:
key = self._validate_and_normalize_key(key)
if self._get_flag("readonly"):
self._format_and_raise(
key=key,
value=None,
cause=ReadonlyConfigError(
"DictConfig in read-only mode does not support deletion"
),
)
if self._get_flag("struct"):
self._format_and_raise(
key=key,
value=None,
cause=ConfigTypeError(
"DictConfig in struct mode does not support deletion"
),
)
if self._is_typed() and self._get_node_flag("struct") is not False:
self._format_and_raise(
key=key,
value=None,
cause=ConfigTypeError(
f"{type_str(self._metadata.object_type)} (DictConfig) does not support deletion"
),
)
try:
del self.__dict__["_content"][key]
except KeyError:
msg = "Key not found: '$KEY'"
self._format_and_raise(key=key, value=None, cause=ConfigKeyError(msg))
|
def __delitem__(self, key: DictKeyType) -> None:
if self._get_flag("readonly"):
self._format_and_raise(
key=key,
value=None,
cause=ReadonlyConfigError(
"DictConfig in read-only mode does not support deletion"
),
)
if self._get_flag("struct"):
self._format_and_raise(
key=key,
value=None,
cause=ConfigTypeError(
"DictConfig in struct mode does not support deletion"
),
)
if self._is_typed() and self._get_node_flag("struct") is not False:
self._format_and_raise(
key=key,
value=None,
cause=ConfigTypeError(
f"{type_str(self._metadata.object_type)} (DictConfig) does not support deletion"
),
)
del self.__dict__["_content"][key]
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def _get_full_key(self, key: Optional[Union[DictKeyType, int]]) -> str:
parent = self._get_parent()
if parent is None:
if self._metadata.key is None:
return ""
else:
return str(self._metadata.key)
else:
return parent._get_full_key(self._metadata.key)
|
def _get_full_key(self, key: Union[str, Enum, int, None]) -> str:
parent = self._get_parent()
if parent is None:
if self._metadata.key is None:
return ""
else:
return str(self._metadata.key)
else:
return parent._get_full_key(self._metadata.key)
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def validate_and_convert_to_enum(
enum_type: Type[Enum], value: Any, allow_none: bool = True
) -> Optional[Enum]:
if allow_none and value is None:
return None
if not isinstance(value, (str, int)) and not isinstance(value, enum_type):
raise ValidationError(
f"Value $VALUE ($VALUE_TYPE) is not a valid input for {enum_type}"
)
if isinstance(value, enum_type):
return value
try:
if isinstance(value, (float, bool)):
raise ValueError
if isinstance(value, int):
return enum_type(value)
if isinstance(value, str):
prefix = f"{enum_type.__name__}."
if value.startswith(prefix):
value = value[len(prefix) :]
return enum_type[value]
assert False
except (ValueError, KeyError) as e:
valid = ", ".join([x for x in enum_type.__members__.keys()])
raise ValidationError(
f"Invalid value '$VALUE', expected one of [{valid}]"
).with_traceback(sys.exc_info()[2]) from e
|
def validate_and_convert_to_enum(enum_type: Type[Enum], value: Any) -> Optional[Enum]:
if value is None:
return None
if not isinstance(value, (str, int)) and not isinstance(value, enum_type):
raise ValidationError(
f"Value $VALUE ($VALUE_TYPE) is not a valid input for {enum_type}"
)
if isinstance(value, enum_type):
return value
try:
if isinstance(value, (float, bool)):
raise ValueError
if isinstance(value, int):
return enum_type(value)
if isinstance(value, str):
prefix = f"{enum_type.__name__}."
if value.startswith(prefix):
value = value[len(prefix) :]
return enum_type[value]
assert False
except (ValueError, KeyError) as e:
valid = ", ".join([x for x in enum_type.__members__.keys()])
raise ValidationError(
f"Invalid value '$VALUE', expected one of [{valid}]"
).with_traceback(sys.exc_info()[2]) from e
|
https://github.com/omry/omegaconf/issues/554
|
del cfg["FOO"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jasha10/omegaconf/omegaconf/dictconfig.py", line 405, in __delitem__
del self.__dict__["_content"][key]
KeyError: 'FOO'
|
KeyError
|
def to_container(
cfg: Any,
*,
resolve: bool = False,
enum_to_str: bool = False,
exclude_structured_configs: bool = False,
) -> Union[Dict[DictKeyType, Any], List[Any], None, str]:
"""
Resursively converts an OmegaConf config to a primitive container (dict or list).
:param cfg: the config to convert
:param resolve: True to resolve all values
:param enum_to_str: True to convert Enum values to strings
:param exclude_structured_configs: If True, do not convert Structured Configs
(DictConfigs backed by a dataclass)
:return: A dict or a list representing this config as a primitive container.
"""
if not OmegaConf.is_config(cfg):
raise ValueError(
f"Input cfg is not an OmegaConf config object ({type_str(type(cfg))})"
)
return BaseContainer._to_content(
cfg,
resolve=resolve,
enum_to_str=enum_to_str,
exclude_structured_configs=exclude_structured_configs,
)
|
def to_container(
cfg: Any,
*,
resolve: bool = False,
enum_to_str: bool = False,
exclude_structured_configs: bool = False,
) -> Union[Dict[DictKeyType, Any], List[Any], None, str]:
"""
Resursively converts an OmegaConf config to a primitive container (dict or list).
:param cfg: the config to convert
:param resolve: True to resolve all values
:param enum_to_str: True to convert Enum values to strings
:param exclude_structured_configs: If True, do not convert Structured Configs
(DictConfigs backed by a dataclass)
:return: A dict or a list representing this config as a primitive container.
"""
assert isinstance(cfg, Container)
# noinspection PyProtectedMember
return BaseContainer._to_content(
cfg,
resolve=resolve,
enum_to_str=enum_to_str,
exclude_structured_configs=exclude_structured_configs,
)
|
https://github.com/omry/omegaconf/issues/418
|
Traceback (most recent call last):
File "cluster.py", line 24, in start_job
OmegaConf.to_container(cfg, resolve=True)
AssertionError
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
|
AssertionError
|
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
"""merge src into dest and return a new copy, does not modified input"""
from omegaconf import DictConfig, OmegaConf, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
# if source DictConfig is an interpolation set the DictConfig one to be the same interpolation.
if src._is_interpolation():
dest._set_value(src._value())
return
# if source DictConfig is missing set the DictConfig one to be missing too.
if src._is_missing():
dest._set_value("???")
return
dest._validate_merge(key=None, value=src)
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
elif is_list_annotation(type_):
node._set_value([])
else:
node._set_value(type_)
if dest._is_interpolation() or dest._is_missing():
expand(dest)
for key, src_value in src.items_ex(resolve=False):
dest_node = dest._get_node(key, validate_access=False)
if isinstance(dest_node, Container) and OmegaConf.is_none(dest, key):
if not OmegaConf.is_none(src_value):
expand(dest_node)
if dest_node is not None:
if dest_node._is_interpolation():
target_node = dest_node._dereference_node(
throw_on_resolution_failure=False
)
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
if is_structured_config(dest._metadata.element_type):
dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest._validate_merge(key=key, value=src_value)
dest_node._merge_with(src_value)
else:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
try:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_set_merge_impl
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
if src_type is not None and not is_primitive_dict(src_type):
dest._metadata.object_type = src_type
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
|
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
"""merge src into dest and return a new copy, does not modified input"""
from omegaconf import DictConfig, OmegaConf, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
# if source DictConfig is missing set the DictConfig one to be missing too.
if src._is_missing():
dest._set_value("???")
return
dest._validate_merge(key=None, value=src)
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
elif is_list_annotation(type_):
node._set_value([])
else:
node._set_value(type_)
if dest._is_missing():
expand(dest)
for key, src_value in src.items_ex(resolve=False):
dest_node = dest._get_node(key, validate_access=False)
if isinstance(dest_node, Container) and OmegaConf.is_none(dest, key):
if not OmegaConf.is_none(src_value):
expand(dest_node)
if dest_node is not None:
if dest_node._is_interpolation():
target_node = dest_node._dereference_node(
throw_on_resolution_failure=False
)
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
if is_structured_config(dest._metadata.element_type):
dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest._validate_merge(key=key, value=src_value)
dest_node._merge_with(src_value)
else:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
try:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_set_merge_impl
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
if src_type is not None and not is_primitive_dict(src_type):
dest._metadata.object_type = src_type
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
|
https://github.com/omry/omegaconf/issues/431
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/omegaconf.py", line 321, in merge
target.merge_with(*others[1:])
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/basecontainer.py", line 327, in merge_with
self._format_and_raise(key=None, value=None, cause=e)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/base.py", line 101, in _format_and_raise
type_override=type_override,
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 675, in format_and_raise
_raise(ex, cause)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 591, in _raise
raise ex # set end OC_CAUSE=1 for full backtrace
omegaconf.errors.ConfigKeyError: str interpolation key 'optimization.lr' not found
full_key:
reference_type=Any
object_type=test_class
|
omegaconf.errors.ConfigKeyError
|
def _merge_with(
self,
*others: Union["BaseContainer", Dict[str, Any], List[Any], Tuple[Any], Any],
) -> None:
from .dictconfig import DictConfig
from .listconfig import ListConfig
from .omegaconf import OmegaConf
"""merge a list of other Config objects into this one, overriding as needed"""
for other in others:
if other is None:
raise ValueError("Cannot merge with a None config")
my_flags = {}
if self._get_flag("allow_objects") is True:
my_flags = {"allow_objects": True}
other = _ensure_container(other, flags=my_flags)
if isinstance(self, DictConfig) and isinstance(other, DictConfig):
BaseContainer._map_merge(self, other)
elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
self.__dict__["_content"] = []
if other._is_interpolation():
self._set_value(other._value())
elif other._is_missing():
self._set_value("???")
elif other._is_none():
self._set_value(None)
else:
et = self._metadata.element_type
if is_structured_config(et):
prototype = OmegaConf.structured(et)
for item in other:
if isinstance(item, DictConfig):
item = OmegaConf.merge(prototype, item)
self.append(item)
else:
for item in other:
self.append(item)
# explicit flags on the source config are replacing the flag values in the destination
flags = other._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
self._set_flag(flag, value)
else:
raise TypeError("Cannot merge DictConfig with ListConfig")
# recursively correct the parent hierarchy after the merge
self._re_parent()
|
def _merge_with(
self,
*others: Union["BaseContainer", Dict[str, Any], List[Any], Tuple[Any], Any],
) -> None:
from .dictconfig import DictConfig
from .listconfig import ListConfig
from .omegaconf import OmegaConf
"""merge a list of other Config objects into this one, overriding as needed"""
for other in others:
if other is None:
raise ValueError("Cannot merge with a None config")
my_flags = {}
if self._get_flag("allow_objects") is True:
my_flags = {"allow_objects": True}
other = _ensure_container(other, flags=my_flags)
if isinstance(self, DictConfig) and isinstance(other, DictConfig):
BaseContainer._map_merge(self, other)
elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
if self._is_none() or self._is_missing() or self._is_interpolation():
self.__dict__["_content"] = []
else:
self.__dict__["_content"].clear()
if other._is_missing():
self._set_value("???")
elif other._is_none():
self._set_value(None)
else:
et = self._metadata.element_type
if is_structured_config(et):
prototype = OmegaConf.structured(et)
for item in other:
if isinstance(item, DictConfig):
item = OmegaConf.merge(prototype, item)
self.append(item)
else:
for item in other:
self.append(item)
# explicit flags on the source config are replacing the flag values in the destination
flags = other._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
self._set_flag(flag, value)
else:
raise TypeError("Cannot merge DictConfig with ListConfig")
# recursively correct the parent hierarchy after the merge
self._re_parent()
|
https://github.com/omry/omegaconf/issues/431
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/omegaconf.py", line 321, in merge
target.merge_with(*others[1:])
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/basecontainer.py", line 327, in merge_with
self._format_and_raise(key=None, value=None, cause=e)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/base.py", line 101, in _format_and_raise
type_override=type_override,
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 675, in format_and_raise
_raise(ex, cause)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 591, in _raise
raise ex # set end OC_CAUSE=1 for full backtrace
omegaconf.errors.ConfigKeyError: str interpolation key 'optimization.lr' not found
full_key:
reference_type=Any
object_type=test_class
|
omegaconf.errors.ConfigKeyError
|
def _is_missing(self) -> bool:
try:
self._dereference_node(throw_on_resolution_failure=False, throw_on_missing=True)
return False
except MissingMandatoryValue:
ret = True
assert isinstance(ret, bool)
return ret
|
def _is_missing(self) -> bool:
try:
self._dereference_node(throw_on_missing=True)
return False
except MissingMandatoryValue:
ret = True
assert isinstance(ret, bool)
return ret
|
https://github.com/omry/omegaconf/issues/431
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/omegaconf.py", line 321, in merge
target.merge_with(*others[1:])
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/basecontainer.py", line 327, in merge_with
self._format_and_raise(key=None, value=None, cause=e)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/base.py", line 101, in _format_and_raise
type_override=type_override,
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 675, in format_and_raise
_raise(ex, cause)
File "/private/home/abaevski/.conda/envs/fairseq-fp16-20200821/lib/python3.6/site-packages/omegaconf/_utils.py", line 591, in _raise
raise ex # set end OC_CAUSE=1 for full backtrace
omegaconf.errors.ConfigKeyError: str interpolation key 'optimization.lr' not found
full_key:
reference_type=Any
object_type=test_class
|
omegaconf.errors.ConfigKeyError
|
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
dummy_parent = OmegaConf.create({}, flags=flags)
d = {}
resolved_hints = get_type_hints(get_type_of(obj))
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(resolved_hints[field.name])
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
d[name]._set_parent(None)
return d
|
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
dummy_parent = OmegaConf.create({}, flags=flags)
d = {}
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(field.type)
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
d[name]._set_parent(None)
return d
|
https://github.com/omry/omegaconf/issues/303
|
Traceback (most recent call last):
File "test2.py", line 10, in <module>
cfg = OmegaConf.structured(Config)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/omegaconf.py", line 134, in structured
return OmegaConf.create(obj, parent)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/omegaconf.py", line 171, in create
return OmegaConf._create_impl(obj=obj, parent=parent)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/omegaconf.py", line 220, in _create_impl
element_type=element_type,
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/dictconfig.py", line 74, in __init__
self._set_value(content)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/dictconfig.py", line 546, in _set_value
data = get_structured_config_data(value)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/_utils.py", line 258, in get_structured_config_data
return get_dataclass_data(obj)
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/_utils.py", line 202, in get_dataclass_data
ref_type=type_, is_optional=is_optional, key=name, value=value, parent=None,
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/omegaconf.py", line 681, in _maybe_wrap
key=key,
File "/private/home/odelalleau/.conda/envs/py37-hydra/lib/python3.7/site-packages/omegaconf/omegaconf.py", line 641, in _node_wrap
elif issubclass(type_, Enum):
TypeError: issubclass() arg 1 must be a class
|
TypeError
|
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
"""merge src into dest and return a new copy, does not modified input"""
from omegaconf import DictConfig, OmegaConf, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
# if source DictConfig is missing set the DictConfig one to be missing too.
if src._is_missing():
dest._set_value("???")
return
dest._validate_set_merge_impl(key=None, value=src, is_assign=False)
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
elif is_list_annotation(type_):
node._set_value([])
else:
node._set_value(type_)
if dest._is_missing():
expand(dest)
for key, src_value in src.items_ex(resolve=False):
dest_node = dest._get_node(key, validate_access=False)
if isinstance(dest_node, Container) and OmegaConf.is_none(dest, key):
if not OmegaConf.is_none(src_value):
expand(dest_node)
if dest_node is not None:
if dest_node._is_interpolation():
target_node = dest_node._dereference_node(
throw_on_resolution_failure=False
)
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
if is_structured_config(dest._metadata.element_type):
dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest._validate_merge(key=key, value=src_value)
dest_node._merge_with(src_value)
else:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
try:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_set_merge_impl
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
if src_type is not None and not is_primitive_dict(src_type):
dest._metadata.object_type = src_type
# explicit flags on the source config are replacing the flag values in the destination
for flag, value in src._metadata.flags.items():
if value is not None:
dest._set_flag(flag, value)
|
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
"""merge src into dest and return a new copy, does not modified input"""
from omegaconf import DictConfig, OmegaConf, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
# if source DictConfig is missing set the DictConfig one to be missing too.
if src._is_missing():
dest._set_value("???")
return
dest._validate_set_merge_impl(key=None, value=src, is_assign=False)
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
else:
node._set_value(type_)
if dest._is_missing():
expand(dest)
for key, src_value in src.items_ex(resolve=False):
dest_node = dest._get_node(key, validate_access=False)
if isinstance(dest_node, Container) and OmegaConf.is_none(dest, key):
if not OmegaConf.is_none(src_value):
expand(dest_node)
if dest_node is not None:
if dest_node._is_interpolation():
target_node = dest_node._dereference_node(
throw_on_resolution_failure=False
)
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
if is_structured_config(dest._metadata.element_type):
dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest._validate_merge(key=key, value=src_value)
dest_node._merge_with(src_value)
else:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
try:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_set_merge_impl
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
if src_type is not None and not is_primitive_dict(src_type):
dest._metadata.object_type = src_type
# explicit flags on the source config are replacing the flag values in the destination
for flag, value in src._metadata.flags.items():
if value is not None:
dest._set_flag(flag, value)
|
https://github.com/omry/omegaconf/issues/336
|
Traceback (most recent call last):
File "hydra/_internal/config_load
er_impl.py", line 610, in _load_config_impl
merged = OmegaConf.merge(schema.config, ret.config)
File "omegaconf/omegaconf.py", li
ne 316, in merge
target.merge_with(*others[1:])
File "omegaconf/basecontainer.py"
, line 324, in merge_with
self._format_and_raise(key=None, value=None, cause=e)
File "omegaconf/base.py", line 10
1, in _format_and_raise
type_override=type_override,
File "omegaconf/_utils.py", line
675, in format_and_raise
_raise(ex, cause)
File "omegaconf/_utils.py", line
591, in _raise
raise ex # set end OC_CAUSE=1 for full backtrace
omegaconf.errors.ValidationError: Invalid value assigned : _GenericAlias is not a subclass of ListConfig or list.
full_key:
reference_type=Optional[Dict[Union[str, Enum], Any]]
object_type=dict
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "hydra/_internal/utils.py",
line 203, in run_and_report
return func()
File "hydra/_internal/utils.py",
line 355, in <lambda>
overrides=args.overrides,
File "hydra/_internal/hydra.py",
line 102, in run
run_mode=RunMode.RUN,
File "hydra/_internal/hydra.py",
line 512, in compose_config
from_shell=from_shell,
File "hydra/_internal/config_loader_impl.py", line 153, in load_configuration
from_shell=from_shell,
File "hydra/_internal/config_loader_impl.py", line 256, in _load_configuration
run_mode=run_mode,
File "hydra/_internal/config_loader_impl.py", line 797, in _merge_defaults_into_config
hydra_cfg = merge_defaults_list_into_config(hydra_cfg, user_list)
File "hydra/_internal/config_loader_impl.py", line 775, in merge_defaults_list_into_config
package_override=default1.package,
File "hydra/_internal/config_loader_impl.py", line 690, in _merge_config
package_override=package_override,
File "hydra/_internal/config_loader_impl.py", line 622, in _load_config_impl
) from e
hydra.errors.ConfigCompositionException: Error merging 'lr_scheduler/multi_step' with schema
|
omegaconf.errors.ValidationError
|
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
elif is_list_annotation(type_):
node._set_value([])
else:
node._set_value(type_)
|
def expand(node: Container) -> None:
type_ = get_ref_type(node)
if type_ is not None:
_is_optional, type_ = _resolve_optional(type_)
if is_dict_annotation(type_):
node._set_value({})
else:
node._set_value(type_)
|
https://github.com/omry/omegaconf/issues/336
|
Traceback (most recent call last):
File "hydra/_internal/config_load
er_impl.py", line 610, in _load_config_impl
merged = OmegaConf.merge(schema.config, ret.config)
File "omegaconf/omegaconf.py", li
ne 316, in merge
target.merge_with(*others[1:])
File "omegaconf/basecontainer.py"
, line 324, in merge_with
self._format_and_raise(key=None, value=None, cause=e)
File "omegaconf/base.py", line 10
1, in _format_and_raise
type_override=type_override,
File "omegaconf/_utils.py", line
675, in format_and_raise
_raise(ex, cause)
File "omegaconf/_utils.py", line
591, in _raise
raise ex # set end OC_CAUSE=1 for full backtrace
omegaconf.errors.ValidationError: Invalid value assigned : _GenericAlias is not a subclass of ListConfig or list.
full_key:
reference_type=Optional[Dict[Union[str, Enum], Any]]
object_type=dict
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "hydra/_internal/utils.py",
line 203, in run_and_report
return func()
File "hydra/_internal/utils.py",
line 355, in <lambda>
overrides=args.overrides,
File "hydra/_internal/hydra.py",
line 102, in run
run_mode=RunMode.RUN,
File "hydra/_internal/hydra.py",
line 512, in compose_config
from_shell=from_shell,
File "hydra/_internal/config_loader_impl.py", line 153, in load_configuration
from_shell=from_shell,
File "hydra/_internal/config_loader_impl.py", line 256, in _load_configuration
run_mode=run_mode,
File "hydra/_internal/config_loader_impl.py", line 797, in _merge_defaults_into_config
hydra_cfg = merge_defaults_list_into_config(hydra_cfg, user_list)
File "hydra/_internal/config_loader_impl.py", line 775, in merge_defaults_list_into_config
package_override=default1.package,
File "hydra/_internal/config_loader_impl.py", line 690, in _merge_config
package_override=package_override,
File "hydra/_internal/config_loader_impl.py", line 622, in _load_config_impl
) from e
hydra.errors.ConfigCompositionException: Error merging 'lr_scheduler/multi_step' with schema
|
omegaconf.errors.ValidationError
|
def _fetch_reference_injections(
fn: Callable[..., Any],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
# # Hotfix, see: https://github.com/ets-labs/python-dependency-injector/issues/362
if GenericAlias and fn is GenericAlias:
fn = fn.__init__
signature = inspect.signature(fn)
injections = {}
closing = {}
for parameter_name, parameter in signature.parameters.items():
if not isinstance(parameter.default, _Marker) and not _is_fastapi_depends(
parameter.default
):
continue
marker = parameter.default
if _is_fastapi_depends(marker):
marker = marker.dependency
if not isinstance(marker, _Marker):
continue
if isinstance(marker, Closing):
marker = marker.provider
closing[parameter_name] = marker
injections[parameter_name] = marker
return injections, closing
|
def _fetch_reference_injections(
fn: Callable[..., Any],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
signature = inspect.signature(fn)
injections = {}
closing = {}
for parameter_name, parameter in signature.parameters.items():
if not isinstance(parameter.default, _Marker) and not _is_fastapi_depends(
parameter.default
):
continue
marker = parameter.default
if _is_fastapi_depends(marker):
marker = marker.dependency
if not isinstance(marker, _Marker):
continue
if isinstance(marker, Closing):
marker = marker.provider
closing[parameter_name] = marker
injections[parameter_name] = marker
return injections, closing
|
https://github.com/ets-labs/python-dependency-injector/issues/362
|
Traceback (most recent call last):
File "/home/ventaquil/Git/project/package/__main__.py", line 86, in <module>
sys.exit(main())
File "/home/ventaquil/Git/project/package/__main__.py", line 67, in main
container.wire(modules=[http, manager, socketio], packages=[model])
File "src/dependency_injector/containers.pyx", line 222, in dependency_injector.containers.DynamicContainer.wire
File "/home/ventaquil/Git/project/cluster/venv/lib/python3.9/site-packages/dependency_injector/wiring.py", line 230, in wire
_patch_method(member, method_name, method, providers_map)
File "/home/ventaquil/Git/project/venv/lib/python3.9/site-packages/dependency_injector/wiring.py", line 302, in _patch_method
reference_injections, reference_closing = _fetch_reference_injections(fn)
File "/home/ventaquil/Git/project/venv/lib/python3.9/site-packages/dependency_injector/wiring.py", line 336, in _fetch_reference_injections
signature = inspect.signature(fn)
File "/usr/lib/python3.9/inspect.py", line 3118, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/usr/lib/python3.9/inspect.py", line 2867, in from_callable
return _signature_from_callable(obj, sigcls=cls,
File "/usr/lib/python3.9/inspect.py", line 2398, in _signature_from_callable
raise ValueError(
ValueError: no signature found for builtin type <class 'types.GenericAlias'>
|
ValueError
|
def _fetch_modules(package):
modules = [package]
for module_info in pkgutil.walk_packages(
path=package.__path__,
prefix=package.__name__ + ".",
):
module = importlib.import_module(module_info.name)
modules.append(module)
return modules
|
def _fetch_modules(package):
modules = [package]
for loader, module_name, is_pkg in pkgutil.walk_packages(
path=package.__path__,
prefix=package.__name__ + ".",
):
module = loader.find_module(module_name).load_module(module_name)
modules.append(module)
return modules
|
https://github.com/ets-labs/python-dependency-injector/issues/320
|
INFO:root:configuration completed
INFO:root:Configuration container wired successfully
INFO:root:Initializing model...
INFO:root:model: <dependency_injector.wiring.Provide object at 0x00000235228E4250>
Traceback (most recent call last):
File "c:/Users/Federico/Desktop/DeepCleverBot/bot/app.py", line 24, in <module>
bot = Bot()
File "C:\Users\Federico\anaconda3\envs\deepcleverbot\lib\site-packages\dependency_injector\wiring.py", line 319, in _patched
to_inject[injection] = provider()
File "src/dependency_injector/providers.pyx", line 160, in dependency_injector.providers.Provider.__call__
File "src/dependency_injector/providers.pyx", line 2130, in dependency_injector.providers.Singleton._provide
File "src/dependency_injector/providers.pxd", line 450, in dependency_injector.providers.__factory_call
File "src/dependency_injector/providers.pxd", line 436, in dependency_injector.providers.__callable_call
File "src/dependency_injector/providers.pxd", line 432, in dependency_injector.providers.__call
File "c:\Users\Federico\Desktop\DeepCleverBot\bot\src\QA\QA.py", line 13, in __init__
self.init_model()
File "c:\Users\Federico\Desktop\DeepCleverBot\bot\src\QA\QA.py", line 18, in init_model
self.tokenizer = BertTokenizer.from_pretrained(self.model, return_token_type_ids = True)
File "C:\Users\Federico\anaconda3\envs\deepcleverbot\lib\site-packages\transformers\tokenization_utils_base.py", line 1516, in from_pretrained
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
File "C:\Users\Federico\anaconda3\envs\deepcleverbot\lib\genericpath.py", line 30, in isfile
st = os.stat(path)
TypeError: stat: path should be string, bytes, os.PathLike or integer, not Provide
|
TypeError
|
def main():
# bring our logging stuff up as early as possible
debug = logging.DEBUG if "-d" in sys.argv or "--debug" in sys.argv else logging.INFO
_init_logger(debug)
extension_mgr = _init_extensions()
baseline_formatters = [
f.name
for f in filter(
lambda x: hasattr(x.plugin, "_accepts_baseline"), extension_mgr.formatters
)
]
# now do normal startup
parser = argparse.ArgumentParser(
description="Bandit - a Python source code security analyzer",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"targets",
metavar="targets",
type=str,
nargs="*",
help="source file(s) or directory(s) to be tested",
)
parser.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="find and process files in subdirectories",
)
parser.add_argument(
"-a",
"--aggregate",
dest="agg_type",
action="store",
default="file",
type=str,
choices=["file", "vuln"],
help="aggregate output by vulnerability (default) or by filename",
)
parser.add_argument(
"-n",
"--number",
dest="context_lines",
action="store",
default=3,
type=int,
help="maximum number of code lines to output for each issue",
)
parser.add_argument(
"-c",
"--configfile",
dest="config_file",
action="store",
default=None,
type=str,
help="optional config file to use for selecting plugins and "
"overriding defaults",
)
parser.add_argument(
"-p",
"--profile",
dest="profile",
action="store",
default=None,
type=str,
help="profile to use (defaults to executing all tests)",
)
parser.add_argument(
"-t",
"--tests",
dest="tests",
action="store",
default=None,
type=str,
help="comma-separated list of test IDs to run",
)
parser.add_argument(
"-s",
"--skip",
dest="skips",
action="store",
default=None,
type=str,
help="comma-separated list of test IDs to skip",
)
parser.add_argument(
"-l",
"--level",
dest="severity",
action="count",
default=1,
help="report only issues of a given severity level or "
"higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)",
)
parser.add_argument(
"-i",
"--confidence",
dest="confidence",
action="count",
default=1,
help="report only issues of a given confidence level or "
"higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)",
)
output_format = "screen" if sys.stdout.isatty() else "txt"
parser.add_argument(
"-f",
"--format",
dest="output_format",
action="store",
default=output_format,
help="specify output format",
choices=sorted(extension_mgr.formatter_names),
)
parser.add_argument(
"--msg-template",
action="store",
default=None,
help="specify output message template"
" (only usable with --format custom),"
" see CUSTOM FORMAT section"
" for list of available values",
)
parser.add_argument(
"-o",
"--output",
dest="output_file",
action="store",
nargs="?",
type=argparse.FileType("w", encoding="utf-8"),
default=sys.stdout,
help="write report to filename",
)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
help="output extra information like excluded and included files",
)
parser.add_argument(
"-d", "--debug", dest="debug", action="store_true", help="turn on debug mode"
)
group.add_argument(
"-q",
"--quiet",
"--silent",
dest="quiet",
action="store_true",
help="only show output in the case of an error",
)
parser.add_argument(
"--ignore-nosec",
dest="ignore_nosec",
action="store_true",
help="do not skip lines with # nosec comments",
)
parser.add_argument(
"-x",
"--exclude",
dest="excluded_paths",
action="store",
default=",".join(constants.EXCLUDE),
help="comma-separated list of paths (glob patterns "
"supported) to exclude from scan "
"(note that these are in addition to the excluded "
"paths provided in the config file) (default: "
+ ",".join(constants.EXCLUDE)
+ ")",
)
parser.add_argument(
"-b",
"--baseline",
dest="baseline",
action="store",
default=None,
help="path of a baseline report to compare against "
"(only JSON-formatted files are accepted)",
)
parser.add_argument(
"--ini",
dest="ini_path",
action="store",
default=None,
help="path to a .bandit file that supplies command line arguments",
)
parser.add_argument(
"--exit-zero",
action="store_true",
dest="exit_zero",
default=False,
help="exit with 0, even with results found",
)
python_ver = sys.version.replace("\n", "")
parser.add_argument(
"--version",
action="version",
version="%(prog)s {version}\n python version = {python}".format(
version=bandit.__version__, python=python_ver
),
)
parser.set_defaults(debug=False)
parser.set_defaults(verbose=False)
parser.set_defaults(quiet=False)
parser.set_defaults(ignore_nosec=False)
plugin_info = [
"%s\t%s" % (a[0], a[1].name) for a in extension_mgr.plugins_by_id.items()
]
blacklist_info = []
for a in extension_mgr.blacklist.items():
for b in a[1]:
blacklist_info.append("%s\t%s" % (b["id"], b["name"]))
plugin_list = "\n\t".join(sorted(set(plugin_info + blacklist_info)))
dedent_text = textwrap.dedent("""
CUSTOM FORMATTING
-----------------
Available tags:
{abspath}, {relpath}, {line}, {test_id},
{severity}, {msg}, {confidence}, {range}
Example usage:
Default template:
bandit -r examples/ --format custom --msg-template \\
"{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"
Provides same output as:
bandit -r examples/ --format custom
Tags can also be formatted in python string.format() style:
bandit -r examples/ --format custom --msg-template \\
"{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"
See python documentation for more information about formatting style:
https://docs.python.org/3/library/string.html
The following tests were discovered and loaded:
-----------------------------------------------
""")
parser.epilog = dedent_text + "\t{0}".format(plugin_list)
# setup work - parse arguments, and initialize BanditManager
args = parser.parse_args()
# Check if `--msg-template` is not present without custom formatter
if args.output_format != "custom" and args.msg_template is not None:
parser.error("--msg-template can only be used with --format=custom")
try:
b_conf = b_config.BanditConfig(config_file=args.config_file)
except utils.ConfigError as e:
LOG.error(e)
sys.exit(2)
# Handle .bandit files in projects to pass cmdline args from file
ini_options = _get_options_from_ini(args.ini_path, args.targets)
if ini_options:
# prefer command line, then ini file
args.excluded_paths = _log_option_source(
args.excluded_paths, ini_options.get("exclude"), "excluded paths"
)
args.skips = _log_option_source(
args.skips, ini_options.get("skips"), "skipped tests"
)
args.tests = _log_option_source(
args.tests, ini_options.get("tests"), "selected tests"
)
ini_targets = ini_options.get("targets")
if ini_targets:
ini_targets = ini_targets.split(",")
args.targets = _log_option_source(args.targets, ini_targets, "selected targets")
# TODO(tmcpeak): any other useful options to pass from .bandit?
args.recursive = _log_option_source(
args.recursive, ini_options.get("recursive"), "recursive scan"
)
args.agg_type = _log_option_source(
args.agg_type, ini_options.get("aggregate"), "aggregate output type"
)
args.context_lines = _log_option_source(
args.context_lines,
ini_options.get("number"),
"max code lines output for issue",
)
args.profile = _log_option_source(
args.profile, ini_options.get("profile"), "profile"
)
args.severity = _log_option_source(
args.severity, ini_options.get("level"), "severity level"
)
args.confidence = _log_option_source(
args.confidence, ini_options.get("confidence"), "confidence level"
)
args.output_format = _log_option_source(
args.output_format, ini_options.get("format"), "output format"
)
args.msg_template = _log_option_source(
args.msg_template,
ini_options.get("msg-template"),
"output message template",
)
args.output_file = _log_option_source(
args.output_file, ini_options.get("output"), "output file"
)
args.verbose = _log_option_source(
args.verbose, ini_options.get("verbose"), "output extra information"
)
args.debug = _log_option_source(
args.debug, ini_options.get("debug"), "debug mode"
)
args.quiet = _log_option_source(
args.quiet, ini_options.get("quiet"), "silent mode"
)
args.ignore_nosec = _log_option_source(
args.ignore_nosec,
ini_options.get("ignore-nosec"),
"do not skip lines with # nosec",
)
args.baseline = _log_option_source(
args.baseline, ini_options.get("baseline"), "path of a baseline report"
)
if not args.targets:
LOG.error("No targets found in CLI or ini files, exiting.")
sys.exit(2)
# if the log format string was set in the options, reinitialize
if b_conf.get_option("log_format"):
log_format = b_conf.get_option("log_format")
_init_logger(log_level=logging.DEBUG, log_format=log_format)
if args.quiet:
_init_logger(log_level=logging.WARN)
try:
profile = _get_profile(b_conf, args.profile, args.config_file)
_log_info(args, profile)
profile["include"].update(args.tests.split(",") if args.tests else [])
profile["exclude"].update(args.skips.split(",") if args.skips else [])
extension_mgr.validate_profile(profile)
except (utils.ProfileNotFound, ValueError) as e:
LOG.error(e)
sys.exit(2)
b_mgr = b_manager.BanditManager(
b_conf,
args.agg_type,
args.debug,
profile=profile,
verbose=args.verbose,
quiet=args.quiet,
ignore_nosec=args.ignore_nosec,
)
if args.baseline is not None:
try:
with open(args.baseline) as bl:
data = bl.read()
b_mgr.populate_baseline(data)
except IOError:
LOG.warning("Could not open baseline report: %s", args.baseline)
sys.exit(2)
if args.output_format not in baseline_formatters:
LOG.warning(
"Baseline must be used with one of the following "
"formats: " + str(baseline_formatters)
)
sys.exit(2)
if args.output_format != "json":
if args.config_file:
LOG.info("using config: %s", args.config_file)
LOG.info(
"running on Python %d.%d.%d",
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
)
# initiate file discovery step within Bandit Manager
b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)
if not b_mgr.b_ts.tests:
LOG.error("No tests would be run, please check the profile.")
sys.exit(2)
# initiate execution of tests within Bandit Manager
b_mgr.run_tests()
LOG.debug(b_mgr.b_ma)
LOG.debug(b_mgr.metrics)
# trigger output of results by Bandit Manager
sev_level = constants.RANKING[args.severity - 1]
conf_level = constants.RANKING[args.confidence - 1]
b_mgr.output_results(
args.context_lines,
sev_level,
conf_level,
args.output_file,
args.output_format,
args.msg_template,
)
if (
b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0
and not args.exit_zero
):
sys.exit(1)
else:
sys.exit(0)
|
def main():
# bring our logging stuff up as early as possible
debug = logging.DEBUG if "-d" in sys.argv or "--debug" in sys.argv else logging.INFO
_init_logger(debug)
extension_mgr = _init_extensions()
baseline_formatters = [
f.name
for f in filter(
lambda x: hasattr(x.plugin, "_accepts_baseline"), extension_mgr.formatters
)
]
# now do normal startup
parser = argparse.ArgumentParser(
description="Bandit - a Python source code security analyzer",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"targets",
metavar="targets",
type=str,
nargs="*",
help="source file(s) or directory(s) to be tested",
)
parser.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="find and process files in subdirectories",
)
parser.add_argument(
"-a",
"--aggregate",
dest="agg_type",
action="store",
default="file",
type=str,
choices=["file", "vuln"],
help="aggregate output by vulnerability (default) or by filename",
)
parser.add_argument(
"-n",
"--number",
dest="context_lines",
action="store",
default=3,
type=int,
help="maximum number of code lines to output for each issue",
)
parser.add_argument(
"-c",
"--configfile",
dest="config_file",
action="store",
default=None,
type=str,
help="optional config file to use for selecting plugins and "
"overriding defaults",
)
parser.add_argument(
"-p",
"--profile",
dest="profile",
action="store",
default=None,
type=str,
help="profile to use (defaults to executing all tests)",
)
parser.add_argument(
"-t",
"--tests",
dest="tests",
action="store",
default=None,
type=str,
help="comma-separated list of test IDs to run",
)
parser.add_argument(
"-s",
"--skip",
dest="skips",
action="store",
default=None,
type=str,
help="comma-separated list of test IDs to skip",
)
parser.add_argument(
"-l",
"--level",
dest="severity",
action="count",
default=1,
help="report only issues of a given severity level or "
"higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)",
)
parser.add_argument(
"-i",
"--confidence",
dest="confidence",
action="count",
default=1,
help="report only issues of a given confidence level or "
"higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)",
)
output_format = "screen" if sys.stdout.isatty() else "txt"
parser.add_argument(
"-f",
"--format",
dest="output_format",
action="store",
default=output_format,
help="specify output format",
choices=sorted(extension_mgr.formatter_names),
)
parser.add_argument(
"--msg-template",
action="store",
default=None,
help="specify output message template"
" (only usable with --format custom),"
" see CUSTOM FORMAT section"
" for list of available values",
)
parser.add_argument(
"-o",
"--output",
dest="output_file",
action="store",
nargs="?",
type=argparse.FileType("w"),
default=sys.stdout,
help="write report to filename",
)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
help="output extra information like excluded and included files",
)
parser.add_argument(
"-d", "--debug", dest="debug", action="store_true", help="turn on debug mode"
)
group.add_argument(
"-q",
"--quiet",
"--silent",
dest="quiet",
action="store_true",
help="only show output in the case of an error",
)
parser.add_argument(
"--ignore-nosec",
dest="ignore_nosec",
action="store_true",
help="do not skip lines with # nosec comments",
)
parser.add_argument(
"-x",
"--exclude",
dest="excluded_paths",
action="store",
default=",".join(constants.EXCLUDE),
help="comma-separated list of paths (glob patterns "
"supported) to exclude from scan "
"(note that these are in addition to the excluded "
"paths provided in the config file) (default: "
+ ",".join(constants.EXCLUDE)
+ ")",
)
parser.add_argument(
"-b",
"--baseline",
dest="baseline",
action="store",
default=None,
help="path of a baseline report to compare against "
"(only JSON-formatted files are accepted)",
)
parser.add_argument(
"--ini",
dest="ini_path",
action="store",
default=None,
help="path to a .bandit file that supplies command line arguments",
)
parser.add_argument(
"--exit-zero",
action="store_true",
dest="exit_zero",
default=False,
help="exit with 0, even with results found",
)
python_ver = sys.version.replace("\n", "")
parser.add_argument(
"--version",
action="version",
version="%(prog)s {version}\n python version = {python}".format(
version=bandit.__version__, python=python_ver
),
)
parser.set_defaults(debug=False)
parser.set_defaults(verbose=False)
parser.set_defaults(quiet=False)
parser.set_defaults(ignore_nosec=False)
plugin_info = [
"%s\t%s" % (a[0], a[1].name) for a in extension_mgr.plugins_by_id.items()
]
blacklist_info = []
for a in extension_mgr.blacklist.items():
for b in a[1]:
blacklist_info.append("%s\t%s" % (b["id"], b["name"]))
plugin_list = "\n\t".join(sorted(set(plugin_info + blacklist_info)))
dedent_text = textwrap.dedent("""
CUSTOM FORMATTING
-----------------
Available tags:
{abspath}, {relpath}, {line}, {test_id},
{severity}, {msg}, {confidence}, {range}
Example usage:
Default template:
bandit -r examples/ --format custom --msg-template \\
"{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"
Provides same output as:
bandit -r examples/ --format custom
Tags can also be formatted in python string.format() style:
bandit -r examples/ --format custom --msg-template \\
"{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"
See python documentation for more information about formatting style:
https://docs.python.org/3/library/string.html
The following tests were discovered and loaded:
-----------------------------------------------
""")
parser.epilog = dedent_text + "\t{0}".format(plugin_list)
# setup work - parse arguments, and initialize BanditManager
args = parser.parse_args()
# Check if `--msg-template` is not present without custom formatter
if args.output_format != "custom" and args.msg_template is not None:
parser.error("--msg-template can only be used with --format=custom")
try:
b_conf = b_config.BanditConfig(config_file=args.config_file)
except utils.ConfigError as e:
LOG.error(e)
sys.exit(2)
# Handle .bandit files in projects to pass cmdline args from file
ini_options = _get_options_from_ini(args.ini_path, args.targets)
if ini_options:
# prefer command line, then ini file
args.excluded_paths = _log_option_source(
args.excluded_paths, ini_options.get("exclude"), "excluded paths"
)
args.skips = _log_option_source(
args.skips, ini_options.get("skips"), "skipped tests"
)
args.tests = _log_option_source(
args.tests, ini_options.get("tests"), "selected tests"
)
ini_targets = ini_options.get("targets")
if ini_targets:
ini_targets = ini_targets.split(",")
args.targets = _log_option_source(args.targets, ini_targets, "selected targets")
# TODO(tmcpeak): any other useful options to pass from .bandit?
args.recursive = _log_option_source(
args.recursive, ini_options.get("recursive"), "recursive scan"
)
args.agg_type = _log_option_source(
args.agg_type, ini_options.get("aggregate"), "aggregate output type"
)
args.context_lines = _log_option_source(
args.context_lines,
ini_options.get("number"),
"max code lines output for issue",
)
args.profile = _log_option_source(
args.profile, ini_options.get("profile"), "profile"
)
args.severity = _log_option_source(
args.severity, ini_options.get("level"), "severity level"
)
args.confidence = _log_option_source(
args.confidence, ini_options.get("confidence"), "confidence level"
)
args.output_format = _log_option_source(
args.output_format, ini_options.get("format"), "output format"
)
args.msg_template = _log_option_source(
args.msg_template,
ini_options.get("msg-template"),
"output message template",
)
args.output_file = _log_option_source(
args.output_file, ini_options.get("output"), "output file"
)
args.verbose = _log_option_source(
args.verbose, ini_options.get("verbose"), "output extra information"
)
args.debug = _log_option_source(
args.debug, ini_options.get("debug"), "debug mode"
)
args.quiet = _log_option_source(
args.quiet, ini_options.get("quiet"), "silent mode"
)
args.ignore_nosec = _log_option_source(
args.ignore_nosec,
ini_options.get("ignore-nosec"),
"do not skip lines with # nosec",
)
args.baseline = _log_option_source(
args.baseline, ini_options.get("baseline"), "path of a baseline report"
)
if not args.targets:
LOG.error("No targets found in CLI or ini files, exiting.")
sys.exit(2)
# if the log format string was set in the options, reinitialize
if b_conf.get_option("log_format"):
log_format = b_conf.get_option("log_format")
_init_logger(log_level=logging.DEBUG, log_format=log_format)
if args.quiet:
_init_logger(log_level=logging.WARN)
try:
profile = _get_profile(b_conf, args.profile, args.config_file)
_log_info(args, profile)
profile["include"].update(args.tests.split(",") if args.tests else [])
profile["exclude"].update(args.skips.split(",") if args.skips else [])
extension_mgr.validate_profile(profile)
except (utils.ProfileNotFound, ValueError) as e:
LOG.error(e)
sys.exit(2)
b_mgr = b_manager.BanditManager(
b_conf,
args.agg_type,
args.debug,
profile=profile,
verbose=args.verbose,
quiet=args.quiet,
ignore_nosec=args.ignore_nosec,
)
if args.baseline is not None:
try:
with open(args.baseline) as bl:
data = bl.read()
b_mgr.populate_baseline(data)
except IOError:
LOG.warning("Could not open baseline report: %s", args.baseline)
sys.exit(2)
if args.output_format not in baseline_formatters:
LOG.warning(
"Baseline must be used with one of the following "
"formats: " + str(baseline_formatters)
)
sys.exit(2)
if args.output_format != "json":
if args.config_file:
LOG.info("using config: %s", args.config_file)
LOG.info(
"running on Python %d.%d.%d",
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
)
# initiate file discovery step within Bandit Manager
b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)
if not b_mgr.b_ts.tests:
LOG.error("No tests would be run, please check the profile.")
sys.exit(2)
# initiate execution of tests within Bandit Manager
b_mgr.run_tests()
LOG.debug(b_mgr.b_ma)
LOG.debug(b_mgr.metrics)
# trigger output of results by Bandit Manager
sev_level = constants.RANKING[args.severity - 1]
conf_level = constants.RANKING[args.confidence - 1]
b_mgr.output_results(
args.context_lines,
sev_level,
conf_level,
args.output_file,
args.output_format,
args.msg_template,
)
if (
b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0
and not args.exit_zero
):
sys.exit(1)
else:
sys.exit(0)
|
https://github.com/PyCQA/bandit/issues/362
|
[main] INFO profile include tests: None
[main] INFO profile exclude tests: None
[main] INFO cli include tests: None
[main] INFO cli exclude tests: None
[main] INFO running on Python 3.6.5
[node_visitor] INFO Unable to find qualified name for module: test.py
Traceback (most recent call last):
File "c:\users\<username>\appdata\local\programs\python\python36\lib\site-packages\bandit\core\manager.py", line 157, in output_results
conf_level=conf_level, lines=lines)
File "c:\users\<username>\appdata\local\programs\python\python36\lib\site-packages\bandit\formatters\text.py", line 161, in report
wrapped_file.write(utils.convert_file_contents(result))
File "c:\users\<username>\appdata\local\programs\python\python36\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\U0001f44f' in position 135: character maps to <undefined>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\<username>\appdata\local\programs\python\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\<username>\appdata\local\programs\python\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\w107566\AppData\Local\Programs\Python\Python36\Scripts\bandit.exe\__main__.py", line 9, in <module>
File "c:\users\<username>\appdata\local\programs\python\python36\lib\site-packages\bandit\cli\main.py", line 345, in main
args.output_format)
File "c:\users\<username>\appdata\local\programs\python\python36\lib\site-packages\bandit\core\manager.py", line 161, in output_results
"%s" % (output_format, str(e)))
RuntimeError: Unable to output report using 'txt' formatter: 'charmap' codec can't encode character '\U0001f44f' in position 135: character maps to <undefined>
|
UnicodeEncodeError
|
def is_assigned(self, node):
assigned = False
if self.ignore_nodes:
if isinstance(self.ignore_nodes, (list, tuple, object)):
if isinstance(node, self.ignore_nodes):
return assigned
if isinstance(node, ast.Expr):
assigned = self.is_assigned(node.value)
elif isinstance(node, ast.FunctionDef):
for name in node.args.args:
if isinstance(name, ast.Name):
if name.id == self.var_name.id:
# If is param the assignations are not affected
return assigned
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.With):
if six.PY2:
if node.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
else:
for withitem in node.items:
var_id = getattr(withitem.optional_vars, "id", None)
if var_id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
elif six.PY2 and isinstance(node, ast.TryFinally):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.finalbody))
elif six.PY2 and isinstance(node, ast.TryExcept):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
elif not six.PY2 and isinstance(node, ast.Try):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
assigned.extend(self.is_assigned_in(node.finalbody))
elif isinstance(node, ast.ExceptHandler):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
elif isinstance(node, (ast.If, ast.For, ast.While)):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.AugAssign):
if isinstance(node.target, ast.Name):
if node.target.id == self.var_name.id:
assigned = node.value
elif isinstance(node, ast.Assign) and node.targets:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == self.var_name.id:
assigned = node.value
elif isinstance(target, ast.Tuple):
pos = 0
for name in target.elts:
if name.id == self.var_name.id:
assigned = node.value.elts[pos]
break
pos += 1
return assigned
|
def is_assigned(self, node):
assigned = False
if self.ignore_nodes:
if isinstance(self.ignore_nodes, (list, tuple, object)):
if isinstance(node, self.ignore_nodes):
return assigned
if isinstance(node, ast.Expr):
assigned = self.is_assigned(node.value)
elif isinstance(node, ast.FunctionDef):
for name in node.args.args:
if isinstance(name, ast.Name):
if name.id == self.var_name.id:
# If is param the assignations are not affected
return assigned
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.With):
if six.PY2:
if node.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
else:
for withitem in node.items:
if withitem.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
elif six.PY2 and isinstance(node, ast.TryFinally):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.finalbody))
elif six.PY2 and isinstance(node, ast.TryExcept):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
elif not six.PY2 and isinstance(node, ast.Try):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
assigned.extend(self.is_assigned_in(node.finalbody))
elif isinstance(node, ast.ExceptHandler):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
elif isinstance(node, (ast.If, ast.For, ast.While)):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.AugAssign):
if isinstance(node.target, ast.Name):
if node.target.id == self.var_name.id:
assigned = node.value
elif isinstance(node, ast.Assign) and node.targets:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == self.var_name.id:
assigned = node.value
elif isinstance(target, ast.Tuple):
pos = 0
for name in target.elts:
if name.id == self.var_name.id:
assigned = node.value.elts[pos]
break
pos += 1
return assigned
|
https://github.com/PyCQA/bandit/issues/574
|
[tester] ERROR Bandit internal error running: django_mark_safe on file ./venv/lib/python3.7/site-packages/django/template/base.py at line 738: 'NoneType' object has no attribute 'id'Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 295, in django_mark_safe
return check_risk(context.node)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 243, in check_risk
secure = evaluate_var(xss_var, parent, node.lineno)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 123, in evaluate_var
to = analyser.is_assigned(node)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 89, in is_assigned
assigned.extend(self.is_assigned_in(node.body))
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 33, in is_assigned_in
new_assigned = self.is_assigned(ast_inst)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 90, in is_assigned
assigned.extend(self.is_assigned_in(node.orelse))
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 33, in is_assigned_in
new_assigned = self.is_assigned(ast_inst)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 89, in is_assigned
assigned.extend(self.is_assigned_in(node.body))
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 33, in is_assigned_in
new_assigned = self.is_assigned(ast_inst)
File "/usr/local/lib/python3.7/site-packages/bandit/plugins/django_xss.py", line 65, in is_assigned
if withitem.optional_vars.id == self.var_name.id:
AttributeError: 'NoneType' object has no attribute 'id'
|
AttributeError
|
def hashlib_new(context):
if isinstance(context.call_function_name_qual, str):
qualname_list = context.call_function_name_qual.split(".")
func = qualname_list[-1]
if "hashlib" in qualname_list and func == "new":
args = context.call_args
keywords = context.call_keywords
name = args[0] if args else keywords["name"]
if isinstance(name, str) and name.lower() in ("md4", "md5"):
return bandit.Issue(
severity=bandit.MEDIUM,
confidence=bandit.HIGH,
text="Use of insecure MD4 or MD5 hash function.",
lineno=context.node.lineno,
)
|
def hashlib_new(context):
if isinstance(context.call_function_name_qual, str):
qualname_list = context.call_function_name_qual.split(".")
func = qualname_list[-1]
if "hashlib" in qualname_list and func == "new":
args = context.call_args
keywords = context.call_keywords
name = args[0] if args else keywords["name"]
if name.lower() in ("md4", "md5"):
return bandit.Issue(
severity=bandit.MEDIUM,
confidence=bandit.HIGH,
text="Use of insecure MD4 or MD5 hash function.",
lineno=context.node.lineno,
)
|
https://github.com/PyCQA/bandit/issues/504
|
$ bandit test_hash_new.py
...
[tester] ERROR Bandit internal error running: hashlib_new on file test_hash_new.py at line 4: 'NoneType' object has no attribute 'lower'Traceback (most recent call last):
File "/home/pshchelo/.virtualenvs/bandit/lib/python3.6/site-packages/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/pshchelo/.virtualenvs/bandit/lib/python3.6/site-packages/bandit/plugins/hashlib_new_insecure_functions.py", line 57, in hashlib_new
if name.lower() in ('md4', 'md5'):
AttributeError: 'NoneType' object has no attribute 'lower'
...
|
AttributeError
|
def visit_Str(self, node):
"""Visitor for AST String nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["str"] = node.s
if not isinstance(node._bandit_parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange_fix(node._bandit_parent)
self.update_scores(self.tester.run_tests(self.context, "Str"))
|
def visit_Str(self, node):
"""Visitor for AST String nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["str"] = node.s
if not isinstance(node.parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange_fix(node.parent)
self.update_scores(self.tester.run_tests(self.context, "Str"))
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def visit_Bytes(self, node):
"""Visitor for AST Bytes nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["bytes"] = node.s
if not isinstance(node._bandit_parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange_fix(node._bandit_parent)
self.update_scores(self.tester.run_tests(self.context, "Bytes"))
|
def visit_Bytes(self, node):
"""Visitor for AST Bytes nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["bytes"] = node.s
if not isinstance(node.parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange_fix(node.parent)
self.update_scores(self.tester.run_tests(self.context, "Bytes"))
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def generic_visit(self, node):
"""Drive the visitor."""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
max_idx = len(value) - 1
for idx, item in enumerate(value):
if isinstance(item, ast.AST):
if idx < max_idx:
setattr(item, "_bandit_sibling", value[idx + 1])
else:
setattr(item, "_bandit_sibling", None)
setattr(item, "_bandit_parent", node)
if self.pre_visit(item):
self.visit(item)
self.generic_visit(item)
self.post_visit(item)
elif isinstance(value, ast.AST):
setattr(value, "_bandit_sibling", None)
setattr(value, "_bandit_parent", node)
if self.pre_visit(value):
self.visit(value)
self.generic_visit(value)
self.post_visit(value)
|
def generic_visit(self, node):
"""Drive the visitor."""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
max_idx = len(value) - 1
for idx, item in enumerate(value):
if isinstance(item, ast.AST):
if idx < max_idx:
setattr(item, "sibling", value[idx + 1])
else:
setattr(item, "sibling", None)
setattr(item, "parent", node)
if self.pre_visit(item):
self.visit(item)
self.generic_visit(item)
self.post_visit(item)
elif isinstance(value, ast.AST):
setattr(value, "sibling", None)
setattr(value, "parent", node)
if self.pre_visit(value):
self.visit(value)
self.generic_visit(value)
self.post_visit(value)
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def linerange_fix(node):
"""Try and work around a known Python bug with multi-line strings."""
# deal with multiline strings lineno behavior (Python issue #16806)
lines = linerange(node)
if hasattr(node, "_bandit_sibling") and hasattr(node._bandit_sibling, "lineno"):
start = min(lines)
delta = node._bandit_sibling.lineno - start
if delta > 1:
return list(range(start, node._bandit_sibling.lineno))
return lines
|
def linerange_fix(node):
"""Try and work around a known Python bug with multi-line strings."""
# deal with multiline strings lineno behavior (Python issue #16806)
lines = linerange(node)
if hasattr(node, "sibling") and hasattr(node.sibling, "lineno"):
start = min(lines)
delta = node.sibling.lineno - start
if delta > 1:
return list(range(start, node.sibling.lineno))
return lines
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def concat_string(node, stop=None):
"""Builds a string from a ast.BinOp chain.
This will build a string from a series of ast.Str nodes wrapped in
ast.BinOp nodes. Something like "a" + "b" + "c" or "a %s" % val etc.
The provided node can be any participant in the BinOp chain.
:param node: (ast.Str or ast.BinOp) The node to process
:param stop: (ast.Str or ast.BinOp) Optional base node to stop at
:returns: (Tuple) the root node of the expression, the string value
"""
def _get(node, bits, stop=None):
if node != stop:
bits.append(
_get(node.left, bits, stop)
if isinstance(node.left, ast.BinOp)
else node.left
)
bits.append(
_get(node.right, bits, stop)
if isinstance(node.right, ast.BinOp)
else node.right
)
bits = [node]
while isinstance(node._bandit_parent, ast.BinOp):
node = node._bandit_parent
if isinstance(node, ast.BinOp):
_get(node, bits, stop)
return (node, " ".join([x.s for x in bits if isinstance(x, ast.Str)]))
|
def concat_string(node, stop=None):
"""Builds a string from a ast.BinOp chain.
This will build a string from a series of ast.Str nodes wrapped in
ast.BinOp nodes. Something like "a" + "b" + "c" or "a %s" % val etc.
The provided node can be any participant in the BinOp chain.
:param node: (ast.Str or ast.BinOp) The node to process
:param stop: (ast.Str or ast.BinOp) Optional base node to stop at
:returns: (Tuple) the root node of the expression, the string value
"""
def _get(node, bits, stop=None):
if node != stop:
bits.append(
_get(node.left, bits, stop)
if isinstance(node.left, ast.BinOp)
else node.left
)
bits.append(
_get(node.right, bits, stop)
if isinstance(node.right, ast.BinOp)
else node.right
)
bits = [node]
while isinstance(node.parent, ast.BinOp):
node = node.parent
if isinstance(node, ast.BinOp):
_get(node, bits, stop)
return (node, " ".join([x.s for x in bits if isinstance(x, ast.Str)]))
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def check_risk(node):
description = "Potential XSS on mark_safe function."
xss_var = node.args[0]
secure = False
if isinstance(xss_var, ast.Name):
# Check if the var are secure
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
is_param = False
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
arg_name = name.id if six.PY2 else name.arg
if arg_name == xss_var.id:
is_param = True
break
if not is_param:
secure = evaluate_var(xss_var, parent, node.lineno)
elif isinstance(xss_var, ast.Call):
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
secure = evaluate_call(xss_var, parent)
elif isinstance(xss_var, ast.BinOp):
is_mod = isinstance(xss_var.op, ast.Mod)
is_left_str = isinstance(xss_var.left, ast.Str)
if is_mod and is_left_str:
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
new_call = transform2call(xss_var)
secure = evaluate_call(new_call, parent)
if not secure:
return bandit.Issue(
severity=bandit.MEDIUM, confidence=bandit.HIGH, text=description
)
|
def check_risk(node):
description = "Potential XSS on mark_safe function."
xss_var = node.args[0]
secure = False
if isinstance(xss_var, ast.Name):
# Check if the var are secure
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
is_param = False
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
arg_name = name.id if six.PY2 else name.arg
if arg_name == xss_var.id:
is_param = True
break
if not is_param:
secure = evaluate_var(xss_var, parent, node.lineno)
elif isinstance(xss_var, ast.Call):
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
secure = evaluate_call(xss_var, parent)
elif isinstance(xss_var, ast.BinOp):
is_mod = isinstance(xss_var.op, ast.Mod)
is_left_str = isinstance(xss_var.left, ast.Str)
if is_mod and is_left_str:
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
new_call = transform2call(xss_var)
secure = evaluate_call(new_call, parent)
if not secure:
return bandit.Issue(
severity=bandit.MEDIUM, confidence=bandit.HIGH, text=description
)
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def hardcoded_password_string(context):
"""**B105: Test for use of hard-coded password strings**
The use of hard-coded passwords increases the possibility of password
guessing tremendously. This plugin test looks for all string literals and
checks the following conditions:
- assigned to a variable that looks like a password
- assigned to a dict key that looks like a password
- used in a comparison with a variable that looks like a password
Variables are considered to look like a password if they have match any one
of:
- "password"
- "pass"
- "passwd"
- "pwd"
- "secret"
- "token"
- "secrete"
Note: this can be noisy and may generate false positives.
**Config Options:**
None
:Example:
.. code-block:: none
>> Issue: Possible hardcoded password '(root)'
Severity: Low Confidence: Low
Location: ./examples/hardcoded-passwords.py:5
4 def someFunction2(password):
5 if password == "root":
6 print("OK, logged in")
.. seealso::
- https://www.owasp.org/index.php/Use_of_hard-coded_password
.. versionadded:: 0.9.0
"""
node = context.node
if isinstance(node._bandit_parent, ast.Assign):
# looks for "candidate='some_string'"
for targ in node._bandit_parent.targets:
if isinstance(targ, ast.Name) and RE_CANDIDATES.search(targ.id):
return _report(node.s)
elif isinstance(node._bandit_parent, ast.Index) and RE_CANDIDATES.search(node.s):
# looks for "dict[candidate]='some_string'"
# assign -> subscript -> index -> string
assign = node._bandit_parent._bandit_parent._bandit_parent
if isinstance(assign, ast.Assign) and isinstance(assign.value, ast.Str):
return _report(assign.value.s)
elif isinstance(node._bandit_parent, ast.Compare):
# looks for "candidate == 'some_string'"
comp = node._bandit_parent
if isinstance(comp.left, ast.Name):
if RE_CANDIDATES.search(comp.left.id):
if isinstance(comp.comparators[0], ast.Str):
return _report(comp.comparators[0].s)
|
def hardcoded_password_string(context):
"""**B105: Test for use of hard-coded password strings**
The use of hard-coded passwords increases the possibility of password
guessing tremendously. This plugin test looks for all string literals and
checks the following conditions:
- assigned to a variable that looks like a password
- assigned to a dict key that looks like a password
- used in a comparison with a variable that looks like a password
Variables are considered to look like a password if they have match any one
of:
- "password"
- "pass"
- "passwd"
- "pwd"
- "secret"
- "token"
- "secrete"
Note: this can be noisy and may generate false positives.
**Config Options:**
None
:Example:
.. code-block:: none
>> Issue: Possible hardcoded password '(root)'
Severity: Low Confidence: Low
Location: ./examples/hardcoded-passwords.py:5
4 def someFunction2(password):
5 if password == "root":
6 print("OK, logged in")
.. seealso::
- https://www.owasp.org/index.php/Use_of_hard-coded_password
.. versionadded:: 0.9.0
"""
node = context.node
if isinstance(node.parent, ast.Assign):
# looks for "candidate='some_string'"
for targ in node.parent.targets:
if isinstance(targ, ast.Name) and RE_CANDIDATES.search(targ.id):
return _report(node.s)
elif isinstance(node.parent, ast.Index) and RE_CANDIDATES.search(node.s):
# looks for "dict[candidate]='some_string'"
# assign -> subscript -> index -> string
assign = node.parent.parent.parent
if isinstance(assign, ast.Assign) and isinstance(assign.value, ast.Str):
return _report(assign.value.s)
elif isinstance(node.parent, ast.Compare):
# looks for "candidate == 'some_string'"
comp = node.parent
if isinstance(comp.left, ast.Name):
if RE_CANDIDATES.search(comp.left.id):
if isinstance(comp.comparators[0], ast.Str):
return _report(comp.comparators[0].s)
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def _evaluate_ast(node):
wrapper = None
statement = ""
if isinstance(node._bandit_parent, ast.BinOp):
out = utils.concat_string(node, node._bandit_parent)
wrapper = out[0]._bandit_parent
statement = out[1]
elif (
isinstance(node._bandit_parent, ast.Attribute)
and node._bandit_parent.attr == "format"
):
statement = node.s
# Hierarchy for "".format() is Wrapper -> Call -> Attribute -> Str
wrapper = node._bandit_parent._bandit_parent._bandit_parent
elif hasattr(ast, "JoinedStr") and isinstance(node._bandit_parent, ast.JoinedStr):
statement = node.s
wrapper = node._bandit_parent._bandit_parent
if isinstance(wrapper, ast.Call): # wrapped in "execute" call?
names = ["execute", "executemany"]
name = utils.get_called_name(wrapper)
return (name in names, statement)
else:
return (False, statement)
|
def _evaluate_ast(node):
wrapper = None
statement = ""
if isinstance(node.parent, ast.BinOp):
out = utils.concat_string(node, node.parent)
wrapper = out[0].parent
statement = out[1]
elif isinstance(node.parent, ast.Attribute) and node.parent.attr == "format":
statement = node.s
# Hierarchy for "".format() is Wrapper -> Call -> Attribute -> Str
wrapper = node.parent.parent.parent
elif hasattr(ast, "JoinedStr") and isinstance(node.parent, ast.JoinedStr):
statement = node.s
wrapper = node.parent.parent
if isinstance(wrapper, ast.Call): # wrapped in "execute" call?
names = ["execute", "executemany"]
name = utils.get_called_name(wrapper)
return (name in names, statement)
else:
return (False, statement)
|
https://github.com/PyCQA/bandit/issues/487
|
from bandit.core.config import BanditConfig
from bandit.core.meta_ast import BanditMetaAst
from bandit.core.metrics import Metrics
from bandit.core.node_visitor import BanditNodeVisitor
from bandit.core.test_set import BanditTestSet
from pyflakes.checker import Checker
import ast
profile = {}
bnv = BanditNodeVisitor(
... 'filename',
... BanditMetaAst(),
... BanditTestSet(BanditConfig(), profile=profile),
... False,
... [],
... Metrics(),
... )
tree = ast.parse("""def test():
... try:
... x = 5
... if True:
... x = 10 # noqa: F841
... except AttributeError:
... pass
... """)
bnv.generic_visit(tree)
Checker(tree=tree)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 673, in __init__
self.runDeferred(self._deferredFunctions)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 710, in runDeferred
handler()
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1474, in runFunction
self.handleChildren(node, omit='decorator_list')
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1623, in TRY
self.handleNode(child, node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1073, in handleChildren
self.handleNode(node, tree)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1120, in handleNode
handler(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1353, in NAME
self.handleNodeStore(node)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 1017, in handleNodeStore
self.addBinding(node, binding)
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 876, in addBinding
not self.differentForks(node, existing.source)):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 857, in differentForks
if self.descendantOf(lnode, items, ancestor) ^ \
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 835, in descendantOf
if self.getCommonAncestor(node, a, stop):
File "/tmp/t/venv/lib/python3.5/site-packages/pyflakes/checker.py", line 827, in getCommonAncestor
if (lnode.depth > rnode.depth):
AttributeError: 'ExceptHandler' object has no attribute 'depth'
|
AttributeError
|
def blacklist(context, config):
"""Generic blacklist test, B001.
This generic blacklist test will be called for any encountered node with
defined blacklist data available. This data is loaded via plugins using
the 'bandit.blacklists' entry point. Please see the documentation for more
details. Each blacklist datum has a unique bandit ID that may be used for
filtering purposes, or alternatively all blacklisting can be filtered using
the id of this built in test, 'B001'.
"""
blacklists = config
node_type = context.node.__class__.__name__
if node_type == "Call":
func = context.node.func
if isinstance(func, ast.Name) and func.id == "__import__":
if len(context.node.args):
if isinstance(context.node.args[0], ast.Str):
name = context.node.args[0].s
else:
# TODO(??): import through a variable, need symbol tab
name = "UNKNOWN"
else:
name = "" # handle '__import__()'
else:
name = context.call_function_name_qual
# In the case the Call is an importlib.import, treat the first
# argument name as an actual import module name.
# Will produce None if argument is not a literal or identifier
if name in ["importlib.import_module", "importlib.__import__"]:
name = context.call_args[0]
for check in blacklists[node_type]:
for qn in check["qualnames"]:
if name is not None and fnmatch.fnmatch(name, qn):
return report_issue(check, name)
if node_type.startswith("Import"):
prefix = ""
if node_type == "ImportFrom":
if context.node.module is not None:
prefix = context.node.module + "."
for check in blacklists[node_type]:
for name in context.node.names:
for qn in check["qualnames"]:
if (prefix + name.name).startswith(qn):
return report_issue(check, name.name)
|
def blacklist(context, config):
"""Generic blacklist test, B001.
This generic blacklist test will be called for any encountered node with
defined blacklist data available. This data is loaded via plugins using
the 'bandit.blacklists' entry point. Please see the documentation for more
details. Each blacklist datum has a unique bandit ID that may be used for
filtering purposes, or alternatively all blacklisting can be filtered using
the id of this built in test, 'B001'.
"""
blacklists = config
node_type = context.node.__class__.__name__
if node_type == "Call":
func = context.node.func
if isinstance(func, ast.Name) and func.id == "__import__":
if len(context.node.args):
if isinstance(context.node.args[0], ast.Str):
name = context.node.args[0].s
else:
# TODO(??): import through a variable, need symbol tab
name = "UNKNOWN"
else:
name = "" # handle '__import__()'
else:
name = context.call_function_name_qual
# In the case the Call is an importlib.import, treat the first
# argument name as an actual import module name.
if name in ["importlib.import_module", "importlib.__import__"]:
name = context.call_args[0]
for check in blacklists[node_type]:
for qn in check["qualnames"]:
if fnmatch.fnmatch(name, qn):
return report_issue(check, name)
if node_type.startswith("Import"):
prefix = ""
if node_type == "ImportFrom":
if context.node.module is not None:
prefix = context.node.module + "."
for check in blacklists[node_type]:
for name in context.node.names:
for qn in check["qualnames"]:
if (prefix + name.name).startswith(qn):
return report_issue(check, name.name)
|
https://github.com/PyCQA/bandit/issues/344
|
ERROR Bandit internal error running: blacklist on file /home/nighty/workspaces/cegeka/usd_api/api_documentation/views.py at line 125: expected string or bufferTraceback (most recent call last):
File "/home/nighty/.virtualenvs/usd_api/local/lib/python2.7/site-packages/bandit/core/tester.py", line 62, in run_tests
result = test(context, test._config)
File "/home/nighty/.virtualenvs/usd_api/local/lib/python2.7/site-packages/bandit/core/blacklisting.py", line 62, in blacklist
if fnmatch.fnmatch(name, qn):
File "/home/nighty/.virtualenvs/usd_api/lib/python2.7/fnmatch.py", line 43, in fnmatch
return fnmatchcase(name, pat)
File "/home/nighty/.virtualenvs/usd_api/lib/python2.7/fnmatch.py", line 83, in fnmatchcase
return re_pat.match(name) is not None
TypeError: expected string or buffer
|
TypeError
|
def is_assigned(self, node):
assigned = False
if self.ignore_nodes:
if isinstance(self.ignore_nodes, (list, tuple, object)):
if isinstance(node, self.ignore_nodes):
return assigned
if isinstance(node, ast.Expr):
assigned = self.is_assigned(node.value)
elif isinstance(node, ast.FunctionDef):
for name in node.args.args:
if isinstance(name, ast.Name):
if name.id == self.var_name.id:
# If is param the assignations are not affected
return assigned
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.With):
if six.PY2:
if node.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
else:
for withitem in node.items:
if withitem.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
elif six.PY2 and isinstance(node, ast.TryFinally):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.finalbody))
elif six.PY2 and isinstance(node, ast.TryExcept):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
elif not six.PY2 and isinstance(node, ast.Try):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
assigned.extend(self.is_assigned_in(node.finalbody))
elif isinstance(node, ast.ExceptHandler):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
elif isinstance(node, (ast.If, ast.For, ast.While)):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.AugAssign):
if isinstance(node.target, ast.Name):
if node.target.id == self.var_name.id:
assigned = node.value
elif isinstance(node, ast.Assign) and node.targets:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == self.var_name.id:
assigned = node.value
elif isinstance(target, ast.Tuple):
pos = 0
for name in target.elts:
if name.id == self.var_name.id:
assigned = node.value.elts[pos]
break
pos += 1
return assigned
|
def is_assigned(self, node):
assigned = False
if self.ignore_nodes:
if isinstance(self.ignore_nodes, (list, tuple, object)):
if isinstance(node, self.ignore_nodes):
return assigned
if isinstance(node, ast.Expr):
assigned = self.is_assigned(node.value)
elif isinstance(node, ast.FunctionDef):
for name in node.args.args:
if isinstance(name, ast.Name):
if name.id == self.var_name.id:
# If is param the assignations are not affected
return assigned
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.With):
if node.optional_vars.id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.TryFinally):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.finalbody))
elif isinstance(node, ast.TryExcept):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.ExceptHandler):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
elif isinstance(node, (ast.If, ast.For, ast.While)):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.AugAssign):
if isinstance(node.target, ast.Name):
if node.target.id == self.var_name.id:
assigned = node.value
elif isinstance(node, ast.Assign) and node.targets:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == self.var_name.id:
assigned = node.value
elif isinstance(target, ast.Tuple):
pos = 0
for name in target.elts:
if name.id == self.var_name.id:
assigned = node.value.elts[pos]
break
pos += 1
return assigned
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def evaluate_var(xss_var, parent, until, ignore_nodes=None):
secure = False
if isinstance(xss_var, ast.Name):
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
arg_name = name.id if six.PY2 else name.arg
if arg_name == xss_var.id:
return False # Params are not secure
analyser = DeepAssignation(xss_var, ignore_nodes)
for node in parent.body:
if node.lineno >= until:
break
to = analyser.is_assigned(node)
if to:
if isinstance(to, ast.Str):
secure = True
elif isinstance(to, ast.Name):
secure = evaluate_var(to, parent, to.lineno, ignore_nodes)
elif isinstance(to, ast.Call):
secure = evaluate_call(to, parent, ignore_nodes)
elif isinstance(to, (list, tuple)):
num_secure = 0
for some_to in to:
if isinstance(some_to, ast.Str):
num_secure += 1
elif isinstance(some_to, ast.Name):
if evaluate_var(some_to, parent, node.lineno, ignore_nodes):
num_secure += 1
else:
break
else:
break
if num_secure == len(to):
secure = True
else:
secure = False
break
else:
secure = False
break
return secure
|
def evaluate_var(xss_var, parent, until, ignore_nodes=None):
secure = False
if isinstance(xss_var, ast.Name):
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
if name.id == xss_var.id:
return False # Params are not secure
analyser = DeepAssignation(xss_var, ignore_nodes)
for node in parent.body:
if node.lineno >= until:
break
to = analyser.is_assigned(node)
if to:
if isinstance(to, ast.Str):
secure = True
elif isinstance(to, ast.Name):
secure = evaluate_var(to, parent, to.lineno, ignore_nodes)
elif isinstance(to, ast.Call):
secure = evaluate_call(to, parent, ignore_nodes)
elif isinstance(to, (list, tuple)):
num_secure = 0
for some_to in to:
if isinstance(some_to, ast.Str):
num_secure += 1
elif isinstance(some_to, ast.Name):
if evaluate_var(some_to, parent, node.lineno, ignore_nodes):
num_secure += 1
else:
break
else:
break
if num_secure == len(to):
secure = True
else:
secure = False
break
else:
secure = False
break
return secure
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def evaluate_call(call, parent, ignore_nodes=None):
secure = False
evaluate = False
if isinstance(call, ast.Call) and isinstance(call.func, ast.Attribute):
if isinstance(call.func.value, ast.Str) and call.func.attr == "format":
evaluate = True
if call.keywords or (six.PY2 and call.kwargs):
evaluate = False # TODO(??) get support for this
if evaluate:
args = list(call.args)
if (
six.PY2
and call.starargs
and isinstance(call.starargs, (ast.List, ast.Tuple))
):
args.extend(call.starargs.elts)
num_secure = 0
for arg in args:
if isinstance(arg, ast.Str):
num_secure += 1
elif isinstance(arg, ast.Name):
if evaluate_var(arg, parent, call.lineno, ignore_nodes):
num_secure += 1
else:
break
elif isinstance(arg, ast.Call):
if evaluate_call(arg, parent, ignore_nodes):
num_secure += 1
else:
break
elif (
not six.PY2
and isinstance(arg, ast.Starred)
and isinstance(arg.value, (ast.List, ast.Tuple))
):
args.extend(arg.value.elts)
num_secure += 1
else:
break
secure = num_secure == len(args)
return secure
|
def evaluate_call(call, parent, ignore_nodes=None):
secure = False
evaluate = False
if isinstance(call, ast.Call) and isinstance(call.func, ast.Attribute):
if isinstance(call.func.value, ast.Str) and call.func.attr == "format":
evaluate = True
if call.keywords or call.kwargs:
evaluate = False # TODO(??) get support for this
if evaluate:
args = list(call.args)
if call.starargs and isinstance(call.starargs, (ast.List, ast.Tuple)):
args.extend(call.starargs.elts)
num_secure = 0
for arg in args:
if isinstance(arg, ast.Str):
num_secure += 1
elif isinstance(arg, ast.Name):
if evaluate_var(arg, parent, call.lineno, ignore_nodes):
num_secure += 1
else:
break
elif isinstance(arg, ast.Call):
if evaluate_call(arg, parent, ignore_nodes):
num_secure += 1
else:
break
else:
break
secure = num_secure == len(args)
return secure
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def transform2call(var):
if isinstance(var, ast.BinOp):
is_mod = isinstance(var.op, ast.Mod)
is_left_str = isinstance(var.left, ast.Str)
if is_mod and is_left_str:
new_call = ast.Call()
new_call.args = []
new_call.args = []
if six.PY2:
new_call.starargs = None
new_call.keywords = None
if six.PY2:
new_call.kwargs = None
new_call.lineno = var.lineno
new_call.func = ast.Attribute()
new_call.func.value = var.left
new_call.func.attr = "format"
if isinstance(var.right, ast.Tuple):
new_call.args = var.right.elts
elif six.PY2 and isinstance(var.right, ast.Dict):
new_call.kwargs = var.right
else:
new_call.args = [var.right]
return new_call
|
def transform2call(var):
if isinstance(var, ast.BinOp):
is_mod = isinstance(var.op, ast.Mod)
is_left_str = isinstance(var.left, ast.Str)
if is_mod and is_left_str:
new_call = ast.Call()
new_call.args = []
new_call.args = []
new_call.starargs = None
new_call.keywords = None
new_call.kwargs = None
new_call.lineno = var.lineno
new_call.func = ast.Attribute()
new_call.func.value = var.left
new_call.func.attr = "format"
if isinstance(var.right, ast.Tuple):
new_call.args = var.right.elts
elif isinstance(var.right, ast.Dict):
new_call.kwargs = var.right
else:
new_call.args = [var.right]
return new_call
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def check_risk(node):
description = "Potential XSS on mark_safe function."
xss_var = node.args[0]
secure = False
if isinstance(xss_var, ast.Name):
# Check if the var are secure
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
is_param = False
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
arg_name = name.id if six.PY2 else name.arg
if arg_name == xss_var.id:
is_param = True
break
if not is_param:
secure = evaluate_var(xss_var, parent, node.lineno)
elif isinstance(xss_var, ast.Call):
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
secure = evaluate_call(xss_var, parent)
elif isinstance(xss_var, ast.BinOp):
is_mod = isinstance(xss_var.op, ast.Mod)
is_left_str = isinstance(xss_var.left, ast.Str)
if is_mod and is_left_str:
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
new_call = transform2call(xss_var)
secure = evaluate_call(new_call, parent)
if not secure:
return bandit.Issue(
severity=bandit.MEDIUM, confidence=bandit.HIGH, text=description
)
|
def check_risk(node):
description = "Potential XSS on mark_safe function."
xss_var = node.args[0]
secure = False
if isinstance(xss_var, ast.Name):
# Check if the var are secure
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
is_param = False
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
if name.id == xss_var.id:
is_param = True
break
if not is_param:
secure = evaluate_var(xss_var, parent, node.lineno)
elif isinstance(xss_var, ast.Call):
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
secure = evaluate_call(xss_var, parent)
elif isinstance(xss_var, ast.BinOp):
is_mod = isinstance(xss_var.op, ast.Mod)
is_left_str = isinstance(xss_var.left, ast.Str)
if is_mod and is_left_str:
parent = node.parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent.parent
new_call = transform2call(xss_var)
secure = evaluate_call(new_call, parent)
if not secure:
return bandit.Issue(
severity=bandit.MEDIUM, confidence=bandit.HIGH, text=description
)
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def has_shell(context):
keywords = context.node.keywords
result = False
if "shell" in context.call_keywords:
for key in keywords:
if key.arg == "shell":
val = key.value
if isinstance(val, ast.Num):
result = bool(val.n)
elif isinstance(val, ast.List):
result = bool(val.elts)
elif isinstance(val, ast.Dict):
result = bool(val.keys)
elif isinstance(val, ast.Name) and val.id in ["False", "None"]:
result = False
elif not six.PY2 and isinstance(val, ast.NameConstant):
result = val.value
else:
result = True
return result
|
def has_shell(context):
keywords = context.node.keywords
if "shell" in context.call_keywords:
for key in keywords:
if key.arg == "shell":
val = key.value
if isinstance(val, ast.Num):
return bool(val.n)
if isinstance(val, ast.List):
return bool(val.elts)
if isinstance(val, ast.Dict):
return bool(val.keys)
if isinstance(val, ast.Name):
if val.id in ["False", "None"]:
return False
return True
return False
|
https://github.com/PyCQA/bandit/issues/350
|
Bandit internal error running: django_mark_safe on file /home/travis/build/PyCQA/bandit/examples/mark_safe_secure.py at line 33: 'Call' object has no attribute 'kwargs'Traceback (most recent call last):
File "/home/travis/build/PyCQA/bandit/bandit/core/tester.py", line 64, in run_tests
result = test(context)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 279, in django_mark_safe
return check_risk(context.node)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 232, in check_risk
secure = evaluate_call(xss_var, parent)
File "/home/travis/build/PyCQA/bandit/bandit/plugins/django_xss.py", line 154, in evaluate_call
if call.keywords or call.kwargs:
AttributeError: 'Call' object has no attribute 'kwargs'
|
AttributeError
|
def mtsac_metaworld_mt50(ctxt=None, seed=1, use_gpu=False, _gpu=0):
"""Train MTSAC with MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
use_gpu (bool): Used to enable ussage of GPU in training.
_gpu (int): The ID of the gpu (used on multi-gpu machines).
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
task_names = mwb.MT50.get_train_tasks().all_task_names
train_envs = []
test_envs = []
for task_name in task_names:
train_env = normalize(
GymEnv(mwb.MT50.from_task(task_name)), normalize_reward=True
)
test_env = normalize(GymEnv(mwb.MT50.from_task(task_name)))
train_envs.append(train_env)
test_envs.append(test_env)
mt50_train_envs = MultiEnvWrapper(
train_envs, sample_strategy=round_robin_strategy, mode="vanilla"
)
mt50_test_envs = MultiEnvWrapper(
test_envs, sample_strategy=round_robin_strategy, mode="vanilla"
)
policy = TanhGaussianMLPPolicy(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.0),
max_std=np.exp(2.0),
)
qf1 = ContinuousMLPQFunction(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu,
)
qf2 = ContinuousMLPQFunction(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu,
)
replay_buffer = PathBuffer(
capacity_in_transitions=int(1e6),
)
timesteps = 100000000
batch_size = int(150 * mt50_train_envs.num_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(
policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=150,
max_episode_length=150,
eval_env=mt50_test_envs,
env_spec=mt50_train_envs.spec,
num_tasks=10,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=7500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=6400,
)
set_gpu_mode(use_gpu, _gpu)
mtsac.to()
runner.setup(algo=mtsac, env=mt50_train_envs, sampler_cls=LocalSampler)
runner.train(n_epochs=epochs, batch_size=batch_size)
|
def mtsac_metaworld_mt50(ctxt=None, seed=1, use_gpu=False, _gpu=0):
"""Train MTSAC with MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
use_gpu (bool): Used to enable ussage of GPU in training.
_gpu (int): The ID of the gpu (used on multi-gpu machines).
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
task_names = mwb.MT50.get_train_tasks().all_task_names
train_envs = []
test_envs = []
for task_name in task_names:
train_env = normalize(
GymEnv(mwb.MT50.from_task(task_name)), normalize_reward=True
)
test_env = normalize(GymEnv(mwb.MT50.from_task(task_name)))
train_envs.append(train_env)
test_envs.append(test_env)
mt50_train_envs = MultiEnvWrapper(
train_envs, sample_strategy=round_robin_strategy, mode="vanilla"
)
mt50_test_envs = MultiEnvWrapper(
test_envs, sample_strategy=round_robin_strategy, mode="vanilla"
)
policy = TanhGaussianMLPPolicy(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.0),
max_std=np.exp(2.0),
)
qf1 = ContinuousMLPQFunction(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu,
)
qf2 = ContinuousMLPQFunction(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu,
)
replay_buffer = PathBuffer(
capacity_in_transitions=int(1e6),
)
timesteps = 100000000
batch_size = int(150 * mt50_train_envs.num_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(
policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=150,
max_episode_length=250,
eval_env=mt50_test_envs,
env_spec=mt50_train_envs.spec,
num_tasks=10,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=7500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=6400,
)
set_gpu_mode(use_gpu, _gpu)
mtsac.to()
runner.setup(algo=mtsac, env=mt50_train_envs, sampler_cls=LocalSampler)
runner.train(n_epochs=epochs, batch_size=batch_size)
|
https://github.com/rlworkgroup/garage/issues/1903
|
2020-08-15 02:07:45 | [mtsac_metaworld_mt50] Setting seed to 1
^T2020-08-15 02:09:26 | [mtsac_metaworld_mt50] Obtaining samples...
Traceback (most recent call last):
File "examples/torch/mtsac_metaworld_mt50.py", line 103, in <module>
mtsac_metaworld_mt50()
File "/home/eholly/venv/lib/python3.5/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/eholly/venv/lib/python3.5/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/eholly/venv/lib/python3.5/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/eholly/venv/lib/python3.5/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/experiment/experiment.py", line 553, in __call__
result = self.function(ctxt, **kwargs)
File "examples/torch/mtsac_metaworld_mt50.py", line 100, in mtsac_metaworld_mt50
runner.train(n_epochs=epochs, batch_size=batch_size)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/experiment/local_runner.py", line 485, in train
average_return = self._algo.train(self)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/torch/algos/sac.py", line 206, in train
last_return = self._evaluate_policy(runner.step_itr)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/torch/algos/mtsac.py", line 189, in _evaluate_policy
num_trajs=self._num_evaluation_trajectories))
File "/home/eholly/venv/lib/python3.5/site-packages/garage/np/_functions.py", line 62, in obtain_evaluation_samples
deterministic=True)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/sampler/utils.py", line 66, in rollout
next_o, r, d, env_info = env.step(a)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/envs/multi_env_wrapper.py", line 231, in step
obs, reward, done, info = self.env.step(action)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/envs/garage_env.py", line 154, in step
observation, reward, done, info = self.env.step(action)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/envs/normalized_env.py", line 153, in step
next_obs, reward, done, info = self.env.step(scaled_action)
File "/home/eholly/venv/lib/python3.5/site-packages/garage/envs/garage_env.py", line 154, in stpe
observation, reward, done, info = self.env.step(action)
File "/home/eholly/venv/lib/python3.5/site-packages/metaworld/envs/mujoco/multitask_env.py", line 161, in step
obs, reward, done, info = self.active_env.step(action)
File "/home/eholly/venv/lib/python3.5/site-packages/metaworld/envs/mujoco/sawyer_xyz/sawyer_plate_slide_back_side.py", line 124, in step
self.do_simulation([action[-1], -action[-1]])
File "/home/eholly/venv/lib/python3.5/site-packages/metaworld/envs/mujoco/mujoco_env.py", line 118, in do_simulation
raise ValueError('Maximum path length allowed by the benchmark has been exceeded')
ValueError: Maximum path length allowed by the benchmark has been exceeded
Makefile:187: recipe for target 'run-headless' failed
make: *** [run-headless] Error 1
|
ValueError
|
def step(self, action):
"""Call step on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
np.ndarray: Agent's observation of the current environment
float: Amount of reward returned after previous action
bool: Whether the episode has ended, in which case further step()
calls will return undefined results
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning)
"""
observation, reward, done, info = self.env.step(action)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'BulletEnv.TimeLimitTerminated'
if "TimeLimit.truncated" in info:
info["BulletEnv.TimeLimitTerminated"] = done # done = True always
done = not info["TimeLimit.truncated"]
else:
info["TimeLimit.truncated"] = False
info["BulletEnv.TimeLimitTerminated"] = False
return observation, reward, done, info
|
def step(self, action):
"""Call step on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
np.ndarray: Agent's observation of the current environment
float: Amount of reward returned after previous action
bool: Whether the episode has ended, in which case further step()
calls will return undefined results
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning)
"""
observation, reward, done, info = self.env.step(action)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'BulletEnv.TimeLimitTerminated'
if "TimeLimit.truncated" in info:
info["BulletEnv.TimeLimitTerminated"] = done # done = True always
done = not info["TimeLimit.truncated"]
return observation, reward, done, info
|
https://github.com/rlworkgroup/garage/issues/1797
|
--------------------------------------- --------------
Sampling [####################################] 100%
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing policy...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Start CG optimization: #parameters: 1282, #inputs: 47, #subsample_inputs: 47
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing gradient
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | gradient computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing descent direction
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | descent direction computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | backtrack iters: 2
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | optimization finished
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Fitting baseline...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saving snapshot...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saved
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Time 7.64 s
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | EpochTime 0.93 s
--------------------------------------- --------------
Evaluation/AverageDiscountedReturn 54.6411
Evaluation/AverageReturn 85.1489
Evaluation/Iteration 3
Evaluation/MaxReturn 188
Evaluation/MinReturn 21
Evaluation/NumTrajs 47
Evaluation/StdReturn 37.8738
Evaluation/TerminationRate 1
Extras/EpisodeRewardMean 70.39
LinearFeatureBaseline/ExplainedVariance 0.402734
TotalEnvSteps 16156
policy/Entropy 1.41126
policy/KL 0.00784782
policy/KLBefore 0
policy/LossAfter -0.101578
policy/LossBefore -0.0811697
policy/Perplexity 4.10113
policy/dLoss 0.0204084
--------------------------------------- --------------
Traceback (most recent call last):
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 48, in <module>
trpo_gym_tf_cartpole()
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/experiment.py", line 362, in __call__
result = self.function(ctxt, **kwargs)
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 45, in trpo_gym_tf_cartpole
runner.train(n_epochs=120, batch_size=4000)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 500, in train
average_return = self._algo.train(self)
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/algos/npo.py", line 185, in train
runner.step_path = runner.obtain_samples(runner.step_itr)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 358, in obtain_samples
env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 325, in obtain_trajectories
env_update=env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 164, in obtain_samples
ready_worker_id, trajectory_batch = ray.get(result)
File "/home/csidrane/anaconda3/envs/garage/lib/python3.6/site-packages/ray/worker.py", line 1474, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::SamplerWorker.rollout() (pid=17540, ip=171.64.160.86)
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 400, in ray._raylet.execute_task.function_executor
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 299, in rollout
return (self.worker_id, self.inner_worker.rollout())
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/samplers/worker.py", line 137, in rollout
return self._inner_worker.rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 181, in rollout
return self.collect_rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 169, in collect_rollout
dtype='i'))
File "/home/csidrane/Documents/NASA/garage/src/garage/_dtypes.py", line 213, in __new__
format(inferred_batch_size, key, val.shape[0]))
ValueError: Each entry in env_infos must have a batch dimension of length 200, but got key TimeLimit.truncated with batch size 1 instead.
|
ValueError
|
def step(self, action):
"""Call step on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
np.ndarray: Agent's observation of the current environment
float: Amount of reward returned after previous action
bool: Whether the episode has ended, in which case further step()
calls will return undefined results
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning)
"""
observation, reward, done, info = self.env.step(action)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'GarageEnv.TimeLimitTerminated'
if "TimeLimit.truncated" in info:
info["GarageEnv.TimeLimitTerminated"] = done # done = True always
done = not info["TimeLimit.truncated"]
else:
info["TimeLimit.truncated"] = False
info["GarageEnv.TimeLimitTerminated"] = False
return observation, reward, done, info
|
def step(self, action):
"""Call step on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
np.ndarray: Agent's observation of the current environment
float: Amount of reward returned after previous action
bool: Whether the episode has ended, in which case further step()
calls will return undefined results
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning)
"""
observation, reward, done, info = self.env.step(action)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'GarageEnv.TimeLimitTerminated'
if "TimeLimit.truncated" in info:
info["GarageEnv.TimeLimitTerminated"] = done # done = True always
done = not info["TimeLimit.truncated"]
return observation, reward, done, info
|
https://github.com/rlworkgroup/garage/issues/1797
|
--------------------------------------- --------------
Sampling [####################################] 100%
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing policy...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Start CG optimization: #parameters: 1282, #inputs: 47, #subsample_inputs: 47
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing gradient
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | gradient computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing descent direction
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | descent direction computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | backtrack iters: 2
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | optimization finished
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Fitting baseline...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saving snapshot...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saved
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Time 7.64 s
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | EpochTime 0.93 s
--------------------------------------- --------------
Evaluation/AverageDiscountedReturn 54.6411
Evaluation/AverageReturn 85.1489
Evaluation/Iteration 3
Evaluation/MaxReturn 188
Evaluation/MinReturn 21
Evaluation/NumTrajs 47
Evaluation/StdReturn 37.8738
Evaluation/TerminationRate 1
Extras/EpisodeRewardMean 70.39
LinearFeatureBaseline/ExplainedVariance 0.402734
TotalEnvSteps 16156
policy/Entropy 1.41126
policy/KL 0.00784782
policy/KLBefore 0
policy/LossAfter -0.101578
policy/LossBefore -0.0811697
policy/Perplexity 4.10113
policy/dLoss 0.0204084
--------------------------------------- --------------
Traceback (most recent call last):
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 48, in <module>
trpo_gym_tf_cartpole()
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/experiment.py", line 362, in __call__
result = self.function(ctxt, **kwargs)
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 45, in trpo_gym_tf_cartpole
runner.train(n_epochs=120, batch_size=4000)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 500, in train
average_return = self._algo.train(self)
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/algos/npo.py", line 185, in train
runner.step_path = runner.obtain_samples(runner.step_itr)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 358, in obtain_samples
env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 325, in obtain_trajectories
env_update=env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 164, in obtain_samples
ready_worker_id, trajectory_batch = ray.get(result)
File "/home/csidrane/anaconda3/envs/garage/lib/python3.6/site-packages/ray/worker.py", line 1474, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::SamplerWorker.rollout() (pid=17540, ip=171.64.160.86)
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 400, in ray._raylet.execute_task.function_executor
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 299, in rollout
return (self.worker_id, self.inner_worker.rollout())
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/samplers/worker.py", line 137, in rollout
return self._inner_worker.rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 181, in rollout
return self.collect_rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 169, in collect_rollout
dtype='i'))
File "/home/csidrane/Documents/NASA/garage/src/garage/_dtypes.py", line 213, in __new__
format(inferred_batch_size, key, val.shape[0]))
ValueError: Each entry in env_infos must have a batch dimension of length 200, but got key TimeLimit.truncated with batch size 1 instead.
|
ValueError
|
def _gather_rollout(self, rollout_number, last_observation):
assert 0 < self._path_lengths[rollout_number] <= self._max_episode_length
env_infos = self._env_infos[rollout_number]
agent_infos = self._agent_infos[rollout_number]
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
traj = TrajectoryBatch(
env_spec=self._envs[rollout_number].spec,
observations=np.asarray(self._observations[rollout_number]),
last_observations=np.asarray([last_observation]),
actions=np.asarray(self._actions[rollout_number]),
rewards=np.asarray(self._rewards[rollout_number]),
step_types=np.asarray(self._step_types[rollout_number], dtype=StepType),
env_infos=dict(env_infos),
agent_infos=dict(agent_infos),
lengths=np.asarray([self._path_lengths[rollout_number]], dtype="l"),
)
self._completed_rollouts.append(traj)
self._observations[rollout_number] = []
self._actions[rollout_number] = []
self._rewards[rollout_number] = []
self._step_types[rollout_number] = []
self._path_lengths[rollout_number] = 0
self._prev_obs[rollout_number] = self._envs[rollout_number].reset()
self._env_infos[rollout_number] = collections.defaultdict(list)
self._agent_infos[rollout_number] = collections.defaultdict(list)
|
def _gather_rollout(self, rollout_number, last_observation):
assert 0 < self._path_lengths[rollout_number] <= self._max_episode_length
traj = TrajectoryBatch(
env_spec=self._envs[rollout_number].spec,
observations=np.asarray(self._observations[rollout_number]),
last_observations=np.asarray([last_observation]),
actions=np.asarray(self._actions[rollout_number]),
rewards=np.asarray(self._rewards[rollout_number]),
step_types=np.asarray(self._step_types[rollout_number], dtype=StepType),
env_infos=self._env_infos[rollout_number],
agent_infos=self._agent_infos[rollout_number],
lengths=np.asarray([self._path_lengths[rollout_number]], dtype="l"),
)
self._completed_rollouts.append(traj)
self._observations[rollout_number] = []
self._actions[rollout_number] = []
self._rewards[rollout_number] = []
self._step_types[rollout_number] = []
self._path_lengths[rollout_number] = 0
self._prev_obs[rollout_number] = self._envs[rollout_number].reset()
|
https://github.com/rlworkgroup/garage/issues/1797
|
--------------------------------------- --------------
Sampling [####################################] 100%
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing policy...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Optimizing
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Start CG optimization: #parameters: 1282, #inputs: 47, #subsample_inputs: 47
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing loss before
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing gradient
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | gradient computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | computing descent direction
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | descent direction computed
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | backtrack iters: 2
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | optimization finished
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing KL after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Computing loss after
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Fitting baseline...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saving snapshot...
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Saved
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | Time 7.64 s
2020-07-24 12:28:46 | [trpo_gym_tf_cartpole] epoch #3 | EpochTime 0.93 s
--------------------------------------- --------------
Evaluation/AverageDiscountedReturn 54.6411
Evaluation/AverageReturn 85.1489
Evaluation/Iteration 3
Evaluation/MaxReturn 188
Evaluation/MinReturn 21
Evaluation/NumTrajs 47
Evaluation/StdReturn 37.8738
Evaluation/TerminationRate 1
Extras/EpisodeRewardMean 70.39
LinearFeatureBaseline/ExplainedVariance 0.402734
TotalEnvSteps 16156
policy/Entropy 1.41126
policy/KL 0.00784782
policy/KLBefore 0
policy/LossAfter -0.101578
policy/LossBefore -0.0811697
policy/Perplexity 4.10113
policy/dLoss 0.0204084
--------------------------------------- --------------
Traceback (most recent call last):
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 48, in <module>
trpo_gym_tf_cartpole()
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/experiment.py", line 362, in __call__
result = self.function(ctxt, **kwargs)
File "garage/examples/tf/trpo_gym_tf_cartpole.py", line 45, in trpo_gym_tf_cartpole
runner.train(n_epochs=120, batch_size=4000)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 500, in train
average_return = self._algo.train(self)
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/algos/npo.py", line 185, in train
runner.step_path = runner.obtain_samples(runner.step_itr)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 358, in obtain_samples
env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/experiment/local_runner.py", line 325, in obtain_trajectories
env_update=env_update)
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 164, in obtain_samples
ready_worker_id, trajectory_batch = ray.get(result)
File "/home/csidrane/anaconda3/envs/garage/lib/python3.6/site-packages/ray/worker.py", line 1474, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::SamplerWorker.rollout() (pid=17540, ip=171.64.160.86)
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 400, in ray._raylet.execute_task.function_executor
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/ray_sampler.py", line 299, in rollout
return (self.worker_id, self.inner_worker.rollout())
File "/home/csidrane/Documents/NASA/garage/src/garage/tf/samplers/worker.py", line 137, in rollout
return self._inner_worker.rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 181, in rollout
return self.collect_rollout()
File "/home/csidrane/Documents/NASA/garage/src/garage/sampler/default_worker.py", line 169, in collect_rollout
dtype='i'))
File "/home/csidrane/Documents/NASA/garage/src/garage/_dtypes.py", line 213, in __new__
format(inferred_batch_size, key, val.shape[0]))
ValueError: Each entry in env_infos must have a batch dimension of length 200, but got key TimeLimit.truncated with batch size 1 instead.
|
ValueError
|
def objective_fun(params):
global task_id
exp_prefix = params.pop("exp_prefix")
exp_name = "{exp}_{pid}_{id}".format(exp=exp_prefix, pid=os.getpid(), id=task_id)
max_retries = params.pop("max_retries", 0) + 1
result_timeout = params.pop("result_timeout")
run_experiment_kwargs = params.pop("run_experiment_kwargs", {})
func, eval_func = _extract_params(params)
result_success = False
while max_retries > 0:
_launch_ec2(func, exp_prefix, exp_name, params, run_experiment_kwargs)
task_id += 1
max_retries -= 1
if _wait_result(exp_prefix, exp_name, result_timeout):
result_success = True
break
elif max_retries > 0:
print("Timed out waiting for results. Retrying...")
if not result_success:
print("Reached max retries, no results. Giving up.")
return {"status": STATUS_FAIL}
print("Results in! Processing.")
result_dict = eval_func(exp_prefix, exp_name)
result_dict["status"] = STATUS_OK
result_dict["params"] = params
return result_dict
|
def objective_fun(params):
global task_id
exp_prefix = params.pop("exp_prefix")
exp_name = "{exp}_{pid}_{id}".format(exp=exp_prefix, pid=os.getpid(), id=task_id)
max_retries = params.pop("max_retries", 0) + 1
result_timeout = params.pop("result_timeout")
run_experiment_kwargs = params.pop("run_experiment_kwargs", {})
func, eval_func = _get_stubs(params)
result_success = False
while max_retries > 0:
_launch_ec2(func, exp_prefix, exp_name, params, run_experiment_kwargs)
task_id += 1
max_retries -= 1
if _wait_result(exp_prefix, exp_name, result_timeout):
result_success = True
break
elif max_retries > 0:
print("Timed out waiting for results. Retrying...")
if not result_success:
print("Reached max retries, no results. Giving up.")
return {"status": STATUS_FAIL}
print("Results in! Processing.")
result_dict = eval_func(exp_prefix, exp_name)
result_dict["status"] = STATUS_OK
result_dict["params"] = params
return result_dict
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def launch_hyperopt_search(
task_method,
eval_method,
param_space,
hyperopt_experiment_key,
hyperopt_db_host="localhost",
hyperopt_db_port=1234,
hyperopt_db_name="garage",
n_hyperopt_workers=1,
hyperopt_max_evals=100,
result_timeout=1200,
max_retries=0,
run_experiment_kwargs=None,
):
"""
Launch a hyperopt search using EC2.
This uses the hyperopt parallel processing functionality based on MongoDB.
The MongoDB server at the specified host and port is assumed to be already
running. Downloading and running MongoDB is pretty straightforward, see
https://github.com/hyperopt/hyperopt/wiki/Parallelizing-Evaluations-During-
Search-via-MongoDB for instructions.
The parameter space to be searched over is specified in param_space. See
https://github.com/hyperopt/hyperopt/wiki/FMin, section "Defining a search
space" for further info. Also see the (very basic) example in
contrib.rllab_hyperopt.example.main.py.
NOTE: While the argument n_hyperopt_workers specifies the number of (local)
parallel hyperopt workers to start, an equal number of EC2 instances will
be started in parallel!
NOTE2: garage currently terminates / starts a new EC2 instance for every
task. This means what you'll pay amounts to hyperopt_max_evals *
instance_hourly_rate. So you might want to be conservative with
hyperopt_max_evals.
:param task_method: the method call that runs the actual task.
Should take a single dict as argument, with the params to evaluate.
See e.g. contrib.rllab_hyperopt.example.task.py
:param eval_method: the method call that reads in results returned
from S3 and produces a score. Should take the exp_prefix and exp_name as
arguments (this is where S3 results will be synced to).
See e.g. contrib.rllab_hyperopt.example.score.py
:param param_space: dict specifying the param space to search.
See https://github.com/hyperopt/hyperopt/wiki/FMin, section
"Defining a search space" for further info
:param hyperopt_experiment_key: str, the key hyperopt will use to store
results in the DB
:param hyperopt_db_host: str, optional (default "localhost"). The host
where mongodb runs
:param hyperopt_db_port: int, optional (default 1234), the port where
mongodb is listening for connections
:param hyperopt_db_name: str, optional (default "garage"), the DB name
where hyperopt will store results
:param n_hyperopt_workers: int, optional (default 1). The nr of parallel
workers to start. NOTE: an equal number of EC2 instances will be started
in parallel.
:param hyperopt_max_evals: int, optional (defailt 100). Number of
parameterset evaluations hyperopt should try.
NOTE: garage currently terminates / starts a new EC2 instance for every
task. This means what you'll pay amounts to
hyperopt_max_evals * instance_hourly_rate. So you might want to be
conservative with hyperopt_max_evals.
:param result_timeout: int, optional (default 1200). Nr of seconds to wait
for results from S3 for a given task. If results are not in within this
time frame, <max_retries> new attempts will be made. A new attempt entails
launching the task again on a new EC2 instance.
:param max_retries: int, optional (default 0). Number of times to retry
launching a task when results don't come in from S3
:param run_experiment_kwargs: dict, optional (default None). Further kwargs
to pass to run_experiment. Note that specified values for exp_prefix,
exp_name, variant, and confirm_remote will be ignored.
:return the best result as found by hyperopt.fmin
"""
exp_key = hyperopt_experiment_key
worker_args = {
"exp_prefix": exp_key,
"task_module": task_method.__module__,
"task_function": task_method.__name__,
"eval_module": eval_method.__module__,
"eval_function": eval_method.__name__,
"result_timeout": result_timeout,
"max_retries": max_retries,
}
worker_args.update(param_space)
if run_experiment_kwargs is not None:
worker_args["run_experiment_kwargs"] = run_experiment_kwargs
trials = MongoTrials(
"mongo://{0}:{1:d}/{2}/jobs".format(
hyperopt_db_host, hyperopt_db_port, hyperopt_db_name
),
exp_key=exp_key,
)
workers = _launch_workers(
exp_key,
n_hyperopt_workers,
hyperopt_db_host,
hyperopt_db_port,
hyperopt_db_name,
)
s3sync = S3SyncThread()
s3sync.start()
print("Starting hyperopt")
best = fmin(
objective_fun,
worker_args,
trials=trials,
algo=tpe.suggest,
max_evals=hyperopt_max_evals,
)
s3sync.stop()
s3sync.join()
for worker in workers:
worker.terminate()
return best
|
def launch_hyperopt_search(
task_method,
eval_method,
param_space,
hyperopt_experiment_key,
hyperopt_db_host="localhost",
hyperopt_db_port=1234,
hyperopt_db_name="garage",
n_hyperopt_workers=1,
hyperopt_max_evals=100,
result_timeout=1200,
max_retries=0,
run_experiment_kwargs=None,
):
"""
Launch a hyperopt search using EC2.
This uses the hyperopt parallel processing functionality based on MongoDB.
The MongoDB server at the specified host and port is assumed to be already
running. Downloading and running MongoDB is pretty straightforward, see
https://github.com/hyperopt/hyperopt/wiki/Parallelizing-Evaluations-During-
Search-via-MongoDB for instructions.
The parameter space to be searched over is specified in param_space. See
https://github.com/hyperopt/hyperopt/wiki/FMin, section "Defining a search
space" for further info. Also see the (very basic) example in
contrib.rllab_hyperopt.example.main.py.
NOTE: While the argument n_hyperopt_workers specifies the number of (local)
parallel hyperopt workers to start, an equal number of EC2 instances will
be started in parallel!
NOTE2: garage currently terminates / starts a new EC2 instance for every
task. This means what you'll pay amounts to hyperopt_max_evals *
instance_hourly_rate. So you might want to be conservative with
hyperopt_max_evals.
:param task_method: the stubbed method call that runs the actual task.
Should take a single dict as argument, with the params to evaluate.
See e.g. contrib.rllab_hyperopt.example.task.py
:param eval_method: the stubbed method call that reads in results returned
from S3 and produces a score. Should take the exp_prefix and exp_name as
arguments (this is where S3 results will be synced to).
See e.g. contrib.rllab_hyperopt.example.score.py
:param param_space: dict specifying the param space to search.
See https://github.com/hyperopt/hyperopt/wiki/FMin, section
"Defining a search space" for further info
:param hyperopt_experiment_key: str, the key hyperopt will use to store
results in the DB
:param hyperopt_db_host: str, optional (default "localhost"). The host
where mongodb runs
:param hyperopt_db_port: int, optional (default 1234), the port where
mongodb is listening for connections
:param hyperopt_db_name: str, optional (default "garage"), the DB name
where hyperopt will store results
:param n_hyperopt_workers: int, optional (default 1). The nr of parallel
workers to start. NOTE: an equal number of EC2 instances will be started
in parallel.
:param hyperopt_max_evals: int, optional (defailt 100). Number of
parameterset evaluations hyperopt should try.
NOTE: garage currently terminates / starts a new EC2 instance for every
task. This means what you'll pay amounts to
hyperopt_max_evals * instance_hourly_rate. So you might want to be
conservative with hyperopt_max_evals.
:param result_timeout: int, optional (default 1200). Nr of seconds to wait
for results from S3 for a given task. If results are not in within this
time frame, <max_retries> new attempts will be made. A new attempt entails
launching the task again on a new EC2 instance.
:param max_retries: int, optional (default 0). Number of times to retry
launching a task when results don't come in from S3
:param run_experiment_kwargs: dict, optional (default None). Further kwargs
to pass to run_experiment. Note that specified values for exp_prefix,
exp_name, variant, and confirm_remote will be ignored.
:return the best result as found by hyperopt.fmin
"""
exp_key = hyperopt_experiment_key
worker_args = {
"exp_prefix": exp_key,
"task_module": task_method.__module__,
"task_function": task_method.__name__,
"eval_module": eval_method.__module__,
"eval_function": eval_method.__name__,
"result_timeout": result_timeout,
"max_retries": max_retries,
}
worker_args.update(param_space)
if run_experiment_kwargs is not None:
worker_args["run_experiment_kwargs"] = run_experiment_kwargs
trials = MongoTrials(
"mongo://{0}:{1:d}/{2}/jobs".format(
hyperopt_db_host, hyperopt_db_port, hyperopt_db_name
),
exp_key=exp_key,
)
workers = _launch_workers(
exp_key,
n_hyperopt_workers,
hyperopt_db_host,
hyperopt_db_port,
hyperopt_db_name,
)
s3sync = S3SyncThread()
s3sync.start()
print("Starting hyperopt")
best = fmin(
objective_fun,
worker_args,
trials=trials,
algo=tpe.suggest,
max_evals=hyperopt_max_evals,
)
s3sync.stop()
s3sync.join()
for worker in workers:
worker.terminate()
return best
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def run_experiment(
method_call=None,
batch_tasks=None,
exp_prefix="experiment",
exp_name=None,
log_dir=None,
script="scripts/run_experiment.py",
python_command="python",
mode="local",
dry=False,
docker_image=None,
aws_config=None,
env=None,
variant=None,
use_tf=False,
use_gpu=False,
sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
confirm_remote=True,
terminate_machine=True,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=True,
use_cloudpickle=None,
pre_commands=None,
added_project_directories=[],
**kwargs,
):
"""
Serialize the method call and run the experiment using the
specified mode.
:param method_call: A method call.
:param script: The name of the entrance point python script
:param mode: Where and how to run the experiment. Should be one of "local",
"local_docker", "ec2", or "lab_kube".
:param dry: Whether to do a dry-run, which only prints the commands without
executing them.
:param exp_prefix: Name prefix for the experiments
:param docker_image: name of the docker image. Ignored if using local mode.
:param aws_config: configuration for AWS. Only used under EC2 mode
:param env: extra environment variables
:param kwargs: All other parameters will be passed directly to the entrance
python script.
:param variant: If provided, should be a dictionary of parameters
:param use_tf: this flag is used along with the Theano and GPU
configuration when using TensorFlow
:param use_gpu: Whether the launched task is running on GPU. This triggers
a few configuration changes including
certain environment flags
:param sync_s3_pkl: Whether to sync pkl files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param sync_s3_png: Whether to sync png files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param sync_s3_log: Whether to sync log files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param confirm_remote: Whether to confirm before launching experiments
remotely
:param terminate_machine: Whether to terminate machine after experiment
finishes. Only used when using mode="ec2". This is useful when one wants
to debug after an experiment finishes abnormally.
:param periodic_sync: Whether to synchronize certain experiment files
periodically during execution.
:param periodic_sync_interval: Time interval between each periodic sync,
in seconds.
"""
assert method_call is not None or batch_tasks is not None, (
"Must provide at least either method_call or batch_tasks"
)
if use_cloudpickle is None:
for task in batch_tasks or [method_call]:
assert hasattr(task, "__call__")
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
method_call=method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle,
)
]
global exp_count
global remote_confirmed
config.USE_GPU = use_gpu
config.USE_TF = use_tf
if use_tf:
if not use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
else:
os.unsetenv("CUDA_VISIBLE_DEVICES")
# params_list = []
for task in batch_tasks:
call = task.pop("method_call")
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
else:
data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
task["args_data"] = data
exp_count += 1
params = dict(kwargs)
if task.get("exp_name", None) is None:
task["exp_name"] = "%s_%s_%04d" % (exp_prefix, timestamp, exp_count)
if task.get("log_dir", None) is None:
task["log_dir"] = (
config.LOG_DIR
+ "/local/"
+ exp_prefix.replace("_", "-")
+ "/"
+ task["exp_name"]
)
if task.get("variant", None) is not None:
variant = task.pop("variant")
if "exp_name" not in variant:
variant["exp_name"] = task["exp_name"]
task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode(
"utf-8"
)
elif "variant" in task:
del task["variant"]
task["remote_log_dir"] = osp.join(
config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"]
)
task["env"] = task.get("env", dict()) or dict()
task["env"]["GARAGE_USE_GPU"] = str(use_gpu)
task["env"]["GARAGE_USE_TF"] = str(use_tf)
if (
mode not in ["local", "local_docker"]
and not remote_confirmed
and not dry
and confirm_remote
):
remote_confirmed = query_yes_no("Running in (non-dry) mode %s. Confirm?" % mode)
if not remote_confirmed:
sys.exit(1)
if hasattr(mode, "__call__"):
if docker_image is None:
docker_image = config.DOCKER_IMAGE
mode(
task,
docker_image=docker_image,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
script=script,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
)
elif mode == "local":
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_local_command(
task,
python_command=python_command,
script=osp.join(config.PROJECT_PATH, script),
use_gpu=use_gpu,
)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
elif mode == "local_docker":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_docker_command(
task, # these are the params. Pre and Post command can be here
docker_image=docker_image,
script=script,
env=env,
use_gpu=use_gpu,
use_tty=True,
python_command=python_command,
)
print(command)
if dry:
return
p = subprocess.Popen(command, shell=True)
try:
p.wait()
except KeyboardInterrupt:
try:
print("terminating")
p.terminate()
except OSError:
print("os error!")
pass
p.wait()
elif mode == "ec2":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
s3_code_path = s3_sync_code(
config, dry=dry, added_project_directories=added_project_directories
)
launch_ec2(
batch_tasks,
exp_prefix=exp_prefix,
docker_image=docker_image,
python_command=python_command,
script=script,
aws_config=aws_config,
dry=dry,
terminate_machine=terminate_machine,
use_gpu=use_gpu,
code_full_path=s3_code_path,
sync_s3_pkl=sync_s3_pkl,
sync_s3_png=sync_s3_png,
sync_s3_log=sync_s3_log,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
)
elif mode == "lab_kube":
# assert env is None
# first send code folder to s3
s3_code_path = s3_sync_code(config, dry=dry)
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
# if 'env' in task:
# assert task.pop('env') is None
# TODO: dangerous when there are multiple tasks?
task["resources"] = params.pop("resources", config.KUBE_DEFAULT_RESOURCES)
task["node_selector"] = params.pop(
"node_selector", config.KUBE_DEFAULT_NODE_SELECTOR
)
task["exp_prefix"] = exp_prefix
pod_dict = to_lab_kube_pod(
task,
code_full_path=s3_code_path,
docker_image=docker_image,
script=script,
is_gpu=use_gpu,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
terminate_machine=terminate_machine,
)
pod_str = json.dumps(pod_dict, indent=1)
if dry:
print(pod_str)
dir = "{pod_dir}/{exp_prefix}".format(
pod_dir=config.POD_DIR, exp_prefix=exp_prefix
)
ensure_dir(dir)
fname = "{dir}/{exp_name}.json".format(dir=dir, exp_name=task["exp_name"])
with open(fname, "w") as fh:
fh.write(pod_str)
kubecmd = "kubectl create -f %s" % fname
print(kubecmd)
if dry:
return
retry_count = 0
wait_interval = 1
while retry_count <= 5:
try:
return_code = subprocess.call(kubecmd, shell=True)
if return_code == 0:
break
retry_count += 1
print("trying again...")
time.sleep(wait_interval)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise
print(e)
else:
raise NotImplementedError
|
def run_experiment(
stub_method_call=None,
batch_tasks=None,
exp_prefix="experiment",
exp_name=None,
log_dir=None,
script="scripts/run_experiment.py",
python_command="python",
mode="local",
dry=False,
docker_image=None,
aws_config=None,
env=None,
variant=None,
use_tf=False,
use_gpu=False,
sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
confirm_remote=True,
terminate_machine=True,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=True,
use_cloudpickle=None,
pre_commands=None,
added_project_directories=[],
**kwargs,
):
"""
Serialize the stubbed method call and run the experiment using the
specified mode.
:param stub_method_call: A stubbed method call.
:param script: The name of the entrance point python script
:param mode: Where and how to run the experiment. Should be one of "local",
"local_docker", "ec2", or "lab_kube".
:param dry: Whether to do a dry-run, which only prints the commands without
executing them.
:param exp_prefix: Name prefix for the experiments
:param docker_image: name of the docker image. Ignored if using local mode.
:param aws_config: configuration for AWS. Only used under EC2 mode
:param env: extra environment variables
:param kwargs: All other parameters will be passed directly to the entrance
python script.
:param variant: If provided, should be a dictionary of parameters
:param use_tf: this flag is used along with the Theano and GPU
configuration when using TensorFlow
:param use_gpu: Whether the launched task is running on GPU. This triggers
a few configuration changes including
certain environment flags
:param sync_s3_pkl: Whether to sync pkl files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param sync_s3_png: Whether to sync png files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param sync_s3_log: Whether to sync log files during execution of the
experiment (they will always be synced at
the end of the experiment)
:param confirm_remote: Whether to confirm before launching experiments
remotely
:param terminate_machine: Whether to terminate machine after experiment
finishes. Only used when using mode="ec2". This is useful when one wants
to debug after an experiment finishes abnormally.
:param periodic_sync: Whether to synchronize certain experiment files
periodically during execution.
:param periodic_sync_interval: Time interval between each periodic sync,
in seconds.
"""
assert stub_method_call is not None or batch_tasks is not None, (
"Must provide at least either stub_method_call or batch_tasks"
)
if use_cloudpickle is None:
for maybe_stub in batch_tasks or [stub_method_call]:
# decide mode
if isinstance(maybe_stub, StubBase):
use_cloudpickle = False
else:
assert hasattr(maybe_stub, "__call__")
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
stub_method_call=stub_method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle,
)
]
global exp_count
global remote_confirmed
config.USE_GPU = use_gpu
config.USE_TF = use_tf
if use_tf:
if not use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
else:
os.unsetenv("CUDA_VISIBLE_DEVICES")
# params_list = []
for task in batch_tasks:
call = task.pop("stub_method_call")
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
else:
data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
task["args_data"] = data
exp_count += 1
params = dict(kwargs)
if task.get("exp_name", None) is None:
task["exp_name"] = "%s_%s_%04d" % (exp_prefix, timestamp, exp_count)
if task.get("log_dir", None) is None:
task["log_dir"] = (
config.LOG_DIR
+ "/local/"
+ exp_prefix.replace("_", "-")
+ "/"
+ task["exp_name"]
)
if task.get("variant", None) is not None:
variant = task.pop("variant")
if "exp_name" not in variant:
variant["exp_name"] = task["exp_name"]
task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode(
"utf-8"
)
elif "variant" in task:
del task["variant"]
task["remote_log_dir"] = osp.join(
config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"]
)
task["env"] = task.get("env", dict()) or dict()
task["env"]["GARAGE_USE_GPU"] = str(use_gpu)
task["env"]["GARAGE_USE_TF"] = str(use_tf)
if (
mode not in ["local", "local_docker"]
and not remote_confirmed
and not dry
and confirm_remote
):
remote_confirmed = query_yes_no("Running in (non-dry) mode %s. Confirm?" % mode)
if not remote_confirmed:
sys.exit(1)
if hasattr(mode, "__call__"):
if docker_image is None:
docker_image = config.DOCKER_IMAGE
mode(
task,
docker_image=docker_image,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
script=script,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
)
elif mode == "local":
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_local_command(
task,
python_command=python_command,
script=osp.join(config.PROJECT_PATH, script),
use_gpu=use_gpu,
)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
elif mode == "local_docker":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_docker_command(
task, # these are the params. Pre and Post command can be here
docker_image=docker_image,
script=script,
env=env,
use_gpu=use_gpu,
use_tty=True,
python_command=python_command,
)
print(command)
if dry:
return
p = subprocess.Popen(command, shell=True)
try:
p.wait()
except KeyboardInterrupt:
try:
print("terminating")
p.terminate()
except OSError:
print("os error!")
pass
p.wait()
elif mode == "ec2":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
s3_code_path = s3_sync_code(
config, dry=dry, added_project_directories=added_project_directories
)
launch_ec2(
batch_tasks,
exp_prefix=exp_prefix,
docker_image=docker_image,
python_command=python_command,
script=script,
aws_config=aws_config,
dry=dry,
terminate_machine=terminate_machine,
use_gpu=use_gpu,
code_full_path=s3_code_path,
sync_s3_pkl=sync_s3_pkl,
sync_s3_png=sync_s3_png,
sync_s3_log=sync_s3_log,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
)
elif mode == "lab_kube":
# assert env is None
# first send code folder to s3
s3_code_path = s3_sync_code(config, dry=dry)
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
# if 'env' in task:
# assert task.pop('env') is None
# TODO: dangerous when there are multiple tasks?
task["resources"] = params.pop("resources", config.KUBE_DEFAULT_RESOURCES)
task["node_selector"] = params.pop(
"node_selector", config.KUBE_DEFAULT_NODE_SELECTOR
)
task["exp_prefix"] = exp_prefix
pod_dict = to_lab_kube_pod(
task,
code_full_path=s3_code_path,
docker_image=docker_image,
script=script,
is_gpu=use_gpu,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
terminate_machine=terminate_machine,
)
pod_str = json.dumps(pod_dict, indent=1)
if dry:
print(pod_str)
dir = "{pod_dir}/{exp_prefix}".format(
pod_dir=config.POD_DIR, exp_prefix=exp_prefix
)
ensure_dir(dir)
fname = "{dir}/{exp_name}.json".format(dir=dir, exp_name=task["exp_name"])
with open(fname, "w") as fh:
fh.write(pod_str)
kubecmd = "kubectl create -f %s" % fname
print(kubecmd)
if dry:
return
retry_count = 0
wait_interval = 1
while retry_count <= 5:
try:
return_code = subprocess.call(kubecmd, shell=True)
if return_code == 0:
break
retry_count += 1
print("trying again...")
time.sleep(wait_interval)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise
print(e)
else:
raise NotImplementedError
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def concretize(obj):
if isinstance(obj, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in obj.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(obj, (list, tuple)):
return obj.__class__(list(map(concretize, obj)))
else:
return obj
|
def concretize(maybe_stub):
if isinstance(maybe_stub, StubMethodCall):
obj = concretize(maybe_stub.obj)
method = getattr(obj, maybe_stub.method_name)
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
return method(*args, **kwargs)
elif isinstance(maybe_stub, StubClass):
return maybe_stub.proxy_class
elif isinstance(maybe_stub, StubAttr):
obj = concretize(maybe_stub.obj)
attr_name = maybe_stub.attr_name
attr_val = getattr(obj, attr_name)
return concretize(attr_val)
elif isinstance(maybe_stub, StubObject):
if not hasattr(maybe_stub, "__stub_cache"):
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
try:
maybe_stub.__stub_cache = maybe_stub.proxy_class(*args, **kwargs)
except Exception as e:
print(("Error while instantiating %s" % maybe_stub.proxy_class))
import traceback
traceback.print_exc()
ret = maybe_stub.__stub_cache
return ret
elif isinstance(maybe_stub, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in maybe_stub.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(maybe_stub, (list, tuple)):
return maybe_stub.__class__(list(map(concretize, maybe_stub)))
else:
return maybe_stub
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def log_parameters_lite(log_file, args):
log_params = {}
for param_name, param_value in args.__dict__.items():
log_params[param_name] = param_value
if args.args_data is not None:
log_params["json_args"] = dict()
mkdir_p(os.path.dirname(log_file))
with open(log_file, "w") as f:
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
|
def log_parameters_lite(log_file, args):
log_params = {}
for param_name, param_value in args.__dict__.items():
log_params[param_name] = param_value
if args.args_data is not None:
stub_method = pickle.loads(base64.b64decode(args.args_data))
method_args = stub_method.kwargs
log_params["json_args"] = dict()
for k, v in list(method_args.items()):
log_params["json_args"][k] = stub_to_json(v)
kwargs = stub_method.obj.kwargs
for k in ["baseline", "env", "policy"]:
if k in kwargs:
log_params["json_args"][k] = stub_to_json(kwargs.pop(k))
log_params["json_args"]["algo"] = stub_to_json(stub_method.obj)
mkdir_p(os.path.dirname(log_file))
with open(log_file, "w") as f:
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def log_variant(log_file, variant_data):
mkdir_p(os.path.dirname(log_file))
if hasattr(variant_data, "dump"):
variant_data = variant_data.dump()
with open(log_file, "w") as f:
json.dump(variant_data, f, indent=2, sort_keys=True, cls=MyEncoder)
|
def log_variant(log_file, variant_data):
mkdir_p(os.path.dirname(log_file))
if hasattr(variant_data, "dump"):
variant_data = variant_data.dump()
variant_json = stub_to_json(variant_data)
with open(log_file, "w") as f:
json.dump(variant_json, f, indent=2, sort_keys=True, cls=MyEncoder)
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def load_progress(progress_csv_path):
print("Reading %s" % progress_csv_path)
entries = dict()
with open(progress_csv_path, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for k, v in row.items():
if k not in entries:
entries[k] = []
try:
entries[k].append(float(v))
except: # noqa
entries[k].append(0.0)
entries = dict([(k, np.array(v)) for k, v in entries.items()])
return entries
|
def load_progress(progress_csv_path):
print("Reading %s" % progress_csv_path)
entries = dict()
with open(progress_csv_path, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for k, v in row.items():
if k not in entries:
entries[k] = []
try:
entries[k].append(float(v))
except:
entries[k].append(0.0)
entries = dict([(k, np.array(v)) for k, v in entries.items()])
return entries
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def extract_distinct_params(
exps_data, excluded_params=("exp_name", "seed", "log_dir"), length=1
):
# all_pairs = unique(flatten([d.flat_params.items() for d in exps_data]))
# if logger:
# logger("(Excluding {excluded})".format(
# excluded=', '.join(excluded_params)))
# def cmp(x,y):
# if x < y:
# return -1
# elif x > y:
# return 1
# else:
# return 0
try:
stringified_pairs = sorted(
map(
eval,
unique(
flatten(
[
list(map(smart_repr, list(d.flat_params.items())))
for d in exps_data
]
)
),
),
key=lambda x: (tuple(0.0 if it is None else it for it in x),),
)
except Exception as e:
print(e)
proposals = [
(k, [x[1] for x in v])
for k, v in itertools.groupby(stringified_pairs, lambda x: x[0])
]
filtered = [
(k, v)
for (k, v) in proposals
if len(v) > length
and all([k.find(excluded_param) != 0 for excluded_param in excluded_params])
]
return filtered
|
def extract_distinct_params(
exps_data, excluded_params=("exp_name", "seed", "log_dir"), l=1
):
# all_pairs = unique(flatten([d.flat_params.items() for d in exps_data]))
# if logger:
# logger("(Excluding {excluded})".format(excluded=', '.join(excluded_params)))
# def cmp(x,y):
# if x < y:
# return -1
# elif x > y:
# return 1
# else:
# return 0
try:
stringified_pairs = sorted(
map(
eval,
unique(
flatten(
[
list(map(smart_repr, list(d.flat_params.items())))
for d in exps_data
]
)
),
),
key=lambda x: (tuple(0.0 if it is None else it for it in x),),
)
except Exception as e:
print(e)
proposals = [
(k, [x[1] for x in v])
for k, v in itertools.groupby(stringified_pairs, lambda x: x[0])
]
filtered = [
(k, v)
for (k, v) in proposals
if len(v) > l
and all([k.find(excluded_param) != 0 for excluded_param in excluded_params])
]
return filtered
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def run_experiment(argv):
default_log_dir = config.LOG_DIR
now = datetime.datetime.now(dateutil.tz.tzlocal())
# avoid name clashes when running distributed jobs
rand_id = str(uuid.uuid4())[:5]
timestamp = now.strftime("%Y_%m_%d_%H_%M_%S_%f_%Z")
default_exp_name = "experiment_%s_%s" % (timestamp, rand_id)
parser = argparse.ArgumentParser()
parser.add_argument(
"--n_parallel",
type=int,
default=1,
help=(
"Number of parallel workers to perform rollouts. "
"0 => don't start any workers"
),
)
parser.add_argument(
"--exp_name", type=str, default=default_exp_name, help="Name of the experiment."
)
parser.add_argument(
"--log_dir",
type=str,
default=None,
help="Path to save the log and iteration snapshot.",
)
parser.add_argument(
"--snapshot_mode",
type=str,
default="all",
help='Mode to save the snapshot. Can be either "all" '
'(all iterations will be saved), "last" (only '
'the last iteration will be saved), "gap" (every'
'`snapshot_gap` iterations are saved), or "none" '
"(do not save snapshots)",
)
parser.add_argument(
"--snapshot_gap", type=int, default=1, help="Gap between snapshot iterations."
)
parser.add_argument(
"--tabular_log_file",
type=str,
default="progress.csv",
help="Name of the tabular log file (in csv).",
)
parser.add_argument(
"--text_log_file",
type=str,
default="debug.log",
help="Name of the text log file (in pure text).",
)
parser.add_argument(
"--tensorboard_step_key",
type=str,
default=None,
help=("Name of the step key in tensorboard_summary."),
)
parser.add_argument(
"--params_log_file",
type=str,
default="params.json",
help="Name of the parameter log file (in json).",
)
parser.add_argument(
"--variant_log_file",
type=str,
default="variant.json",
help="Name of the variant log file (in json).",
)
parser.add_argument(
"--resume_from",
type=str,
default=None,
help="Name of the pickle file to resume experiment from.",
)
parser.add_argument(
"--plot",
type=ast.literal_eval,
default=False,
help="Whether to plot the iteration results",
)
parser.add_argument(
"--log_tabular_only",
type=ast.literal_eval,
default=False,
help="Print only the tabular log information (in a horizontal format)",
)
parser.add_argument("--seed", type=int, help="Random seed for numpy")
parser.add_argument("--args_data", type=str, help="Pickled data for objects")
parser.add_argument(
"--variant_data", type=str, help="Pickled data for variant configuration"
)
parser.add_argument("--use_cloudpickle", type=ast.literal_eval, default=False)
args = parser.parse_args(argv[1:])
if args.seed is not None:
set_seed(args.seed)
# SIGINT is blocked for all processes created in parallel_sampler to avoid
# the creation of sleeping and zombie processes.
#
# If the user interrupts run_experiment, there's a chance some processes
# won't die due to a dead lock condition where one of the children in the
# parallel sampler exits without releasing a lock once after it catches
# SIGINT.
#
# Later the parent tries to acquire the same lock to proceed with his
# cleanup, but it remains sleeping waiting for the lock to be released.
# In the meantime, all the process in parallel sampler remain in the zombie
# state since the parent cannot proceed with their clean up.
with mask_signals([signal.SIGINT]):
if args.n_parallel > 0:
parallel_sampler.initialize(n_parallel=args.n_parallel)
if args.seed is not None:
parallel_sampler.set_seed(args.seed)
if not args.plot:
garage.plotter.Plotter.disable()
garage.tf.plotter.Plotter.disable()
if args.log_dir is None:
log_dir = osp.join(default_log_dir, args.exp_name)
else:
log_dir = args.log_dir
tabular_log_file = osp.join(log_dir, args.tabular_log_file)
text_log_file = osp.join(log_dir, args.text_log_file)
params_log_file = osp.join(log_dir, args.params_log_file)
if args.variant_data is not None:
variant_data = pickle.loads(base64.b64decode(args.variant_data))
variant_log_file = osp.join(log_dir, args.variant_log_file)
logger.log_variant(variant_log_file, variant_data)
else:
variant_data = None
if not args.use_cloudpickle:
logger.log_parameters_lite(params_log_file, args)
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.set_tensorboard_dir(log_dir)
prev_snapshot_dir = logger.get_snapshot_dir()
prev_mode = logger.get_snapshot_mode()
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(args.snapshot_mode)
logger.set_snapshot_gap(args.snapshot_gap)
logger.set_log_tabular_only(args.log_tabular_only)
logger.set_tensorboard_step_key(args.tensorboard_step_key)
logger.push_prefix("[%s] " % args.exp_name)
if args.resume_from is not None:
data = joblib.load(args.resume_from)
assert "algo" in data
algo = data["algo"]
algo.train()
else:
# read from stdin
if args.use_cloudpickle:
import cloudpickle
method_call = cloudpickle.loads(base64.b64decode(args.args_data))
try:
method_call(variant_data)
except BaseException:
children = garage.plotter.Plotter.get_plotters()
children += garage.tf.plotter.Plotter.get_plotters()
if args.n_parallel > 0:
children += [parallel_sampler]
child_proc_shutdown(children)
raise
else:
data = pickle.loads(base64.b64decode(args.args_data))
maybe_iter = concretize(data)
if is_iterable(maybe_iter):
for _ in maybe_iter:
pass
logger.set_snapshot_mode(prev_mode)
logger.set_snapshot_dir(prev_snapshot_dir)
logger.remove_tabular_output(tabular_log_file)
logger.remove_text_output(text_log_file)
logger.pop_prefix()
|
def run_experiment(argv):
default_log_dir = config.LOG_DIR
now = datetime.datetime.now(dateutil.tz.tzlocal())
# avoid name clashes when running distributed jobs
rand_id = str(uuid.uuid4())[:5]
timestamp = now.strftime("%Y_%m_%d_%H_%M_%S_%f_%Z")
default_exp_name = "experiment_%s_%s" % (timestamp, rand_id)
parser = argparse.ArgumentParser()
parser.add_argument(
"--n_parallel",
type=int,
default=1,
help=(
"Number of parallel workers to perform rollouts. "
"0 => don't start any workers"
),
)
parser.add_argument(
"--exp_name", type=str, default=default_exp_name, help="Name of the experiment."
)
parser.add_argument(
"--log_dir",
type=str,
default=None,
help="Path to save the log and iteration snapshot.",
)
parser.add_argument(
"--snapshot_mode",
type=str,
default="all",
help='Mode to save the snapshot. Can be either "all" '
'(all iterations will be saved), "last" (only '
'the last iteration will be saved), "gap" (every'
'`snapshot_gap` iterations are saved), or "none" '
"(do not save snapshots)",
)
parser.add_argument(
"--snapshot_gap", type=int, default=1, help="Gap between snapshot iterations."
)
parser.add_argument(
"--tabular_log_file",
type=str,
default="progress.csv",
help="Name of the tabular log file (in csv).",
)
parser.add_argument(
"--text_log_file",
type=str,
default="debug.log",
help="Name of the text log file (in pure text).",
)
parser.add_argument(
"--tensorboard_step_key",
type=str,
default=None,
help=("Name of the step key in tensorboard_summary."),
)
parser.add_argument(
"--params_log_file",
type=str,
default="params.json",
help="Name of the parameter log file (in json).",
)
parser.add_argument(
"--variant_log_file",
type=str,
default="variant.json",
help="Name of the variant log file (in json).",
)
parser.add_argument(
"--resume_from",
type=str,
default=None,
help="Name of the pickle file to resume experiment from.",
)
parser.add_argument(
"--plot",
type=ast.literal_eval,
default=False,
help="Whether to plot the iteration results",
)
parser.add_argument(
"--log_tabular_only",
type=ast.literal_eval,
default=False,
help="Print only the tabular log information (in a horizontal format)",
)
parser.add_argument("--seed", type=int, help="Random seed for numpy")
parser.add_argument("--args_data", type=str, help="Pickled data for stub objects")
parser.add_argument(
"--variant_data", type=str, help="Pickled data for variant configuration"
)
parser.add_argument("--use_cloudpickle", type=ast.literal_eval, default=False)
args = parser.parse_args(argv[1:])
if args.seed is not None:
set_seed(args.seed)
# SIGINT is blocked for all processes created in parallel_sampler to avoid
# the creation of sleeping and zombie processes.
#
# If the user interrupts run_experiment, there's a chance some processes
# won't die due to a dead lock condition where one of the children in the
# parallel sampler exits without releasing a lock once after it catches
# SIGINT.
#
# Later the parent tries to acquire the same lock to proceed with his
# cleanup, but it remains sleeping waiting for the lock to be released.
# In the meantime, all the process in parallel sampler remain in the zombie
# state since the parent cannot proceed with their clean up.
with mask_signals([signal.SIGINT]):
if args.n_parallel > 0:
parallel_sampler.initialize(n_parallel=args.n_parallel)
if args.seed is not None:
parallel_sampler.set_seed(args.seed)
if not args.plot:
garage.plotter.Plotter.disable()
garage.tf.plotter.Plotter.disable()
if args.log_dir is None:
log_dir = osp.join(default_log_dir, args.exp_name)
else:
log_dir = args.log_dir
tabular_log_file = osp.join(log_dir, args.tabular_log_file)
text_log_file = osp.join(log_dir, args.text_log_file)
params_log_file = osp.join(log_dir, args.params_log_file)
if args.variant_data is not None:
variant_data = pickle.loads(base64.b64decode(args.variant_data))
variant_log_file = osp.join(log_dir, args.variant_log_file)
logger.log_variant(variant_log_file, variant_data)
else:
variant_data = None
if not args.use_cloudpickle:
logger.log_parameters_lite(params_log_file, args)
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.set_tensorboard_dir(log_dir)
prev_snapshot_dir = logger.get_snapshot_dir()
prev_mode = logger.get_snapshot_mode()
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(args.snapshot_mode)
logger.set_snapshot_gap(args.snapshot_gap)
logger.set_log_tabular_only(args.log_tabular_only)
logger.set_tensorboard_step_key(args.tensorboard_step_key)
logger.push_prefix("[%s] " % args.exp_name)
if args.resume_from is not None:
data = joblib.load(args.resume_from)
assert "algo" in data
algo = data["algo"]
algo.train()
else:
# read from stdin
if args.use_cloudpickle:
import cloudpickle
method_call = cloudpickle.loads(base64.b64decode(args.args_data))
try:
method_call(variant_data)
except BaseException:
children = garage.plotter.Plotter.get_plotters()
children += garage.tf.plotter.Plotter.get_plotters()
if args.n_parallel > 0:
children += [parallel_sampler]
child_proc_shutdown(children)
raise
else:
data = pickle.loads(base64.b64decode(args.args_data))
maybe_iter = concretize(data)
if is_iterable(maybe_iter):
for _ in maybe_iter:
pass
logger.set_snapshot_mode(prev_mode)
logger.set_snapshot_dir(prev_snapshot_dir)
logger.remove_tabular_output(tabular_log_file)
logger.remove_text_output(text_log_file)
logger.pop_prefix()
|
https://github.com/rlworkgroup/garage/issues/239
|
Traceback (most recent call last):
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 191, in <module>
run_experiment(sys.argv)
File "/Users/jonathon/Documents/garage/garage/scripts/run_experiment.py", line 146, in run_experiment
logger.log_parameters_lite(params_log_file, args)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 372, in log_parameters_lite
json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
File "/anaconda2/envs/garage/lib/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
[Previous line repeated 1 more times]
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/Users/jonathon/Documents/garage/garage/garage/misc/logger.py", line 352, in default
return json.JSONEncoder.default(self, o)
File "/anaconda2/envs/garage/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'TimeLimit' is not JSON serializable
|
TypeError
|
def sync_list_repositories(
executable_path, python_file, module_name, working_directory, attribute
):
from dagster.grpc.types import ListRepositoriesResponse, ListRepositoriesInput
result = check.inst(
execute_unary_api_cli_command(
executable_path,
"list_repositories",
ListRepositoriesInput(
module_name=module_name,
python_file=python_file,
working_directory=working_directory,
attribute=attribute,
),
),
(ListRepositoriesResponse, SerializableErrorInfo),
)
if isinstance(result, SerializableErrorInfo):
raise DagsterUserCodeProcessError(
result.to_string(), user_code_process_error_infos=[result]
)
else:
return result
|
def sync_list_repositories(
executable_path, python_file, module_name, working_directory, attribute
):
from dagster.grpc.types import ListRepositoriesResponse, ListRepositoriesInput
return check.inst(
execute_unary_api_cli_command(
executable_path,
"list_repositories",
ListRepositoriesInput(
module_name=module_name,
python_file=python_file,
working_directory=working_directory,
attribute=attribute,
),
),
ListRepositoriesResponse,
)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def sync_list_repositories_grpc(api_client):
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.types import ListRepositoriesResponse
check.inst_param(api_client, "api_client", DagsterGrpcClient)
result = check.inst(
api_client.list_repositories(),
(ListRepositoriesResponse, SerializableErrorInfo),
)
if isinstance(result, SerializableErrorInfo):
raise DagsterUserCodeProcessError(
result.to_string(), user_code_process_error_infos=[result]
)
else:
return result
|
def sync_list_repositories_grpc(api_client):
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.types import ListRepositoriesResponse
check.inst_param(api_client, "api_client", DagsterGrpcClient)
return check.inst(api_client.list_repositories(), ListRepositoriesResponse)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def list_repositories_command(args):
check.inst_param(args, "args", ListRepositoriesInput)
python_file, module_name, working_directory, attribute = (
args.python_file,
args.module_name,
args.working_directory,
args.attribute,
)
try:
loadable_targets = get_loadable_targets(
python_file, module_name, working_directory, attribute
)
return ListRepositoriesResponse(
[
LoadableRepositorySymbol(
attribute=lt.attribute,
repository_name=repository_def_from_target_def(
lt.target_definition
).name,
)
for lt in loadable_targets
]
)
except Exception: # pylint: disable=broad-except
return serializable_error_info_from_exc_info(sys.exc_info())
|
def list_repositories_command(args):
check.inst_param(args, "args", ListRepositoriesInput)
python_file, module_name, working_directory, attribute = (
args.python_file,
args.module_name,
args.working_directory,
args.attribute,
)
loadable_targets = get_loadable_targets(
python_file, module_name, working_directory, attribute
)
return ListRepositoriesResponse(
[
LoadableRepositorySymbol(
attribute=lt.attribute,
repository_name=repository_def_from_target_def(
lt.target_definition
).name,
)
for lt in loadable_targets
]
)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def list_repositories(self):
res = self._query("ListRepositories", api_pb2.ListRepositoriesRequest)
return deserialize_json_to_dagster_namedtuple(
res.serialized_list_repositories_response_or_error
)
|
def list_repositories(self):
res = self._query("ListRepositories", api_pb2.ListRepositoriesRequest)
return deserialize_json_to_dagster_namedtuple(
res.serialized_list_repositories_response
)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def __init__(
self,
shutdown_server_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._shutdown_server_event = check.inst_param(
shutdown_server_event, "shutdown_server_event", seven.ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
self._shutdown_server_event = check.inst_param(
shutdown_server_event, "shutdown_server_event", seven.ThreadingEventType
)
# Dict[str, multiprocessing.Process] of run_id to execute_run process
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._execution_lock = threading.Lock()
self._repository_symbols_and_code_pointers = LazyRepositorySymbolsAndCodePointers(
loadable_target_origin
)
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=heartbeat_thread,
args=(
heartbeat_timeout,
self.__last_heartbeat_time,
self._shutdown_server_event,
),
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
|
def __init__(
self,
shutdown_server_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._shutdown_server_event = check.inst_param(
shutdown_server_event, "shutdown_server_event", seven.ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
self._loadable_repository_symbols = [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
self._loadable_repository_symbols = []
self._shutdown_server_event = check.inst_param(
shutdown_server_event, "shutdown_server_event", seven.ThreadingEventType
)
# Dict[str, multiprocessing.Process] of run_id to execute_run process
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._execution_lock = threading.Lock()
self._repository_code_pointer_dict = {}
for loadable_repository_symbol in self._loadable_repository_symbols:
if self._loadable_target_origin.python_file:
self._repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
self._loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
self._loadable_target_origin.working_directory,
)
if self._loadable_target_origin.module_name:
self._repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
self._loadable_target_origin.module_name,
loadable_repository_symbol.attribute,
)
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=heartbeat_thread,
args=(
heartbeat_timeout,
self.__last_heartbeat_time,
self._shutdown_server_event,
),
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def _recon_repository_from_origin(self, repository_origin):
check.inst_param(
repository_origin,
"repository_origin",
RepositoryOrigin,
)
if isinstance(repository_origin, RepositoryGrpcServerOrigin):
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
repository_origin.repository_name
]
)
return recon_repository_from_origin(repository_origin)
|
def _recon_repository_from_origin(self, repository_origin):
check.inst_param(
repository_origin,
"repository_origin",
RepositoryOrigin,
)
if isinstance(repository_origin, RepositoryGrpcServerOrigin):
return ReconstructableRepository(
self._repository_code_pointer_dict[repository_origin.repository_name]
)
return recon_repository_from_origin(repository_origin)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def ListRepositories(self, request, _context):
try:
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
except Exception: # pylint: disable=broad-except
response = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
response
)
)
|
def ListRepositories(self, request, _context):
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response=serialize_dagster_namedtuple(
ListRepositoriesResponse(
self._loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=self._repository_code_pointer_dict,
)
)
)
|
https://github.com/dagster-io/dagster/issues/2772
|
Traceback (most recent call last):
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 11, in execute_command_in_subprocess
subprocess.check_output(parts, stderr=subprocess.STDOUT)
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/sryza/.pyenv/versions/dagster-3.6.8/bin/dagster", line 11, in <module>
load_entry_point('dagster', 'console_scripts', 'dagster')()
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main
cli(obj={}) # pylint:disable=E1123
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 262, in pipeline_execute_command
return _logged_pipeline_execute_command(config, preset, mode, DagsterInstance.get(), kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/core/telemetry.py", line 89, in wrap
result = f(*args, **kwargs)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 290, in _logged_pipeline_execute_command
result = execute_execute_command(env, kwargs, mode, tags)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/pipeline.py", line 297, in execute_execute_command
external_pipeline = get_external_pipeline_from_kwargs(cli_args, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 404, in get_external_pipeline_from_kwargs
external_repo = get_external_repository_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 367, in get_external_repository_from_kwargs
repo_location = get_repository_location_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 335, in get_repository_location_from_kwargs
workspace = get_workspace_from_kwargs(kwargs, instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 198, in get_workspace_from_kwargs
return workspace_from_load_target(created_workspace_load_target(kwargs), instance)
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/cli_target.py", line 168, in workspace_from_load_target
user_process_api=python_user_process_api,
File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/load.py", line 253, in location_handle_from_python_file
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/list_repositories.py", line 17, in sync_list_repositories
attribute=attribute,
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 32, in execute_unary_api_cli_command
execute_command_in_subprocess(parts)
File "/Users/sryza/dagster/python_modules/dagster/dagster/api/utils.py", line 14, in execute_command_in_subprocess
"Error when executing API command {cmd}: {output}".format(cmd=e.cmd, output=e.output)
dagster.serdes.ipc.DagsterIPCProtocolError: Error when executing API command ['/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/bin/python3.6', '-m', 'dagster', 'api', 'list_repositories', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpf_t93t_j', '/var/folders/df/2_jxd7dx073273d_mpywh4080000gn/T/tmpyyx3_gjt']: b'/Users/sryza/dagster/python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py:190: UserWarning: Using create_dagster_pandas_dataframe_type for dataframe types is deprecated,\n and is planned to be removed in a future version (tentatively 0.10.0).\n Please use create_structured_dataframe_type instead.\n Please use create_structured_dataframe_type instead."""\nTraceback (most recent call last):\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 193, in _run_module_as_main\n "__main__", mod_spec)\n File "/Users/sryza/.pyenv/versions/3.6.8/lib/python3.6/runpy.py", line 85, in _run_code\n exec(code, run_globals)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/__main__.py", line 3, in <module>\n main()\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/__init__.py", line 38, in main\n cli(obj={}) # pylint:disable=E1123\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 764, in __call__\n return self.main(*args, **kwargs)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 717, in main\n rv = self.invoke(ctx)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 1137, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/Users/sryza/.pyenv/versions/3.6.8/envs/dagster-3.6.8/lib/python3.6/site-packages/click/core.py", line 555, in invoke\n return callback(*args, **kwargs)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 115, in command\n output = check.inst(fn(args), output_cls)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/api.py", line 140, in list_repositories_command\n loadable_targets = get_loadable_targets(python_file, module_name, working_directory, attribute)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/grpc/utils.py", line 20, in get_loadable_targets\n else loadable_targets_from_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/cli/workspace/autodiscovery.py", line 11, in loadable_targets_from_python_file\n loaded_module = load_python_file(python_file, working_directory)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/code_pointer.py", line 88, in load_python_file\n return import_module_from_path(module_name, python_file)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/seven/__init__.py", line 110, in import_module_from_path\n spec.loader.exec_module(module)\n File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n File "examples/legacy_examples/dagster_examples/simple_lakehouse/simple_lakehouse.py", line 189, in <module>\n from dagster_examples.simple_lakehouse.daily_temperature_high_diffs import (\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 31, in <module>\n @repository\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 225, in repository\n return _Repository()(name)\n File "/Users/sryza/dagster/python_modules/dagster/dagster/core/definitions/decorators/repository.py", line 23, in __call__\n repository_definitions = fn()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 37, in legacy_examples\n + get_lakehouse_pipelines()\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/__init__.py", line 17, in get_lakehouse_pipelines\n from dagster_examples.simple_lakehouse.pipelines import simple_lakehouse_pipeline\n File "/Users/sryza/dagster/examples/legacy_examples/dagster_examples/simple_lakehouse/pipelines.py", line 7, in <module>\n from dagster_examples.simple_lakehouse.simple_lakehouse import simple_lakehouse\nImportError: cannot import name \'simple_lakehouse\'\n'
|
subprocess.CalledProcessError
|
def _evaluate_composite_solid_config(context):
"""Evaluates config for a composite solid and returns CompositeSolidEvaluationResult"""
# Support config mapping override functions
if not is_solid_container_config(context.config_type):
return EvaluateValueResult.empty()
handle = context.config_type.handle
# If we've already seen this handle, skip -- we've already run the block of code below
if not handle or handle in context.seen_handles:
return EvaluateValueResult.empty()
solid_def = context.pipeline.get_solid(context.config_type.handle).definition
solid_def_name = context.pipeline.get_solid(handle).definition.name
has_mapping = (
isinstance(solid_def, CompositeSolidDefinition) and solid_def.has_config_mapping
)
# If there's no config mapping function provided for this composite solid, bail
if not has_mapping:
return EvaluateValueResult.empty()
# We first validate the provided environment config as normal against the composite solid config
# schema. This will perform a full traversal rooted at the SolidContainerConfigDict and thread
# errors up to the root
config_context = context.new_context_with_handle(handle)
evaluate_value_result = _evaluate_config(config_context)
if not evaluate_value_result.success:
return evaluate_value_result
try:
mapped_config_value = solid_def.config_mapping.config_fn(
ConfigMappingContext(run_config=context.run_config),
# ensure we don't mutate the source environment dict
frozendict(evaluate_value_result.value.get("config")),
)
except Exception: # pylint: disable=W0703
return EvaluateValueResult.for_error(
create_bad_user_config_fn_error(
context,
solid_def.config_mapping.config_fn.__name__,
str(handle),
solid_def_name,
traceback.format_exc(),
)
)
if not mapped_config_value:
return EvaluateValueResult.empty()
# Perform basic validation on the mapped config value; remaining validation will happen via the
# evaluate_config call below
if not isinstance(mapped_config_value, dict):
return EvaluateValueResult.for_error(
create_bad_mapping_error(
context,
solid_def.config_mapping.config_fn.__name__,
solid_def_name,
str(handle),
mapped_config_value,
)
)
if "solids" in context.config_value:
return EvaluateValueResult.for_error(
create_bad_mapping_solids_key_error(context, solid_def_name, str(handle))
)
# We've validated the composite solid config; now validate the mapping fn overrides against the
# config schema subtree for child solids
evaluate_value_result = _evaluate_config(
context.for_mapped_composite_config(handle, mapped_config_value)
)
if evaluate_value_result.errors:
prefix = (
"Config override mapping function defined by solid {handle_name} from "
"definition {solid_def_name} {path_msg} caused error: ".format(
path_msg=get_friendly_path_msg(context.stack),
handle_name=str(handle),
solid_def_name=solid_def_name,
)
)
errors = [
e._replace(message=prefix + e.message) for e in evaluate_value_result.errors
]
return EvaluateValueResult.for_errors(errors)
return EvaluateValueResult.for_value(
dict_merge(context.config_value, {"solids": evaluate_value_result.value})
)
|
def _evaluate_composite_solid_config(context):
"""Evaluates config for a composite solid and returns CompositeSolidEvaluationResult"""
# Support config mapping override functions
if not is_solid_container_config(context.config_type):
return EvaluateValueResult.empty()
handle = context.config_type.handle
# If we've already seen this handle, skip -- we've already run the block of code below
if not handle or handle in context.seen_handles:
return EvaluateValueResult.empty()
solid_def = context.pipeline.get_solid(context.config_type.handle).definition
solid_def_name = context.pipeline.get_solid(handle).definition.name
has_mapping = (
isinstance(solid_def, CompositeSolidDefinition) and solid_def.has_config_mapping
)
# If there's no config mapping function provided for this composite solid, bail
if not has_mapping:
return EvaluateValueResult.empty()
# We first validate the provided environment config as normal against the composite solid config
# schema. This will perform a full traversal rooted at the SolidContainerConfigDict and thread
# errors up to the root
config_context = context.new_context_with_handle(handle)
evaluate_value_result = _evaluate_config(config_context)
if not evaluate_value_result.success:
return evaluate_value_result
try:
mapped_config_value = solid_def.config_mapping.config_fn(
ConfigMappingContext(run_config=context.run_config),
# ensure we don't mutate the source environment dict
frozendict(context.config_value.get("config")),
)
except Exception: # pylint: disable=W0703
return EvaluateValueResult.for_error(
create_bad_user_config_fn_error(
context,
solid_def.config_mapping.config_fn.__name__,
str(handle),
solid_def_name,
traceback.format_exc(),
)
)
if not mapped_config_value:
return EvaluateValueResult.empty()
# Perform basic validation on the mapped config value; remaining validation will happen via the
# evaluate_config call below
if not isinstance(mapped_config_value, dict):
return EvaluateValueResult.for_error(
create_bad_mapping_error(
context,
solid_def.config_mapping.config_fn.__name__,
solid_def_name,
str(handle),
mapped_config_value,
)
)
if "solids" in context.config_value:
return EvaluateValueResult.for_error(
create_bad_mapping_solids_key_error(context, solid_def_name, str(handle))
)
# We've validated the composite solid config; now validate the mapping fn overrides against the
# config schema subtree for child solids
evaluate_value_result = _evaluate_config(
context.for_mapped_composite_config(handle, mapped_config_value)
)
if evaluate_value_result.errors:
prefix = (
"Config override mapping function defined by solid {handle_name} from "
"definition {solid_def_name} {path_msg} caused error: ".format(
path_msg=get_friendly_path_msg(context.stack),
handle_name=str(handle),
solid_def_name=solid_def_name,
)
)
errors = [
e._replace(message=prefix + e.message) for e in evaluate_value_result.errors
]
return EvaluateValueResult.for_errors(errors)
return EvaluateValueResult.for_value(
dict_merge(context.config_value, {"solids": evaluate_value_result.value})
)
|
https://github.com/dagster-io/dagster/issues/1608
|
Exception occurred during execution of user config mapping function <lambda> defined by solid prefix_id from definition prefix_id at path root:solids:prefix_id:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dagster/core/types/evaluator/evaluation.py", line 252, in _evaluate_composite_solid_config
frozendict(context.config_value.get('config')),
File "/project/spendanalytics/nlp/normalize.py", line 17, in <lambda>
config_fn=lambda _, cfg: {'prefix_value': {'config': {'prefix': cfg['prefix']}}},
KeyError: 'prefix'
|
KeyError
|
def generate_pbx_build_file(self):
self.ofile.write("\n/* Begin PBXBuildFile section */\n")
templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
otempl = "%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n"
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line(
"%s /* %s.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = %s /* %s.framework */; };\n"
% (
self.native_frameworks[f],
f,
self.native_frameworks_fileref[f],
f,
)
)
for s in t.sources:
if isinstance(s, mesonlib.File):
s = os.path.join(s.subdir, s.fname)
if isinstance(s, str):
s = os.path.join(t.subdir, s)
idval = self.buildmap[s]
fullpath = os.path.join(self.environment.get_source_dir(), s)
fileref = self.filemap[s]
fullpath2 = fullpath
compiler_args = ""
self.write_line(
templ % (idval, fullpath, fileref, fullpath2, compiler_args)
)
for o in t.objects:
o = os.path.join(t.subdir, o)
idval = self.buildmap[o]
fileref = self.filemap[o]
fullpath = os.path.join(self.environment.get_source_dir(), o)
fullpath2 = fullpath
self.write_line(otempl % (idval, fullpath, fileref, fullpath2))
self.ofile.write("/* End PBXBuildFile section */\n")
|
def generate_pbx_build_file(self):
self.ofile.write("\n/* Begin PBXBuildFile section */\n")
templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
otempl = "%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n"
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line(
"%s /* %s.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = %s /* %s.framework */; };\n"
% (
self.native_frameworks[f],
f,
self.native_frameworks_fileref[f],
f,
)
)
for s in t.sources:
if isinstance(s, mesonlib.File):
s = s.fname
if isinstance(s, str):
s = os.path.join(t.subdir, s)
idval = self.buildmap[s]
fullpath = os.path.join(self.environment.get_source_dir(), s)
fileref = self.filemap[s]
fullpath2 = fullpath
compiler_args = ""
self.write_line(
templ % (idval, fullpath, fileref, fullpath2, compiler_args)
)
for o in t.objects:
o = os.path.join(t.subdir, o)
idval = self.buildmap[o]
fileref = self.filemap[o]
fullpath = os.path.join(self.environment.get_source_dir(), o)
fullpath2 = fullpath
self.write_line(otempl % (idval, fullpath, fileref, fullpath2))
self.ofile.write("/* End PBXBuildFile section */\n")
|
https://github.com/mesonbuild/meson/issues/589
|
Traceback (most recent call last):
File "/usr/local/Cellar/meson/0.31.0/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 254, in run
app.generate()
File "/usr/local/Cellar/meson/0.31.0/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 158, in generate
g.generate(intr)
File "/usr/local/Cellar/meson/0.31.0/lib/python3.5/site-packages/mesonbuild/backend/xcodebackend.py", line 88, in generate
self.generate_pbx_build_file()
File "/usr/local/Cellar/meson/0.31.0/lib/python3.5/site-packages/mesonbuild/backend/xcodebackend.py", line 234, in generate_pbx_build_file
idval = self.buildmap[s]
KeyError: 'tests/fileA.c'
|
KeyError
|
def __init__(
self,
name: str,
project: str,
suite: str,
fname: T.List[str],
is_cross_built: bool,
exe_wrapper: T.Optional[dependencies.ExternalProgram],
needs_exe_wrapper: bool,
is_parallel: bool,
cmd_args: T.List[str],
env: build.EnvironmentVariables,
should_fail: bool,
timeout: T.Optional[int],
workdir: T.Optional[str],
extra_paths: T.List[str],
protocol: TestProtocol,
priority: int,
cmd_is_built: bool,
depends: T.List[str],
version: str,
):
self.name = name
self.project_name = project
self.suite = suite
self.fname = fname
self.is_cross_built = is_cross_built
if exe_wrapper is not None:
assert isinstance(exe_wrapper, dependencies.ExternalProgram)
self.exe_runner = exe_wrapper
self.is_parallel = is_parallel
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.timeout = timeout
self.workdir = workdir
self.extra_paths = extra_paths
self.protocol = protocol
self.priority = priority
self.needs_exe_wrapper = needs_exe_wrapper
self.cmd_is_built = cmd_is_built
self.depends = depends
self.version = version
|
def __init__(
self,
name: str,
project: str,
suite: str,
fname: T.List[str],
is_cross_built: bool,
exe_wrapper: T.Optional[dependencies.ExternalProgram],
needs_exe_wrapper: bool,
is_parallel: bool,
cmd_args: T.List[str],
env: build.EnvironmentVariables,
should_fail: bool,
timeout: T.Optional[int],
workdir: T.Optional[str],
extra_paths: T.List[str],
protocol: TestProtocol,
priority: int,
cmd_is_built: bool,
depends: T.List[str],
):
self.name = name
self.project_name = project
self.suite = suite
self.fname = fname
self.is_cross_built = is_cross_built
if exe_wrapper is not None:
assert isinstance(exe_wrapper, dependencies.ExternalProgram)
self.exe_runner = exe_wrapper
self.is_parallel = is_parallel
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.timeout = timeout
self.workdir = workdir
self.extra_paths = extra_paths
self.protocol = protocol
self.priority = priority
self.needs_exe_wrapper = needs_exe_wrapper
self.cmd_is_built = cmd_is_built
self.depends = depends
|
https://github.com/mesonbuild/meson/issues/7613
|
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/mesonbuild/mesonmain.py", line 131, in run
return options.run_func(options)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1220, in run
return th.doit()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 975, in doit
self.run_tests(tests)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1130, in run_tests
self.drain_futures(futures)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1146, in drain_futures
self.process_test_result(result.result())
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 628, in run
cmd = self._get_cmd()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 612, in _get_cmd
elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
AttributeError: 'TestSerialisation' object has no attribute 'cmd_is_built'
|
AttributeError
|
def __init__(self, old_version: str, current_version: str) -> None:
super().__init__(
"Build directory has been generated with Meson version {}, "
"which is incompatible with the current version {}.".format(
old_version, current_version
)
)
self.old_version = old_version
self.current_version = current_version
|
def __init__(self, old_version: str, current_version: str) -> None:
super().__init__(
"Build directory has been generated with Meson version {}, "
"which is incompatible with current version {}.".format(
old_version, current_version
)
)
self.old_version = old_version
self.current_version = current_version
|
https://github.com/mesonbuild/meson/issues/7613
|
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/mesonbuild/mesonmain.py", line 131, in run
return options.run_func(options)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1220, in run
return th.doit()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 975, in doit
self.run_tests(tests)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1130, in run_tests
self.drain_futures(futures)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1146, in drain_futures
self.process_test_result(result.result())
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 628, in run
cmd = self._get_cmd()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 612, in _get_cmd
elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
AttributeError: 'TestSerialisation' object has no attribute 'cmd_is_built'
|
AttributeError
|
def major_versions_differ(v1: str, v2: str) -> bool:
return v1.split(".")[0:2] != v2.split(".")[0:2]
|
def major_versions_differ(v1, v2):
return v1.split(".")[0:2] != v2.split(".")[0:2]
|
https://github.com/mesonbuild/meson/issues/7613
|
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/mesonbuild/mesonmain.py", line 131, in run
return options.run_func(options)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1220, in run
return th.doit()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 975, in doit
self.run_tests(tests)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1130, in run_tests
self.drain_futures(futures)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 1146, in drain_futures
self.process_test_result(result.result())
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 628, in run
cmd = self._get_cmd()
File "/usr/lib/python3.8/site-packages/mesonbuild/mtest.py", line 612, in _get_cmd
elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
AttributeError: 'TestSerialisation' object has no attribute 'cmd_is_built'
|
AttributeError
|
def configure(self, extra_cmake_options: T.List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, ">=3.7", for_machine)
if not cmake_exe.found():
raise CMakeException("Unable to find CMake")
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = pkg_resources.resource_filename(
"mesonbuild", "cmake/data/preload.cmake"
)
# Prefere CMAKE_PROJECT_INCLUDE over CMAKE_TOOLCHAIN_FILE if possible,
# since CMAKE_PROJECT_INCLUDE was actually designed for code injection.
preload_var = "CMAKE_PROJECT_INCLUDE"
if version_compare(cmake_exe.version(), "<3.15"):
preload_var = "CMAKE_TOOLCHAIN_FILE"
generator = backend_generator_map[self.backend_name]
cmake_args = []
trace_args = self.trace.trace_args()
cmcmp_args = [
"-DCMAKE_POLICY_WARNING_{}=OFF".format(x) for x in disable_policy_warnings
]
pload_args = ["-D{}={}".format(preload_var, str(preload_file))]
if version_compare(cmake_exe.version(), ">=3.14"):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
self.linkers.add(comp.get_linker_id())
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ["-DCMAKE_{}_COMPILER={}".format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += [
"-DCMAKE_{}_COMPILER_LAUNCHER={}".format(cmake_lang, exelist[0]),
"-DCMAKE_{}_COMPILER={}".format(cmake_lang, exelist[1]),
]
if hasattr(comp, "get_linker_exelist") and comp.get_id() == "clang-cl":
cmake_args += ["-DCMAKE_LINKER={}".format(comp.get_linker_exelist()[0])]
cmake_args += ["-G", generator]
cmake_args += ["-DCMAKE_INSTALL_PREFIX={}".format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log(
"Configuring the build directory with",
mlog.bold("CMake"),
"version",
mlog.cyan(cmake_exe.version()),
)
mlog.log(mlog.bold("Running:"), " ".join(cmake_args))
mlog.log(mlog.bold(" - build directory: "), self.build_dir)
mlog.log(mlog.bold(" - source directory: "), self.src_dir)
mlog.log(mlog.bold(" - trace args: "), " ".join(trace_args))
mlog.log(mlog.bold(" - preload file: "), str(preload_file))
mlog.log(
mlog.bold(" - disabled policy warnings:"),
"[{}]".format(", ".join(disable_policy_warnings)),
)
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env["LC_ALL"] = "C"
final_args = cmake_args + trace_args + cmcmp_args + pload_args + [self.src_dir]
cmake_exe.set_exec_mode(
print_cmout=True, always_capture_stderr=self.trace.requires_stderr()
)
rc, _, self.raw_trace = cmake_exe.call(
final_args, self.build_dir, env=os_env, disable_cache=True
)
mlog.log()
h = mlog.green("SUCCEEDED") if rc == 0 else mlog.red("FAILED")
mlog.log("CMake configuration:", h)
if rc != 0:
raise CMakeException("Failed to configure the CMake subproject")
|
def configure(self, extra_cmake_options: T.List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, ">=3.7", for_machine)
if not cmake_exe.found():
raise CMakeException("Unable to find CMake")
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = Path(__file__).resolve().parent / "data" / "preload.cmake"
# Prefere CMAKE_PROJECT_INCLUDE over CMAKE_TOOLCHAIN_FILE if possible,
# since CMAKE_PROJECT_INCLUDE was actually designed for code injection.
preload_var = "CMAKE_PROJECT_INCLUDE"
if version_compare(cmake_exe.version(), "<3.15"):
preload_var = "CMAKE_TOOLCHAIN_FILE"
generator = backend_generator_map[self.backend_name]
cmake_args = []
trace_args = self.trace.trace_args()
cmcmp_args = [
"-DCMAKE_POLICY_WARNING_{}=OFF".format(x) for x in disable_policy_warnings
]
pload_args = ["-D{}={}".format(preload_var, str(preload_file))]
if version_compare(cmake_exe.version(), ">=3.14"):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
self.linkers.add(comp.get_linker_id())
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ["-DCMAKE_{}_COMPILER={}".format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += [
"-DCMAKE_{}_COMPILER_LAUNCHER={}".format(cmake_lang, exelist[0]),
"-DCMAKE_{}_COMPILER={}".format(cmake_lang, exelist[1]),
]
if hasattr(comp, "get_linker_exelist") and comp.get_id() == "clang-cl":
cmake_args += ["-DCMAKE_LINKER={}".format(comp.get_linker_exelist()[0])]
cmake_args += ["-G", generator]
cmake_args += ["-DCMAKE_INSTALL_PREFIX={}".format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log(
"Configuring the build directory with",
mlog.bold("CMake"),
"version",
mlog.cyan(cmake_exe.version()),
)
mlog.log(mlog.bold("Running:"), " ".join(cmake_args))
mlog.log(mlog.bold(" - build directory: "), self.build_dir)
mlog.log(mlog.bold(" - source directory: "), self.src_dir)
mlog.log(mlog.bold(" - trace args: "), " ".join(trace_args))
mlog.log(mlog.bold(" - preload file: "), str(preload_file))
mlog.log(
mlog.bold(" - disabled policy warnings:"),
"[{}]".format(", ".join(disable_policy_warnings)),
)
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env["LC_ALL"] = "C"
final_args = cmake_args + trace_args + cmcmp_args + pload_args + [self.src_dir]
cmake_exe.set_exec_mode(
print_cmout=True, always_capture_stderr=self.trace.requires_stderr()
)
rc, _, self.raw_trace = cmake_exe.call(
final_args, self.build_dir, env=os_env, disable_cache=True
)
mlog.log()
h = mlog.green("SUCCEEDED") if rc == 0 else mlog.red("FAILED")
mlog.log("CMake configuration:", h)
if rc != 0:
raise CMakeException("Failed to configure the CMake subproject")
|
https://github.com/mesonbuild/meson/issues/6801
|
C:\Users\icherepa\Desktop\bgpscanner> meson --buildtype=release ..
The Meson build system
Version: 0.53.2
Source dir: C:\Users\icherepa\Desktop\bgpscanner
Build dir: C:\Users\icherepa\Desktop
Build type: native build
Project name: bgpscanner
Project version: 2.31
C compiler for the host machine: gcc (gcc 6.3.0 "gcc (MinGW.org GCC-6.3.0-1) 6.3.0")
C linker for the host machine: gcc ld.bfd 2.28
Host machine cpu family: x86
Host machine cpu: x86
Run-time dependency threads found: YES
Found pkg-config: C:\MinGW\bin\pkg-config.EXE (0.23)
Found CMake: C:\Program Files\CMake\bin\cmake.EXE (3.17.0-rc3)
Traceback (most recent call last):
File "mesonbuild\mesonmain.py", line 129, in run
File "mesonbuild\msetup.py", line 245, in run
File "mesonbuild\msetup.py", line 159, in generate
File "mesonbuild\msetup.py", line 192, in _generate
File "mesonbuild\interpreter.py", line 4167, in run
File "mesonbuild\interpreterbase.py", line 412, in run
File "mesonbuild\interpreterbase.py", line 436, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 430, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 443, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 1064, in assignment
File "mesonbuild\interpreterbase.py", line 441, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 788, in function_call
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
[Previous line repeated 2 more times]
File "mesonbuild\interpreterbase.py", line 155, in wrapped
File "mesonbuild\interpreterbase.py", line 174, in wrapped
File "mesonbuild\interpreter.py", line 3236, in func_dependency
File "mesonbuild\interpreter.py", line 3283, in dependency_impl
File "mesonbuild\dependencies\base.py", line 2234, in find_external_dependency
File "mesonbuild\dependencies\base.py", line 1104, in __init__
File "mesonbuild\dependencies\base.py", line 1158, in _get_cmake_info
File "mesonbuild\dependencies\base.py", line 1533, in _call_cmake
File "mesonbuild\dependencies\base.py", line 1507, in _setup_cmake_dir
File "pathlib.py", line 1229, in read_text
File "pathlib.py", line 1215, in open
File "pathlib.py", line 1071, in _opener
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Program Files\\Meson\\mesonbuild\\dependencies\\data\\CMakePathInfo.txt'
|
FileNotFoundError
|
def pretend_to_be_meson(self) -> CodeBlockNode:
if not self.project_name:
raise CMakeException("CMakeInterpreter was not analysed")
def token(tid: str = "string", val="") -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {
id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None
}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {
id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None
}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function("project", [self.project_name] + self.languages)]
# Add the run script for custom commands
run_script = pkg_resources.resource_filename("mesonbuild", "cmake/data/run_ctgt.py")
run_script_var = "ctgt_run_script"
root_cb.lines += [
assign(
run_script_var, function("find_program", [[run_script]], {"required": True})
)
]
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(
tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference],
) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert tgt_name is not None and tgt_name in processed
res_var = processed[tgt_name]["tgt"]
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException("Cycle in CMake inputs/dependencies detected")
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
objec_libs = [] # type: T.List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert isinstance(i, ConverterTarget)
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert isinstance(i, ConverterTarget)
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = "{}_inc".format(tgt.name)
dir_var = "{}_dir".format(tgt.name)
sys_var = "{}_sys".format(tgt.name)
src_var = "{}_src".format(tgt.name)
dep_var = "{}_dep".format(tgt.name)
tgt_var = tgt.name
# Generate target kwargs
tgt_kwargs = {
"build_by_default": tgt.install,
"link_args": tgt.link_flags + tgt.link_libraries,
"link_with": link_with,
"include_directories": id_node(inc_var),
"install": tgt.install,
"install_dir": tgt.install_dir,
"override_options": tgt.override_options,
"objects": [method(x, "extract_all_objects") for x in objec_libs],
}
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs["{}_args".format(key)] = val
# Handle -fPCI, etc
if tgt_func == "executable":
tgt_kwargs["pie"] = tgt.pie
elif tgt_func == "static_library":
tgt_kwargs["pic"] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
"link_args": tgt.link_flags + tgt.link_libraries,
"link_with": id_node(tgt_var),
"compile_args": tgt.public_compile_opts,
"include_directories": id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function("include_directories", tgt.includes))
sys_node = assign(
sys_var,
function("include_directories", tgt.sys_includes, {"is_system": True}),
)
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == "header_only":
del dep_kwargs["link_with"]
dep_node = assign(
dep_var, function("declare_dependency", kwargs=dep_kwargs)
)
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function("files", sources))
tgt_node = assign(
tgt_var,
function(
tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs
),
)
node_list += [src_node, tgt_node]
if tgt_func in ["static_library", "shared_library"]:
dep_node = assign(
dep_var, function("declare_dependency", kwargs=dep_kwargs)
)
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {
"inc": inc_var,
"src": src_var,
"dep": dep_var,
"tgt": tgt_var,
"func": tgt_func,
}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
# CMake allows to specify multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Any) -> T.Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += [id_node(run_script_var)]
command += ["-o", "@OUTPUT@"]
if tgt.original_outputs:
command += ["-O"] + tgt.original_outputs
command += ["-d", tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [";;;"]
tgt_kwargs = {
"input": [resolve_source(x) for x in tgt.inputs],
"output": tgt.outputs,
"command": command,
"depends": [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [
assign(tgt_var, function("custom_target", [tgt.name], tgt_kwargs))
]
processed[tgt.name] = {
"inc": None,
"src": None,
"dep": None,
"tgt": tgt_var,
"func": "custom_target",
}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
|
def pretend_to_be_meson(self) -> CodeBlockNode:
if not self.project_name:
raise CMakeException("CMakeInterpreter was not analysed")
def token(tid: str = "string", val="") -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {
id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None
}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {
id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None
}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function("project", [self.project_name] + self.languages)]
# Add the run script for custom commands
run_script = "{}/data/run_ctgt.py".format(
os.path.dirname(os.path.realpath(__file__))
)
run_script_var = "ctgt_run_script"
root_cb.lines += [
assign(
run_script_var, function("find_program", [[run_script]], {"required": True})
)
]
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(
tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference],
) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert tgt_name is not None and tgt_name in processed
res_var = processed[tgt_name]["tgt"]
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException("Cycle in CMake inputs/dependencies detected")
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
objec_libs = [] # type: T.List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert isinstance(i, ConverterTarget)
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert isinstance(i, ConverterTarget)
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = "{}_inc".format(tgt.name)
dir_var = "{}_dir".format(tgt.name)
sys_var = "{}_sys".format(tgt.name)
src_var = "{}_src".format(tgt.name)
dep_var = "{}_dep".format(tgt.name)
tgt_var = tgt.name
# Generate target kwargs
tgt_kwargs = {
"build_by_default": tgt.install,
"link_args": tgt.link_flags + tgt.link_libraries,
"link_with": link_with,
"include_directories": id_node(inc_var),
"install": tgt.install,
"install_dir": tgt.install_dir,
"override_options": tgt.override_options,
"objects": [method(x, "extract_all_objects") for x in objec_libs],
}
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs["{}_args".format(key)] = val
# Handle -fPCI, etc
if tgt_func == "executable":
tgt_kwargs["pie"] = tgt.pie
elif tgt_func == "static_library":
tgt_kwargs["pic"] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
"link_args": tgt.link_flags + tgt.link_libraries,
"link_with": id_node(tgt_var),
"compile_args": tgt.public_compile_opts,
"include_directories": id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function("include_directories", tgt.includes))
sys_node = assign(
sys_var,
function("include_directories", tgt.sys_includes, {"is_system": True}),
)
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == "header_only":
del dep_kwargs["link_with"]
dep_node = assign(
dep_var, function("declare_dependency", kwargs=dep_kwargs)
)
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function("files", sources))
tgt_node = assign(
tgt_var,
function(
tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs
),
)
node_list += [src_node, tgt_node]
if tgt_func in ["static_library", "shared_library"]:
dep_node = assign(
dep_var, function("declare_dependency", kwargs=dep_kwargs)
)
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {
"inc": inc_var,
"src": src_var,
"dep": dep_var,
"tgt": tgt_var,
"func": tgt_func,
}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
# CMake allows to specify multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Any) -> T.Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += [id_node(run_script_var)]
command += ["-o", "@OUTPUT@"]
if tgt.original_outputs:
command += ["-O"] + tgt.original_outputs
command += ["-d", tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [";;;"]
tgt_kwargs = {
"input": [resolve_source(x) for x in tgt.inputs],
"output": tgt.outputs,
"command": command,
"depends": [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [
assign(tgt_var, function("custom_target", [tgt.name], tgt_kwargs))
]
processed[tgt.name] = {
"inc": None,
"src": None,
"dep": None,
"tgt": tgt_var,
"func": "custom_target",
}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
|
https://github.com/mesonbuild/meson/issues/6801
|
C:\Users\icherepa\Desktop\bgpscanner> meson --buildtype=release ..
The Meson build system
Version: 0.53.2
Source dir: C:\Users\icherepa\Desktop\bgpscanner
Build dir: C:\Users\icherepa\Desktop
Build type: native build
Project name: bgpscanner
Project version: 2.31
C compiler for the host machine: gcc (gcc 6.3.0 "gcc (MinGW.org GCC-6.3.0-1) 6.3.0")
C linker for the host machine: gcc ld.bfd 2.28
Host machine cpu family: x86
Host machine cpu: x86
Run-time dependency threads found: YES
Found pkg-config: C:\MinGW\bin\pkg-config.EXE (0.23)
Found CMake: C:\Program Files\CMake\bin\cmake.EXE (3.17.0-rc3)
Traceback (most recent call last):
File "mesonbuild\mesonmain.py", line 129, in run
File "mesonbuild\msetup.py", line 245, in run
File "mesonbuild\msetup.py", line 159, in generate
File "mesonbuild\msetup.py", line 192, in _generate
File "mesonbuild\interpreter.py", line 4167, in run
File "mesonbuild\interpreterbase.py", line 412, in run
File "mesonbuild\interpreterbase.py", line 436, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 430, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 443, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 1064, in assignment
File "mesonbuild\interpreterbase.py", line 441, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 788, in function_call
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
[Previous line repeated 2 more times]
File "mesonbuild\interpreterbase.py", line 155, in wrapped
File "mesonbuild\interpreterbase.py", line 174, in wrapped
File "mesonbuild\interpreter.py", line 3236, in func_dependency
File "mesonbuild\interpreter.py", line 3283, in dependency_impl
File "mesonbuild\dependencies\base.py", line 2234, in find_external_dependency
File "mesonbuild\dependencies\base.py", line 1104, in __init__
File "mesonbuild\dependencies\base.py", line 1158, in _get_cmake_info
File "mesonbuild\dependencies\base.py", line 1533, in _call_cmake
File "mesonbuild\dependencies\base.py", line 1507, in _setup_cmake_dir
File "pathlib.py", line 1229, in read_text
File "pathlib.py", line 1215, in open
File "pathlib.py", line 1071, in _opener
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Program Files\\Meson\\mesonbuild\\dependencies\\data\\CMakePathInfo.txt'
|
FileNotFoundError
|
def _setup_cmake_dir(self, cmake_file: str) -> str:
# Setup the CMake build environment and return the "build" directory
build_dir = self._get_build_dir()
# Insert language parameters into the CMakeLists.txt and write new CMakeLists.txt
# Per the warning in pkg_resources, this is *not* a path and os.path and Pathlib are *not* safe to use here.
cmake_txt = pkg_resources.resource_string(
"mesonbuild", "dependencies/data/" + cmake_file
).decode()
# In general, some Fortran CMake find_package() also require C language enabled,
# even if nothing from C is directly used. An easy Fortran example that fails
# without C language is
# find_package(Threads)
# To make this general to
# any other language that might need this, we use a list for all
# languages and expand in the cmake Project(... LANGUAGES ...) statement.
from ..cmake import language_map
cmake_language = [language_map[x] for x in self.language_list if x in language_map]
if not cmake_language:
cmake_language += ["NONE"]
cmake_txt = (
"""
cmake_minimum_required(VERSION ${{CMAKE_VERSION}})
project(MesonTemp LANGUAGES {})
""".format(" ".join(cmake_language))
+ cmake_txt
)
cm_file = Path(build_dir) / "CMakeLists.txt"
cm_file.write_text(cmake_txt)
mlog.cmd_ci_include(cm_file.absolute().as_posix())
return build_dir
|
def _setup_cmake_dir(self, cmake_file: str) -> str:
# Setup the CMake build environment and return the "build" directory
build_dir = self._get_build_dir()
# Insert language parameters into the CMakeLists.txt and write new CMakeLists.txt
src_cmake = Path(__file__).parent / "data" / cmake_file
cmake_txt = src_cmake.read_text()
# In general, some Fortran CMake find_package() also require C language enabled,
# even if nothing from C is directly used. An easy Fortran example that fails
# without C language is
# find_package(Threads)
# To make this general to
# any other language that might need this, we use a list for all
# languages and expand in the cmake Project(... LANGUAGES ...) statement.
from ..cmake import language_map
cmake_language = [language_map[x] for x in self.language_list if x in language_map]
if not cmake_language:
cmake_language += ["NONE"]
cmake_txt = (
"""
cmake_minimum_required(VERSION ${{CMAKE_VERSION}})
project(MesonTemp LANGUAGES {})
""".format(" ".join(cmake_language))
+ cmake_txt
)
cm_file = Path(build_dir) / "CMakeLists.txt"
cm_file.write_text(cmake_txt)
mlog.cmd_ci_include(cm_file.absolute().as_posix())
return build_dir
|
https://github.com/mesonbuild/meson/issues/6801
|
C:\Users\icherepa\Desktop\bgpscanner> meson --buildtype=release ..
The Meson build system
Version: 0.53.2
Source dir: C:\Users\icherepa\Desktop\bgpscanner
Build dir: C:\Users\icherepa\Desktop
Build type: native build
Project name: bgpscanner
Project version: 2.31
C compiler for the host machine: gcc (gcc 6.3.0 "gcc (MinGW.org GCC-6.3.0-1) 6.3.0")
C linker for the host machine: gcc ld.bfd 2.28
Host machine cpu family: x86
Host machine cpu: x86
Run-time dependency threads found: YES
Found pkg-config: C:\MinGW\bin\pkg-config.EXE (0.23)
Found CMake: C:\Program Files\CMake\bin\cmake.EXE (3.17.0-rc3)
Traceback (most recent call last):
File "mesonbuild\mesonmain.py", line 129, in run
File "mesonbuild\msetup.py", line 245, in run
File "mesonbuild\msetup.py", line 159, in generate
File "mesonbuild\msetup.py", line 192, in _generate
File "mesonbuild\interpreter.py", line 4167, in run
File "mesonbuild\interpreterbase.py", line 412, in run
File "mesonbuild\interpreterbase.py", line 436, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 430, in evaluate_codeblock
File "mesonbuild\interpreterbase.py", line 443, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 1064, in assignment
File "mesonbuild\interpreterbase.py", line 441, in evaluate_statement
File "mesonbuild\interpreterbase.py", line 788, in function_call
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
File "mesonbuild\interpreterbase.py", line 285, in wrapped
[Previous line repeated 2 more times]
File "mesonbuild\interpreterbase.py", line 155, in wrapped
File "mesonbuild\interpreterbase.py", line 174, in wrapped
File "mesonbuild\interpreter.py", line 3236, in func_dependency
File "mesonbuild\interpreter.py", line 3283, in dependency_impl
File "mesonbuild\dependencies\base.py", line 2234, in find_external_dependency
File "mesonbuild\dependencies\base.py", line 1104, in __init__
File "mesonbuild\dependencies\base.py", line 1158, in _get_cmake_info
File "mesonbuild\dependencies\base.py", line 1533, in _call_cmake
File "mesonbuild\dependencies\base.py", line 1507, in _setup_cmake_dir
File "pathlib.py", line 1229, in read_text
File "pathlib.py", line 1215, in open
File "pathlib.py", line 1071, in _opener
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Program Files\\Meson\\mesonbuild\\dependencies\\data\\CMakePathInfo.txt'
|
FileNotFoundError
|
def sanitize_dir_option_value(self, prefix: str, option: str, value: Any) -> Any:
"""
If the option is an installation directory option and the value is an
absolute path, check that it resides within prefix and return the value
as a path relative to the prefix.
This way everyone can do f.ex, get_option('libdir') and be sure to get
the library directory relative to prefix.
.as_posix() keeps the posix-like file seperators Meson uses.
"""
try:
value = PurePath(value)
except TypeError:
return value
if (
option.endswith("dir")
and value.is_absolute()
and option not in builtin_dir_noprefix_options
):
# Value must be a subdir of the prefix
# commonpath will always return a path in the native format, so we
# must use pathlib.PurePath to do the same conversion before
# comparing.
msg = (
"The value of the {!r} option is {!r} which must be a "
"subdir of the prefix {!r}.\nNote that if you pass a "
"relative path, it is assumed to be a subdir of prefix."
)
# os.path.commonpath doesn't understand case-insensitive filesystems,
# but PurePath().relative_to() does.
try:
value = value.relative_to(prefix)
except ValueError:
raise MesonException(msg.format(option, value, prefix))
if ".." in str(value):
raise MesonException(msg.format(option, value, prefix))
return value.as_posix()
|
def sanitize_dir_option_value(self, prefix, option, value):
"""
If the option is an installation directory option and the value is an
absolute path, check that it resides within prefix and return the value
as a path relative to the prefix.
This way everyone can do f.ex, get_option('libdir') and be sure to get
the library directory relative to prefix.
"""
if (
option.endswith("dir")
and os.path.isabs(value)
and option not in builtin_dir_noprefix_options
):
# Value must be a subdir of the prefix
# commonpath will always return a path in the native format, so we
# must use pathlib.PurePath to do the same conversion before
# comparing.
if os.path.commonpath([value, prefix]) != str(PurePath(prefix)):
m = (
"The value of the {!r} option is {!r} which must be a "
"subdir of the prefix {!r}.\nNote that if you pass a "
"relative path, it is assumed to be a subdir of prefix."
)
raise MesonException(m.format(option, value, prefix))
# Convert path to be relative to prefix
skip = len(prefix) + 1
value = value[skip:]
return value
|
https://github.com/mesonbuild/meson/issues/6395
|
$ meson setup build --libdir=E:/Documents/Coding/C/lib
The Meson build system
Version: 0.52.1
Source dir: E:\Documents\Coding\C\meson_test
Build dir: E:\Documents\Coding\C\meson_test\build
Build type: native build
Traceback (most recent call last):
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\mesonmain.py", line 129, in run
return options.run_func(options)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\msetup.py", line 245, in run
app.generate()
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\msetup.py", line 159, in generate
self._generate(env)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\msetup.py", line 176, in _generate
intr = interpreter.Interpreter(b)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreter.py", line 2110, in __init__
self.parse_project()
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 397, in parse_project
self.evaluate_codeblock(self.ast, end=1)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 436, in evaluate_codeblock
raise e
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 430, in evaluate_codeblock
self.evaluate_statement(cur)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 441, in evaluate_statement
return self.function_call(cur)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 776, in function_call
return func(node, posargs, kwargs)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 143, in wrapped
return f(*wrapped_args, **wrapped_kwargs)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreterbase.py", line 174, in wrapped
return f(*wrapped_args, **wrapped_kwargs)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\interpreter.py", line 2723, in func_project
self.coredata.set_default_options(default_options, self.subproject, self.environment)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\coredata.py", line 742, in set_default_options
self.set_options(options, subproject=subproject)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\coredata.py", line 674, in set_options
if self._try_set_builtin_option(k, v):
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\coredata.py", line 531, in _try_set_builtin_option
value = self.sanitize_dir_option_value(prefix, optname, value)
File "C:\Users\<user>\AppData\Roaming\Python\Python38\site-packages\mesonbuild\coredata.py", line 479, in sanitize_dir_option_value
if os.path.commonpath([value, prefix]) != str(PurePath(prefix)):
File "c:\users\liz\appdata\local\programs\python\python38\lib\ntpath.py", line 763, in commonpath
raise ValueError("Paths don't have the same drive")
ValueError: Paths don't have the same drive
|
ValueError
|
def generate_single_compile(
self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]
):
"""
Compiles C/C++, ObjC/ObjC++, Fortran, and D sources
"""
if isinstance(src, str) and src.endswith(".h"):
raise AssertionError("BUG: sources should not contain headers {!r}".format(src))
if isinstance(src, RawFilename) and src.fname.endswith(".h"):
raise AssertionError(
"BUG: sources should not contain headers {!r}".format(src.fname)
)
extra_orderdeps = []
compiler = get_compiler_for_source(target.compilers.values(), src)
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
commands = CompilerArgs(compiler)
# Add compiler args for compiling this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
commands += compilers.get_base_compile_args(
self.environment.coredata.base_options, compiler
)
# The code generated by valac is usually crap and has tons of unused
# variables and such, so disable warnings for Vala C sources.
no_warn_args = is_generated == "vala"
# Add compiler args and include paths from several sources; defaults,
# build options, external dependencies, etc.
commands += self.generate_basic_compiler_args(target, compiler, no_warn_args)
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
#
# Include dirs from internal deps should override include dirs from
# external deps.
for i in target.get_include_dirs():
basedir = i.get_curdir()
for d in i.get_incdirs():
# Avoid superfluous '/.' at the end of paths when d is '.'
if d not in ("", "."):
expdir = os.path.join(basedir, d)
else:
expdir = basedir
srctreedir = os.path.join(self.build_to_src, expdir)
# Add source subdir first so that the build subdir overrides it
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += sargs
# There may be include dirs where a build directory has not been
# created for some source dir. For example if someone does this:
#
# inc = include_directories('foo/bar/baz')
#
# But never subdir()s into the actual dir.
if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
bargs = compiler.get_include_args(expdir, i.is_system)
else:
bargs = []
commands += bargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
# Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
# near the end since these are supposed to override everything else.
commands += self.escape_extra_args(
compiler, target.get_extra_args(compiler.get_language())
)
# Add source dir and build dir. Project-specific and target-specific
# include paths must override per-target compile args, include paths
# from external dependencies, internal dependencies, and from
# per-target `include_directories:`
#
# We prefer headers in the build dir and the custom target dir over the
# source dir since, for instance, the user might have an
# srcdir == builddir Autotools build in their source tree. Many
# projects that are moving to Meson have both Meson and Autotools in
# parallel as part of the transition.
commands += self.get_source_dir_include_args(target, compiler)
commands += self.get_custom_target_dir_include_args(target, compiler)
commands += self.get_build_dir_include_args(target, compiler)
# Finally add the private dir for the target to the include path. This
# must override everything else and must be the final path added.
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
# FIXME: This file handling is atrocious and broken. We need to
# replace it with File objects used consistently everywhere.
if isinstance(src, RawFilename):
rel_src = src.fname
if os.path.isabs(src.fname):
abs_src = src.fname
else:
abs_src = os.path.join(self.environment.get_build_dir(), src.fname)
elif is_generated:
raise AssertionError(
"BUG: broken generated source file handling for {!r}".format(src)
)
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments("Invalid source type: {!r}".format(src))
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, (RawFilename, File)):
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace("/", "_").replace("\\", "_")
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += "." + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
# Add MSVC debug file generation compile flags: /Fd /FS
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
# PCH handling
if self.environment.coredata.base_options.get("b_pch", False):
commands += self.get_pch_include_args(compiler, target)
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if len(pchlist) == 0:
pch_dep = []
elif compiler.id == "intel":
pch_dep = []
else:
arr = []
i = os.path.join(
self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0])
)
arr.append(i)
pch_dep = arr
crstr = ""
if target.is_cross:
crstr = "_CROSS"
compiler_name = "%s%s_COMPILER" % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == "fortran":
# Can't read source file to scan for deps if it's generated later
# at build-time. Skip scanning for deps, and just set the module
# outdir argument instead.
# https://github.com/mesonbuild/meson/issues/1348
if not is_generated:
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(
self.get_target_private_dir(target),
compiler.module_name_to_filename(modname),
)
if srcfile == src:
depelem = NinjaBuildElement(
self.all_outputs, modfile, "FORTRAN_DEP_HACK", rel_obj
)
depelem.write(outfile)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
element.add_orderdep(extra_orderdeps)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item("DEPFILE", dep_file)
element.add_item("ARGS", commands)
element.write(outfile)
return rel_obj
|
def generate_single_compile(
self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]
):
"""
Compiles C/C++, ObjC/ObjC++, Fortran, and D sources
"""
if isinstance(src, str) and src.endswith(".h"):
raise AssertionError("BUG: sources should not contain headers {!r}".format(src))
if isinstance(src, RawFilename) and src.fname.endswith(".h"):
raise AssertionError(
"BUG: sources should not contain headers {!r}".format(src.fname)
)
extra_orderdeps = []
compiler = get_compiler_for_source(target.compilers.values(), src)
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
commands = CompilerArgs(compiler)
# Add compiler args for compiling this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
commands += compilers.get_base_compile_args(
self.environment.coredata.base_options, compiler
)
# The code generated by valac is usually crap and has tons of unused
# variables and such, so disable warnings for Vala C sources.
no_warn_args = is_generated == "vala"
# Add compiler args and include paths from several sources; defaults,
# build options, external dependencies, etc.
commands += self.generate_basic_compiler_args(target, compiler, no_warn_args)
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
#
# Include dirs from internal deps should override include dirs from
# external deps.
for i in target.get_include_dirs():
basedir = i.get_curdir()
for d in i.get_incdirs():
# Avoid superfluous '/.' at the end of paths when d is '.'
if d not in ("", "."):
expdir = os.path.join(basedir, d)
else:
expdir = basedir
srctreedir = os.path.join(self.build_to_src, expdir)
# Add source subdir first so that the build subdir overrides it
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += sargs
# There may be include dirs where a build directory has not been
# created for some source dir. For example if someone does this:
#
# inc = include_directories('foo/bar/baz')
#
# But never subdir()s into the actual dir.
if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
bargs = compiler.get_include_args(expdir, i.is_system)
else:
bargs = []
commands += bargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
# Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
# near the end since these are supposed to override everything else.
commands += self.escape_extra_args(
compiler, target.get_extra_args(compiler.get_language())
)
# Add source dir and build dir. Project-specific and target-specific
# include paths must override per-target compile args, include paths
# from external dependencies, internal dependencies, and from
# per-target `include_directories:`
#
# We prefer headers in the build dir and the custom target dir over the
# source dir since, for instance, the user might have an
# srcdir == builddir Autotools build in their source tree. Many
# projects that are moving to Meson have both Meson and Autotools in
# parallel as part of the transition.
commands += self.get_source_dir_include_args(target, compiler)
commands += self.get_custom_target_dir_include_args(target, compiler)
commands += self.get_build_dir_include_args(target, compiler)
# Finally add the private dir for the target to the include path. This
# must override everything else and must be the final path added.
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
# FIXME: This file handling is atrocious and broken. We need to
# replace it with File objects used consistently everywhere.
if isinstance(src, RawFilename):
rel_src = src.fname
if os.path.isabs(src.fname):
abs_src = src.fname
else:
abs_src = os.path.join(self.environment.get_build_dir(), src.fname)
elif is_generated:
raise AssertionError(
"BUG: broken generated source file handling for {!r}".format(src)
)
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments("Invalid source type: {!r}".format(src))
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, (RawFilename, File)):
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace("/", "_").replace("\\", "_")
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += "." + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
# Add MSVC debug file generation compile flags: /Fd /FS
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
# PCH handling
if self.environment.coredata.base_options.get("b_pch", False):
commands += self.get_pch_include_args(compiler, target)
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if len(pchlist) == 0:
pch_dep = []
elif compiler.id == "intel":
pch_dep = []
else:
arr = []
i = os.path.join(
self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0])
)
arr.append(i)
pch_dep = arr
crstr = ""
if target.is_cross:
crstr = "_CROSS"
compiler_name = "%s%s_COMPILER" % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == "fortran":
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(
self.get_target_private_dir(target),
compiler.module_name_to_filename(modname),
)
if srcfile == src:
depelem = NinjaBuildElement(
self.all_outputs, modfile, "FORTRAN_DEP_HACK", rel_obj
)
depelem.write(outfile)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
element.add_orderdep(extra_orderdeps)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item("DEPFILE", dep_file)
element.add_item("ARGS", commands)
element.write(outfile)
return rel_obj
|
https://github.com/mesonbuild/meson/issues/1348
|
The Meson build system
Version: 0.38.0
Source dir: /home/Adama-docs/Adam/MyDocs/praca/IMGW/dev/meson_bug
Build dir: /home/Adama-docs/Adam/MyDocs/praca/IMGW/dev/meson_bug/build
Build type: native build
Project name: simple fortran
Native fortran compiler: gfortran (gcc 5.4.1)
Build machine cpu family: x86_64
Build machine cpu: x86_64
Program pp_ser.py found: YES (/usr/bin/env python /home/adam/meson_bug/pp_ser.py)
Build targets in project: 1
Traceback (most recent call last):
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 286, in run
app.generate()
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 170, in generate
g.generate(intr)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 191, in generate
self.generate_target(t, outfile)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 386, in generate_target
header_deps=header_deps)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 1916, in generate_single_compile
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 1632, in get_fortran_deps
with open(src) as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/adam/meson_bug/build/dwarf@exe/src1.f90'
|
FileNotFoundError
|
def scan_fortran_module_outputs(self, target):
compiler = None
for lang, c in self.build.compilers.items():
if lang == "fortran":
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for Fortran sources generated by
# custom_target() and generator() as those are run after
# the configuration (configure_file() is OK)
if not compiler.can_compile(s):
continue
filename = s.absolute_path(
self.environment.get_source_dir(), self.environment.get_build_dir()
)
with open(filename) as f:
for line in f:
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == "procedure":
# MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments(
"Namespace collision: module %s defined in "
"two files %s and %s." % (modname, module_files[modname], s)
)
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
|
def scan_fortran_module_outputs(self, target):
compiler = None
for lang, c in self.build.compilers.items():
if lang == "fortran":
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for generated Fortran sources,
# but those are really rare. I hope.
if not compiler.can_compile(s):
continue
filename = os.path.join(self.environment.get_source_dir(), s.subdir, s.fname)
with open(filename) as f:
for line in f:
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == "procedure":
# MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments(
"Namespace collision: module %s defined in "
"two files %s and %s." % (modname, module_files[modname], s)
)
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
|
https://github.com/mesonbuild/meson/issues/1348
|
The Meson build system
Version: 0.38.0
Source dir: /home/Adama-docs/Adam/MyDocs/praca/IMGW/dev/meson_bug
Build dir: /home/Adama-docs/Adam/MyDocs/praca/IMGW/dev/meson_bug/build
Build type: native build
Project name: simple fortran
Native fortran compiler: gfortran (gcc 5.4.1)
Build machine cpu family: x86_64
Build machine cpu: x86_64
Program pp_ser.py found: YES (/usr/bin/env python /home/adam/meson_bug/pp_ser.py)
Build targets in project: 1
Traceback (most recent call last):
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 286, in run
app.generate()
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 170, in generate
g.generate(intr)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 191, in generate
self.generate_target(t, outfile)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 386, in generate_target
header_deps=header_deps)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 1916, in generate_single_compile
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
File "/home/adam/.local/lib/python3.5/site-packages/mesonbuild/backend/ninjabackend.py", line 1632, in get_fortran_deps
with open(src) as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/adam/meson_bug/build/dwarf@exe/src1.f90'
|
FileNotFoundError
|
def __init__(self, subdir, lineno, colno, condition, trueblock, falseblock):
self.subdir = subdir
self.lineno = lineno
self.colno = colno
self.condition = condition
self.trueblock = trueblock
self.falseblock = falseblock
|
def __init__(self, lineno, colno, condition, trueblock, falseblock):
self.lineno = lineno
self.colno = colno
self.condition = condition
self.trueblock = trueblock
self.falseblock = falseblock
|
https://github.com/mesonbuild/meson/issues/2404
|
Traceback (most recent call last):
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 353, in run
app.generate()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 148, in generate
self._generate(env)
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mesonmain.py", line 188, in _generate
intr = interpreter.Interpreter(b, g)
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/interpreter.py", line 1327, in __init__
self.load_root_meson_file()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/interpreterbase.py", line 124, in load_root_meson_file
self.ast = mparser.Parser(code, self.subdir).parse()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 443, in parse
block = self.codeblock()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 679, in codeblock
curline = self.line()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 673, in line
return self.statement()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 448, in statement
return self.e1()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 451, in e1
left = self.e2()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 476, in e2
left = self.e3()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 485, in e3
left = self.e4()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 494, in e4
left = self.e5()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 501, in e5
return self.e5add()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 504, in e5add
left = self.e5sub()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 510, in e5sub
left = self.e5mod()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 516, in e5mod
left = self.e5mul()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 522, in e5mul
left = self.e5div()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 528, in e5div
left = self.e6()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 538, in e6
return self.e7()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 544, in e7
args = self.args()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 589, in args
s = self.statement()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 448, in statement
return self.e1()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 451, in e1
left = self.e2()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 476, in e2
left = self.e3()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 485, in e3
left = self.e4()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 494, in e4
left = self.e5()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 501, in e5
return self.e5add()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 504, in e5add
left = self.e5sub()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 510, in e5sub
left = self.e5mod()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 516, in e5mod
left = self.e5mul()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 522, in e5mul
left = self.e5div()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 528, in e5div
left = self.e6()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 538, in e6
return self.e7()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 555, in e7
left = self.method_call(left)
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 618, in method_call
args = self.args()
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 590, in args
a = ArgumentNode(s)
File "/home/adrian/.local/lib/python3.5/site-packages/mesonbuild/mparser.py", line 351, in __init__
self.subdir = token.subdir
AttributeError: 'TernaryNode' object has no attribute 'subdir'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.