after_merge
stringlengths
28
79.6k
before_merge
stringlengths
20
79.6k
url
stringlengths
38
71
full_traceback
stringlengths
43
922k
traceback_type
stringclasses
555 values
def keys(self): return list(sum(set(dir(obj)) for obj in self.objs))
def keys(self): return list(sum(set(dir(obj)) for obj in objs))
https://github.com/saulpw/visidata/issues/251
$ vd --debug -b -p newCol.vd Think about what you're doing opening newCol as vd "." opening . as dir "1" c=addColumn(SettableColumn("", width=options.default_width), cursorColIndex+1); draw(vd.scr); cursorVisibleColIndex=visibleCols.index(c); c.name=editCell(cursorVisibleColIndex, -1); c.width=None AttributeError: 'NoneType' object has no attribute 'erase' AttributeError: 'NoneType' object has no attribute 'erase' Traceback (most recent call last): File "/usr/local/bin/vd", line 4, in <module> __import__('pkg_resources').run_script('visidata==1.6.dev0', 'vd') File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script self.require(requires)[0].run_script(script_name, ns) File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script exec(script_code, namespace, namespace) File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 169, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 152, in main File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 289, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 455, in exceptionCaught File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 284, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 264, in replayOne File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1132, in exec_command File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1126, in exec_command File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/_sheet.py", line 566, in 
draw AttributeError: 'NoneType' object has no attribute 'erase'
AttributeError
def __getitem__(self, k): for obj in self.objs: if k in dir(obj): return getattr(obj, k) return self.locals[k]
def __getitem__(self, k): for obj in self.objs: if k in dir(obj): return getattr(obj, k) raise KeyError(k)
https://github.com/saulpw/visidata/issues/251
$ vd --debug -b -p newCol.vd Think about what you're doing opening newCol as vd "." opening . as dir "1" c=addColumn(SettableColumn("", width=options.default_width), cursorColIndex+1); draw(vd.scr); cursorVisibleColIndex=visibleCols.index(c); c.name=editCell(cursorVisibleColIndex, -1); c.width=None AttributeError: 'NoneType' object has no attribute 'erase' AttributeError: 'NoneType' object has no attribute 'erase' Traceback (most recent call last): File "/usr/local/bin/vd", line 4, in <module> __import__('pkg_resources').run_script('visidata==1.6.dev0', 'vd') File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script self.require(requires)[0].run_script(script_name, ns) File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script exec(script_code, namespace, namespace) File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 169, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 152, in main File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 289, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 455, in exceptionCaught File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 284, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 264, in replayOne File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1132, in exec_command File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1126, in exec_command File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/_sheet.py", line 566, in 
draw AttributeError: 'NoneType' object has no attribute 'erase'
AttributeError
def __setitem__(self, k, v): for obj in self.objs: if k in dir(obj): return setattr(obj, k, v) self.locals[k] = v
def __setitem__(self, k, v): for obj in self.objs: if k in dir(obj): return setattr(self.objs[0], k, v) return setattr(self.objs[-1], k, v)
https://github.com/saulpw/visidata/issues/251
$ vd --debug -b -p newCol.vd Think about what you're doing opening newCol as vd "." opening . as dir "1" c=addColumn(SettableColumn("", width=options.default_width), cursorColIndex+1); draw(vd.scr); cursorVisibleColIndex=visibleCols.index(c); c.name=editCell(cursorVisibleColIndex, -1); c.width=None AttributeError: 'NoneType' object has no attribute 'erase' AttributeError: 'NoneType' object has no attribute 'erase' Traceback (most recent call last): File "/usr/local/bin/vd", line 4, in <module> __import__('pkg_resources').run_script('visidata==1.6.dev0', 'vd') File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script self.require(requires)[0].run_script(script_name, ns) File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script exec(script_code, namespace, namespace) File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 169, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 152, in main File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 289, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 455, in exceptionCaught File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 284, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 264, in replayOne File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1132, in exec_command File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1126, in exec_command File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/_sheet.py", line 566, in 
draw AttributeError: 'NoneType' object has no attribute 'erase'
AttributeError
def __init__(self): self.sheets = [] # list of BaseSheet; all sheets on the sheet stack self.allSheets = ( weakref.WeakKeyDictionary() ) # [BaseSheet] -> sheetname (all non-precious sheets ever pushed) self.statuses = ( collections.OrderedDict() ) # (priority, statusmsg) -> num_repeats; shown until next action self.statusHistory = [] # list of [priority, statusmsg, repeats] for all status messages ever self.lastErrors = [] self.keystrokes = "" self.prefixWaiting = False self.scr = mock.MagicMock( __bool__=mock.Mock(return_value=False) ) # disable curses in batch mode self.mousereg = [] self.cmdlog = None # CommandLog
def __init__(self): self.sheets = [] # list of BaseSheet; all sheets on the sheet stack self.allSheets = ( weakref.WeakKeyDictionary() ) # [BaseSheet] -> sheetname (all non-precious sheets ever pushed) self.statuses = ( collections.OrderedDict() ) # (priority, statusmsg) -> num_repeats; shown until next action self.statusHistory = [] # list of [priority, statusmsg, repeats] for all status messages ever self.lastErrors = [] self.keystrokes = "" self.prefixWaiting = False self.scr = None # curses scr self.mousereg = [] self.cmdlog = None # CommandLog
https://github.com/saulpw/visidata/issues/251
$ vd --debug -b -p newCol.vd Think about what you're doing opening newCol as vd "." opening . as dir "1" c=addColumn(SettableColumn("", width=options.default_width), cursorColIndex+1); draw(vd.scr); cursorVisibleColIndex=visibleCols.index(c); c.name=editCell(cursorVisibleColIndex, -1); c.width=None AttributeError: 'NoneType' object has no attribute 'erase' AttributeError: 'NoneType' object has no attribute 'erase' Traceback (most recent call last): File "/usr/local/bin/vd", line 4, in <module> __import__('pkg_resources').run_script('visidata==1.6.dev0', 'vd') File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script self.require(requires)[0].run_script(script_name, ns) File "/home/cwarden/.local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script exec(script_code, namespace, namespace) File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 169, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/EGG-INFO/scripts/vd", line 152, in main File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 289, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 455, in exceptionCaught File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 284, in replay_sync File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/cmdlog.py", line 264, in replayOne File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1132, in exec_command File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/__init__.py", line 1126, in exec_command File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/dist-packages/visidata-1.6.dev0-py3.6.egg/visidata/vdtui/_sheet.py", line 566, in 
draw AttributeError: 'NoneType' object has no attribute 'erase'
AttributeError
def _to_timestamp(dt: Optional[datetime.datetime]) -> Optional[timestamp_pb2.Timestamp]: if dt is not None: return timestamp_pb2.Timestamp(seconds=int(dt.timestamp())) return None
def _to_timestamp(dt: datetime.datetime): return timestamp_pb2.Timestamp(seconds=int(dt.timestamp()))
https://github.com/quantumlib/Cirq/issues/3787
Traceback (most recent call last): File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/runpy.py", line 197, in _run_module_as_main return _run_code(code, main_globals, None, File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/home/lingyihu/projects/ReCirq/recirq/quantum_chess/experiments/batch_moves.py", line 94, in <module> main_loop(parse()) File "/home/lingyihu/projects/ReCirq/recirq/quantum_chess/experiments/batch_moves.py", line 60, in main_loop available_processors = utils.get_available_processors(utils.QUANTUM_PROCESSORS.keys()) File "/home/lingyihu/projects/ReCirq/recirq/engine_utils.py", line 405, in get_available_processors time_slot = EngineTimeSlot.from_proto(time_slot) File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/site-packages/cirq/google/engine/engine_timeslot.py", line 75, in from_proto end_time=datetime.datetime.fromtimestamp( ValueError: year 0 is out of range
ValueError
def from_proto(cls, proto: qtypes.QuantumTimeSlot): slot_type = qenums.QuantumTimeSlot.TimeSlotType(proto.slot_type) start_time = None end_time = None if proto.HasField("start_time"): start_time = datetime.datetime.fromtimestamp(proto.start_time.seconds) if proto.HasField("end_time"): end_time = datetime.datetime.fromtimestamp(proto.end_time.seconds) if proto.HasField("reservation_config"): return cls( processor_id=proto.processor_name, start_time=start_time, end_time=end_time, slot_type=slot_type, project_id=proto.reservation_config.project_id, ) if proto.HasField("maintenance_config"): return cls( processor_id=proto.processor_name, start_time=start_time, end_time=end_time, slot_type=slot_type, maintenance_title=proto.maintenance_config.title, maintenance_description=proto.maintenance_config.description, ) return cls( processor_id=proto.processor_name, start_time=start_time, end_time=end_time, slot_type=slot_type, )
def from_proto(cls, proto: qtypes.QuantumTimeSlot): slot_type = qenums.QuantumTimeSlot.TimeSlotType(proto.slot_type) if proto.HasField("reservation_config"): return cls( processor_id=proto.processor_name, start_time=datetime.datetime.fromtimestamp(proto.start_time.seconds), end_time=datetime.datetime.fromtimestamp(proto.end_time.seconds), slot_type=slot_type, project_id=proto.reservation_config.project_id, ) if proto.HasField("maintenance_config"): return cls( processor_id=proto.processor_name, start_time=datetime.datetime.fromtimestamp(proto.start_time.seconds), end_time=datetime.datetime.fromtimestamp(proto.end_time.seconds), slot_type=slot_type, maintenance_title=proto.maintenance_config.title, maintenance_description=proto.maintenance_config.description, ) return cls( processor_id=proto.processor_name, start_time=datetime.datetime.fromtimestamp(proto.start_time.seconds), end_time=datetime.datetime.fromtimestamp(proto.end_time.seconds), slot_type=slot_type, )
https://github.com/quantumlib/Cirq/issues/3787
Traceback (most recent call last): File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/runpy.py", line 197, in _run_module_as_main return _run_code(code, main_globals, None, File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/home/lingyihu/projects/ReCirq/recirq/quantum_chess/experiments/batch_moves.py", line 94, in <module> main_loop(parse()) File "/home/lingyihu/projects/ReCirq/recirq/quantum_chess/experiments/batch_moves.py", line 60, in main_loop available_processors = utils.get_available_processors(utils.QUANTUM_PROCESSORS.keys()) File "/home/lingyihu/projects/ReCirq/recirq/engine_utils.py", line 405, in get_available_processors time_slot = EngineTimeSlot.from_proto(time_slot) File "/home/lingyihu/miniconda3/envs/recirq/lib/python3.9/site-packages/cirq/google/engine/engine_timeslot.py", line 75, in from_proto end_time=datetime.datetime.fromtimestamp( ValueError: year 0 is out of range
ValueError
def cirq_class_resolver_dictionary(self) -> Dict[str, Type]: if self._crd is None: import cirq from cirq.devices.noise_model import _NoNoiseModel from cirq.experiments import ( CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer, ) from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata from cirq.google.devices.known_devices import _NamedConstantXmonDevice def _identity_operation_from_dict(qubits, **kwargs): return cirq.identity_each(*qubits) def single_qubit_matrix_gate(matrix): if not isinstance(matrix, np.ndarray): matrix = np.array(matrix, dtype=np.complex128) return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],)) def two_qubit_matrix_gate(matrix): if not isinstance(matrix, np.ndarray): matrix = np.array(matrix, dtype=np.complex128) return cirq.MatrixGate(matrix, qid_shape=(2, 2)) self._crd = { "AmplitudeDampingChannel": cirq.AmplitudeDampingChannel, "AsymmetricDepolarizingChannel": cirq.AsymmetricDepolarizingChannel, "BitFlipChannel": cirq.BitFlipChannel, "ProductState": cirq.ProductState, "CCNotPowGate": cirq.CCNotPowGate, "CCXPowGate": cirq.CCXPowGate, "CCZPowGate": cirq.CCZPowGate, "CNotPowGate": cirq.CNotPowGate, "ControlledGate": cirq.ControlledGate, "ControlledOperation": cirq.ControlledOperation, "CSwapGate": cirq.CSwapGate, "CXPowGate": cirq.CXPowGate, "CZPowGate": cirq.CZPowGate, "CrossEntropyResult": CrossEntropyResult, "CrossEntropyResultDict": CrossEntropyResultDict, "Circuit": cirq.Circuit, "CliffordState": cirq.CliffordState, "CliffordTableau": cirq.CliffordTableau, "DepolarizingChannel": cirq.DepolarizingChannel, "ConstantQubitNoiseModel": cirq.ConstantQubitNoiseModel, "Duration": cirq.Duration, "FSimGate": cirq.FSimGate, "DensePauliString": cirq.DensePauliString, "MutableDensePauliString": cirq.MutableDensePauliString, "MutablePauliString": cirq.MutablePauliString, "GateOperation": cirq.GateOperation, "GateTabulation": cirq.google.GateTabulation, "GeneralizedAmplitudeDampingChannel": 
cirq.GeneralizedAmplitudeDampingChannel, "GlobalPhaseOperation": cirq.GlobalPhaseOperation, "GridInteractionLayer": GridInteractionLayer, "GridParallelXEBMetadata": GridParallelXEBMetadata, "GridQid": cirq.GridQid, "GridQubit": cirq.GridQubit, "HPowGate": cirq.HPowGate, "ISwapPowGate": cirq.ISwapPowGate, "IdentityGate": cirq.IdentityGate, "IdentityOperation": _identity_operation_from_dict, "LinearDict": cirq.LinearDict, "LineQubit": cirq.LineQubit, "LineQid": cirq.LineQid, "MatrixGate": cirq.MatrixGate, "MeasurementGate": cirq.MeasurementGate, "Moment": cirq.Moment, "_XEigenState": cirq.value.product_state._XEigenState, # type: ignore "_YEigenState": cirq.value.product_state._YEigenState, # type: ignore "_ZEigenState": cirq.value.product_state._ZEigenState, # type: ignore "_NamedConstantXmonDevice": _NamedConstantXmonDevice, "_NoNoiseModel": _NoNoiseModel, "NamedQubit": cirq.NamedQubit, "NamedQid": cirq.NamedQid, "NoIdentifierQubit": cirq.testing.NoIdentifierQubit, "_PauliX": cirq.ops.pauli_gates._PauliX, "_PauliY": cirq.ops.pauli_gates._PauliY, "_PauliZ": cirq.ops.pauli_gates._PauliZ, "ParamResolver": cirq.ParamResolver, "PasqalDevice": cirq.pasqal.PasqalDevice, "PasqalVirtualDevice": cirq.pasqal.PasqalVirtualDevice, "PauliString": cirq.PauliString, "PhaseDampingChannel": cirq.PhaseDampingChannel, "PhaseFlipChannel": cirq.PhaseFlipChannel, "PhaseGradientGate": cirq.PhaseGradientGate, "PhasedISwapPowGate": cirq.PhasedISwapPowGate, "PhasedXPowGate": cirq.PhasedXPowGate, "PhasedXZGate": cirq.PhasedXZGate, "PhysicalZTag": cirq.google.PhysicalZTag, "RandomGateChannel": cirq.RandomGateChannel, "QuantumFourierTransformGate": cirq.QuantumFourierTransformGate, "ResetChannel": cirq.ResetChannel, "SingleQubitMatrixGate": single_qubit_matrix_gate, "SingleQubitPauliStringGateOperation": cirq.SingleQubitPauliStringGateOperation, "SingleQubitReadoutCalibrationResult": cirq.experiments.SingleQubitReadoutCalibrationResult, "StabilizerStateChForm": cirq.StabilizerStateChForm, 
"SwapPowGate": cirq.SwapPowGate, "SycamoreGate": cirq.google.SycamoreGate, "TaggedOperation": cirq.TaggedOperation, "ThreeDQubit": cirq.pasqal.ThreeDQubit, "Result": cirq.Result, "TrialResult": cirq.TrialResult, "TwoDQubit": cirq.pasqal.TwoDQubit, "TwoQubitMatrixGate": two_qubit_matrix_gate, "TwoQubitDiagonalGate": cirq.TwoQubitDiagonalGate, "_UnconstrainedDevice": cirq.devices.unconstrained_device._UnconstrainedDevice, "VirtualTag": cirq.VirtualTag, "WaitGate": cirq.WaitGate, "_QubitAsQid": raw_types._QubitAsQid, "XPowGate": cirq.XPowGate, "XXPowGate": cirq.XXPowGate, "YPowGate": cirq.YPowGate, "YYPowGate": cirq.YYPowGate, "ZPowGate": cirq.ZPowGate, "ZZPowGate": cirq.ZZPowGate, # not a cirq class, but treated as one: "pandas.DataFrame": pd.DataFrame, "pandas.Index": pd.Index, "pandas.MultiIndex": pd.MultiIndex.from_tuples, "sympy.Symbol": sympy.Symbol, "sympy.Add": lambda args: sympy.Add(*args), "sympy.Mul": lambda args: sympy.Mul(*args), "sympy.Pow": lambda args: sympy.Pow(*args), "sympy.Float": lambda approx: sympy.Float(approx), "sympy.Integer": sympy.Integer, "sympy.Rational": sympy.Rational, "complex": complex, } return self._crd
def cirq_class_resolver_dictionary(self) -> Dict[str, Type]: if self._crd is None: import cirq from cirq.devices.noise_model import _NoNoiseModel from cirq.experiments import ( CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer, ) from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata from cirq.google.devices.known_devices import _NamedConstantXmonDevice def _identity_operation_from_dict(qubits, **kwargs): return cirq.identity_each(*qubits) def single_qubit_matrix_gate(matrix): if not isinstance(matrix, np.ndarray): matrix = np.array(matrix, dtype=np.complex128) return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],)) def two_qubit_matrix_gate(matrix): if not isinstance(matrix, np.ndarray): matrix = np.array(matrix, dtype=np.complex128) return cirq.MatrixGate(matrix, qid_shape=(2, 2)) self._crd = { "AmplitudeDampingChannel": cirq.AmplitudeDampingChannel, "AsymmetricDepolarizingChannel": cirq.AsymmetricDepolarizingChannel, "BitFlipChannel": cirq.BitFlipChannel, "ProductState": cirq.ProductState, "CCNotPowGate": cirq.CCNotPowGate, "CCXPowGate": cirq.CCXPowGate, "CCZPowGate": cirq.CCZPowGate, "CNotPowGate": cirq.CNotPowGate, "ControlledGate": cirq.ControlledGate, "ControlledOperation": cirq.ControlledOperation, "CSwapGate": cirq.CSwapGate, "CXPowGate": cirq.CXPowGate, "CZPowGate": cirq.CZPowGate, "CrossEntropyResult": CrossEntropyResult, "CrossEntropyResultDict": CrossEntropyResultDict, "Circuit": cirq.Circuit, "CliffordState": cirq.CliffordState, "CliffordTableau": cirq.CliffordTableau, "DepolarizingChannel": cirq.DepolarizingChannel, "ConstantQubitNoiseModel": cirq.ConstantQubitNoiseModel, "Duration": cirq.Duration, "FSimGate": cirq.FSimGate, "DensePauliString": cirq.DensePauliString, "MutableDensePauliString": cirq.MutableDensePauliString, "MutablePauliString": cirq.MutablePauliString, "GateOperation": cirq.GateOperation, "GateTabulation": cirq.google.GateTabulation, "GeneralizedAmplitudeDampingChannel": 
cirq.GeneralizedAmplitudeDampingChannel, "GlobalPhaseOperation": cirq.GlobalPhaseOperation, "GridInteractionLayer": GridInteractionLayer, "GridParallelXEBMetadata": GridParallelXEBMetadata, "GridQid": cirq.GridQid, "GridQubit": cirq.GridQubit, "HPowGate": cirq.HPowGate, "ISwapPowGate": cirq.ISwapPowGate, "IdentityGate": cirq.IdentityGate, "IdentityOperation": _identity_operation_from_dict, "LinearDict": cirq.LinearDict, "LineQubit": cirq.LineQubit, "LineQid": cirq.LineQid, "MatrixGate": cirq.MatrixGate, "MeasurementGate": cirq.MeasurementGate, "Moment": cirq.Moment, "_XEigenState": cirq.value.product_state._XEigenState, # type: ignore "_YEigenState": cirq.value.product_state._YEigenState, # type: ignore "_ZEigenState": cirq.value.product_state._ZEigenState, # type: ignore "_NamedConstantXmonDevice": _NamedConstantXmonDevice, "_NoNoiseModel": _NoNoiseModel, "NamedQubit": cirq.NamedQubit, "NamedQid": cirq.NamedQid, "NoIdentifierQubit": cirq.testing.NoIdentifierQubit, "_PauliX": cirq.ops.pauli_gates._PauliX, "_PauliY": cirq.ops.pauli_gates._PauliY, "_PauliZ": cirq.ops.pauli_gates._PauliZ, "ParamResolver": cirq.ParamResolver, "PasqalDevice": cirq.pasqal.PasqalDevice, "PasqalVirtualDevice": cirq.pasqal.PasqalVirtualDevice, "PauliString": cirq.PauliString, "PhaseDampingChannel": cirq.PhaseDampingChannel, "PhaseFlipChannel": cirq.PhaseFlipChannel, "PhaseGradientGate": cirq.PhaseGradientGate, "PhasedISwapPowGate": cirq.PhasedISwapPowGate, "PhasedXPowGate": cirq.PhasedXPowGate, "PhasedXZGate": cirq.PhasedXZGate, "PhysicalZTag": cirq.google.PhysicalZTag, "RandomGateChannel": cirq.RandomGateChannel, "QuantumFourierTransformGate": cirq.QuantumFourierTransformGate, "ResetChannel": cirq.ResetChannel, "SingleQubitMatrixGate": single_qubit_matrix_gate, "SingleQubitPauliStringGateOperation": cirq.SingleQubitPauliStringGateOperation, "SingleQubitReadoutCalibrationResult": cirq.experiments.SingleQubitReadoutCalibrationResult, "StabilizerStateChForm": cirq.StabilizerStateChForm, 
"SwapPowGate": cirq.SwapPowGate, "SycamoreGate": cirq.google.SycamoreGate, "TaggedOperation": cirq.TaggedOperation, "ThreeDQubit": cirq.pasqal.ThreeDQubit, "Result": cirq.Result, "TwoDQubit": cirq.pasqal.TwoDQubit, "TwoQubitMatrixGate": two_qubit_matrix_gate, "TwoQubitDiagonalGate": cirq.TwoQubitDiagonalGate, "_UnconstrainedDevice": cirq.devices.unconstrained_device._UnconstrainedDevice, "VirtualTag": cirq.VirtualTag, "WaitGate": cirq.WaitGate, "_QubitAsQid": raw_types._QubitAsQid, "XPowGate": cirq.XPowGate, "XXPowGate": cirq.XXPowGate, "YPowGate": cirq.YPowGate, "YYPowGate": cirq.YYPowGate, "ZPowGate": cirq.ZPowGate, "ZZPowGate": cirq.ZZPowGate, # not a cirq class, but treated as one: "pandas.DataFrame": pd.DataFrame, "pandas.Index": pd.Index, "pandas.MultiIndex": pd.MultiIndex.from_tuples, "sympy.Symbol": sympy.Symbol, "sympy.Add": lambda args: sympy.Add(*args), "sympy.Mul": lambda args: sympy.Mul(*args), "sympy.Pow": lambda args: sympy.Pow(*args), "sympy.Float": lambda approx: sympy.Float(approx), "sympy.Integer": sympy.Integer, "sympy.Rational": sympy.Rational, "complex": complex, } return self._crd
https://github.com/quantumlib/Cirq/issues/3380
Traceback (most recent call last): File "/Users/balintp/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_1.py", line 3, in <module> print(cirq.read_json(json_text="""{ File "/Users/balintp/dev/proj/Cirq/cirq/protocols/json_serialization.py", line 526, in read_json return json.loads(json_text, object_hook=obj_hook) File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/json/__init__.py", line 370, in loads return cls(**kw).decode(s) File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/json/decoder.py", line 337, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/json/decoder.py", line 353, in raw_decode obj, end = self.scan_once(s, idx) File "/Users/balintp/dev/proj/Cirq/cirq/protocols/json_serialization.py", line 523, in obj_hook return _cirq_object_hook(x, resolvers) File "/Users/balintp/dev/proj/Cirq/cirq/protocols/json_serialization.py", line 429, in _cirq_object_hook raise ValueError("Could not resolve type '{}' " ValueError: Could not resolve type 'TrialResult' during deserialization
ValueError
def grid_qubit_from_proto_id(proto_id: str) -> "cirq.GridQubit": """Parse a proto id to a `cirq.GridQubit`. Proto ids for grid qubits are of the form `{row}_{col}` where `{row}` is the integer row of the grid qubit, and `{col}` is the integer column of the qubit. Args: proto_id: The id to convert. Returns: A `cirq.GridQubit` corresponding to the proto id. Raises: ValueError: If the string not of the correct format. """ match = re.match(GRID_QUBIT_ID_PATTERN, proto_id) if match is None: raise ValueError( "GridQubit proto id must be of the form <int>_<int> but was {}".format( proto_id ) ) try: row, col = match.groups() return devices.GridQubit(row=int(row), col=int(col)) except ValueError: raise ValueError( "GridQubit proto id must be of the form <int>_<int> but was {}".format( proto_id ) )
def grid_qubit_from_proto_id(proto_id: str) -> "cirq.GridQubit": """Parse a proto id to a `cirq.GridQubit`. Proto ids for grid qubits are of the form `{row}_{col}` where `{row}` is the integer row of the grid qubit, and `{col}` is the integer column of the qubit. Args: proto_id: The id to convert. Returns: A `cirq.GridQubit` corresponding to the proto id. Raises: ValueError: If the string not of the correct format. """ parts = proto_id.split("_") if len(parts) != 2: raise ValueError( "GridQubit proto id must be of the form <int>_<int> but was {}".format( proto_id ) ) try: row, col = parts return devices.GridQubit(row=int(row), col=int(col)) except ValueError: raise ValueError( "GridQubit proto id must be of the form <int>_<int> but was {}".format( proto_id ) )
https://github.com/quantumlib/Cirq/issues/3219
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) ~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id) 102 row, col = parts --> 103 return devices.GridQubit(row=int(row), col=int(col)) 104 except ValueError: ValueError: invalid literal for int() with base 10: 'q3' During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) <ipython-input-2-69914ce7bc88> in <module> 4 proto_version=cirq.google.ProtoVersion.V2) 5 engine_processor = engine.get_processor('rainbow') ----> 6 calibrations = engine_processor.list_calibrations() ~/Projects/Cirq/cirq/google/engine/engine_processor.py in list_calibrations(self, earliest_timestamp_seconds, latest_timestamp_seconds) 158 response = self.context.client.list_calibrations( 159 self.project_id, self.processor_id, filter_str) --> 160 return [self._to_calibration(c.data) for c in list(response)] 161 162 def get_calibration(self, calibration_timestamp_seconds: int ~/Projects/Cirq/cirq/google/engine/engine_processor.py in <listcomp>(.0) 158 response = self.context.client.list_calibrations( 159 self.project_id, self.processor_id, filter_str) --> 160 return [self._to_calibration(c.data) for c in list(response)] 161 162 def get_calibration(self, calibration_timestamp_seconds: int ~/Projects/Cirq/cirq/google/engine/engine_processor.py in _to_calibration(calibration_any) 130 metrics = v2.metrics_pb2.MetricsSnapshot.FromString( 131 calibration_any.value) --> 132 return calibration.Calibration(metrics) 133 134 def list_calibrations(self, ~/Projects/Cirq/cirq/google/engine/calibration.py in __init__(self, calibration) 53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None: 54 self.timestamp = calibration.timestamp_ms ---> 55 self._metric_dict = self._compute_metric_dict(calibration.metrics) 56 57 def _compute_metric_dict( ~/Projects/Cirq/cirq/google/engine/calibration.py 
in _compute_metric_dict(self, metrics) 68 ] 69 if metric.targets: ---> 70 qubits = tuple( 71 v2.grid_qubit_from_proto_id(t) for t in metric.targets) 72 results[name][qubits] = flat_values ~/Projects/Cirq/cirq/google/engine/calibration.py in <genexpr>(.0) 69 if metric.targets: 70 qubits = tuple( ---> 71 v2.grid_qubit_from_proto_id(t) for t in metric.targets) 72 results[name][qubits] = flat_values 73 else: ~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id) 103 return devices.GridQubit(row=int(row), col=int(col)) 104 except ValueError: --> 105 raise ValueError( 106 'GridQubit proto id must be of the form <int>_<int> but was {}'. 107 format(proto_id)) ValueError: GridQubit proto id must be of the form <int>_<int> but was q3_2
ValueError
def main( self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra, ): """This is the way to invoke a script with all the bells and whistles as a command line application. This will always terminate the application after a call. If this is not wanted, ``SystemExit`` needs to be caught. This method is also available by directly calling the instance of a :class:`Command`. .. versionadded:: 3.0 Added the `standalone_mode` flag to control the standalone mode. :param args: the arguments that should be used for parsing. If not provided, ``sys.argv[1:]`` is used. :param prog_name: the program name that should be used. By default the program name is constructed by taking the file name from ``sys.argv[0]``. :param complete_var: the environment variable that controls the bash completion support. The default is ``"_<prog_name>_COMPLETE"`` with prog_name in uppercase. :param standalone_mode: the default behavior is to invoke the script in standalone mode. Click will then handle exceptions and convert them into error messages and the function will never return but shut down the interpreter. If this is set to `False` they will be propagated to the caller and the return value of this function is the return value of :meth:`invoke`. :param extra: extra keyword arguments are forwarded to the context constructor. See :class:`Context` for more information. """ # Verify that the environment is configured correctly, or reject # further execution to avoid a broken script. _verify_python_env() if args is None: args = sys.argv[1:] else: args = list(args) if prog_name is None: prog_name = _detect_program_name() # Process shell completion requests and exit early. self._main_shell_completion(extra, prog_name, complete_var) try: try: with self.make_context(prog_name, args, **extra) as ctx: rv = self.invoke(ctx) if not standalone_mode: return rv # it's not safe to `ctx.exit(rv)` here! 
# note that `rv` may actually contain data like "1" which # has obvious effects # more subtle case: `rv=[None, None]` can come out of # chained commands which all returned `None` -- so it's not # even always obvious that `rv` indicates success/failure # by its truthiness/falsiness ctx.exit() except (EOFError, KeyboardInterrupt): echo(file=sys.stderr) raise Abort() except ClickException as e: if not standalone_mode: raise e.show() sys.exit(e.exit_code) except OSError as e: if e.errno == errno.EPIPE: sys.stdout = PacifyFlushWrapper(sys.stdout) sys.stderr = PacifyFlushWrapper(sys.stderr) sys.exit(1) else: raise except Exit as e: if standalone_mode: sys.exit(e.exit_code) else: # in non-standalone mode, return the exit code # note that this is only reached if `self.invoke` above raises # an Exit explicitly -- thus bypassing the check there which # would return its result # the results of non-standalone execution may therefore be # somewhat ambiguous: if there are codepaths which lead to # `ctx.exit(1)` and to `return 1`, the caller won't be able to # tell the difference between the two return e.exit_code except Abort: if not standalone_mode: raise echo("Aborted!", file=sys.stderr) sys.exit(1)
def main( self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra, ): """This is the way to invoke a script with all the bells and whistles as a command line application. This will always terminate the application after a call. If this is not wanted, ``SystemExit`` needs to be caught. This method is also available by directly calling the instance of a :class:`Command`. .. versionadded:: 3.0 Added the `standalone_mode` flag to control the standalone mode. :param args: the arguments that should be used for parsing. If not provided, ``sys.argv[1:]`` is used. :param prog_name: the program name that should be used. By default the program name is constructed by taking the file name from ``sys.argv[0]``. :param complete_var: the environment variable that controls the bash completion support. The default is ``"_<prog_name>_COMPLETE"`` with prog_name in uppercase. :param standalone_mode: the default behavior is to invoke the script in standalone mode. Click will then handle exceptions and convert them into error messages and the function will never return but shut down the interpreter. If this is set to `False` they will be propagated to the caller and the return value of this function is the return value of :meth:`invoke`. :param extra: extra keyword arguments are forwarded to the context constructor. See :class:`Context` for more information. """ # Verify that the environment is configured correctly, or reject # further execution to avoid a broken script. _verify_python_env() if args is None: args = sys.argv[1:] else: args = list(args) if prog_name is None: prog_name = _detect_program_name() # Process shell completion requests and exit early. self._main_shell_completion(prog_name, complete_var) try: try: with self.make_context(prog_name, args, **extra) as ctx: rv = self.invoke(ctx) if not standalone_mode: return rv # it's not safe to `ctx.exit(rv)` here! 
# note that `rv` may actually contain data like "1" which # has obvious effects # more subtle case: `rv=[None, None]` can come out of # chained commands which all returned `None` -- so it's not # even always obvious that `rv` indicates success/failure # by its truthiness/falsiness ctx.exit() except (EOFError, KeyboardInterrupt): echo(file=sys.stderr) raise Abort() except ClickException as e: if not standalone_mode: raise e.show() sys.exit(e.exit_code) except OSError as e: if e.errno == errno.EPIPE: sys.stdout = PacifyFlushWrapper(sys.stdout) sys.stderr = PacifyFlushWrapper(sys.stderr) sys.exit(1) else: raise except Exit as e: if standalone_mode: sys.exit(e.exit_code) else: # in non-standalone mode, return the exit code # note that this is only reached if `self.invoke` above raises # an Exit explicitly -- thus bypassing the check there which # would return its result # the results of non-standalone execution may therefore be # somewhat ambiguous: if there are codepaths which lead to # `ctx.exit(1)` and to `return 1`, the caller won't be able to # tell the difference between the two return e.exit_code except Abort: if not standalone_mode: raise echo("Aborted!", file=sys.stderr) sys.exit(1)
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def _main_shell_completion(self, ctx_args, prog_name, complete_var=None): """Check if the shell is asking for tab completion, process that, then exit early. Called from :meth:`main` before the program is invoked. :param prog_name: Name of the executable in the shell. :param complete_var: Name of the environment variable that holds the completion instruction. Defaults to ``_{PROG_NAME}_COMPLETE``. """ if complete_var is None: complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() instruction = os.environ.get(complete_var) if not instruction: return from .shell_completion import shell_complete rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) _fast_exit(rv)
def _main_shell_completion(self, prog_name, complete_var=None): """Check if the shell is asking for tab completion, process that, then exit early. Called from :meth:`main` before the program is invoked. :param prog_name: Name of the executable in the shell. :param complete_var: Name of the environment variable that holds the completion instruction. Defaults to ``_{PROG_NAME}_COMPLETE``. """ if complete_var is None: complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() instruction = os.environ.get(complete_var) if not instruction: return from .shell_completion import shell_complete rv = shell_complete(self, prog_name, complete_var, instruction) _fast_exit(rv)
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def shell_complete(cli, ctx_args, prog_name, complete_var, instruction): """Perform shell completion for the given CLI program. :param cli: Command being called. :param ctx_args: Extra arguments to pass to ``cli.make_context``. :param prog_name: Name of the executable in the shell. :param complete_var: Name of the environment variable that holds the completion instruction. :param instruction: Value of ``complete_var`` with the completion instruction and shell, in the form ``instruction_shell``. :return: Status code to exit with. """ instruction, _, shell = instruction.partition("_") comp_cls = get_completion_class(shell) if comp_cls is None: return 1 comp = comp_cls(cli, ctx_args, prog_name, complete_var) if instruction == "source": echo(comp.source()) return 0 if instruction == "complete": echo(comp.complete()) return 0 return 1
def shell_complete(cli, prog_name, complete_var, instruction): """Perform shell completion for the given CLI program. :param cli: Command being called. :param prog_name: Name of the executable in the shell. :param complete_var: Name of the environment variable that holds the completion instruction. :param instruction: Value of ``complete_var`` with the completion instruction and shell, in the form ``instruction_shell``. :return: Status code to exit with. """ instruction, _, shell = instruction.partition("_") comp_cls = get_completion_class(shell) if comp_cls is None: return 1 comp = comp_cls(cli, prog_name, complete_var) if instruction == "source": echo(comp.source()) return 0 if instruction == "complete": echo(comp.complete()) return 0 return 1
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def __init__(self, cli, ctx_args, prog_name, complete_var): self.cli = cli self.ctx_args = ctx_args self.prog_name = prog_name self.complete_var = complete_var
def __init__(self, cli, prog_name, complete_var): self.cli = cli self.prog_name = prog_name self.complete_var = complete_var
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def get_completions(self, args, incomplete): """Determine the context and last complete command or parameter from the complete args. Call that object's ``shell_complete`` method to get the completions for the incomplete value. :param args: List of complete args before the incomplete value. :param incomplete: Value being completed. May be empty. """ ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args) if ctx is None: return [] obj, incomplete = _resolve_incomplete(ctx, args, incomplete) return obj.shell_complete(ctx, incomplete)
def get_completions(self, args, incomplete): """Determine the context and last complete command or parameter from the complete args. Call that object's ``shell_complete`` method to get the completions for the incomplete value. :param args: List of complete args before the incomplete value. :param incomplete: Value being completed. May be empty. """ ctx = _resolve_context(self.cli, self.prog_name, args) if ctx is None: return [] obj, incomplete = _resolve_incomplete(ctx, args, incomplete) return obj.shell_complete(ctx, incomplete)
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def _resolve_context(cli, ctx_args, prog_name, args): """Produce the context hierarchy starting with the command and traversing the complete arguments. This only follows the commands, it doesn't trigger input prompts or callbacks. :param cli: Command being called. :param prog_name: Name of the executable in the shell. :param args: List of complete args before the incomplete value. """ ctx_args["resilient_parsing"] = True ctx = cli.make_context(prog_name, args.copy(), **ctx_args) args = ctx.protected_args + ctx.args while args: if isinstance(ctx.command, MultiCommand): if not ctx.command.chain: name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True) args = ctx.protected_args + ctx.args else: while args: name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx sub_ctx = cmd.make_context( name, args, parent=ctx, allow_extra_args=True, allow_interspersed_args=False, resilient_parsing=True, ) args = sub_ctx.args ctx = sub_ctx args = sub_ctx.protected_args + sub_ctx.args else: break return ctx
def _resolve_context(cli, prog_name, args): """Produce the context hierarchy starting with the command and traversing the complete arguments. This only follows the commands, it doesn't trigger input prompts or callbacks. :param cli: Command being called. :param prog_name: Name of the executable in the shell. :param args: List of complete args before the incomplete value. """ ctx = cli.make_context(prog_name, args.copy(), resilient_parsing=True) args = ctx.protected_args + ctx.args while args: if isinstance(ctx.command, MultiCommand): if not ctx.command.chain: name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True) args = ctx.protected_args + ctx.args else: while args: name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx sub_ctx = cmd.make_context( name, args, parent=ctx, allow_extra_args=True, allow_interspersed_args=False, resilient_parsing=True, ) args = sub_ctx.args ctx = sub_ctx args = sub_ctx.protected_args + sub_ctx.args else: break return ctx
https://github.com/pallets/click/issues/942
$ ./testclick subcommand <TAB>Traceback (most recent call last): File "./testclick", line 23, in <module> entrypoint(obj={'completions': ['abc', 'def', 'ghi', ]}) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 731, in __call__ return self.main(*args, **kwargs) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 701, in main _bashcomplete(self, prog_name, complete_var) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/core.py", line 46, in _bashcomplete if bashcomplete(cmd, prog_name, complete_var, complete_instr): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 216, in bashcomplete return do_complete(cli, prog_name) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 205, in do_complete for item in get_choices(cli, prog_name, args, incomplete): File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 186, in get_choices ctx, all_args, incomplete, param)) File "/home/user/testclick/venv/lib/python3.6/site-packages/click/_bashcomplete.py", line 124, in get_user_autocompletions incomplete=incomplete) File "./testclick", line 7, in _complete return ctx.obj['completions'] TypeError: 'NoneType' object is not subscriptable
TypeError
def resolve_ctx(cli, prog_name, args): """ Parse into a hierarchy of contexts. Contexts are connected through the parent variable. :param cli: command definition :param prog_name: the program that is running :param args: full list of args :return: the final context/command parsed """ ctx = cli.make_context(prog_name, args, resilient_parsing=True) args = ctx.protected_args + ctx.args while args: if isinstance(ctx.command, MultiCommand): if not ctx.command.chain: cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx ctx = cmd.make_context( cmd_name, args, parent=ctx, resilient_parsing=True ) args = ctx.protected_args + ctx.args else: # Walk chained subcommand contexts saving the last one. while args: cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) if cmd is None: return ctx sub_ctx = cmd.make_context( cmd_name, args, parent=ctx, allow_extra_args=True, allow_interspersed_args=False, resilient_parsing=True, ) args = sub_ctx.args ctx = sub_ctx args = sub_ctx.protected_args + sub_ctx.args else: break return ctx
def resolve_ctx(cli, prog_name, args): """ Parse into a hierarchy of contexts. Contexts are connected through the parent variable. :param cli: command definition :param prog_name: the program that is running :param args: full list of args :return: the final context/command parsed """ ctx = cli.make_context(prog_name, args, resilient_parsing=True) args_remaining = ctx.protected_args + ctx.args while ctx is not None and args_remaining: if isinstance(ctx.command, MultiCommand): cmd = ctx.command.get_command(ctx, args_remaining[0]) if cmd is None: return None ctx = cmd.make_context( args_remaining[0], args_remaining[1:], parent=ctx, resilient_parsing=True, ) args_remaining = ctx.protected_args + ctx.args else: ctx = ctx.parent return ctx
https://github.com/pallets/click/issues/925
Traceback (most recent call last): File "test_compl.py", line 19, in <module> test_argument_choice() File "test_compl.py", line 14, in test_argument_choice assert list(get_choices(cli, 'lol', ['arg11'], '')) == ['arg21', 'arg22'] AssertionError
AssertionError
def get_choices(cli, prog_name, args, incomplete): """ :param cli: command definition :param prog_name: the program that is running :param args: full list of args :param incomplete: the incomplete text to autocomplete :return: all the possible completions for the incomplete """ all_args = copy.deepcopy(args) ctx = resolve_ctx(cli, prog_name, args) if ctx is None: return [] # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse # without the '=' if start_of_option(incomplete) and WORDBREAK in incomplete: partition_incomplete = incomplete.partition(WORDBREAK) all_args.append(partition_incomplete[0]) incomplete = partition_incomplete[2] elif incomplete == WORDBREAK: incomplete = "" completions = [] if start_of_option(incomplete): # completions for partial options for param in ctx.command.params: if isinstance(param, Option): param_opts = [ param_opt for param_opt in param.opts + param.secondary_opts if param_opt not in all_args or param.multiple ] completions.extend( [(o, param.help) for o in param_opts if o.startswith(incomplete)] ) return completions # completion for option values from user supplied values for param in ctx.command.params: if is_incomplete_option(all_args, param): return get_user_autocompletions(ctx, all_args, incomplete, param) # completion for argument values from user supplied values for param in ctx.command.params: if is_incomplete_argument(ctx.params, param): return get_user_autocompletions(ctx, all_args, incomplete, param) add_subcommand_completions(ctx, incomplete, completions) return completions
def get_choices(cli, prog_name, args, incomplete): """ :param cli: command definition :param prog_name: the program that is running :param args: full list of args :param incomplete: the incomplete text to autocomplete :return: all the possible completions for the incomplete """ all_args = copy.deepcopy(args) ctx = resolve_ctx(cli, prog_name, args) if ctx is None: return [] # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse # without the '=' if start_of_option(incomplete) and WORDBREAK in incomplete: partition_incomplete = incomplete.partition(WORDBREAK) all_args.append(partition_incomplete[0]) incomplete = partition_incomplete[2] elif incomplete == WORDBREAK: incomplete = "" completions = [] if start_of_option(incomplete): # completions for partial options for param in ctx.command.params: if isinstance(param, Option): param_opts = [ param_opt for param_opt in param.opts + param.secondary_opts if param_opt not in all_args or param.multiple ] completions.extend( [(o, param.help) for o in param_opts if o.startswith(incomplete)] ) return completions # completion for option values from user supplied values for param in ctx.command.params: if is_incomplete_option(all_args, param): return get_user_autocompletions(ctx, all_args, incomplete, param) # completion for argument values from user supplied values for param in ctx.command.params: if is_incomplete_argument(ctx.params, param): completions.extend( get_user_autocompletions(ctx, all_args, incomplete, param) ) # Stop looking for other completions only if this argument is required. if param.required: return completions break add_subcommand_completions(ctx, incomplete, completions) return completions
https://github.com/pallets/click/issues/925
Traceback (most recent call last): File "test_compl.py", line 19, in <module> test_argument_choice() File "test_compl.py", line 14, in test_argument_choice assert list(get_choices(cli, 'lol', ['arg11'], '')) == ['arg21', 'arg22'] AssertionError
AssertionError
def __init__( self, command, parent=None, info_name=None, obj=None, auto_envvar_prefix=None, default_map=None, terminal_width=None, max_content_width=None, resilient_parsing=False, allow_extra_args=None, allow_interspersed_args=None, ignore_unknown_options=None, help_option_names=None, token_normalize_func=None, color=None, ): #: the parent context or `None` if none exists. self.parent = parent #: the :class:`Command` for this context. self.command = command #: the descriptive information name self.info_name = info_name #: the parsed parameters except if the value is hidden in which #: case it's not remembered. self.params = {} #: the leftover arguments. self.args = [] #: protected arguments. These are arguments that are prepended #: to `args` when certain parsing scenarios are encountered but #: must be never propagated to another arguments. This is used #: to implement nested parsing. self.protected_args = [] if obj is None and parent is not None: obj = parent.obj #: the user object stored. self.obj = obj self._meta = getattr(parent, "meta", {}) #: A dictionary (-like object) with defaults for parameters. if default_map is None and parent is not None and parent.default_map is not None: default_map = parent.default_map.get(info_name) self.default_map = default_map #: This flag indicates if a subcommand is going to be executed. A #: group callback can use this information to figure out if it's #: being executed directly or because the execution flow passes #: onwards to a subcommand. By default it's None, but it can be #: the name of the subcommand to execute. #: #: If chaining is enabled this will be set to ``'*'`` in case #: any commands are executed. It is however not possible to #: figure out which ones. If you require this knowledge you #: should use a :func:`resultcallback`. self.invoked_subcommand = None if terminal_width is None and parent is not None: terminal_width = parent.terminal_width #: The width of the terminal (None is autodetection). 
self.terminal_width = terminal_width if max_content_width is None and parent is not None: max_content_width = parent.max_content_width #: The maximum width of formatted content (None implies a sensible #: default which is 80 for most things). self.max_content_width = max_content_width if allow_extra_args is None: allow_extra_args = command.allow_extra_args #: Indicates if the context allows extra args or if it should #: fail on parsing. #: #: .. versionadded:: 3.0 self.allow_extra_args = allow_extra_args if allow_interspersed_args is None: allow_interspersed_args = command.allow_interspersed_args #: Indicates if the context allows mixing of arguments and #: options or not. #: #: .. versionadded:: 3.0 self.allow_interspersed_args = allow_interspersed_args if ignore_unknown_options is None: ignore_unknown_options = command.ignore_unknown_options #: Instructs click to ignore options that a command does not #: understand and will store it on the context for later #: processing. This is primarily useful for situations where you #: want to call into external programs. Generally this pattern is #: strongly discouraged because it's not possibly to losslessly #: forward all arguments. #: #: .. versionadded:: 4.0 self.ignore_unknown_options = ignore_unknown_options if help_option_names is None: if parent is not None: help_option_names = parent.help_option_names else: help_option_names = ["--help"] #: The names for the help options. self.help_option_names = help_option_names if token_normalize_func is None and parent is not None: token_normalize_func = parent.token_normalize_func #: An optional normalization function for tokens. This is #: options, choices, commands etc. self.token_normalize_func = token_normalize_func #: Indicates if resilient parsing is enabled. In that case Click #: will do its best to not cause any failures and default values #: will be ignored. Useful for completion. 
self.resilient_parsing = resilient_parsing # If there is no envvar prefix yet, but the parent has one and # the command on this level has a name, we can expand the envvar # prefix automatically. if auto_envvar_prefix is None: if ( parent is not None and parent.auto_envvar_prefix is not None and self.info_name is not None ): auto_envvar_prefix = "%s_%s" % ( parent.auto_envvar_prefix, self.info_name.upper(), ) else: self.auto_envvar_prefix = auto_envvar_prefix.upper() self.auto_envvar_prefix = auto_envvar_prefix if color is None and parent is not None: color = parent.color #: Controls if styling output is wanted or not. self.color = color self._close_callbacks = [] self._depth = 0
def __init__( self, command, parent=None, info_name=None, obj=None, auto_envvar_prefix=None, default_map=None, terminal_width=None, max_content_width=None, resilient_parsing=False, allow_extra_args=None, allow_interspersed_args=None, ignore_unknown_options=None, help_option_names=None, token_normalize_func=None, color=None, ): #: the parent context or `None` if none exists. self.parent = parent #: the :class:`Command` for this context. self.command = command #: the descriptive information name self.info_name = info_name #: the parsed parameters except if the value is hidden in which #: case it's not remembered. self.params = {} #: the leftover arguments. self.args = [] #: protected arguments. These are arguments that are prepended #: to `args` when certain parsing scenarios are encountered but #: must be never propagated to another arguments. This is used #: to implement nested parsing. self.protected_args = [] if obj is None and parent is not None: obj = parent.obj #: the user object stored. self.obj = obj self._meta = getattr(parent, "meta", {}) #: A dictionary (-like object) with defaults for parameters. if default_map is None and parent is not None and parent.default_map is not None: default_map = parent.default_map.get(info_name) self.default_map = default_map #: This flag indicates if a subcommand is going to be executed. A #: group callback can use this information to figure out if it's #: being executed directly or because the execution flow passes #: onwards to a subcommand. By default it's None, but it can be #: the name of the subcommand to execute. #: #: If chaining is enabled this will be set to ``'*'`` in case #: any commands are executed. It is however not possible to #: figure out which ones. If you require this knowledge you #: should use a :func:`resultcallback`. self.invoked_subcommand = None if terminal_width is None and parent is not None: terminal_width = parent.terminal_width #: The width of the terminal (None is autodetection). 
self.terminal_width = terminal_width if max_content_width is None and parent is not None: max_content_width = parent.max_content_width #: The maximum width of formatted content (None implies a sensible #: default which is 80 for most things). self.max_content_width = max_content_width if allow_extra_args is None: allow_extra_args = command.allow_extra_args #: Indicates if the context allows extra args or if it should #: fail on parsing. #: #: .. versionadded:: 3.0 self.allow_extra_args = allow_extra_args if allow_interspersed_args is None: allow_interspersed_args = command.allow_interspersed_args #: Indicates if the context allows mixing of arguments and #: options or not. #: #: .. versionadded:: 3.0 self.allow_interspersed_args = allow_interspersed_args if ignore_unknown_options is None: ignore_unknown_options = command.ignore_unknown_options #: Instructs click to ignore options that a command does not #: understand and will store it on the context for later #: processing. This is primarily useful for situations where you #: want to call into external programs. Generally this pattern is #: strongly discouraged because it's not possibly to losslessly #: forward all arguments. #: #: .. versionadded:: 4.0 self.ignore_unknown_options = ignore_unknown_options if help_option_names is None: if parent is not None: help_option_names = parent.help_option_names else: help_option_names = ["--help"] #: The names for the help options. self.help_option_names = help_option_names if token_normalize_func is None and parent is not None: token_normalize_func = parent.token_normalize_func #: An optional normalization function for tokens. This is #: options, choices, commands etc. self.token_normalize_func = token_normalize_func #: Indicates if resilient parsing is enabled. In that case Click #: will do its best to not cause any failures. 
self.resilient_parsing = resilient_parsing # If there is no envvar prefix yet, but the parent has one and # the command on this level has a name, we can expand the envvar # prefix automatically. if auto_envvar_prefix is None: if ( parent is not None and parent.auto_envvar_prefix is not None and self.info_name is not None ): auto_envvar_prefix = "%s_%s" % ( parent.auto_envvar_prefix, self.info_name.upper(), ) else: self.auto_envvar_prefix = auto_envvar_prefix.upper() self.auto_envvar_prefix = auto_envvar_prefix if color is None and parent is not None: color = parent.color #: Controls if styling output is wanted or not. self.color = color self._close_callbacks = [] self._depth = 0
https://github.com/pallets/click/issues/925
Traceback (most recent call last): File "test_compl.py", line 19, in <module> test_argument_choice() File "test_compl.py", line 14, in test_argument_choice assert list(get_choices(cli, 'lol', ['arg11'], '')) == ['arg21', 'arg22'] AssertionError
AssertionError
def resolve_command(self, ctx, args): cmd_name = make_str(args[0]) original_cmd_name = cmd_name # Get the command cmd = self.get_command(ctx, cmd_name) # If we can't find the command but there is a normalization # function available, we try with that one. if cmd is None and ctx.token_normalize_func is not None: cmd_name = ctx.token_normalize_func(cmd_name) cmd = self.get_command(ctx, cmd_name) # If we don't find the command we want to show an error message # to the user that it was not provided. However, there is # something else we should do: if the first argument looks like # an option we want to kick off parsing again for arguments to # resolve things like --help which now should go to the main # place. if cmd is None and not ctx.resilient_parsing: if split_opt(cmd_name)[0]: self.parse_args(ctx, ctx.args) ctx.fail('No such command "%s".' % original_cmd_name) return cmd_name, cmd, args[1:]
def resolve_command(self, ctx, args): cmd_name = make_str(args[0]) original_cmd_name = cmd_name # Get the command cmd = self.get_command(ctx, cmd_name) # If we can't find the command but there is a normalization # function available, we try with that one. if cmd is None and ctx.token_normalize_func is not None: cmd_name = ctx.token_normalize_func(cmd_name) cmd = self.get_command(ctx, cmd_name) # If we don't find the command we want to show an error message # to the user that it was not provided. However, there is # something else we should do: if the first argument looks like # an option we want to kick off parsing again for arguments to # resolve things like --help which now should go to the main # place. if cmd is None: if split_opt(cmd_name)[0]: self.parse_args(ctx, ctx.args) ctx.fail('No such command "%s".' % original_cmd_name) return cmd_name, cmd, args[1:]
https://github.com/pallets/click/issues/925
Traceback (most recent call last): File "test_compl.py", line 19, in <module> test_argument_choice() File "test_compl.py", line 14, in test_argument_choice assert list(get_choices(cli, 'lol', ['arg11'], '')) == ['arg21', 'arg22'] AssertionError
AssertionError
def full_process_value(self, ctx, value): value = self.process_value(ctx, value) if value is None and not ctx.resilient_parsing: value = self.get_default(ctx) if self.required and self.value_is_missing(value): raise MissingParameter(ctx=ctx, param=self) return value
def full_process_value(self, ctx, value): value = self.process_value(ctx, value) if value is None: value = self.get_default(ctx) if self.required and self.value_is_missing(value): raise MissingParameter(ctx=ctx, param=self) return value
https://github.com/pallets/click/issues/925
Traceback (most recent call last): File "test_compl.py", line 19, in <module> test_argument_choice() File "test_compl.py", line 14, in test_argument_choice assert list(get_choices(cli, 'lol', ['arg11'], '')) == ['arg21', 'arg22'] AssertionError
AssertionError
def main( self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra ): """This is the way to invoke a script with all the bells and whistles as a command line application. This will always terminate the application after a call. If this is not wanted, ``SystemExit`` needs to be caught. This method is also available by directly calling the instance of a :class:`Command`. .. versionadded:: 3.0 Added the `standalone_mode` flag to control the standalone mode. :param args: the arguments that should be used for parsing. If not provided, ``sys.argv[1:]`` is used. :param prog_name: the program name that should be used. By default the program name is constructed by taking the file name from ``sys.argv[0]``. :param complete_var: the environment variable that controls the bash completion support. The default is ``"_<prog_name>_COMPLETE"`` with prog name in uppercase. :param standalone_mode: the default behavior is to invoke the script in standalone mode. Click will then handle exceptions and convert them into error messages and the function will never return but shut down the interpreter. If this is set to `False` they will be propagated to the caller and the return value of this function is the return value of :meth:`invoke`. :param extra: extra keyword arguments are forwarded to the context constructor. See :class:`Context` for more information. """ # If we are in Python 3, we will verify that the environment is # sane at this point of reject further execution to avoid a # broken script. if not PY2: _verify_python3_env() else: _check_for_unicode_literals() if args is None: args = get_os_args() else: args = list(args) if prog_name is None: prog_name = make_str(os.path.basename(sys.argv and sys.argv[0] or __file__)) # Hook for the Bash completion. This only activates if the Bash # completion is actually enabled, otherwise this is quite a fast # noop. 
_bashcomplete(self, prog_name, complete_var) try: try: with self.make_context(prog_name, args, **extra) as ctx: rv = self.invoke(ctx) if not standalone_mode: return rv ctx.exit() except (EOFError, KeyboardInterrupt): echo(file=sys.stderr) raise Abort() except ClickException as e: if not standalone_mode: raise e.show() sys.exit(e.exit_code) except IOError as e: if e.errno == errno.EPIPE: sys.exit(1) else: raise except Abort: if not standalone_mode: raise echo("Aborted!", file=sys.stderr) sys.exit(1)
def main( self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra ): """This is the way to invoke a script with all the bells and whistles as a command line application. This will always terminate the application after a call. If this is not wanted, ``SystemExit`` needs to be caught. This method is also available by directly calling the instance of a :class:`Command`. .. versionadded:: 3.0 Added the `standalone_mode` flag to control the standalone mode. :param args: the arguments that should be used for parsing. If not provided, ``sys.argv[1:]`` is used. :param prog_name: the program name that should be used. By default the program name is constructed by taking the file name from ``sys.argv[0]``. :param complete_var: the environment variable that controls the bash completion support. The default is ``"_<prog_name>_COMPLETE"`` with prog name in uppercase. :param standalone_mode: the default behavior is to invoke the script in standalone mode. Click will then handle exceptions and convert them into error messages and the function will never return but shut down the interpreter. If this is set to `False` they will be propagated to the caller and the return value of this function is the return value of :meth:`invoke`. :param extra: extra keyword arguments are forwarded to the context constructor. See :class:`Context` for more information. """ # If we are in Python 3, we will verify that the environment is # sane at this point of reject further execution to avoid a # broken script. if not PY2: _verify_python3_env() else: _check_for_unicode_literals() if args is None: args = get_os_args() else: args = list(args) if prog_name is None: prog_name = make_str(os.path.basename(sys.argv and sys.argv[0] or __file__)) # Hook for the Bash completion. This only activates if the Bash # completion is actually enabled, otherwise this is quite a fast # noop. 
_bashcomplete(self, prog_name, complete_var) try: try: with self.make_context(prog_name, args, **extra) as ctx: rv = self.invoke(ctx) if not standalone_mode: return rv ctx.exit() except (EOFError, KeyboardInterrupt): echo(file=sys.stderr) raise Abort() except ClickException as e: if not standalone_mode: raise e.show() sys.exit(e.exit_code) except Abort: if not standalone_mode: raise echo("Aborted!", file=sys.stderr) sys.exit(1)
https://github.com/pallets/click/issues/625
$ python ./test-click.py --name ob --count 1000 | head -1 Hello ob! Traceback (most recent call last): File "./test-click.py", line 13, in <module> hello() File "/Users/obonilla/o/click/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/Users/obonilla/o/click/click/core.py", line 696, in main rv = self.invoke(ctx) File "/Users/obonilla/o/click/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/obonilla/o/click/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "./test-click.py", line 10, in hello click.echo('Hello %s!' % name) File "/Users/obonilla/o/click/click/utils.py", line 259, in echo file.write(message) File "/Users/obonilla/o/click/click/_compat.py", line 63, in write return io.TextIOWrapper.write(self, x) IOError: [Errno 32] Broken pipe
IOError
def _get_ssh_client(self, host): """ Create a SSH Client based on host, username and password if provided. If there is any AuthenticationException/SSHException, raise HTTP Error 403 as permission denied. :param host: :return: ssh client instance """ ssh = None global remote_user global remote_pwd if remote_user is None: remote_user = os.getenv("EG_REMOTE_USER", getpass.getuser()) remote_pwd = os.getenv("EG_REMOTE_PWD") # this should use password-less ssh try: ssh = paramiko.SSHClient() ssh.load_system_host_keys() ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) host_ip = gethostbyname(host) if remote_pwd: ssh.connect( host_ip, port=ssh_port, username=remote_user, password=remote_pwd ) else: ssh.connect(host_ip, port=ssh_port, username=remote_user) except Exception as e: http_status_code = 500 current_host = gethostbyname(gethostname()) error_message = ( "Exception '{}' occurred when creating a SSHClient at {} connecting " "to '{}:{}' with user '{}', message='{}'.".format( type(e).__name__, current_host, host, ssh_port, remote_user, e ) ) if e is paramiko.SSHException or paramiko.AuthenticationException: http_status_code = 403 error_message_prefix = "Failed to authenticate SSHClient with password" error_message = error_message_prefix + ( " provided" if remote_pwd else "-less SSH" ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) return ssh
def _get_ssh_client(self, host): """ Create a SSH Client based on host, username and password if provided. If there is any AuthenticationException/SSHException, raise HTTP Error 403 as permission denied. :param host: :return: ssh client instance """ ssh = None try: ssh = paramiko.SSHClient() ssh.load_system_host_keys() ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) host_ip = gethostbyname(host) if remote_pwd: ssh.connect( host_ip, port=ssh_port, username=remote_user, password=remote_pwd ) else: ssh.connect(host_ip, port=ssh_port, username=remote_user) except Exception as e: http_status_code = 500 current_host = gethostbyname(gethostname()) error_message = ( "Exception '{}' occurred when creating a SSHClient at {} connecting " "to '{}:{}' with user '{}', message='{}'.".format( type(e).__name__, current_host, host, ssh_port, remote_user, e ) ) if e is paramiko.SSHException or paramiko.AuthenticationException: http_status_code = 403 error_message_prefix = "Failed to authenticate SSHClient with password" error_message = error_message_prefix + ( " provided" if remote_pwd else "-less SSH" ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) return ssh
https://github.com/jupyter/enterprise_gateway/issues/705
Starting Jupyter Enterprise Gateway... Traceback (most recent call last): File "/opt/conda/bin/jupyter-enterprisegateway", line 6, in <module> from enterprise_gateway import launch_instance File "/opt/conda/lib/python3.7/site-packages/enterprise_gateway/__init__.py", line 4, in <module> from .enterprisegatewayapp import launch_instance File "/opt/conda/lib/python3.7/site-packages/enterprise_gateway/enterprisegatewayapp.py", line 24, in <module> from .services.kernels.remotemanager import RemoteMappingKernelManager File "/opt/conda/lib/python3.7/site-packages/enterprise_gateway/services/kernels/remotemanager.py", line 15, in <module> from ..processproxies.processproxy import LocalProcessProxy, RemoteProcessProxy File "/opt/conda/lib/python3.7/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 58, in <module> remote_user = os.getenv('EG_REMOTE_USER', getpass.getuser()) File "/opt/conda/lib/python3.7/getpass.py", line 169, in getuser return pwd.getpwuid(os.getuid())[0] KeyError: 'getpwuid(): uid not found: 1000160000'
KeyError
def execute(self, code, timeout=DEFAULT_TIMEOUT): """ Executes the code provided and returns the result of that execution. """ response = [] try: msg_id = self._send_request(code) post_idle = False while True: response_message = self._get_response(msg_id, timeout, post_idle) if response_message: response_message_type = response_message["msg_type"] if response_message_type == "error": response.append( "{}:{}:{}".format( response_message["content"]["ename"], response_message["content"]["evalue"], response_message["content"]["traceback"], ) ) elif response_message_type == "stream": response.append( Kernel._convert_raw_response( response_message["content"]["text"] ) ) elif ( response_message_type == "execute_result" or response_message_type == "display_data" ): if "text/plain" in response_message["content"]["data"]: response.append( Kernel._convert_raw_response( response_message["content"]["data"]["text/plain"] ) ) elif "text/html" in response_message["content"]["data"]: response.append( Kernel._convert_raw_response( response_message["content"]["data"]["text/html"] ) ) elif response_message_type == "status": if response_message["content"]["execution_state"] == "idle": post_idle = True # indicate we're at the logical end and timeout poll for next message continue if post_idle and response_message is None: break except BaseException as b: print(b) return "".join(response)
def execute(self, code, timeout=DEFAULT_TIMEOUT): """ Executes the code provided and returns the result of that execution. """ response = [] try: msg_id = self._send_request(code) post_idle = False while True: response_message = self._get_response(msg_id, timeout, post_idle) if response_message: response_message_type = response_message["msg_type"] if response_message_type == "error": response.append( "{}:{}:{}".format( response_message["content"]["ename"], response_message["content"]["evalue"], response_message["content"]["traceback"], ) ) elif response_message_type == "stream": response.append( Kernel._convert_raw_response( response_message["content"]["text"] ) ) elif ( response_message_type == "execute_result" or response_message_type == "display_data" ): if "text/plain" in response_message["content"]["data"]: response.append( Kernel._convert_raw_response( response_message["content"]["data"]["text/plain"] ) ) elif "text/html" in response_message["content"]["data"]: response.append( Kernel._convert_raw_response( response_message["content"]["data"]["text/html"] ) ) elif response_message_type == "status": if response_message["content"]["execution_state"] == "idle": post_idle = True # indicate we're at the logical end and timeout poll for next message continue if post_idle and response_message is None: break except BaseException as b: print(b) return "\n".join(response)
https://github.com/jupyter/enterprise_gateway/issues/421
====================================================================== FAIL: test_hello_world (enterprise_gateway.itests.test_scala_kernel.TestScalaKernelLocal) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/travis/build/kevin-bates/enterprise_gateway/enterprise_gateway/itests/test_scala_kernel.py", line 13, in test_hello_world self.assertRegexpMatches(result, 'Hello World') AssertionError: Regex didn't match: 'Hello World' not found in 'Hel\nlo World\n' Pulled response from queue for kernel with msg_id: 3596c2f3741f48b49f27637002e4bbc1 {'buffers': [], 'channel': 'iopub', 'content': {'name': 'stdout', 'text': 'Hel'}, 'header': {'date': '2018-08-25T03:28:48.696524Z', 'msg_id': '235a1ed5-5906-4a09-928e-9921a16ca6ab', 'msg_type': 'stream', 'session': 'b777e71c-f760-459a-a18e-72857bdb76cc', 'username': 'elyra', 'version': '5.0'}, 'metadata': {'timestamp': '1535167728691'}, 'msg_id': '235a1ed5-5906-4a09-928e-9921a16ca6ab', 'msg_type': 'stream', 'parent_header': {'msg_id': '3596c2f3741f48b49f27637002e4bbc1', 'msg_type': 'execute_request', 'session': '', 'username': '', 'version': '5.0'}} <<<<<<<<<<<<<<< Pulled response from queue for kernel with msg_id: 3596c2f3741f48b49f27637002e4bbc1 {'buffers': [], 'channel': 'iopub', 'content': {'name': 'stdout', 'text': 'lo World\n'}, 'header': {'date': '2018-08-25T03:28:48.706633Z', 'msg_id': '8251396e-dfcb-4a48-95e4-08eb6911c37f', 'msg_type': 'stream', 'session': 'b777e71c-f760-459a-a18e-72857bdb76cc', 'username': 'elyra', 'version': '5.0'}, 'metadata': {'timestamp': '1535167728702'}, 'msg_id': '8251396e-dfcb-4a48-95e4-08eb6911c37f', 'msg_type': 'stream', 'parent_header': {'msg_id': '3596c2f3741f48b49f27637002e4bbc1', 'msg_type': 'execute_request', 'session': '', 'username': '', 'version': '5.0'}}
AssertionError
def _enforce_limits(self, **kw): """ Enforces any limits that may be imposed by the configuration. """ # if kernels-per-user is configured, ensure that this next kernel is still within the limit. If this # is due to a restart, skip enforcement since we're re-using that id. max_kernels_per_user = self.kernel_manager.parent.parent.max_kernels_per_user if max_kernels_per_user >= 0 and not self.kernel_manager.restarting: env_dict = kw.get("env") username = env_dict["KERNEL_USERNAME"] current_kernel_count = ( self.kernel_manager.parent.parent.kernel_session_manager.active_sessions( username ) ) if current_kernel_count >= max_kernels_per_user: error_message = ( "A max kernels per user limit has been set to {} and user '{}' currently has {} " "active {}.".format( max_kernels_per_user, username, current_kernel_count, "kernel" if max_kernels_per_user == 1 else "kernels", ) ) self.log_and_raise(http_status_code=403, reason=error_message)
def _enforce_limits(self, **kw): """ Enforces any limits that may be imposed by the configuration. """ # if kernels-per-user is configured, ensure that this next kernel is still within the limit max_kernels_per_user = self.kernel_manager.parent.parent.max_kernels_per_user if max_kernels_per_user >= 0: env_dict = kw.get("env") username = env_dict["KERNEL_USERNAME"] current_kernel_count = ( self.kernel_manager.parent.parent.kernel_session_manager.active_sessions( username ) ) if current_kernel_count >= max_kernels_per_user: error_message = ( "A max kernels per user limit has been set to {} and user '{}' currently has {} " "active {}.".format( max_kernels_per_user, username, current_kernel_count, "kernel" if max_kernels_per_user == 1 else "kernels", ) ) self.log_and_raise(http_status_code=403, reason=error_message)
https://github.com/jupyter/enterprise_gateway/issues/331
[I 2018-05-08 15:50:53.686 EnterpriseGatewayApp] KernelRestarter: restarting kernel (1/5), keep random ports [W 180508 15:50:53 handlers:465] kernel 2a96df20-5527-42f0-ae4f-884c99f624ae restarted [D 2018-05-08 15:50:53.688 EnterpriseGatewayApp] RemoteKernelManager.signal_kernel(9) [D 2018-05-08 15:50:53.688 EnterpriseGatewayApp] YarnClusterProcessProxy.send_signal 9 [D 2018-05-08 15:50:53.739 EnterpriseGatewayApp] YarnClusterProcessProxy.kill: kill_app_by_id(application_1525280245118_0073) response: {u'state': u'FAILED'}, confirming app state is not RUNNING [D 2018-05-08 15:50:59.113 EnterpriseGatewayApp] Sending signal: 15 to target: 17033 on host: 172.16.163.194 [D 2018-05-08 15:50:59.493 EnterpriseGatewayApp] SIGTERM signal sent to pid: 17033 [D 2018-05-08 15:50:59.518 EnterpriseGatewayApp] YarnClusterProcessProxy.kill, application ID: application_1525280245118_0073, kernel ID: 2a96df20-5527-42f0-ae4f-884c99f624ae, state: FAILED [D 2018-05-08 15:50:59.551 EnterpriseGatewayApp] cleanup: terminating CONTROL tunnel process. [D 2018-05-08 15:50:59.652 EnterpriseGatewayApp] cleanup: terminating SHELL tunnel process. [D 2018-05-08 15:50:59.753 EnterpriseGatewayApp] cleanup: terminating STDIN tunnel process. [D 2018-05-08 15:50:59.854 EnterpriseGatewayApp] cleanup: terminating IOPUB tunnel process. [D 2018-05-08 15:50:59.955 EnterpriseGatewayApp] cleanup: terminating HB tunnel process. [D 2018-05-08 15:51:00.058 EnterpriseGatewayApp] cleanup: terminating EG_COMM tunnel process. 
[D 2018-05-08 15:51:00.762 EnterpriseGatewayApp] Instantiating kernel 'Spark - R (YARN Cluster Mode)' with process proxy: enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy [D 2018-05-08 15:51:00.764 EnterpriseGatewayApp] Response socket launched on 172.16.159.125, port: 56438 using 5.0s timeout [D 2018-05-08 15:51:00.765 EnterpriseGatewayApp] Starting kernel: [u'/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh', u'/home/elyra/.local/share/jupyter/runtime/kernel-2a96df20-5527-42f0-ae4f-884c99f624ae.json', u'--RemoteProcessProxy.response-address', u'172.16.159.125:56438', u'--RemoteProcessProxy.port-range', u'0..0', u'--RemoteProcessProxy.spark-context-initialization-mode', u'lazy'] [D 2018-05-08 15:51:00.765 EnterpriseGatewayApp] Launching kernel: Spark - R (YARN Cluster Mode) with command: [u'/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh', u'/home/elyra/.local/share/jupyter/runtime/kernel-2a96df20-5527-42f0-ae4f-884c99f624ae.json', u'--RemoteProcessProxy.response-address', u'172.16.159.125:56438', u'--RemoteProcessProxy.port-range', u'0..0', u'--RemoteProcessProxy.spark-context-initialization-mode', u'lazy'] [E 2018-05-08 15:51:00.765 EnterpriseGatewayApp] A max kernels per user limit has been set to 2 and user 'jovyan' currently has 2 active kernels. 
[E 180508 15:51:00 ioloop:792] Exception in callback <bound method IOLoopKernelRestarter.poll of <jupyter_client.ioloop.restarter.IOLoopKernelRestarter object at 0x7fa4360f5c10>> Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/tornado/ioloop.py", line 1208, in _run return self.callback() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/restarter.py", line 113, in poll self.kernel_manager.restart_kernel(now=True, newports=newports) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/kernels/remotemanager.py", line 156, in restart_kernel super(RemoteKernelManager, self).restart_kernel(now, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 368, in restart_kernel self.start_kernel(**self._launch_args) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/kernels/remotemanager.py", line 102, in start_kernel return super(RemoteKernelManager, self).start_kernel(**kw) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 259, in start_kernel **kw) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/kernels/remotemanager.py", line 131, in _launch_kernel return self.process_proxy.launch_process(kernel_cmd, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/processproxies/yarn.py", line 46, in launch_process super(YarnClusterProcessProxy, self).launch_process(kernel_cmd, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 538, in launch_process super(RemoteProcessProxy, self).launch_process(kernel_cmd, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 152, in launch_process self._enforce_limits(**kw) File "/opt/anaconda2/lib/python2.7/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 397, in 
_enforce_limits raise tornado.web.HTTPError(403, reason=error_message) HTTPError: HTTP 403: A max kernels per user limit has been set to 2 and user 'jovyan' currently has 2 active kernels.
HTTPError
def start_kernel_from_session( self, kernel_id, kernel_name, connection_info, process_info, launch_args ): # Create a KernelManger instance and load connection and process info, then confirm the kernel is still # alive. constructor_kwargs = {} if self.kernel_spec_manager: constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager # Construct a kernel manager... km = self.kernel_manager_factory( connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, log=self.log, kernel_name=kernel_name, **constructor_kwargs, ) # Load connection info into member vars - no need to write out connection file km.load_connection_info(connection_info) km._launch_args = launch_args # Construct a process-proxy if km.kernel_spec.process_proxy_class: process_proxy_class = import_item(km.kernel_spec.process_proxy_class) kw = {"env": {}} km.process_proxy = process_proxy_class(km) km.process_proxy.load_process_info(process_info) # Confirm we can even poll the process. If not, remove the persisted session. if km.process_proxy.poll() is False: return False km.kernel = km.process_proxy km.start_restarter() km._connect_control_socket() self._kernels[kernel_id] = km self._kernel_connections[kernel_id] = 0 self.start_watching_activity(kernel_id) self.add_restart_callback( kernel_id, lambda: self._handle_kernel_died(kernel_id), "dead", ) # Only initialize culling if available. Warning message will be issued in gatewayapp at startup. func = getattr(self, "initialize_culler", None) if func: func() return True
def start_kernel_from_session( self, kernel_id, kernel_name, connection_info, process_info, launch_args ): # Create a KernelManger instance and load connection and process info, then confirm the kernel is still # alive. constructor_kwargs = {} if self.kernel_spec_manager: constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager # Construct a kernel manager... km = self.kernel_manager_factory( connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, log=self.log, kernel_name=kernel_name, **constructor_kwargs, ) # Load connection info into member vars - no need to write out connection file km.load_connection_info(connection_info) km._launch_args = launch_args # Construct a process-proxy if km.kernel_spec.process_proxy_class: process_proxy_class = import_item(km.kernel_spec.process_proxy_class) kw = {"env": {}} km.process_proxy = process_proxy_class( km, km.kernel_spec.process_proxy_connection_file_mode, **kw ) km.process_proxy.load_process_info(process_info) # Confirm we can even poll the process. If not, remove the persisted session. if km.process_proxy.poll() is False: return False km.kernel = km.process_proxy km.start_restarter() km._connect_control_socket() self._kernels[kernel_id] = km self._kernel_connections[kernel_id] = 0 self.start_watching_activity(kernel_id) self.add_restart_callback( kernel_id, lambda: self._handle_kernel_died(kernel_id), "dead", ) # Only initialize culling if available. Warning message will be issued in gatewayapp at startup. func = getattr(self, "initialize_culler", None) if func: func() return True
https://github.com/jupyter/enterprise_gateway/issues/108
[D 2017-08-03 15:27:28.840 KernelGatewayApp] Found kernel python2 in /usr/share/jupyter/kernels Traceback (most recent call last): File "/usr/bin/jupyter-kernelgateway", line 11, in <module> load_entry_point('jupyter-kernel-gateway', 'console_scripts', 'jupyter-kernelgateway')() File "/usr/lib/python2.7/site-packages/jupyter_core/application.py", line 267, in launch_instance return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs) File "/usr/lib/python2.7/site-packages/traitlets/config/application.py", line 657, in launch_instance app.initialize(argv) File "/root/elyra/kernel_gateway/gatewayapp.py", line 352, in initialize self.init_configurables() File "/root/elyra/kernel_gateway/gatewayapp.py", line 402, in init_configurables self.kernel_session_manager.start_sessions() File "/root/elyra/kernel_gateway/services/sessions/kernelsessionmanager.py", line 83, in start_sessions if not self._start_session(kernel_session): File "/root/elyra/kernel_gateway/services/sessions/kernelsessionmanager.py", line 96, in _start_session launch_args=kernel_session['launch_args']) File "/root/elyra/kernel_gateway/services/kernels/remotemanager.py", line 55, in start_kernel_from_session km.process_proxy = process_proxy_class(km, km.kernel_spec.process_proxy_connection_file_mode, **kw) AttributeError: 'RemoteKernelSpec' object has no attribute 'process_proxy_connection_file_mode'
AttributeError
def __init__(self, prespawn_count, kernel_manager):
    """Pre-spawn seeded kernels through *kernel_manager*.

    A falsy *prespawn_count* (None or 0) means no kernels are started.
    """
    self.kernel_manager = kernel_manager
    # Normalize a falsy count to zero so range() below is safe.
    count = prespawn_count or 0
    # Seed each kernel's environment from a copy of our own, tagged with
    # the configured pre-spawn username.
    spawn_env = dict(os.environ.copy())
    spawn_env["KERNEL_USERNAME"] = prespawn_username
    for _ in range(count):
        self.kernel_manager.start_seeded_kernel(env=spawn_env)
def __init__(self, prespawn_count, kernel_manager):
    """Pre-spawn seeded kernels through *kernel_manager*.

    A falsy *prespawn_count* (None or 0) means no kernels are started.
    """
    self.kernel_manager = kernel_manager
    # Normalize a falsy count to zero so range() below is safe.
    count = prespawn_count or 0
    # dict(os.environ) already yields an independent snapshot of the
    # process environment; mutating it does not touch os.environ.
    spawn_env = dict(os.environ)
    spawn_env["KERNEL_USERNAME"] = prespawn_username
    for _ in range(count):
        self.kernel_manager.start_seeded_kernel(env=spawn_env)
https://github.com/jupyter/enterprise_gateway/issues/87
[D 2017-07-26 12:02:29.586 KernelGatewayApp] Instantiating kernel 'Spark 2.1 - Scala (YARN Cluster Mode)' with process proxy: kernel_gateway.services.kernels.processproxy.YarnProcessProxy [I 170726 12:02:29 base:37] Request http://elyra-fyi-node-1:8088//ws/v1/cluster/nodes [E 2017-07-26 12:02:29.591 KernelGatewayApp] Unhandled error in API request Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/notebook/base/handlers.py", line 521, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 49, in post kernel_id = yield gen.maybe_future(km.start_kernel(kernel_name=model['name'])) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 24, in start_kernel kernel_id = yield gen.maybe_future(super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = 
self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/manager.py", line 81, in start_kernel kernel_id = yield gen.maybe_future(super(SeedingMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 307, in wrapper yielded = next(result) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/kernelmanager.py", line 123, in start_kernel super(MappingKernelManager, self).start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/multikernelmanager.py", line 110, in start_kernel km.start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 91, in start_kernel self.process_proxy = process_proxy_class(self, self.kernel_spec.process_proxy_connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 570, in __init__ super(YarnProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 283, in __init__ super(RemoteProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 69, in __init__ env_dict = kw['env'] KeyError: 'env'
KeyError
def __init__(self, kernel_manager, connection_file_mode, **kw):
    """Initialize the base process proxy from its owning kernel manager.

    Parameters:
        kernel_manager: the KernelManager that owns this proxy; its logger
            and connection file are borrowed here.
        connection_file_mode: how the connection file is handled; an
            unrecognized value is logged and otherwise ignored.
        **kw: launch keyword arguments (unused here; subclasses consume them).
    """
    self.kernel_manager = kernel_manager
    # BUGFIX: bind the logger BEFORE any code path that may log. The
    # original assigned self.log after the unknown-mode warning, so an
    # unrecognized connection_file_mode raised AttributeError instead of
    # warning.
    self.log = kernel_manager.log
    # use the zero-ip from the start, can prevent having to write out connection file again
    self.kernel_manager.ip = "0.0.0.0"
    self.connection_file_mode = connection_file_mode
    if self.connection_file_mode:
        if self.connection_file_mode not in connection_file_modes:
            self.log.warning(
                "Unknown connection file mode detected '{}'! Continuing...".format(
                    self.connection_file_mode
                )
            )
    # extract the kernel_id string from the connection file and set the KERNEL_ID environment variable
    self.kernel_id = (
        os.path.basename(self.kernel_manager.connection_file)
        .replace("kernel-", "")
        .replace(".json", "")
    )
    # ask the subclass for the set of applicable hosts
    self.hosts = self.get_hosts()
    # Represents the local process (from popen) if applicable. Note that we could have local_proc = None even when
    # the subclass is a LocalProcessProxy (or YarnProcessProxy). This will happen if the JKG is restarted and the
    # persisted kernel-sessions indicate that its now running on a different server. In those case, we use the ip
    # member variable to determine if the persisted state is local or remote and use signals with the pid to
    # implement the poll, kill and send_signal methods.
    self.local_proc = None
    self.ip = None
    self.pid = 0
def __init__(self, kernel_manager, connection_file_mode, **kw):
    """Initialize the base process proxy from its owning kernel manager.

    Parameters:
        kernel_manager: the KernelManager that owns this proxy; its logger
            and connection file are borrowed here.
        connection_file_mode: how the connection file is handled; an
            unrecognized value is logged and otherwise ignored.
        **kw: launch keyword arguments; an ``env`` dict is ensured and
            augmented with KERNEL_ID and the launch timeout.
    """
    self.kernel_manager = kernel_manager
    # BUGFIX: bind the logger BEFORE any code path that may log. The
    # original assigned self.log after the unknown-mode warning, so an
    # unrecognized connection_file_mode raised AttributeError instead of
    # warning.
    self.log = kernel_manager.log
    # use the zero-ip from the start, can prevent having to write out connection file again
    self.kernel_manager.ip = "0.0.0.0"
    self.connection_file_mode = connection_file_mode
    if self.connection_file_mode:
        if self.connection_file_mode not in connection_file_modes:
            self.log.warning(
                "Unknown connection file mode detected '{}'! Continuing...".format(
                    self.connection_file_mode
                )
            )
    # extract the kernel_id string from the connection file and set the KERNEL_ID environment variable
    self.kernel_id = (
        os.path.basename(self.kernel_manager.connection_file)
        .replace("kernel-", "")
        .replace(".json", "")
    )
    # ask the subclass for the set of applicable hosts
    self.hosts = self.get_hosts()
    # BUGFIX: callers (e.g. session restoration) may not provide 'env' at
    # all; kw['env'] raised KeyError. setdefault keeps the dict shared with
    # kw so downstream launch code sees our additions.
    env_dict = kw.setdefault("env", {})
    # see if KERNEL_LAUNCH_TIMEOUT was included from user
    self.kernel_launch_timeout = float(
        env_dict.get("KERNEL_LAUNCH_TIMEOUT", elyra_kernel_launch_timeout)
    )
    # add the applicable kernel_id to the env dict
    env_dict["KERNEL_ID"] = self.kernel_id
    for k in env_pop_list:
        env_dict.pop(k, None)
    self.log.debug("BaseProcessProxy env: {}".format(kw["env"]))
    # Represents the local process (from popen) if applicable. Note that we could have local_proc = None even when
    # the subclass is a LocalProcessProxy (or YarnProcessProxy). This will happen if the JKG is restarted and the
    # persisted kernel-sessions indicate that its now running on a different server. In those case, we use the ip
    # member variable to determine if the persisted state is local or remote and use signals with the pid to
    # implement the poll, kill and send_signal methods.
    self.local_proc = None
    self.ip = None
    self.pid = 0
https://github.com/jupyter/enterprise_gateway/issues/87
[D 2017-07-26 12:02:29.586 KernelGatewayApp] Instantiating kernel 'Spark 2.1 - Scala (YARN Cluster Mode)' with process proxy: kernel_gateway.services.kernels.processproxy.YarnProcessProxy [I 170726 12:02:29 base:37] Request http://elyra-fyi-node-1:8088//ws/v1/cluster/nodes [E 2017-07-26 12:02:29.591 KernelGatewayApp] Unhandled error in API request Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/notebook/base/handlers.py", line 521, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 49, in post kernel_id = yield gen.maybe_future(km.start_kernel(kernel_name=model['name'])) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 24, in start_kernel kernel_id = yield gen.maybe_future(super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = 
self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/manager.py", line 81, in start_kernel kernel_id = yield gen.maybe_future(super(SeedingMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 307, in wrapper yielded = next(result) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/kernelmanager.py", line 123, in start_kernel super(MappingKernelManager, self).start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/multikernelmanager.py", line 110, in start_kernel km.start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 91, in start_kernel self.process_proxy = process_proxy_class(self, self.kernel_spec.process_proxy_connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 570, in __init__ super(YarnProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 283, in __init__ super(RemoteProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 69, in __init__ env_dict = kw['env'] KeyError: 'env'
KeyError
def launch_process(self, cmd, **kw):
    """Prepare the launch environment prior to starting the kernel process.

    Ensures kw['env'] exists (defaulting to a copy of our own environment),
    records the effective launch timeout, stamps the kernel id into the
    environment, and strips entries that must not leak to the kernel.
    """
    env = kw.get("env")
    if env is None:
        # No caller-supplied environment: fall back to a snapshot of ours
        # and make it visible to the rest of the launch machinery via kw.
        env = dict(os.environ.copy())
        kw["env"] = env
    # Honor a user-provided KERNEL_LAUNCH_TIMEOUT, else use the default.
    self.kernel_launch_timeout = float(
        env.get("KERNEL_LAUNCH_TIMEOUT", elyra_kernel_launch_timeout)
    )
    # Tag the environment with this kernel's id.
    env["KERNEL_ID"] = self.kernel_id
    # Remove entries that should never reach the kernel's environment.
    for key in env_pop_list:
        env.pop(key, None)
    self.log.debug("BaseProcessProxy.launch_process() env: {}".format(kw.get("env")))
def launch_process(self, cmd, **kw):
    # Intentional no-op hook: subclasses override this to actually launch
    # the kernel process described by `cmd` with the given keyword args.
    pass
https://github.com/jupyter/enterprise_gateway/issues/87
[D 2017-07-26 12:02:29.586 KernelGatewayApp] Instantiating kernel 'Spark 2.1 - Scala (YARN Cluster Mode)' with process proxy: kernel_gateway.services.kernels.processproxy.YarnProcessProxy [I 170726 12:02:29 base:37] Request http://elyra-fyi-node-1:8088//ws/v1/cluster/nodes [E 2017-07-26 12:02:29.591 KernelGatewayApp] Unhandled error in API request Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/notebook/base/handlers.py", line 521, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 49, in post kernel_id = yield gen.maybe_future(km.start_kernel(kernel_name=model['name'])) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 24, in start_kernel kernel_id = yield gen.maybe_future(super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = 
self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/manager.py", line 81, in start_kernel kernel_id = yield gen.maybe_future(super(SeedingMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 307, in wrapper yielded = next(result) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/kernelmanager.py", line 123, in start_kernel super(MappingKernelManager, self).start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/multikernelmanager.py", line 110, in start_kernel km.start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 91, in start_kernel self.process_proxy = process_proxy_class(self, self.kernel_spec.process_proxy_connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 570, in __init__ super(YarnProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 283, in __init__ super(RemoteProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 69, in __init__ env_dict = kw['env'] KeyError: 'env'
KeyError
def __init__(self, kernel_manager, **kwargs):
    """Initialize the session manager and load any persisted sessions.

    Parameters:
        kernel_manager: the kernel (mapping) manager sessions belong to.
        **kwargs: forwarded to the configurable base class.
    """
    super(KernelSessionManager, self).__init__(**kwargs)
    self.kernel_manager = kernel_manager
    self._sessions = dict()
    # Persisted sessions are stored as kernels.json under the sessions
    # location resolved by _get_sessions_loc().
    self.kernel_session_file = os.path.join(self._get_sessions_loc(), "kernels.json")
    self._load_sessions()
def __init__(self, kernel_manager, **kwargs):
    """Initialize the session manager and load any persisted sessions.

    Parameters:
        kernel_manager: the kernel (mapping) manager sessions belong to.
        **kwargs: forwarded to the configurable base class.
    """
    super(KernelSessionManager, self).__init__(**kwargs)
    self.kernel_manager = kernel_manager
    self._sessions = dict()
    # NOTE(review): relies on a public get_sessions_loc() helper — confirm
    # that name exists on this class (elsewhere a _get_sessions_loc variant
    # is used).
    self.kernel_session_file = os.path.join(self.get_sessions_loc(), "kernels.json")
    self._load_sessions()
https://github.com/jupyter/enterprise_gateway/issues/87
[D 2017-07-26 12:02:29.586 KernelGatewayApp] Instantiating kernel 'Spark 2.1 - Scala (YARN Cluster Mode)' with process proxy: kernel_gateway.services.kernels.processproxy.YarnProcessProxy [I 170726 12:02:29 base:37] Request http://elyra-fyi-node-1:8088//ws/v1/cluster/nodes [E 2017-07-26 12:02:29.591 KernelGatewayApp] Unhandled error in API request Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/notebook/base/handlers.py", line 521, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 49, in post kernel_id = yield gen.maybe_future(km.start_kernel(kernel_name=model['name'])) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 24, in start_kernel kernel_id = yield gen.maybe_future(super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = 
self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/manager.py", line 81, in start_kernel kernel_id = yield gen.maybe_future(super(SeedingMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 307, in wrapper yielded = next(result) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/kernelmanager.py", line 123, in start_kernel super(MappingKernelManager, self).start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/multikernelmanager.py", line 110, in start_kernel km.start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 91, in start_kernel self.process_proxy = process_proxy_class(self, self.kernel_spec.process_proxy_connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 570, in __init__ super(YarnProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 283, in __init__ super(RemoteProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 69, in __init__ env_dict = kw['env'] KeyError: 'env'
KeyError
def create_session(self, kernel_id, **kwargs):
    """Persist information about the kernel session within the designated repository."""
    km = self.kernel_manager.get_kernel(kernel_id)
    # Assemble the full session record as a single literal: identity,
    # connection details, original launch args, and process-proxy state.
    session_record = {
        "kernel_id": kernel_id,
        "username": self._get_kernel_username(kwargs.get("env", {})),
        "kernel_name": km.kernel_name,
        "connection_info": km.get_connection_info(),
        "launch_args": kwargs.copy(),
        "process_info": km.process_proxy.get_process_info() if km.process_proxy else {},
    }
    self._save_session(kernel_id, session_record)
def create_session(self, kernel_id, **kwargs):
    # Persists information about the kernel session within the designated repository.
    km = self.kernel_manager.get_kernel(kernel_id)

    # Compose the kernel_session entry
    kernel_session = dict()
    kernel_session["kernel_id"] = kernel_id
    # BUGFIX: callers do not always supply an 'env' dict (or a
    # KERNEL_USERNAME within it); the previous kwargs["env"]["KERNEL_USERNAME"]
    # raised KeyError in that case. Fall back to None instead.
    kernel_session["username"] = kwargs.get("env", {}).get("KERNEL_USERNAME")
    kernel_session["kernel_name"] = km.kernel_name

    # Build the inner dictionaries: connection_info, process_proxy and add to kernel_session
    kernel_session["connection_info"] = km.get_connection_info()
    kernel_session["launch_args"] = kwargs.copy()
    kernel_session["process_info"] = (
        km.process_proxy.get_process_info() if km.process_proxy else {}
    )
    self._save_session(kernel_id, kernel_session)
https://github.com/jupyter/enterprise_gateway/issues/87
[D 2017-07-26 12:02:29.586 KernelGatewayApp] Instantiating kernel 'Spark 2.1 - Scala (YARN Cluster Mode)' with process proxy: kernel_gateway.services.kernels.processproxy.YarnProcessProxy [I 170726 12:02:29 base:37] Request http://elyra-fyi-node-1:8088//ws/v1/cluster/nodes [E 2017-07-26 12:02:29.591 KernelGatewayApp] Unhandled error in API request Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/notebook/base/handlers.py", line 521, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 49, in post kernel_id = yield gen.maybe_future(km.start_kernel(kernel_name=model['name'])) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 24, in start_kernel kernel_id = yield gen.maybe_future(super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1063, in run yielded = 
self.gen.throw(*exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/manager.py", line 81, in start_kernel kernel_id = yield gen.maybe_future(super(SeedingMappingKernelManager, self).start_kernel(*args, **kwargs)) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/opt/anaconda2/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/opt/anaconda2/lib/python2.7/site-packages/tornado/gen.py", line 307, in wrapper yielded = next(result) File "/opt/anaconda2/lib/python2.7/site-packages/notebook/services/kernels/kernelmanager.py", line 123, in start_kernel super(MappingKernelManager, self).start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/multikernelmanager.py", line 110, in start_kernel km.start_kernel(**kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 91, in start_kernel self.process_proxy = process_proxy_class(self, self.kernel_spec.process_proxy_connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 570, in __init__ super(YarnProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 283, in __init__ super(RemoteProcessProxy, self).__init__(kernel_manager, connection_file_mode, **kw) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 69, in __init__ env_dict = kw['env'] KeyError: 'env'
KeyError
def query_app_state_by_id(app_id):
    """Return the state of an application.

    :param app_id:
    :return:
    """
    state_url = "%s/apps/%s/state" % (YarnProcessProxy.yarn_endpoint, app_id)
    curl_cmd = ["curl", "-X", "GET", state_url]
    # close_fds=True keeps inherited descriptors from accumulating across
    # the many short-lived curl invocations made by kernel pollers.
    proc = subprocess.Popen(
        curl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    stdout, _ = proc.communicate()
    if not stdout:
        return None
    return json.loads(stdout).get("state")
def query_app_state_by_id(app_id):
    """Return the state of an application.

    :param app_id:
    :return:
    """
    url = "%s/apps/%s/state" % (YarnProcessProxy.yarn_endpoint, app_id)
    cmd = ["curl", "-X", "GET", url]
    # BUGFIX: pass close_fds=True so each curl child does not inherit every
    # open descriptor of the gateway process. This call runs on every
    # restarter poll; without it descriptor usage grows until Popen fails
    # with OSError: [Errno 24] Too many open files.
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    output, stderr = process.communicate()
    return json.loads(output).get("state") if output else None
https://github.com/jupyter/enterprise_gateway/issues/85
[D 2017-07-20 12:22:23.076 KernelGatewayApp] kernel_id=237badc9-ce85-42ea-bd13-4d8432993ca3, kernel_name=spark_2.1_scala_yarn_cluster, last_activity=2017-07-20 19:03:50.488255+00:00 [D 2017-07-20 12:22:23.077 KernelGatewayApp] kernel_id=d371a88a-53fb-42c9-827b-1ee263160716, kernel_name=spark_2.1_scala_yarn_cluster, last_activity=2017-07-20 18:56:10.546248+00:00 [E 170720 12:22:23 ioloop:638] Exception in callback <bound method IOLoopKernelRestarter.poll of <jupyter_client.ioloop.restarter.IOLoopKernelRestarter object at 0x7f15185a9210>> Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/tornado/ioloop.py", line 1026, in _run return self.callback() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/restarter.py", line 88, in poll if not self.kernel_manager.is_alive(): File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 433, in is_alive if self.kernel.poll() is None: File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 440, in poll state = YarnProcessProxy.query_app_state_by_id(self.application_id) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 780, in query_app_state_by_id process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 382, in __init__ errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 812, in _get_handles c2pread, c2pwrite = self.pipe_cloexec() File "/opt/anaconda2/lib/python2.7/subprocess.py", line 860, in pipe_cloexec r, w = os.pipe() OSError: [Errno 24] Too many open files [E 170720 12:22:23 ioloop:638] Exception in callback <bound method IOLoopKernelRestarter.poll of <jupyter_client.ioloop.restarter.IOLoopKernelRestarter object at 0x7f1516a88a90>> Traceback (most recent call last): File 
"/opt/anaconda2/lib/python2.7/site-packages/tornado/ioloop.py", line 1026, in _run return self.callback() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/restarter.py", line 88, in poll if not self.kernel_manager.is_alive(): File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 433, in is_alive if self.kernel.poll() is None: File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 440, in poll state = YarnProcessProxy.query_app_state_by_id(self.application_id) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 780, in query_app_state_by_id process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 382, in __init__ errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 812, in _get_handles c2pread, c2pwrite = self.pipe_cloexec() File "/opt/anaconda2/lib/python2.7/subprocess.py", line 860, in pipe_cloexec r, w = os.pipe()
OSError
def kill_app_by_id(app_id):
    """Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED.
    TODO: extend the yarn_api_client to support cluster_application_kill with PUT, e.g.:
        YarnProcessProxy.resource_mgr.cluster_application_kill(application_id=app_id)

    :param app_id
    :return: The JSON response of killing the application.
    """
    header = "Content-Type: application/json"
    data = '{"state": "KILLED"}'
    kill_url = "%s/apps/%s/state" % (YarnProcessProxy.yarn_endpoint, app_id)
    curl_cmd = ["curl", "-X", "PUT", "-H", header, "-d", data, kill_url]
    # close_fds=True keeps inherited descriptors from accumulating across
    # repeated curl invocations.
    proc = subprocess.Popen(
        curl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    stdout, _ = proc.communicate()
    if not stdout:
        return None
    return json.loads(stdout)
def kill_app_by_id(app_id):
    """Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED.
    TODO: extend the yarn_api_client to support cluster_application_kill with PUT, e.g.:
        YarnProcessProxy.resource_mgr.cluster_application_kill(application_id=app_id)

    :param app_id
    :return: The JSON response of killing the application.
    """
    header = "Content-Type: application/json"
    data = '{"state": "KILLED"}'
    url = "%s/apps/%s/state" % (YarnProcessProxy.yarn_endpoint, app_id)
    cmd = ["curl", "-X", "PUT", "-H", header, "-d", data, url]
    # BUGFIX: pass close_fds=True so the curl child does not inherit every
    # open descriptor of the gateway process; without it repeated calls
    # leak descriptors until Popen fails with OSError: [Errno 24].
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    output, stderr = process.communicate()
    return json.loads(output) if output else None
https://github.com/jupyter/enterprise_gateway/issues/85
[D 2017-07-20 12:22:23.076 KernelGatewayApp] kernel_id=237badc9-ce85-42ea-bd13-4d8432993ca3, kernel_name=spark_2.1_scala_yarn_cluster, last_activity=2017-07-20 19:03:50.488255+00:00 [D 2017-07-20 12:22:23.077 KernelGatewayApp] kernel_id=d371a88a-53fb-42c9-827b-1ee263160716, kernel_name=spark_2.1_scala_yarn_cluster, last_activity=2017-07-20 18:56:10.546248+00:00 [E 170720 12:22:23 ioloop:638] Exception in callback <bound method IOLoopKernelRestarter.poll of <jupyter_client.ioloop.restarter.IOLoopKernelRestarter object at 0x7f15185a9210>> Traceback (most recent call last): File "/opt/anaconda2/lib/python2.7/site-packages/tornado/ioloop.py", line 1026, in _run return self.callback() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/restarter.py", line 88, in poll if not self.kernel_manager.is_alive(): File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 433, in is_alive if self.kernel.poll() is None: File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 440, in poll state = YarnProcessProxy.query_app_state_by_id(self.application_id) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 780, in query_app_state_by_id process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 382, in __init__ errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 812, in _get_handles c2pread, c2pwrite = self.pipe_cloexec() File "/opt/anaconda2/lib/python2.7/subprocess.py", line 860, in pipe_cloexec r, w = os.pipe() OSError: [Errno 24] Too many open files [E 170720 12:22:23 ioloop:638] Exception in callback <bound method IOLoopKernelRestarter.poll of <jupyter_client.ioloop.restarter.IOLoopKernelRestarter object at 0x7f1516a88a90>> Traceback (most recent call last): File 
"/opt/anaconda2/lib/python2.7/site-packages/tornado/ioloop.py", line 1026, in _run return self.callback() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/restarter.py", line 88, in poll if not self.kernel_manager.is_alive(): File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/manager.py", line 433, in is_alive if self.kernel.poll() is None: File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 440, in poll state = YarnProcessProxy.query_app_state_by_id(self.application_id) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/processproxy.py", line 780, in query_app_state_by_id process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 382, in __init__ errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr) File "/opt/anaconda2/lib/python2.7/subprocess.py", line 812, in _get_handles c2pread, c2pwrite = self.pipe_cloexec() File "/opt/anaconda2/lib/python2.7/subprocess.py", line 860, in pipe_cloexec r, w = os.pipe()
OSError
def start_kernel_from_session( self, kernel_id, kernel_name, connection_info, process_info, launch_args ): # Create a KernelManger instance and load connection and process info, then confirm the kernel is still # alive. constructor_kwargs = {} if self.kernel_spec_manager: constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager # Construct a kernel manager... km = self.kernel_manager_factory( connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, log=self.log, kernel_name=kernel_name, **constructor_kwargs, ) # Load connection info into member vars - no need to write out connection file km.load_connection_info(connection_info) km._launch_args = launch_args # Construct a process-proxy if km.kernel_spec.process_proxy_class: process_proxy_class = import_item(km.kernel_spec.process_proxy_class) kw = {"env": {}} km.process_proxy = process_proxy_class( km, km.kernel_spec.process_proxy_connection_file_mode, **kw ) km.process_proxy.load_process_info(process_info) # Confirm we can even poll the process. If not, remove the persisted session. if km.process_proxy.poll() is False: return False km.kernel = km.process_proxy km.start_restarter() km._connect_control_socket() self._kernels[kernel_id] = km self._kernel_connections[kernel_id] = 0 self.start_watching_activity(kernel_id) self.add_restart_callback( kernel_id, lambda: self._handle_kernel_died(kernel_id), "dead", ) # Only initialize culling if available. Warning message will be issued in gatewayapp at startup. func = getattr(self, "initialize_culler", None) if func: func() return True
def start_kernel_from_session( self, kernel_id, kernel_name, connection_info, process_info, launch_args ): # Create a KernelManger instance and load connection and process info, then confirm the kernel is still # alive. constructor_kwargs = {} if self.kernel_spec_manager: constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager # Construct a kernel manager... km = self.kernel_manager_factory( connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id), parent=self, log=self.log, kernel_name=kernel_name, **constructor_kwargs, ) km.load_connection_info(connection_info) km.write_connection_file() km._launch_args = launch_args # Construct a process-proxy if km.kernel_spec.process_proxy_class: process_proxy_class = import_item(km.kernel_spec.process_proxy_class) kw = {"env": {}} km.process_proxy = process_proxy_class( km, km.kernel_spec.process_proxy_connection_file_mode, **kw ) km.process_proxy.load_process_info(process_info) # Confirm we can even poll the process. If not, remove the persisted session. if km.process_proxy.poll() is False: return False km.kernel = km.process_proxy km.start_restarter() km._connect_control_socket() self._kernels[kernel_id] = km self._kernel_connections[kernel_id] = 0 self.start_watching_activity(kernel_id) self.add_restart_callback( kernel_id, lambda: self._handle_kernel_died(kernel_id), "dead", ) # Only initialize culling if available. Warning message will be issued in gatewayapp at startup. func = getattr(self, "initialize_culler", None) if func: func() return True
https://github.com/jupyter/enterprise_gateway/issues/74
[D 2017-07-16 15:56:15.197 KernelGatewayApp] RemoteKernelManager: Writing connection file with ip=169.45.103.144, control=33001, hb=45662, iopub=44204, stdin=33960, shell=44543 Traceback (most recent call last): File "/opt/anaconda2/bin/jupyter-kernelgateway", line 11, in <module> sys.exit(launch_instance()) File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_core/application.py", line 267, in launch_instance return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs) File "/opt/anaconda2/lib/python2.7/site-packages/traitlets/config/application.py", line 657, in launch_instance app.initialize(argv) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/gatewayapp.py", line 352, in initialize self.init_configurables() File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/gatewayapp.py", line 402, in init_configurables self.kernel_session_manager.start_sessions() File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/sessions/kernelsessionmanager.py", line 84, in start_sessions if not self._start_session(kernel_session): File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/sessions/kernelsessionmanager.py", line 97, in _start_session launch_args=kernel_session['launch_args']) File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 46, in start_kernel_from_session km.write_connection_file() File "/opt/anaconda2/lib/python2.7/site-packages/kernel_gateway/services/kernels/remotemanager.py", line 143, in write_connection_file return super(RemoteKernelManager, self).write_connection_file() File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/connect.py", line 431, in write_connection_file kernel_name=self.kernel_name File "/opt/anaconda2/lib/python2.7/site-packages/jupyter_client/connect.py", line 136, in write_connection_file with open(fname, 'w') as f: IOError: [Errno 13] Permission denied: 
u'/var/run/elyra/runtime/kernel-7388b059-8a68-48a0-811c-2d9803f04250.json'
IOError
def styleSheet(self) -> Optional[StyleSheet]: return next(self.model.select(StyleSheet), None)
def styleSheet(self) -> Optional[StyleSheet]: model = self.model style_sheet = next(model.select(StyleSheet), None) if not style_sheet: style_sheet = self.model.create(StyleSheet) style_sheet.styleSheet = DEFAULT_STYLE_SHEET return style_sheet
https://github.com/gaphor/gaphor/issues/578
Traceback (most recent call last): File "/home/dan/Projects/gaphor/gaphor/ui/actiongroup.py", line 138, in _action_activate method(from_variant(param)) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 121, in action_open_recent self.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 49, in load file_manager.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/filemanager.py", line 109, in load worker.reraise() File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 115, in reraise raise exc File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 119, in __generator_executer result = next(self._generator) File "/home/dan/Projects/gaphor/gaphor/storage/storage.py", line 308, in load_generator factory.model_ready() File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 159, in model_ready self.handle(ModelReady(self)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 182, in handle self.event_manager.handle(event) File "/home/dan/Projects/gaphor/gaphor/core/eventmanager.py", line 55, in handle self._events.handle(e) File "/home/dan/Projects/gaphor/.venv/lib/python3.9/site-packages/generic/event.py", line 64, in handle handler(event) File "/home/dan/Projects/gaphor/gaphor/ui/mainwindow.py", line 237, in _new_model_content for diagram in self.element_factory.select( File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in select yield from (e for e in self._elements.values() if expression(e)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in <genexpr> yield from (e for e in self._elements.values() if expression(e)) RuntimeError: OrderedDict mutated during iteration
RuntimeError
def style(self, node: StyleNode) -> Style: style_sheet = self.styleSheet return { **FALLBACK_STYLE, # type: ignore[misc] **(style_sheet.match(node) if style_sheet else {}), }
def style(self, node: StyleNode) -> Style: style_sheet = self.styleSheet return { **FALLBACK_STYLE, # type: ignore[misc] **style_sheet.match(node), }
https://github.com/gaphor/gaphor/issues/578
Traceback (most recent call last): File "/home/dan/Projects/gaphor/gaphor/ui/actiongroup.py", line 138, in _action_activate method(from_variant(param)) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 121, in action_open_recent self.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 49, in load file_manager.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/filemanager.py", line 109, in load worker.reraise() File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 115, in reraise raise exc File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 119, in __generator_executer result = next(self._generator) File "/home/dan/Projects/gaphor/gaphor/storage/storage.py", line 308, in load_generator factory.model_ready() File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 159, in model_ready self.handle(ModelReady(self)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 182, in handle self.event_manager.handle(event) File "/home/dan/Projects/gaphor/gaphor/core/eventmanager.py", line 55, in handle self._events.handle(e) File "/home/dan/Projects/gaphor/.venv/lib/python3.9/site-packages/generic/event.py", line 64, in handle handler(event) File "/home/dan/Projects/gaphor/gaphor/ui/mainwindow.py", line 237, in _new_model_content for diagram in self.element_factory.select( File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in select yield from (e for e in self._elements.values() if expression(e)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in <genexpr> yield from (e for e in self._elements.values() if expression(e)) RuntimeError: OrderedDict mutated during iteration
RuntimeError
def load_elements_generator(elements, factory, modeling_language, gaphor_version): """Load a file and create a model if possible. Exceptions: IOError, ValueError. """ log.debug(f"Loading {len(elements)} elements") # The elements are iterated three times: size = len(elements) * 3 def update_status_queue(_n=[0]): n = _n[0] = _n[0] + 1 if n % 30 == 0: yield (n * 100) / size # First create elements and canvas items in the factory # The elements are stored as attribute 'element' on the parser objects: yield from _load_elements_and_canvasitems( elements, factory, modeling_language, gaphor_version, update_status_queue ) yield from _load_attributes_and_references(elements, update_status_queue) ensure_style_sheet_is_present(factory) for id, elem in list(elements.items()): yield from update_status_queue() elem.element.postload()
def load_elements_generator(elements, factory, modeling_language, gaphor_version): """Load a file and create a model if possible. Exceptions: IOError, ValueError. """ log.debug(f"Loading {len(elements)} elements") # The elements are iterated three times: size = len(elements) * 3 def update_status_queue(_n=[0]): n = _n[0] = _n[0] + 1 if n % 30 == 0: yield (n * 100) / size # First create elements and canvas items in the factory # The elements are stored as attribute 'element' on the parser objects: yield from _load_elements_and_canvasitems( elements, factory, modeling_language, gaphor_version, update_status_queue ) yield from _load_attributes_and_references(elements, update_status_queue) for id, elem in list(elements.items()): yield from update_status_queue() elem.element.postload()
https://github.com/gaphor/gaphor/issues/578
Traceback (most recent call last): File "/home/dan/Projects/gaphor/gaphor/ui/actiongroup.py", line 138, in _action_activate method(from_variant(param)) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 121, in action_open_recent self.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 49, in load file_manager.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/filemanager.py", line 109, in load worker.reraise() File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 115, in reraise raise exc File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 119, in __generator_executer result = next(self._generator) File "/home/dan/Projects/gaphor/gaphor/storage/storage.py", line 308, in load_generator factory.model_ready() File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 159, in model_ready self.handle(ModelReady(self)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 182, in handle self.event_manager.handle(event) File "/home/dan/Projects/gaphor/gaphor/core/eventmanager.py", line 55, in handle self._events.handle(e) File "/home/dan/Projects/gaphor/.venv/lib/python3.9/site-packages/generic/event.py", line 64, in handle handler(event) File "/home/dan/Projects/gaphor/gaphor/ui/mainwindow.py", line 237, in _new_model_content for diagram in self.element_factory.select( File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in select yield from (e for e in self._elements.values() if expression(e)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in <genexpr> yield from (e for e in self._elements.values() if expression(e)) RuntimeError: OrderedDict mutated during iteration
RuntimeError
def load_default_model(session): element_factory = session.get_service("element_factory") element_factory.flush() with element_factory.block_events(): element_factory.create(StyleSheet) model = element_factory.create(UML.Package) model.name = gettext("New model") diagram = element_factory.create(UML.Diagram) diagram.package = model diagram.name = gettext("main") element_factory.model_ready()
def load_default_model(session): element_factory = session.get_service("element_factory") element_factory.flush() with element_factory.block_events(): model = element_factory.create(UML.Package) model.name = gettext("New model") diagram = element_factory.create(UML.Diagram) diagram.package = model diagram.name = gettext("main") element_factory.model_ready()
https://github.com/gaphor/gaphor/issues/578
Traceback (most recent call last): File "/home/dan/Projects/gaphor/gaphor/ui/actiongroup.py", line 138, in _action_activate method(from_variant(param)) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 121, in action_open_recent self.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/appfilemanager.py", line 49, in load file_manager.load(filename) File "/home/dan/Projects/gaphor/gaphor/ui/filemanager.py", line 109, in load worker.reraise() File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 115, in reraise raise exc File "/home/dan/Projects/gaphor/gaphor/ui/gidlethread.py", line 119, in __generator_executer result = next(self._generator) File "/home/dan/Projects/gaphor/gaphor/storage/storage.py", line 308, in load_generator factory.model_ready() File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 159, in model_ready self.handle(ModelReady(self)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 182, in handle self.event_manager.handle(event) File "/home/dan/Projects/gaphor/gaphor/core/eventmanager.py", line 55, in handle self._events.handle(e) File "/home/dan/Projects/gaphor/.venv/lib/python3.9/site-packages/generic/event.py", line 64, in handle handler(event) File "/home/dan/Projects/gaphor/gaphor/ui/mainwindow.py", line 237, in _new_model_content for diagram in self.element_factory.select( File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in select yield from (e for e in self._elements.values() if expression(e)) File "/home/dan/Projects/gaphor/gaphor/core/modeling/elementfactory.py", line 116, in <genexpr> yield from (e for e in self._elements.values() if expression(e)) RuntimeError: OrderedDict mutated during iteration
RuntimeError
def namespace_popup_model(self): assert self.view model = Gio.Menu.new() part = Gio.Menu.new() part.append(gettext("_Open"), "tree-view.open") part.append(gettext("_Rename"), "tree-view.rename") model.append_section(None, part) part = Gio.Menu.new() part.append(gettext("New _Diagram"), "tree-view.create-diagram") part.append(gettext("New _Package"), "tree-view.create-package") model.append_section(None, part) part = Gio.Menu.new() part.append(gettext("De_lete"), "tree-view.delete") model.append_section(None, part) element = self.view.get_selected_element() part = Gio.Menu.new() for presentation in element.presentation: diagram = presentation.diagram if diagram: menu_item = Gio.MenuItem.new( gettext('Show in "{diagram}"').format(diagram=diagram.name), "tree-view.show-in-diagram", ) menu_item.set_attribute_value("target", GLib.Variant.new_string(diagram.id)) part.append_item(menu_item) # Play it safe with an (arbitrary) upper bound if part.get_n_items() > 29: break if part.get_n_items() > 0: model.append_section(None, part) return model
def namespace_popup_model(self): assert self.view model = Gio.Menu.new() part = Gio.Menu.new() part.append(gettext("_Open"), "tree-view.open") part.append(gettext("_Rename"), "tree-view.rename") model.append_section(None, part) part = Gio.Menu.new() part.append(gettext("New _Diagram"), "tree-view.create-diagram") part.append(gettext("New _Package"), "tree-view.create-package") model.append_section(None, part) part = Gio.Menu.new() part.append(gettext("De_lete"), "tree-view.delete") model.append_section(None, part) element = self.view.get_selected_element() part = Gio.Menu.new() for presentation in element.presentation: diagram = presentation.diagram menu_item = Gio.MenuItem.new( gettext('Show in "{diagram}"').format(diagram=diagram.name), "tree-view.show-in-diagram", ) menu_item.set_attribute_value("target", GLib.Variant.new_string(diagram.id)) part.append_item(menu_item) # Play it safe with an (arbitrary) upper bound if part.get_n_items() > 29: break if part.get_n_items() > 0: model.append_section(None, part) return model
https://github.com/gaphor/gaphor/issues/438
Traceback (most recent call last): File "/home/dan/Projects/gaphor/gaphor/ui/namespace.py", line 568, in _on_view_event menu = Gtk.Menu.new_from_model(self.namespace_popup_model()) File "/home/dan/Projects/gaphor/gaphor/ui/namespace.py", line 544, in namespace_popup_model gettext('Show in "{diagram}"').format(diagram=diagram.name), AttributeError: 'NoneType' object has no attribute 'name'
AttributeError
def create_as(self, type, id, parent=None, subject=None): if not type or not issubclass(type, gaphas.Item): raise TypeError( f"Type {type} can not be added to a diagram as it is not a diagram item" ) item = type(id, self.model) if subject: item.subject = subject self.canvas.add(item, parent) self.model.handle(DiagramItemCreated(self.model, item)) return item
def create_as(self, type, id, parent=None, subject=None): item = type(id, self.model) if subject: item.subject = subject self.canvas.add(item, parent) self.model.handle(DiagramItemCreated(self.model, item)) return item
https://github.com/gaphor/gaphor/issues/286
gaphor.storage.storage INFO Loading file deployment_diagram.gaphor gaphor.storage.parser ERROR File corrupted, remove element 8cef44be-7376-11ea-80af-e4f89cae6ece and try again NoneType: None gaphor.storage.storage INFO Read 140 elements from file gaphor.storage.storage WARNING file b'/home/lie/projects/test/deployment_diagram.gaphor' could not be loaded (module 'gaphor.UML' has no attribute 'CommentItem') Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/gaphor/misc/gidlethread.py", line 127, in __generator_executer result = next(self._generator) File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 311, in load_generator for percentage in load_elements_generator( File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 182, in load_elements_generator yield from _load_elements_and_canvasitems( File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 222, in _load_elements_and_canvasitems cls = getattr(UML, elem.type) AttributeError: module 'gaphor.UML' has no attribute 'CommentItem'
AttributeError
def create_as(self, type: Type[T], id: str) -> T: """ Create a new model element of type 'type' with 'id' as its ID. This method should only be used when loading models, since it does not emit an ElementCreated event. """ if not type or not issubclass(type, Element) or issubclass(type, Presentation): raise TypeError(f"Type {type} is not a valid model element") obj = type(id, self) self._elements[id] = obj return obj
def create_as(self, type: Type[T], id: str) -> T: """ Create a new model element of type 'type' with 'id' as its ID. This method should only be used when loading models, since it does not emit an ElementCreated event. """ assert issubclass(type, Element) obj = type(id, self) self._elements[id] = obj return obj
https://github.com/gaphor/gaphor/issues/286
gaphor.storage.storage INFO Loading file deployment_diagram.gaphor gaphor.storage.parser ERROR File corrupted, remove element 8cef44be-7376-11ea-80af-e4f89cae6ece and try again NoneType: None gaphor.storage.storage INFO Read 140 elements from file gaphor.storage.storage WARNING file b'/home/lie/projects/test/deployment_diagram.gaphor' could not be loaded (module 'gaphor.UML' has no attribute 'CommentItem') Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/gaphor/misc/gidlethread.py", line 127, in __generator_executer result = next(self._generator) File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 311, in load_generator for percentage in load_elements_generator( File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 182, in load_elements_generator yield from _load_elements_and_canvasitems( File "/usr/lib/python3.8/site-packages/gaphor/storage/storage.py", line 222, in _load_elements_and_canvasitems cls = getattr(UML, elem.type) AttributeError: module 'gaphor.UML' has no attribute 'CommentItem'
AttributeError
def _on_name_change(self, event): if event.property is UML.Diagram.name: for page in range(0, self._notebook.get_n_pages()): widget = self._notebook.get_nth_page(page) if event.element is widget.diagram_page.diagram: self._notebook.set_tab_label( widget, self.tab_label(event.new_value, widget) )
def _on_name_change(self, event): if event.property is UML.Diagram.name: for page in range(0, self._notebook.get_n_pages()): widget = self._notebook.get_nth_page(page) if event.element is widget.diagram_page.diagram: print("Name change", event.__dict__) self._notebook.set_tab_label( widget, self.tab_label(event.new_value, widget) )
https://github.com/gaphor/gaphor/issues/149
gaphor.transaction ERROR Transaction terminated due to an exception, performing a rollback Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/action.py", line 179, in _action_activate method() File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' `` ### System Information OS: - [X] Linux (Please put in notes the specific distro) - [ ] MacOS - [ ] Windows 10 - [ ] Windows 7 NOTES:
AttributeError
def iter_for_element(self, element, old_namespace=0): """Get the Gtk.TreeIter for an element in the Namespace. Args: element: The element contained in the in the Namespace. old_namespace: The old namespace containing the element, optional. Returns: Gtk.TreeIter object """ # Using `0` as sentinel if old_namespace != 0: parent_iter = self.iter_for_element(old_namespace) elif element and element.namespace: parent_iter = self.iter_for_element(element.namespace) else: parent_iter = None child_iter = self.model.iter_children(parent_iter) while child_iter: if self.model.get_value(child_iter, 0) is element: return child_iter child_iter = self.model.iter_next(child_iter) return None
def iter_for_element(self, element, old_namespace=0): # Using `0` as sentinel if old_namespace != 0: parent_iter = self.iter_for_element(old_namespace) elif element and element.namespace: parent_iter = self.iter_for_element(element.namespace) else: parent_iter = None child_iter = self.model.iter_children(parent_iter) while child_iter: if self.model.get_value(child_iter, 0) is element: return child_iter child_iter = self.model.iter_next(child_iter) return None
https://github.com/gaphor/gaphor/issues/149
gaphor.transaction ERROR Transaction terminated due to an exception, performing a rollback Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/action.py", line 179, in _action_activate method() File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' `` ### System Information OS: - [X] Linux (Please put in notes the specific distro) - [ ] MacOS - [ ] Windows 10 - [ ] Windows 7 NOTES:
AttributeError
def _on_element_delete(self, event): element = event.element if type(element) in self.filter: iter = self.iter_for_element(element) # iter should be here, unless we try to delete an element who's # parent element is already deleted, so let's be lenient. if iter: self.model.remove(iter)
def _on_element_delete(self, event): element = event.element if type(element) in self.filter: iter = self.iter_for_element(element) # iter should be here, unless we try to delete an element who's parent element is already deleted, so let's be lenient. if iter: self.model.remove(iter)
https://github.com/gaphor/gaphor/issues/149
gaphor.transaction ERROR Transaction terminated due to an exception, performing a rollback Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/action.py", line 179, in _action_activate method() File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' `` ### System Information OS: - [X] Linux (Please put in notes the specific distro) - [ ] MacOS - [ ] Windows 10 - [ ] Windows 7 NOTES:
AttributeError
def select_element(self, element): """Select an element from the Namespace view. The element is selected. After this an action may be executed, such as OpenModelElement, which will try to open the element (if it's a Diagram). """ tree_iter = self.iter_for_element(element) path = self.model.get_path(tree_iter) path_indices = path.get_indices() # Expand the parent row if len(path_indices) > 1: parent_path = Gtk.TreePath.new_from_indices(path_indices[:-1]) self._namespace.expand_row(path=parent_path, open_all=False) selection = self._namespace.get_selection() selection.select_path(path) self._on_view_cursor_changed(self._namespace)
def select_element(self, element): """ Select an element from the Namespace view. The element is selected. After this an action may be executed, such as OpenModelElement, which will try to open the element (if it's a Diagram). """ path = Gtk.TreePath.new_from_indices( self._namespace.get_model().path_from_element(element) ) # Expand the first row: if len(path.get_indices()) > 1: self._namespace.expand_row(path=path, open_all=False) selection = self._namespace.get_selection() selection.select_path(path) self._on_view_cursor_changed(self._namespace)
https://github.com/gaphor/gaphor/issues/149
gaphor.transaction ERROR Transaction terminated due to an exception, performing a rollback Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/action.py", line 179, in _action_activate method() File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' `` ### System Information OS: - [X] Linux (Please put in notes the specific distro) - [ ] MacOS - [ ] Windows 10 - [ ] Windows 7 NOTES:
AttributeError
def tree_view_rename_selected(self): view = self._namespace element = view.get_selected_element() if element is not None: selection = view.get_selection() model, iter = selection.get_selected() path = model.get_path(iter) column = view.get_column(0) cell = column.get_cells()[1] cell.set_property("editable", 1) cell.set_property("text", element.name) view.set_cursor(path, column, True) cell.set_property("editable", 0)
def tree_view_rename_selected(self): view = self._namespace element = view.get_selected_element() if element is not None: path = view.get_model().path_from_element(element) column = view.get_column(0) cell = column.get_cells()[1] cell.set_property("editable", 1) cell.set_property("text", element.name) view.set_cursor(path, column, True) cell.set_property("editable", 0)
https://github.com/gaphor/gaphor/issues/149
gaphor.transaction ERROR Transaction terminated due to an exception, performing a rollback Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' Traceback (most recent call last): File "/app/lib/python3.7/site-packages/gaphor/action.py", line 179, in _action_activate method() File "/app/lib/python3.7/site-packages/gaphor/transaction.py", line 27, in _transactional r = func(*args, **kwargs) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 600, in tree_view_create_diagram self.select_element(diagram) File "/app/lib/python3.7/site-packages/gaphor/ui/namespace.py", line 552, in select_element self._namespace.get_model().path_from_element(element) AttributeError: 'TreeModelSort' object has no attribute 'path_from_element' `` ### System Information OS: - [X] Linux (Please put in notes the specific distro) - [ ] MacOS - [ ] Windows 10 - [ ] Windows 7 NOTES:
AttributeError
def get_tcp_dstip(self, sock):
    """Ask the pf firewall helper for the original destination of `sock`.

    Sends a QUERY_PF_NAT request over the firewall pipe; on success returns
    the (ip, port) NAT destination, otherwise falls back to the socket's
    local name.
    """
    fw_pipe = self.firewall.pfile
    try:
        peer = sock.getpeername()
    except socket.error:
        _, exc = sys.exc_info()[:2]
        if exc.args[0] == errno.EINVAL:
            # Socket is probably already closed; best effort fallback.
            return sock.getsockname()
    # NOTE(review): if the error above is not EINVAL, `peer` is unbound
    # here and the next line raises NameError — confirm intended.
    proxy = sock.getsockname()
    query = (
        sock.family,
        socket.IPPROTO_TCP,
        peer[0].encode("ASCII"),
        peer[1],
        proxy[0].encode("ASCII"),
        proxy[1],
    )
    request = b"QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % query
    fw_pipe.write(request)
    fw_pipe.flush()
    reply = fw_pipe.readline()
    debug2(request.decode("ASCII") + " > " + reply.decode("ASCII"))
    if reply.startswith(b"QUERY_PF_NAT_SUCCESS "):
        ip, port = reply[21:].split(b",")
        return (ip.decode("ASCII"), int(port))
    return sock.getsockname()
def get_tcp_dstip(self, sock):
    """Ask the pf firewall helper for the original destination of `sock`.

    Sends a QUERY_PF_NAT request over the firewall pipe; on success returns
    the (ip, port) NAT destination, otherwise falls back to the socket's
    local name.
    """
    fw_pipe = self.firewall.pfile
    try:
        peer = sock.getpeername()
    except socket.error:
        _, exc = sys.exc_info()[:2]
        if exc.args[0] == errno.EINVAL:
            debug2(
                "get_tcp_dstip error: sock.getpeername() %s\nsocket is probably closed.\n"
                % exc
            )
            return sock.getsockname()
    # NOTE(review): if the error above is not EINVAL, `peer` is unbound
    # here and the next line raises NameError — confirm intended.
    proxy = sock.getsockname()
    query = (
        sock.family,
        socket.IPPROTO_TCP,
        peer[0].encode("ASCII"),
        peer[1],
        proxy[0].encode("ASCII"),
        proxy[1],
    )
    request = b"QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % query
    fw_pipe.write(request)
    fw_pipe.flush()
    reply = fw_pipe.readline()
    debug2(request.decode("ASCII") + " > " + reply.decode("ASCII"))
    if reply.startswith(b"QUERY_PF_NAT_SUCCESS "):
        ip, port = reply[21:].split(b",")
        return (ip.decode("ASCII"), int(port))
    return sock.getsockname()
https://github.com/sshuttle/sshuttle/issues/287
Traceback (most recent call last): File "/usr/local/bin/sshuttle", line 11, in <module> load_entry_point('sshuttle==0.78.4', 'console_scripts', 'sshuttle')() File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/cmdline.py", line 79, in main opt.user) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 778, in main seed_hosts, auto_hosts, auto_nets, daemon, to_nameserver) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 538, in _main ssnet.runonce(handlers, mux) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/ssnet.py", line 596, in runonce h.callback(s) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 139, in <lambda> lambda sock: callback(sock, method, mux, handlers) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 355, in onaccept_tcp dstip = method.get_tcp_dstip(sock) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/methods/pf.py", line 423, in get_tcp_dstip peer = sock.getpeername() OSError: [Errno 22] Invalid argument
OSError
def _try_peername(sock): try: pn = sock.getpeername() if pn: return "%s:%s" % (pn[0], pn[1]) except socket.error: _, e = sys.exc_info()[:2] if e.args[0] == errno.EINVAL: pass elif e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK): raise except AttributeError: pass return "unknown"
def _try_peername(sock): try: pn = sock.getpeername() if pn: return "%s:%s" % (pn[0], pn[1]) except socket.error: _, e = sys.exc_info()[:2] if e.args[0] == errno.EINVAL: debug2( "_try_peername error: sock.getpeername() %s\nsocket is probably closed.\n" % e ) pass elif e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK): raise except AttributeError: pass return "unknown"
https://github.com/sshuttle/sshuttle/issues/287
Traceback (most recent call last): File "/usr/local/bin/sshuttle", line 11, in <module> load_entry_point('sshuttle==0.78.4', 'console_scripts', 'sshuttle')() File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/cmdline.py", line 79, in main opt.user) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 778, in main seed_hosts, auto_hosts, auto_nets, daemon, to_nameserver) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 538, in _main ssnet.runonce(handlers, mux) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/ssnet.py", line 596, in runonce h.callback(s) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 139, in <lambda> lambda sock: callback(sock, method, mux, handlers) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/client.py", line 355, in onaccept_tcp dstip = method.get_tcp_dstip(sock) File "/usr/local/Cellar/sshuttle/0.78.4_1/libexec/lib/python3.7/site-packages/sshuttle/methods/pf.py", line 423, in get_tcp_dstip peer = sock.getpeername() OSError: [Errno 22] Invalid argument
OSError
def _pre_experiment_hook(self, experiment: Experiment):
    """Configure W&B before the experiment runs.

    Pops runner-specific keys out of the experiment's monitoring params,
    initializes the callback state, and calls wandb.init with whatever
    remains (plus the flattened stage config for ConfigExperiment).
    """
    params = experiment.monitoring_params
    params["dir"] = str(Path(experiment.logdir).absolute())
    # Pop our own options so they are not forwarded to wandb.init.
    batch_logging: bool = params.pop("log_on_batch_end", False)
    epoch_logging: bool = params.pop("log_on_epoch_end", True)
    glob_patterns: List[str] = params.pop("checkpoints_glob", [])
    self._init(
        log_on_batch_end=batch_logging,
        log_on_epoch_end=epoch_logging,
        checkpoints_glob=glob_patterns,
    )
    if not isinstance(experiment, ConfigExperiment):
        wandb.init(**params)
    else:
        exp_config = utils.flatten_dict(experiment.stages_config)
        wandb.init(**params, config=exp_config)
def _pre_experiment_hook(self, experiment: Experiment):
    """Configure W&B before the experiment runs.

    Pops runner-specific keys out of the experiment's monitoring params,
    initializes the callback state, and calls wandb.init with whatever
    remains (plus the flattened stage config for ConfigExperiment).

    Fix: `checkpoints_glob` previously defaulted to None, which later blew
    up with `TypeError: 'NoneType' object is not iterable` when the post-
    experiment hook iterated it. Default to an empty list instead.
    """
    monitoring_params = experiment.monitoring_params
    monitoring_params["dir"] = str(Path(experiment.logdir).absolute())
    # Pop our own options so they are not forwarded to wandb.init.
    log_on_batch_end: bool = monitoring_params.pop("log_on_batch_end", False)
    log_on_epoch_end: bool = monitoring_params.pop("log_on_epoch_end", True)
    # Empty list (not None): downstream code iterates this unconditionally.
    checkpoints_glob: List[str] = monitoring_params.pop("checkpoints_glob", [])
    self._init(
        log_on_batch_end=log_on_batch_end,
        log_on_epoch_end=log_on_epoch_end,
        checkpoints_glob=checkpoints_glob,
    )
    if isinstance(experiment, ConfigExperiment):
        exp_config = utils.flatten_dict(experiment.stages_config)
        wandb.init(**monitoring_params, config=exp_config)
    else:
        wandb.init(**monitoring_params)
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def _init(self, **kwargs):
    """Initialize W&B for sampler mode, or disable it globally.

    Accepts no keyword arguments; `**kwargs` exists only to absorb (and
    reject) accidental extras. If monitoring params are absent, W&B is
    switched off for the whole process.
    """
    global WANDB_ENABLED
    assert not kwargs
    if WANDB_ENABLED:
        if self.monitoring_params is None:
            # No configuration supplied — turn W&B off process-wide.
            WANDB_ENABLED = False
        else:
            # Pop our option before forwarding the rest to wandb.init.
            self.checkpoints_glob: List[str] = self.monitoring_params.pop(
                "checkpoints_glob", []
            )
            wandb.init(**self.monitoring_params)
    self.wandb_mode = "sampler"
def _init(self, **kwargs):
    """Initialize W&B for sampler mode, or disable it globally.

    Accepts no keyword arguments; `**kwargs` exists only to absorb (and
    reject) accidental extras. If monitoring params are absent, W&B is
    switched off for the whole process.
    """
    global WANDB_ENABLED
    assert not kwargs
    if WANDB_ENABLED:
        if self.monitoring_params is None:
            # No configuration supplied — turn W&B off process-wide.
            WANDB_ENABLED = False
        else:
            # Pop our option before forwarding the rest to wandb.init.
            self.checkpoints_glob: List[str] = self.monitoring_params.pop(
                "checkpoints_glob", ["best.pth", "last.pth"]
            )
            wandb.init(**self.monitoring_params)
    self.wandb_mode = "sampler"
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def _init(self, **kwargs):
    """Initialize W&B for trainer mode, or disable it globally.

    Mirrors the experiment's "configs" (always) and "code" (if present)
    directories into the wandb run directory so they are uploaded with
    the run.
    """
    global WANDB_ENABLED
    assert not kwargs
    if WANDB_ENABLED:
        if self.monitoring_params is None:
            WANDB_ENABLED = False
        else:
            self.checkpoints_glob: List[str] = self.monitoring_params.pop(
                "checkpoints_glob", []
            )
            wandb.init(**self.monitoring_params)
            src_root = Path(self.logdir)
            dst_root = Path(wandb.run.dir)

            def _mirror(src_dir):
                # Replace any previous copy with a fresh one.
                target = f"{dst_root}/{src_dir.name}"
                os.makedirs(target, exist_ok=True)
                shutil.rmtree(target)
                shutil.copytree(f"{str(src_dir.absolute())}", target)

            _mirror(src_root.joinpath("configs"))
            code_dir = src_root.joinpath("code")
            if code_dir.exists():
                _mirror(code_dir)
    self.wandb_mode = "trainer"
def _init(self, **kwargs):
    """Initialize W&B for trainer mode, or disable it globally.

    Mirrors the experiment's "configs" (always) and "code" (if present)
    directories into the wandb run directory so they are uploaded with
    the run.
    """
    global WANDB_ENABLED
    assert not kwargs
    if WANDB_ENABLED:
        if self.monitoring_params is None:
            WANDB_ENABLED = False
        else:
            self.checkpoints_glob: List[str] = self.monitoring_params.pop(
                "checkpoints_glob", ["best.pth", "last.pth"]
            )
            wandb.init(**self.monitoring_params)
            src_root = Path(self.logdir)
            dst_root = Path(wandb.run.dir)

            def _mirror(src_dir):
                # Replace any previous copy with a fresh one.
                target = f"{dst_root}/{src_dir.name}"
                os.makedirs(target, exist_ok=True)
                shutil.rmtree(target)
                shutil.copytree(f"{str(src_dir.absolute())}", target)

            _mirror(src_root.joinpath("configs"))
            code_dir = src_root.joinpath("code")
            if code_dir.exists():
                _mirror(code_dir)
    self.wandb_mode = "trainer"
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def load_checkpoint(*, filename, state: _State):
    """Load a checkpoint file and restore training state from it.

    Restores epoch, stage_epoch and stage counters plus model/criterion/
    optimizer/scheduler weights into `state`. Raises if the file is missing.
    """
    if not os.path.isfile(filename):
        raise Exception(f"No checkpoint found at (unknown)")
    print(f"=> loading checkpoint (unknown)")
    checkpoint = utils.load_checkpoint(filename)
    # Restore progress counters so training resumes where it stopped.
    state.epoch = checkpoint["epoch"]
    state.stage_epoch = checkpoint["stage_epoch"]
    state.stage = checkpoint["stage"]
    utils.unpack_checkpoint(
        checkpoint,
        model=state.model,
        criterion=state.criterion,
        optimizer=state.optimizer,
        scheduler=state.scheduler,
    )
    print(
        f"loaded checkpoint (unknown) "
        f"(epoch {checkpoint['epoch']}, "
        f"stage_epoch {checkpoint['stage_epoch']}, "
        f"stage {checkpoint['stage']})"
    )
def load_checkpoint(*, filename, state: _State):
    """Load a checkpoint file and restore training state from it.

    Restores the epoch counter plus model/criterion/optimizer/scheduler
    weights into `state`. Raises if the file is missing.
    """
    if not os.path.isfile(filename):
        raise Exception(f"No checkpoint found at (unknown)")
    print(f"=> loading checkpoint (unknown)")
    checkpoint = utils.load_checkpoint(filename)
    state.epoch = checkpoint["epoch"]
    utils.unpack_checkpoint(
        checkpoint,
        model=state.model,
        criterion=state.criterion,
        optimizer=state.optimizer,
        scheduler=state.scheduler,
    )
    print(f"loaded checkpoint (unknown) (epoch {checkpoint['epoch']})")
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def on_stage_start(self, state: _State):
    """Pull resume settings from state and load the checkpoint if requested.

    Copies any non-None attributes listed in `self._keys_from_state` from
    `state` onto the callback, resolves `resume_dir`/`resume` into a full
    path, and loads the checkpoint once; `state.resume` is then cleared so
    later stages do not re-load it.
    """
    for attr in self._keys_from_state:
        candidate = getattr(state, attr, None)
        if candidate is not None:
            setattr(self, attr, candidate)
    if self.resume_dir is not None:
        self.resume = f"{self.resume_dir}/{self.resume}"
    if self.resume is not None:
        self.load_checkpoint(filename=self.resume, state=state)
        # Clear so subsequent stages start fresh instead of re-loading.
        state.resume = None
def on_stage_start(self, state: _State):
    """Pull resume settings from state and load the checkpoint if requested.

    Copies any non-None attributes listed in `self._keys_from_state` from
    `state` onto the callback, resolves `resume_dir`/`resume` into a full
    path, and loads the checkpoint.
    """
    for attr in self._keys_from_state:
        candidate = getattr(state, attr, None)
        if candidate is not None:
            setattr(self, attr, candidate)
    if self.resume_dir is not None:
        self.resume = f"{self.resume_dir}/{self.resume}"
    if self.resume is not None:
        self.load_checkpoint(filename=self.resume, state=state)
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def _prepare_for_stage(self, stage: str):
    """Build model, state and callbacks for one training stage.

    Migrates step/epoch/resume from the previous stage's state (unless
    disabled via `migrate_from_previous_stage`), recreates the experiment
    components, and splits callbacks into loggers vs. regular callbacks.
    The seed is reset before every factory call to keep runs reproducible.
    """
    seed = self.experiment.initial_seed
    utils.set_global_seed(seed)
    migrating_params = dict(**self.experiment.get_state_params(stage))
    should_migrate = migrating_params.get("migrate_from_previous_stage", True)
    if self.state is not None and should_migrate:
        # Carry progress counters (and any pending resume) across stages.
        migrating_params.update(
            {
                "step": self.state.step,
                "epoch": self.state.epoch,
                "resume": getattr(self.state, "resume", None),
            }
        )
    utils.set_global_seed(seed)
    (
        self.model,
        criterion,
        optimizer,
        scheduler,
        self.device,
    ) = self._get_experiment_components(stage)
    utils.set_global_seed(seed)
    self.state = self.state_fn(
        stage=stage,
        model=self.model,
        device=self.device,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        **migrating_params,
    )
    utils.set_global_seed(seed)
    all_callbacks = self.experiment.get_callbacks(stage)
    loggers = utils.process_callbacks(
        OrderedDict(
            (name, cb)
            for name, cb in all_callbacks.items()
            if isinstance(cb, LoggerCallback)
        )
    )
    callbacks = utils.process_callbacks(
        OrderedDict(
            (name, cb)
            for name, cb in all_callbacks.items()
            if not isinstance(cb, LoggerCallback)
        )
    )
    self.state.loggers = loggers
    self.loggers = loggers
    self.callbacks = callbacks
def _prepare_for_stage(self, stage: str):
    """Build model, state and callbacks for one training stage.

    Migrates step/epoch from the previous stage's state (unless disabled
    via `migrate_from_previous_stage`), recreates the experiment
    components, and splits callbacks into loggers vs. regular callbacks.
    The seed is reset before every factory call to keep runs reproducible.
    """
    seed = self.experiment.initial_seed
    utils.set_global_seed(seed)
    migrating_params = {}
    stage_state_params = self.experiment.get_state_params(stage)
    should_migrate = stage_state_params.get("migrate_from_previous_stage", True)
    if self.state is not None and should_migrate:
        # Carry progress counters across stages.
        migrating_params.update(
            {"step": self.state.step, "epoch": self.state.epoch}
        )
    utils.set_global_seed(seed)
    (
        self.model,
        criterion,
        optimizer,
        scheduler,
        self.device,
    ) = self._get_experiment_components(stage)
    utils.set_global_seed(seed)
    self.state = self.state_fn(
        stage=stage,
        model=self.model,
        device=self.device,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        **stage_state_params,
        **migrating_params,
    )
    utils.set_global_seed(seed)
    all_callbacks = self.experiment.get_callbacks(stage)
    loggers = utils.process_callbacks(
        OrderedDict(
            (name, cb)
            for name, cb in all_callbacks.items()
            if isinstance(cb, LoggerCallback)
        )
    )
    callbacks = utils.process_callbacks(
        OrderedDict(
            (name, cb)
            for name, cb in all_callbacks.items()
            if not isinstance(cb, LoggerCallback)
        )
    )
    self.state.loggers = loggers
    self.loggers = loggers
    self.callbacks = callbacks
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def _run_stage(self, stage: str):
    """Run all epochs of one stage, firing stage/epoch lifecycle events.

    Loops until `state.stage_epoch` reaches `state.num_epochs`, reseeding
    per epoch; stops early on check-run mode (after 2 epochs) or when the
    early-stop flag is raised. Counters are advanced only on a completed,
    non-terminating epoch.
    """
    self._prepare_for_stage(stage)
    self._run_event("stage", moment="start")
    while self.state.stage_epoch < self.state.num_epochs:
        self._run_event("epoch", moment="start")
        # Per-epoch reseed keeps data order reproducible across resumes.
        utils.set_global_seed(self.experiment.initial_seed + self.state.epoch + 1)
        self._run_epoch(stage=stage, epoch=self.state.stage_epoch)
        self._run_event("epoch", moment="end")
        if self._check_run and self.state.stage_epoch >= 2:
            break
        if self.state.early_stop:
            self.state.early_stop = False
            break
        self.state.epoch += 1
        self.state.stage_epoch += 1
    self._run_event("stage", moment="end")
def _run_stage(self, stage: str):
    """Run all epochs of one stage, firing stage/epoch lifecycle events.

    Iterates `state.num_epochs` epochs; stops early on check-run mode
    (after 2 epochs) or when the early-stop flag is raised. The global
    epoch counter advances only on a completed, non-terminating epoch.
    """
    self._prepare_for_stage(stage)
    self._run_event("stage", moment="start")
    for stage_epoch in range(self.state.num_epochs):
        self.state.stage_epoch = stage_epoch
        self._run_event("epoch", moment="start")
        self._run_epoch(stage=stage, epoch=stage_epoch)
        self._run_event("epoch", moment="end")
        if self._check_run and self.state.stage_epoch >= 2:
            break
        if self.state.early_stop:
            self.state.early_stop = False
            break
        self.state.epoch += 1
    self._run_event("stage", moment="end")
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def stages(self) -> List[str]:
    """Experiment's stage names, skipping stages completed before a resume.

    If the first stage's params carry resume information pointing at an
    existing checkpoint, the list starts from the stage recorded in that
    checkpoint instead of the beginning.
    """
    stage_names = list(self.stages_config.keys())
    # Check whether a resume checkpoint should shift the starting stage.
    first_stage_params = self.get_state_params(stage_names[0])
    resume = first_stage_params.get("resume", None)
    resume_dir = first_stage_params.get("resume_dir", None)
    if resume_dir is not None:
        resume = resume_dir / str(resume)
    if resume is not None and Path(resume).is_file():
        checkpoint = utils.load_checkpoint(resume)
        start_idx = stage_names.index(checkpoint["stage"])
        stage_names = stage_names[start_idx:]
    return stage_names
def stages(self) -> List[str]:
    """Experiment's stage names, in configuration order."""
    # Dict iteration order is insertion order, i.e. config order.
    return list(self.stages_config)
https://github.com/catalyst-team/catalyst/issues/640
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-279a2ce060b8> in <module> 24 monitoring_params={ 25 'project': "yt", ---> 26 'group': "aa" 27 } 28 ) ~/.local/lib/python3.6/site-packages/catalyst/dl/runner/supervised.py in train(self, model, criterion, optimizer, loaders, logdir, callbacks, scheduler, resume, num_epochs, valid_loader, main_metric, minimize_metric, verbose, state_kwargs, checkpoint_data, fp16, monitoring_params, check) 204 monitoring_params=monitoring_params 205 ) --> 206 self.run_experiment(experiment, check=check) 207 208 def infer( ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in run_experiment(self, experiment, check) 141 self._pre_experiment_hook(experiment=experiment) 142 super().run_experiment(experiment=experiment, check=check) --> 143 self._post_experiment_hook(experiment=experiment) 144 145 ~/.local/lib/python3.6/site-packages/catalyst/contrib/dl/runner/wandb.py in _post_experiment_hook(self, experiment) 104 105 checkpoint_paths = [] --> 106 for glob in self.checkpoints_glob: 107 checkpoint_paths.extend(list(checkpoints_src.glob(glob))) 108 checkpoint_paths = list(set(checkpoint_paths)) TypeError: 'NoneType' object is not iterable
TypeError
def imread(uri, grayscale=False, expand_dims=True, rootpath=None, **kwargs):
    """Read an image from `uri` into a numpy array.

    Args:
        uri: {str, pathlib.Path, bytes, file} resource to load the image
            from — filename, pathlib.Path, http address or file object.
        grayscale: convert the decoded image to grayscale.
        expand_dims: add a trailing channel axis to 2-D (grayscale) images.
        rootpath: optional prefix joined onto relative `uri` values.

    Returns:
        The decoded image array.
    """
    if rootpath is not None:
        if not uri.startswith(rootpath):
            uri = os.path.join(rootpath, uri)
    # Fast path: jpeg4py decoder for JPEG files when available.
    use_jpeg4py = JPEG4PY_ENABLED and uri.endswith(("jpg", "JPG", "jpeg", "JPEG"))
    if use_jpeg4py:
        img = jpeg.JPEG(uri).decode()
    else:
        img = imageio.imread(uri, **kwargs)
    if grayscale:
        img = rgb2gray(img)
    if expand_dims and len(img.shape) < 3:  # grayscale
        img = np.expand_dims(img, -1)
    return img
def imread(uri, grayscale=False, expand_dims=True, rootpath=None, **kwargs):
    """Read an image from `uri` into a numpy array.

    Args:
        uri: {str, pathlib.Path, bytes, file} resource to load the image
            from — filename, pathlib.Path, http address or file object.
        grayscale: convert the decoded image to grayscale.
        expand_dims: add a trailing channel axis to 2-D (grayscale) images.
        rootpath: optional prefix joined onto relative `uri` values.

    Returns:
        The decoded image array.

    Fix: `as_gray`/`pilmode` were previously forwarded to imageio.imread,
    which raises TypeError for plugins that do not accept them (e.g. PNG).
    Decode first with only caller-supplied kwargs, then convert to
    grayscale uniformly afterwards.
    """
    if rootpath is not None:
        uri = uri if uri.startswith(rootpath) else os.path.join(rootpath, uri)

    if JPEG4PY_ENABLED and uri.endswith(("jpg", "JPG", "jpeg", "JPEG")):
        img = jpeg.JPEG(uri).decode()
    else:
        # Do not pass as_gray/pilmode here — not all imageio plugins
        # accept them (TypeError on PNG).
        img = imageio.imread(uri, **kwargs)

    # Grayscale conversion applied after decoding, for both code paths.
    if grayscale:
        img = rgb2gray(img)

    if expand_dims and len(img.shape) < 3:  # grayscale
        img = np.expand_dims(img, -1)

    return img
https://github.com/catalyst-team/catalyst/issues/472
TypeError Traceback (most recent call last) <ipython-input-92-cf55df030d98> in <module> 3 4 os.system("wget -O img.png https://www.sample-videos.com/img/Sample-png-image-200kb.png") ----> 5 imread("img.png") /usr/local/lib/python3.6/dist-packages/catalyst/utils/image.py in imread(uri, grayscale, expand_dims, rootpath, **kwargs) 63 img = rgb2gray(img) 64 else: ---> 65 img = imageio.imread(uri, as_gray=grayscale, pilmode="RGB", **kwargs) 66 67 if expand_dims and len(img.shape) < 3: # grayscale /usr/local/lib/python3.6/dist-packages/imageio/core/functions.py in imread(uri, format, **kwargs) 183 ) 184 --> 185 # Return its reader object 186 return format.get_reader(request) 187 /usr/local/lib/python3.6/dist-packages/imageio/core/functions.py in get_reader(uri, format, mode, **kwargs) 109 110 match = mem_re.match(arg) --> 111 if match is None: 112 raise ValueError( 113 "Memory size could not be parsed " /usr/local/lib/python3.6/dist-packages/imageio/core/format.py in get_reader(self, request) 156 from the given file. Users are encouraged to use 157 imageio.get_reader() instead. --> 158 """ 159 select_mode = request.mode[1] if request.mode[1] in "iIvV" else "" 160 if select_mode not in self.modes: /usr/local/lib/python3.6/dist-packages/imageio/core/format.py in __init__(self, format, request) 205 and context management as Python's file objects. 206 """ --> 207 208 def __init__(self, format, request): 209 self.__closed = False TypeError: _open() got an unexpected keyword argument 'as_gray'
TypeError
def deploy(
    self,
    contract: Any,
    *args: Tuple,
    amount: int = 0,
    gas_limit: Optional[int] = None,
    gas_price: Optional[int] = None,
    nonce: Optional[int] = None,
) -> Any:
    """Deploys a contract.

    Args:
        contract: ContractContainer instance.
        *args: Constructor arguments. The last argument may optionally be
            a dictionary of transaction values.

    Kwargs:
        amount: Amount of ether to send with transaction, in wei.
        gas_limit: Gas limit of the transaction.
        gas_price: Gas price of the transaction.
        nonce: Nonce to use for the transaction.

    Returns:
        * Contract instance if the transaction confirms and the contract exists
        * TransactionReceipt if the transaction is pending or reverts
    """
    evm = contract._build["compiler"]["evm_version"]
    if rpc.is_active() and not rpc.evm_compatible(evm):
        raise IncompatibleEVMVersion(
            f"Local RPC using '{rpc.evm_version()}' but contract was compiled for '{evm}'"
        )
    data = contract.deploy.encode_input(*args)
    try:
        txid = self._transact(  # type: ignore
            {
                "from": self.address,
                "value": Wei(amount),
                "nonce": nonce if nonce is not None else self.nonce,
                "gasPrice": Wei(gas_price) or self._gas_price(),
                "gas": Wei(gas_limit) or self._gas_limit(None, amount, data),
                "data": HexBytes(data),
            }
        )
        exc, revert_data = None, None
    except ValueError as e:
        exc = VirtualMachineError(e)
        if not hasattr(exc, "txid"):
            raise exc from None
        txid = exc.txid
        revert_data = (exc.revert_msg, exc.pc, exc.revert_type)
    receipt = TransactionReceipt(
        txid, self, name=contract._name + ".constructor", revert_data=revert_data
    )
    # Register the deployment with the container without blocking.
    add_thread = threading.Thread(
        target=contract._add_from_tx, args=(receipt,), daemon=True
    )
    add_thread.start()
    if rpc.is_active():
        # Record the call so a chain rewind can replay it.
        undo_thread = threading.Thread(
            target=rpc._add_to_undo_buffer,
            args=(
                receipt,
                self.deploy,
                (contract, *args),
                {"amount": amount, "gas_limit": gas_limit, "gas_price": gas_price},
            ),
            daemon=True,
        )
        undo_thread.start()
    if receipt.status != 1:
        receipt._raise_if_reverted(exc)
        return receipt
    add_thread.join()
    try:
        return contract.at(receipt.contract_address)
    except ContractNotFound:
        # if the contract self-destructed during deployment
        return receipt
def deploy( self, contract: Any, *args: Tuple, amount: int = 0, gas_limit: Optional[int] = None, gas_price: Optional[int] = None, nonce: Optional[int] = None, ) -> Any: """Deploys a contract. Args: contract: ContractContainer instance. *args: Constructor arguments. The last argument may optionally be a dictionary of transaction values. Kwargs: amount: Amount of ether to send with transaction, in wei. gas_limit: Gas limit of the transaction. gas_price: Gas price of the transaction. nonce: Nonce to use for the transaction. Returns: * Contract instance if the transaction confirms * TransactionReceipt if the transaction is pending or reverts""" evm = contract._build["compiler"]["evm_version"] if rpc.is_active() and not rpc.evm_compatible(evm): raise IncompatibleEVMVersion( f"Local RPC using '{rpc.evm_version()}' but contract was compiled for '{evm}'" ) data = contract.deploy.encode_input(*args) try: txid = self._transact( # type: ignore { "from": self.address, "value": Wei(amount), "nonce": nonce if nonce is not None else self.nonce, "gasPrice": Wei(gas_price) or self._gas_price(), "gas": Wei(gas_limit) or self._gas_limit(None, amount, data), "data": HexBytes(data), } ) exc, revert_data = None, None except ValueError as e: exc = VirtualMachineError(e) if not hasattr(exc, "txid"): raise exc from None txid = exc.txid revert_data = (exc.revert_msg, exc.pc, exc.revert_type) receipt = TransactionReceipt( txid, self, name=contract._name + ".constructor", revert_data=revert_data ) add_thread = threading.Thread( target=contract._add_from_tx, args=(receipt,), daemon=True ) add_thread.start() if rpc.is_active(): undo_thread = threading.Thread( target=rpc._add_to_undo_buffer, args=( receipt, self.deploy, (contract, *args), {"amount": amount, "gas_limit": gas_limit, "gas_price": gas_price}, ), daemon=True, ) undo_thread.start() if receipt.status != 1: receipt._raise_if_reverted(exc) return receipt add_thread.join() return contract.at(receipt.contract_address)
https://github.com/eth-brownie/brownie/issues/537
Running 'scripts.dev-deploy.main'... Transaction sent: 0x36f1599c9b5b5ab9f243f5d0c64e8f2637a355818bc4cf5e1a67168e9e22713e Gas price: 0.0 gwei Gas limit: 6721975 ArgobytesOwnedVaultDeployer.constructor confirmed - Block: 1 Gas used: 3545980 (52.75%) ArgobytesOwnedVaultDeployer deployed at: 0x04246fAA61004668E1C0a388A3bF60E67827A17E Exception in thread Thread-2: Traceback (most recent call last): File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner self.run() File "/usr/lib/python3.8/threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 190, in _add_from_tx self.at(tx.contract_address, tx.sender, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 174, in at contract = ProjectContract(self._project, build, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 728, in __init__ _DeployedContractBase.__init__(self, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 309, in __init__ raise ContractNotFound(f"No contract deployed at {address}") brownie.exceptions.ContractNotFound: No contract deployed at 0x04246fAA61004668E1C0a388A3bF60E67827A17E
brownie.exceptions.ContractNotFound
def _add_from_tx(self, tx: TransactionReceiptType) -> None: tx._confirmed.wait() if tx.status: try: self.at(tx.contract_address, tx.sender, tx) except ContractNotFound: # if the contract self-destructed during deployment pass
def _add_from_tx(self, tx: TransactionReceiptType) -> None: tx._confirmed.wait() if tx.status: self.at(tx.contract_address, tx.sender, tx)
https://github.com/eth-brownie/brownie/issues/537
Running 'scripts.dev-deploy.main'... Transaction sent: 0x36f1599c9b5b5ab9f243f5d0c64e8f2637a355818bc4cf5e1a67168e9e22713e Gas price: 0.0 gwei Gas limit: 6721975 ArgobytesOwnedVaultDeployer.constructor confirmed - Block: 1 Gas used: 3545980 (52.75%) ArgobytesOwnedVaultDeployer deployed at: 0x04246fAA61004668E1C0a388A3bF60E67827A17E Exception in thread Thread-2: Traceback (most recent call last): File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner self.run() File "/usr/lib/python3.8/threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 190, in _add_from_tx self.at(tx.contract_address, tx.sender, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 174, in at contract = ProjectContract(self._project, build, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 728, in __init__ _DeployedContractBase.__init__(self, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 309, in __init__ raise ContractNotFound(f"No contract deployed at {address}") brownie.exceptions.ContractNotFound: No contract deployed at 0x04246fAA61004668E1C0a388A3bF60E67827A17E
brownie.exceptions.ContractNotFound
def __init__( self, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None, ) -> None: address = _resolve_address(address) self.bytecode = web3.eth.getCode(address).hex()[2:] if not self.bytecode: raise ContractNotFound(f"No contract deployed at {address}") self._owner = owner self.tx = tx self.address = address _add_deployment_topics(address, self.abi) fn_names = [i["name"] for i in self.abi if i["type"] == "function"] for abi in [i for i in self.abi if i["type"] == "function"]: name = f"{self._name}.{abi['name']}" sig = build_function_signature(abi) natspec: Dict = {} if self._build.get("natspec"): natspec = self._build["natspec"]["methods"].get(sig, {}) if fn_names.count(abi["name"]) == 1: fn = _get_method_object(address, abi, name, owner, natspec) self._check_and_set(abi["name"], fn) continue # special logic to handle function overloading if not hasattr(self, abi["name"]): overloaded = OverloadedMethod(address, name, owner) self._check_and_set(abi["name"], overloaded) getattr(self, abi["name"])._add_fn(abi, natspec) self._initialized = True
def __init__( self, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None, ) -> None: address = _resolve_address(address) self.bytecode = web3.eth.getCode(address).hex()[2:] if not self.bytecode: raise ContractNotFound(f"No contract deployed at {address}") self._owner = owner self.tx = tx self.address = address _add_deployment_topics(address, self.abi) fn_names = [i["name"] for i in self.abi if i["type"] == "function"] for abi in [i for i in self.abi if i["type"] == "function"]: name = f"{self._name}.{abi['name']}" sig = build_function_signature(abi) natspec: Dict = {} if self._build.get("natspec"): natspec = self._build["natspec"]["methods"].get(sig, {}) if fn_names.count(abi["name"]) == 1: fn = _get_method_object(address, abi, name, owner, natspec) self._check_and_set(abi["name"], fn) continue # special logic to handle function overloading if not hasattr(self, abi["name"]): overloaded = OverloadedMethod(address, name, owner) self._check_and_set(abi["name"], overloaded) getattr(self, abi["name"])._add_fn(abi, natspec)
https://github.com/eth-brownie/brownie/issues/537
Running 'scripts.dev-deploy.main'... Transaction sent: 0x36f1599c9b5b5ab9f243f5d0c64e8f2637a355818bc4cf5e1a67168e9e22713e Gas price: 0.0 gwei Gas limit: 6721975 ArgobytesOwnedVaultDeployer.constructor confirmed - Block: 1 Gas used: 3545980 (52.75%) ArgobytesOwnedVaultDeployer deployed at: 0x04246fAA61004668E1C0a388A3bF60E67827A17E Exception in thread Thread-2: Traceback (most recent call last): File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner self.run() File "/usr/lib/python3.8/threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 190, in _add_from_tx self.at(tx.contract_address, tx.sender, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 174, in at contract = ProjectContract(self._project, build, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 728, in __init__ _DeployedContractBase.__init__(self, address, owner, tx) File "/home/ski/code/argobytes-contracts-brownie/venv/lib/python3.8/site-packages/brownie/network/contract.py", line 309, in __init__ raise ContractNotFound(f"No contract deployed at {address}") brownie.exceptions.ContractNotFound: No contract deployed at 0x04246fAA61004668E1C0a388A3bF60E67827A17E
brownie.exceptions.ContractNotFound
def attach(self, laddr: Union[str, Tuple]) -> None: """Attaches to an already running RPC client subprocess. Args: laddr: Address that the client is listening at. Can be supplied as a string "http://127.0.0.1:8545" or tuple ("127.0.0.1", 8545)""" if self.is_active(): raise SystemError("RPC is already active.") if isinstance(laddr, str): o = urlparse(laddr) if not o.port: raise ValueError("No RPC port given") laddr = (o.hostname, o.port) try: proc = next(i for i in psutil.process_iter() if _check_connections(i, laddr)) except StopIteration: raise ProcessLookupError("Could not find RPC process.") print(f"Attached to local RPC client listening at '{laddr[0]}:{laddr[1]}'...") self._rpc = psutil.Process(proc.pid) if web3.provider: self._reset_id = self._snap() _notify_registry(0)
def attach(self, laddr: Union[str, Tuple]) -> None: """Attaches to an already running RPC client subprocess. Args: laddr: Address that the client is listening at. Can be supplied as a string "http://127.0.0.1:8545" or tuple ("127.0.0.1", 8545)""" if self.is_active(): raise SystemError("RPC is already active.") if isinstance(laddr, str): o = urlparse(laddr) if not o.port: raise ValueError("No RPC port given") laddr = (o.hostname, o.port) try: proc = next(i for i in psutil.net_connections() if i.laddr == laddr) except StopIteration: raise ProcessLookupError("Could not find RPC process.") self._rpc = psutil.Process(proc.pid) if web3.provider: self._reset_id = self._snap() _notify_registry(0)
https://github.com/eth-brownie/brownie/issues/263
Brownie v1.1.0 - Python development framework for Ethereum Brownie project has been compiled at /Users/alper/eBlocBroker/contract/build/contracts ==================================================================== test session starts ===================================================================== platform darwin -- Python 3.7.5, pytest-5.0.1, py-1.8.0, pluggy-0.12.0 rootdir: /Users/alper/eBlocBroker plugins: eth-brownie-1.1.0, web3-5.0.0 collected 6 items INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/psutil-5.6.3-py3.7-macosx-10.13-x86_64.egg/psutil/_psosx.py", line 339, in wrapper INTERNALERROR> return fun(self, *args, **kwargs) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/psutil-5.6.3-py3.7-macosx-10.13-x86_64.egg/psutil/_psosx.py", line 528, in connections INTERNALERROR> rawlist = cext.proc_connections(self.pid, families, types) INTERNALERROR> PermissionError: [Errno 1] Operation not permitted INTERNALERROR> INTERNALERROR> During handling of the above exception, another exception occurred: INTERNALERROR> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pytest-5.0.1-py3.7.egg/_pytest/main.py", line 213, in wrap_session INTERNALERROR> session.exitstatus = doit(config, session) or 0 INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pytest-5.0.1-py3.7.egg/_pytest/main.py", line 257, in _main INTERNALERROR> config.hook.pytest_runtestloop(session=session) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/hooks.py", line 289, in __call__ INTERNALERROR> return self._hookexec(self, self.get_hookimpls(), kwargs) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/manager.py", line 87, in _hookexec INTERNALERROR> return self._inner_hookexec(hook, methods, kwargs) INTERNALERROR> File 
"/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/manager.py", line 81, in <lambda> INTERNALERROR> firstresult=hook.spec.opts.get("firstresult") if hook.spec else False, INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/callers.py", line 208, in _multicall INTERNALERROR> return outcome.get_result() INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/callers.py", line 80, in get_result INTERNALERROR> raise ex[1].with_traceback(ex[2]) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/pluggy-0.12.0-py3.7.egg/pluggy/callers.py", line 187, in _multicall INTERNALERROR> res = hook_impl.function(*args) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/brownie/test/plugin.py", line 131, in pytest_runtestloop INTERNALERROR> brownie.network.connect(ARGV["network"]) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/brownie/network/main.py", line 41, in connect INTERNALERROR> rpc.attach(host) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/brownie/network/rpc.py", line 109, in attach INTERNALERROR> proc = next(i for i in psutil.net_connections() if i.laddr == laddr) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/psutil-5.6.3-py3.7-macosx-10.13-x86_64.egg/psutil/__init__.py", line 2263, in net_connections INTERNALERROR> return _psplatform.net_connections(kind) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/psutil-5.6.3-py3.7-macosx-10.13-x86_64.egg/psutil/_psosx.py", line 252, in net_connections INTERNALERROR> cons = Process(pid).connections(kind) INTERNALERROR> File "/Users/alper/v/lib/python3.7/site-packages/psutil-5.6.3-py3.7-macosx-10.13-x86_64.egg/psutil/_psosx.py", line 344, in wrapper INTERNALERROR> raise AccessDenied(self.pid, self._name) INTERNALERROR> psutil.AccessDenied: psutil.AccessDenied (pid=28029) ================================================================ 
no tests ran in 0.10 seconds ================================================================
PermissionError
def get_str_dtype(cls, pandas_dtype_arg) -> Optional[str]: """Get pandas-compatible string representation of dtype.""" pandas_dtype = cls.get_dtype(pandas_dtype_arg) if pandas_dtype is None: return pandas_dtype elif isinstance(pandas_dtype, PandasDtype): return pandas_dtype.str_alias return str(pandas_dtype)
def get_str_dtype(cls, pandas_dtype_arg): """Get pandas-compatible string representation of dtype.""" dtype_ = pandas_dtype_arg if dtype_ is None: return dtype_ if is_extension_array_dtype(dtype_): if isinstance(dtype_, type): try: # Convert to str here because some pandas dtypes allow # an empty constructor for compatatibility but fail on # str(). e.g: PeriodDtype return str(dtype_()) except (TypeError, AttributeError) as err: raise TypeError( f"Pandas dtype {dtype_} cannot be instantiated: " f"{err}\n Usage Tip: Use an instance or a string " "representation." ) from err return str(dtype_) if dtype_ in NUMPY_TYPES: dtype_ = cls.from_numpy_type(dtype_) elif isinstance(dtype_, str): dtype_ = cls.from_str_alias(dtype_) elif isinstance(dtype_, type): dtype_ = cls.from_python_type(dtype_) if isinstance(dtype_, cls): return dtype_.str_alias raise TypeError( "type of `pandas_dtype` argument not recognized: " f"{type(pandas_dtype_arg)}. Please specify a pandera PandasDtype " "enum, legal pandas data type, pandas data type string alias, or " "numpy data type string alias" )
https://github.com/pandera-dev/pandera/issues/418
Traceback (most recent call last): File "foo.py", line 8, in <module> yaml_schema = schema.to_yaml() File "/Users/nielsbantilan/git/pandera/pandera/schemas.py", line 1197, in to_yaml return pandera.io.to_yaml(self, fp) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 223, in to_yaml statistics = _serialize_schema(dataframe_schema) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 93, in _serialize_schema columns = { File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 94, in <dictcomp> col_name: _serialize_component_stats(column_stats) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 68, in _serialize_component_stats "pandas_dtype": component_stats["pandas_dtype"].value, AttributeError: type object 'int' has no attribute 'value'
AttributeError
def __eq__(self, other): # pylint: disable=comparison-with-callable # see https://github.com/PyCQA/pylint/issues/2306 if other is None: return False other_dtype = PandasDtype.get_dtype(other) if self.value == "string" and LEGACY_PANDAS: return PandasDtype.String.value == other_dtype.value elif self.value == "string": return self.value == other_dtype.value return self.str_alias == other_dtype.str_alias
def __eq__(self, other): # pylint: disable=comparison-with-callable # see https://github.com/PyCQA/pylint/issues/2306 if other is None: return False if isinstance(other, str): other = self.from_str_alias(other) if self.value == "string" and LEGACY_PANDAS: return PandasDtype.String.value == other.value elif self.value == "string": return self.value == other.value return self.str_alias == other.str_alias
https://github.com/pandera-dev/pandera/issues/418
Traceback (most recent call last): File "foo.py", line 8, in <module> yaml_schema = schema.to_yaml() File "/Users/nielsbantilan/git/pandera/pandera/schemas.py", line 1197, in to_yaml return pandera.io.to_yaml(self, fp) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 223, in to_yaml statistics = _serialize_schema(dataframe_schema) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 93, in _serialize_schema columns = { File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 94, in <dictcomp> col_name: _serialize_component_stats(column_stats) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 68, in _serialize_component_stats "pandas_dtype": component_stats["pandas_dtype"].value, AttributeError: type object 'int' has no attribute 'value'
AttributeError
def _deserialize_component_stats(serialized_component_stats): from pandera import Check # pylint: disable=import-outside-toplevel pandas_dtype = PandasDtype.from_str_alias( serialized_component_stats["pandas_dtype"] ) checks = None if serialized_component_stats.get("checks") is not None: checks = [ _deserialize_check_stats( getattr(Check, check_name), check_stats, pandas_dtype ) for check_name, check_stats in serialized_component_stats["checks"].items() ] return { "pandas_dtype": pandas_dtype, "checks": checks, **{ key: serialized_component_stats.get(key) for key in [ "name", "nullable", "allow_duplicates", "coerce", "required", "regex", ] if key in serialized_component_stats }, }
def _deserialize_component_stats(serialized_component_stats): from pandera import Check # pylint: disable=import-outside-toplevel pandas_dtype = PandasDtype.from_str_alias( serialized_component_stats["pandas_dtype"] ) checks = None if serialized_component_stats["checks"] is not None: checks = [ _deserialize_check_stats( getattr(Check, check_name), check_stats, pandas_dtype ) for check_name, check_stats in serialized_component_stats["checks"].items() ] return { "pandas_dtype": pandas_dtype, "nullable": serialized_component_stats["nullable"], "checks": checks, **{ key: serialized_component_stats.get(key) for key in [ "name", "allow_duplicates", "coerce", "required", "regex", ] if key in serialized_component_stats }, }
https://github.com/pandera-dev/pandera/issues/418
Traceback (most recent call last): File "foo.py", line 8, in <module> yaml_schema = schema.to_yaml() File "/Users/nielsbantilan/git/pandera/pandera/schemas.py", line 1197, in to_yaml return pandera.io.to_yaml(self, fp) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 223, in to_yaml statistics = _serialize_schema(dataframe_schema) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 93, in _serialize_schema columns = { File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 94, in <dictcomp> col_name: _serialize_component_stats(column_stats) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 68, in _serialize_component_stats "pandas_dtype": component_stats["pandas_dtype"].value, AttributeError: type object 'int' has no attribute 'value'
AttributeError
def get_dataframe_schema_statistics(dataframe_schema): """Get statistical properties from dataframe schema.""" statistics = { "columns": { col_name: { "pandas_dtype": column.pdtype, "nullable": column.nullable, "allow_duplicates": column.allow_duplicates, "coerce": column.coerce, "required": column.required, "regex": column.regex, "checks": parse_checks(column.checks), } for col_name, column in dataframe_schema.columns.items() }, "index": ( None if dataframe_schema.index is None else get_index_schema_statistics(dataframe_schema.index) ), "coerce": dataframe_schema.coerce, } return statistics
def get_dataframe_schema_statistics(dataframe_schema): """Get statistical properties from dataframe schema.""" statistics = { "columns": { col_name: { "pandas_dtype": column._pandas_dtype, "nullable": column.nullable, "allow_duplicates": column.allow_duplicates, "coerce": column.coerce, "required": column.required, "regex": column.regex, "checks": parse_checks(column.checks), } for col_name, column in dataframe_schema.columns.items() }, "index": ( None if dataframe_schema.index is None else get_index_schema_statistics(dataframe_schema.index) ), "coerce": dataframe_schema.coerce, } return statistics
https://github.com/pandera-dev/pandera/issues/418
Traceback (most recent call last): File "foo.py", line 8, in <module> yaml_schema = schema.to_yaml() File "/Users/nielsbantilan/git/pandera/pandera/schemas.py", line 1197, in to_yaml return pandera.io.to_yaml(self, fp) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 223, in to_yaml statistics = _serialize_schema(dataframe_schema) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 93, in _serialize_schema columns = { File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 94, in <dictcomp> col_name: _serialize_component_stats(column_stats) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 68, in _serialize_component_stats "pandas_dtype": component_stats["pandas_dtype"].value, AttributeError: type object 'int' has no attribute 'value'
AttributeError
def pdtype(self) -> Optional[PandasDtype]: """PandasDtype of the dataframe.""" pandas_dtype = PandasDtype.get_str_dtype(self.pandas_dtype) if pandas_dtype is None: return pandas_dtype return PandasDtype.from_str_alias(pandas_dtype)
def pdtype(self) -> Optional[PandasDtype]: """PandasDtype of the dataframe.""" if self.pandas_dtype is None: return self.pandas_dtype return PandasDtype.from_str_alias(PandasDtype.get_str_dtype(self.pandas_dtype))
https://github.com/pandera-dev/pandera/issues/418
Traceback (most recent call last): File "foo.py", line 8, in <module> yaml_schema = schema.to_yaml() File "/Users/nielsbantilan/git/pandera/pandera/schemas.py", line 1197, in to_yaml return pandera.io.to_yaml(self, fp) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 223, in to_yaml statistics = _serialize_schema(dataframe_schema) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 93, in _serialize_schema columns = { File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 94, in <dictcomp> col_name: _serialize_component_stats(column_stats) File "/Users/nielsbantilan/git/pandera/pandera/io.py", line 68, in _serialize_component_stats "pandas_dtype": component_stats["pandas_dtype"].value, AttributeError: type object 'int' has no attribute 'value'
AttributeError
def _parse_annotation(self, raw_annotation: Type) -> None: """Parse key information from annotation. :param annotation: A subscripted type. :returns: Annotation """ self.raw_annotation = raw_annotation self.optional = typing_inspect.is_optional_type(raw_annotation) if self.optional: # e.g: Typing.Union[pandera.typing.Index[str], NoneType] if _LEGACY_TYPING: # pragma: no cover # get_args -> ((pandera.typing.Index, <class 'str'>), <class 'NoneType'>) self.origin, self.arg = typing_inspect.get_args(raw_annotation)[0] # get_args -> (pandera.typing.Index[str], <class 'NoneType'>) raw_annotation = typing_inspect.get_args(raw_annotation)[0] if not (self.optional and _LEGACY_TYPING): self.origin = typing_inspect.get_origin(raw_annotation) args = typing_inspect.get_args(raw_annotation) self.arg = args[0] if args else args self.literal = typing_inspect.is_literal_type(self.arg) if self.literal: self.arg = typing_inspect.get_args(self.arg)[0]
def _parse_annotation(self, raw_annotation: Type) -> None: """Parse key information from annotation. :param annotation: A subscripted type. :returns: Annotation """ self.raw_annotation = raw_annotation self.optional = typing_inspect.is_optional_type(raw_annotation) if self.optional: # e.g: Typing.Union[pandera.typing.Index[str], NoneType] if _LEGACY_TYPING: # pragma: no cover # get_args -> ((pandera.typing.Index, <class 'str'>), <class 'NoneType'>) self.origin, self.arg = typing_inspect.get_args(raw_annotation)[0] return # get_args -> (pandera.typing.Index[str], <class 'NoneType'>) raw_annotation = typing_inspect.get_args(raw_annotation)[0] self.origin = typing_inspect.get_origin(raw_annotation) args = typing_inspect.get_args(raw_annotation) self.arg = args[0] if args else args self.literal = typing_inspect.is_literal_type(self.arg) if self.literal: self.arg = typing_inspect.get_args(self.arg)[0]
https://github.com/pandera-dev/pandera/issues/378
TypeError Traceback (most recent call last) <ipython-input-10-1d3df28d227a> in <module> ----> 1 Schema.validate(df) /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in validate(cls, check_obj, head, tail, sample, random_state, lazy) 146 ) -> pd.DataFrame: 147 """%(validate_doc)s""" --> 148 return cls.to_schema().validate( 149 check_obj, head, tail, sample, random_state, lazy 150 ) /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in to_schema(cls) 119 } 120 columns, index = cls._build_columns_index( --> 121 cls.__fields__, cls.__checks__, **mi_kwargs 122 ) 123 cls.__schema__ = DataFrameSchema( /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/model.py in _build_columns_index(cls, fields, checks, **multiindex_kwargs) 199 required=not annotation.optional, 200 checks=field_checks, --> 201 name=field_name, 202 ) 203 elif annotation.origin is Index: /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schema_components.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, required, name, regex) 77 """ 78 super().__init__( ---> 79 pandas_dtype, checks, nullable, allow_duplicates, coerce 80 ) 81 if ( /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in __init__(self, pandas_dtype, checks, nullable, allow_duplicates, coerce, name) 1453 1454 # make sure pandas dtype is valid -> 1455 self.dtype # pylint: disable=pointless-statement 1456 1457 # this attribute is not meant to be accessed by users and is explicitly /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/schemas.py in dtype(self) 1526 def dtype(self) -> Optional[str]: 1527 """String representation of the dtype.""" -> 1528 return 
PandasDtype.get_str_dtype(self._pandas_dtype) 1529 1530 @property /cw/dtaijupiter/NoCsBack/dtai/pieterr/Projects/socceraction/.venv/lib/python3.6/site-packages/pandera/dtypes.py in get_str_dtype(cls, pandas_dtype_arg) 310 return dtype_.str_alias 311 raise TypeError( --> 312 "type of `pandas_dtype` argument not recognized: " 313 f"{type(pandas_dtype_arg)}. Please specify a pandera PandasDtype " 314 "enum, legal pandas data type, pandas data type string alias, or " TypeError: type of `pandas_dtype` argument not recognized: typing_extensions.Literal. Please specify a pandera PandasDtype enum, legal pandas data type, pandas data type string alias, or numpy data type string alias
TypeError
def _serialize_component_stats(component_stats): """ Serialize column or index statistics into json/yaml-compatible format. """ serialized_checks = None if component_stats["checks"] is not None: serialized_checks = {} for check_name, check_stats in component_stats["checks"].items(): if check_stats is None: warnings.warn( f"Check {check_name} cannot be serialized. This check will be " f"ignored" ) else: serialized_checks[check_name] = _serialize_check_stats( check_stats, component_stats["pandas_dtype"] ) return { "pandas_dtype": component_stats["pandas_dtype"].value, "nullable": component_stats["nullable"], "checks": serialized_checks, **{ key: component_stats.get(key) for key in ["name", "allow_duplicates", "coerce", "required", "regex"] if key in component_stats }, }
def _serialize_component_stats(component_stats): """ Serialize column or index statistics into json/yaml-compatible format. """ serialized_checks = None if component_stats["checks"] is not None: serialized_checks = { check_name: _serialize_check_stats( check_stats, component_stats["pandas_dtype"] ) for check_name, check_stats in component_stats["checks"].items() } return { "pandas_dtype": component_stats["pandas_dtype"].value, "nullable": component_stats["nullable"], "checks": serialized_checks, **{ key: component_stats.get(key) for key in ["name", "allow_duplicates", "coerce", "required", "regex"] if key in component_stats }, }
https://github.com/pandera-dev/pandera/issues/246
--------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-7-6d1fa2fde789> in <module> ----> 1 pa.io.to_script(schema) ~/git/pandera/pandera/io.py in to_script(dataframe_schema, path_or_buf) 309 properties['pandas_dtype'].name 310 ), --> 311 checks=_format_checks(properties["checks"]), 312 nullable=properties["nullable"], 313 ) ~/git/pandera/pandera/io.py in _format_checks(checks_dict) 248 for check_name, check_kwargs in checks_dict.items(): 249 args = ", ".join( --> 250 "{}={}".format(k, v.__repr__()) for k, v in check_kwargs.items() 251 ) 252 checks.append("Check.{}({})".format(check_name, args)) AttributeError: 'NoneType' object has no attribute 'items'
AttributeError
def _format_checks(checks_dict): if checks_dict is None: return "None" checks = [] for check_name, check_kwargs in checks_dict.items(): if check_kwargs is None: warnings.warn( f"Check {check_name} cannot be serialized. This check will be ignored" ) else: args = ", ".join( "{}={}".format(k, v.__repr__()) for k, v in check_kwargs.items() ) checks.append("Check.{}({})".format(check_name, args)) return "[{}]".format(", ".join(checks))
def _format_checks(checks_dict): if checks_dict is None: return "None" checks = [] for check_name, check_kwargs in checks_dict.items(): args = ", ".join( "{}={}".format(k, v.__repr__()) for k, v in check_kwargs.items() ) checks.append("Check.{}({})".format(check_name, args)) return "[{}]".format(", ".join(checks))
https://github.com/pandera-dev/pandera/issues/246
--------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-7-6d1fa2fde789> in <module> ----> 1 pa.io.to_script(schema) ~/git/pandera/pandera/io.py in to_script(dataframe_schema, path_or_buf) 309 properties['pandas_dtype'].name 310 ), --> 311 checks=_format_checks(properties["checks"]), 312 nullable=properties["nullable"], 313 ) ~/git/pandera/pandera/io.py in _format_checks(checks_dict) 248 for check_name, check_kwargs in checks_dict.items(): 249 args = ", ".join( --> 250 "{}={}".format(k, v.__repr__()) for k, v in check_kwargs.items() 251 ) 252 checks.append("Check.{}({})".format(check_name, args)) AttributeError: 'NoneType' object has no attribute 'items'
AttributeError
def get_route_to(self, destination="", protocol="", longer=False): routes = {} # Placeholder for vrf arg vrf = "" # Right not iterating through vrfs is necessary # show ipv6 route doesn't support vrf 'all' if vrf == "": vrfs = sorted(self._get_vrfs()) else: vrfs = [vrf] if protocol.lower() == "direct": protocol = "connected" ipv = "" if IPNetwork(destination).version == 6: ipv = "v6" commands = [] for _vrf in vrfs: commands.append( "show ip{ipv} route vrf {_vrf} {destination} {longer} {protocol} detail".format( ipv=ipv, _vrf=_vrf, destination=destination, longer="longer-prefixes" if longer else "", protocol=protocol, ) ) commands_output = self.device.run_commands(commands) vrf_cache = {} for _vrf, command_output in zip(vrfs, commands_output): if ipv == "v6": routes_out = command_output.get("routes", {}) else: routes_out = command_output.get("vrfs", {}).get(_vrf, {}).get("routes", {}) for prefix, route_details in routes_out.items(): if prefix not in routes.keys(): routes[prefix] = [] route_protocol = route_details.get("routeType") preference = route_details.get("preference", 0) route = { "current_active": True, "last_active": True, "age": 0, "next_hop": "", "protocol": route_protocol, "outgoing_interface": "", "preference": preference, "inactive_reason": "", "routing_table": _vrf, "selected_next_hop": True, "protocol_attributes": {}, } if protocol == "bgp" or route_protocol.lower() in ("ebgp", "ibgp"): nexthop_interface_map = {} for next_hop in route_details.get("vias"): nexthop_ip = napalm.base.helpers.ip(next_hop.get("nexthopAddr")) nexthop_interface_map[nexthop_ip] = next_hop.get("interface") metric = route_details.get("metric") if _vrf not in vrf_cache.keys(): try: command = ( "show ip{ipv} bgp {dest} {longer} detail vrf {_vrf}".format( ipv=ipv, dest=destination, longer="longer-prefixes" if longer else "", _vrf=_vrf, ) ) vrf_cache.update( { _vrf: self.device.run_commands([command])[0] .get("vrfs", {}) .get(_vrf, {}) } ) except CommandError: # Newer EOS can't mix 
longer-prefix and detail command = "show ip{ipv} bgp {dest} {longer} vrf {_vrf}".format( ipv=ipv, dest=destination, longer="longer-prefixes" if longer else "", _vrf=_vrf, ) vrf_cache.update( { _vrf: self.device.run_commands([command])[0] .get("vrfs", {}) .get(_vrf, {}) } ) vrf_details = vrf_cache.get(_vrf) local_as = napalm.base.helpers.as_number(vrf_details.get("asn")) bgp_routes = ( vrf_details.get("bgpRouteEntries", {}) .get(prefix, {}) .get("bgpRoutePaths", []) ) for bgp_route_details in bgp_routes: bgp_route = route.copy() as_path = bgp_route_details.get("asPathEntry", {}).get("asPath", "") as_path_type = bgp_route_details.get("asPathEntry", {}).get( "asPathType", "" ) if as_path_type in ["Internal", "Local"]: remote_as = local_as else: remote_as = napalm.base.helpers.as_number( as_path.strip("()").split()[-1] ) try: remote_address = napalm.base.helpers.ip( bgp_route_details.get("routeDetail", {}) .get("peerEntry", {}) .get("peerAddr", "") ) except AddrFormatError: remote_address = napalm.base.helpers.ip( bgp_route_details.get("peerEntry", {}).get("peerAddr", "") ) local_preference = bgp_route_details.get("localPreference") next_hop = napalm.base.helpers.ip(bgp_route_details.get("nextHop")) active_route = bgp_route_details.get("routeType", {}).get( "active", False ) last_active = active_route # should find smth better communities = bgp_route_details.get("routeDetail", {}).get( "communityList", [] ) preference2 = bgp_route_details.get("weight") inactive_reason = bgp_route_details.get("reasonNotBestpath", "") bgp_route.update( { "current_active": active_route, "inactive_reason": inactive_reason, "last_active": last_active, "next_hop": next_hop, "outgoing_interface": nexthop_interface_map.get(next_hop), "selected_next_hop": active_route, "protocol_attributes": { "metric": metric, "as_path": as_path, "local_preference": local_preference, "local_as": local_as, "remote_as": remote_as, "remote_address": remote_address, "preference2": preference2, "communities": 
communities, }, } ) routes[prefix].append(bgp_route) else: if route_details.get("routeAction") in ("drop",): route["next_hop"] = "NULL" if route_details.get("routingDisabled") is True: route["last_active"] = False route["current_active"] = False for next_hop in route_details.get("vias"): route_next_hop = route.copy() if next_hop.get("nexthopAddr") is None: route_next_hop.update( { "next_hop": "", "outgoing_interface": next_hop.get("interface"), } ) else: route_next_hop.update( { "next_hop": napalm.base.helpers.ip( next_hop.get("nexthopAddr") ), "outgoing_interface": next_hop.get("interface"), } ) routes[prefix].append(route_next_hop) if route_details.get("vias") == []: # empty list routes[prefix].append(route) return routes
def get_route_to(self, destination="", protocol="", longer=False): routes = {} # Placeholder for vrf arg vrf = "" # Right not iterating through vrfs is necessary # show ipv6 route doesn't support vrf 'all' if vrf == "": vrfs = sorted(self._get_vrfs()) else: vrfs = [vrf] if protocol.lower() == "direct": protocol = "connected" ipv = "" if IPNetwork(destination).version == 6: ipv = "v6" commands = [] for _vrf in vrfs: commands.append( "show ip{ipv} route vrf {_vrf} {destination} {longer} {protocol} detail".format( ipv=ipv, _vrf=_vrf, destination=destination, longer="longer-prefixes" if longer else "", protocol=protocol, ) ) commands_output = self.device.run_commands(commands) vrf_cache = {} for _vrf, command_output in zip(vrfs, commands_output): if ipv == "v6": routes_out = command_output.get("routes", {}) else: routes_out = command_output.get("vrfs", {}).get(_vrf, {}).get("routes", {}) for prefix, route_details in routes_out.items(): if prefix not in routes.keys(): routes[prefix] = [] route_protocol = route_details.get("routeType") preference = route_details.get("preference", 0) route = { "current_active": True, "last_active": True, "age": 0, "next_hop": "", "protocol": route_protocol, "outgoing_interface": "", "preference": preference, "inactive_reason": "", "routing_table": _vrf, "selected_next_hop": True, "protocol_attributes": {}, } if protocol == "bgp" or route_protocol.lower() in ("ebgp", "ibgp"): nexthop_interface_map = {} for next_hop in route_details.get("vias"): nexthop_ip = napalm.base.helpers.ip(next_hop.get("nexthopAddr")) nexthop_interface_map[nexthop_ip] = next_hop.get("interface") metric = route_details.get("metric") if _vrf not in vrf_cache.keys(): try: command = ( "show ip{ipv} bgp {dest} {longer} detail vrf {_vrf}".format( ipv=ipv, dest=destination, longer="longer-prefixes" if longer else "", _vrf=_vrf, ) ) vrf_cache.update( { _vrf: self.device.run_commands([command])[0] .get("vrfs", {}) .get(_vrf, {}) } ) except CommandError: # Newer EOS can't mix 
longer-prefix and detail command = "show ip{ipv} bgp {dest} {longer} vrf {_vrf}".format( ipv=ipv, dest=destination, longer="longer-prefixes" if longer else "", _vrf=_vrf, ) vrf_cache.update( { _vrf: self.device.run_commands([command])[0] .get("vrfs", {}) .get(_vrf, {}) } ) vrf_details = vrf_cache.get(_vrf) local_as = napalm.base.helpers.as_number(vrf_details.get("asn")) bgp_routes = ( vrf_details.get("bgpRouteEntries", {}) .get(prefix, {}) .get("bgpRoutePaths", []) ) for bgp_route_details in bgp_routes: bgp_route = route.copy() as_path = bgp_route_details.get("asPathEntry", {}).get("asPath", "") as_path_type = bgp_route_details.get("asPathEntry", {}).get( "asPathType", "" ) if as_path_type in ["Internal", "Local"]: remote_as = local_as else: remote_as = napalm.base.helpers.as_number( as_path.strip("()").split()[-1] ) remote_address = napalm.base.helpers.ip( bgp_route_details.get("routeDetail", {}) .get("peerEntry", {}) .get("peerAddr", "") ) local_preference = bgp_route_details.get("localPreference") next_hop = napalm.base.helpers.ip(bgp_route_details.get("nextHop")) active_route = bgp_route_details.get("routeType", {}).get( "active", False ) last_active = active_route # should find smth better communities = bgp_route_details.get("routeDetail", {}).get( "communityList", [] ) preference2 = bgp_route_details.get("weight") inactive_reason = bgp_route_details.get("reasonNotBestpath", "") bgp_route.update( { "current_active": active_route, "inactive_reason": inactive_reason, "last_active": last_active, "next_hop": next_hop, "outgoing_interface": nexthop_interface_map.get(next_hop), "selected_next_hop": active_route, "protocol_attributes": { "metric": metric, "as_path": as_path, "local_preference": local_preference, "local_as": local_as, "remote_as": remote_as, "remote_address": remote_address, "preference2": preference2, "communities": communities, }, } ) routes[prefix].append(bgp_route) else: if route_details.get("routeAction") in ("drop",): route["next_hop"] = "NULL" 
if route_details.get("routingDisabled") is True: route["last_active"] = False route["current_active"] = False for next_hop in route_details.get("vias"): route_next_hop = route.copy() if next_hop.get("nexthopAddr") is None: route_next_hop.update( { "next_hop": "", "outgoing_interface": next_hop.get("interface"), } ) else: route_next_hop.update( { "next_hop": napalm.base.helpers.ip( next_hop.get("nexthopAddr") ), "outgoing_interface": next_hop.get("interface"), } ) routes[prefix].append(route_next_hop) if route_details.get("vias") == []: # empty list routes[prefix].append(route) return routes
https://github.com/napalm-automation/napalm/issues/1069
Traceback (most recent call last): File "eos2.py", line 7, in <module> dev_info = dev.get_route_to(u'3.3.3.3/32') File "/usr/local/lib/python2.7/dist-packages/napalm/eos/eos.py", line 1258, in get_route_to .get("peerAddr", "") File "/usr/local/lib/python2.7/dist-packages/napalm/base/helpers.py", line 330, in ip addr_obj = IPAddress(addr) File "/usr/local/lib/python2.7/dist-packages/netaddr/ip/__init__.py", line 306, in __init__ 'address from %r' % addr) netaddr.core.AddrFormatError: failed to detect a valid IP address from u''
netaddr.core.AddrFormatError
def _send_command( self, command, delay_factor=None, start=None, expect_string=None, read_output=None, receive=False, ): if not expect_string: expect_string = self._XML_MODE_PROMPT if read_output is None: read_output = "" if not delay_factor: delay_factor = self._READ_DELAY if not start: start = time.time() output = read_output last_read = "" if not read_output and not receive: # because the XML agent is able to process only one single request over the same SSH # session at a time first come first served self._lock_xml_agent(start) try: max_loops = self.timeout / delay_factor last_read = self.device.send_command_expect( command, expect_string=expect_string, strip_prompt=False, strip_command=False, delay_factor=delay_factor, max_loops=max_loops, ) output += last_read except IOError: if (not last_read and self._in_cli_mode()) or ( self._cli_prompt in output and "% Invalid input detected at '^' marker." in output ): # something happened # e.g. connection with the XML agent died while reading # netmiko throws error and the last output read is empty (ofc) # and in CLI mode # # OR # # Sometimes the XML agent simply exits and all issued commands provide the # following output (as in CLI mode) # <? # ^ # % Invalid input detected at '^' marker. # RP/0/RSP1/CPU0:edge01.dus01#<xml version="1.0" encoding="UTF-8"? # ^ # % Invalid input detected at '^' marker. 
# RP/0/RSP1/CPU0:edge01.dus01#<xml version # # Which of course does not contain the XML and netmiko throws the not found # error therefore we need to re-enter in XML mode self._enter_xml_mode() # and let's issue the command again if still got time if not self._timeout_exceeded(start=start): # if still got time # reiterate the command from the beginning return self._send_command( command, expect_string=expect_string, delay_factor=delay_factor, ) else: output += self.device._read_channel_timing() # try to read some more if "0xa3679e00" in output or "0xa367da00" in output: # when multiple parallel request are made, the device throws one of the the errors: # --- # ERROR: 0xa3679e00 'XML Service Library' detected the 'fatal' condition # 'Multiple concurrent requests are not allowed over the same session. # A request is already in progress on this session.' # # ERROR: 0xa367da00 XML Service Library' detected the 'fatal' condition # 'Sending multiple documents is not supported.' # --- # we could use a mechanism similar to NETCONF and push the requests in queue and serve # them sequentially, BUT we are not able to assign unique IDs and identify the # request-reply map so will throw an error that does not help too much :( raise XMLCLIError("XML agent cannot process parallel requests!", self) if not output.strip().endswith("XML>"): if "0x44318c06" in output or ( self._cli_prompt and expect_string != self._cli_prompt and ( output.startswith(self._cli_prompt) or output.endswith(self._cli_prompt) ) ): # sometimes the device throws a stupid error like: # ERROR: 0x44318c06 'XML-TTY' detected the 'warning' condition # 'A Light Weight Messaging library communication function returned an error': No # such device or address and the XML agent connection is closed, but the SSH # connection is fortunately maintained # OR sometimes, the device simply exits from the XML mode without any clue # In both cases, we need to re-enter in XML mode... 
# so, whenever the CLI promt is detected, will re-enter in XML mode # unless the expected string is the prompt self._unlock_xml_agent() self._enter_xml_mode() # however, the command could not be executed properly, so we need to raise the # XMLCLIError exception raise XMLCLIError( "Could not properly execute the command. Re-entering XML mode...", self, ) if ( not output.strip() ): # empty output, means that the device did not start delivering the output # but for sure is still in XML mode as netmiko did not throw error if not self._timeout_exceeded(start=start): return self._send_command( command, receive=True, start=start ) # let's try receiving more raise XMLCLIError(output.strip(), self) self._unlock_xml_agent() return str(output.replace("XML>", "").strip())
def _send_command( self, command, delay_factor=None, start=None, expect_string=None, read_output=None, receive=False, ): if not expect_string: expect_string = self._XML_MODE_PROMPT if read_output is None: read_output = "" if not delay_factor: delay_factor = self._READ_DELAY if not start: start = time.time() output = read_output last_read = "" if not read_output and not receive: # because the XML agent is able to process only one single request over the same SSH # session at a time first come first served self._lock_xml_agent(start) try: max_loops = self.timeout / delay_factor last_read = self.device.send_command_expect( command, expect_string=expect_string, strip_prompt=False, strip_command=False, delay_factor=delay_factor, max_loops=max_loops, ) output += last_read except IOError: if (not last_read and self._in_cli_mode()) or ( self._cli_prompt in output and "% Invalid input detected at '^' marker." in output ): # something happened # e.g. connection with the XML agent died while reading # netmiko throws error and the last output read is empty (ofc) # and in CLI mode # # OR # # Sometimes the XML agent simply exits and all issued commands provide the # following output (as in CLI mode) # <? # ^ # % Invalid input detected at '^' marker. # RP/0/RSP1/CPU0:edge01.dus01#<xml version="1.0" encoding="UTF-8"? # ^ # % Invalid input detected at '^' marker. 
# RP/0/RSP1/CPU0:edge01.dus01#<xml version # # Which of course does not contain the XML and netmiko throws the not found # error therefore we need to re-enter in XML mode self._enter_xml_mode() # and let's issue the command again if still got time if not self._timeout_exceeded(start=start): # if still got time # reiterate the command from the beginning return self._send_command( command, expect_string=expect_string, delay_factor=delay_factor, ) else: output += self._netmiko_recv() # try to read some more if "0xa3679e00" in output or "0xa367da00" in output: # when multiple parallel request are made, the device throws one of the the errors: # --- # ERROR: 0xa3679e00 'XML Service Library' detected the 'fatal' condition # 'Multiple concurrent requests are not allowed over the same session. # A request is already in progress on this session.' # # ERROR: 0xa367da00 XML Service Library' detected the 'fatal' condition # 'Sending multiple documents is not supported.' # --- # we could use a mechanism similar to NETCONF and push the requests in queue and serve # them sequentially, BUT we are not able to assign unique IDs and identify the # request-reply map so will throw an error that does not help too much :( raise XMLCLIError("XML agent cannot process parallel requests!", self) if not output.strip().endswith("XML>"): if "0x44318c06" in output or ( self._cli_prompt and expect_string != self._cli_prompt and ( output.startswith(self._cli_prompt) or output.endswith(self._cli_prompt) ) ): # sometimes the device throws a stupid error like: # ERROR: 0x44318c06 'XML-TTY' detected the 'warning' condition # 'A Light Weight Messaging library communication function returned an error': No # such device or address and the XML agent connection is closed, but the SSH # connection is fortunately maintained # OR sometimes, the device simply exits from the XML mode without any clue # In both cases, we need to re-enter in XML mode... 
# so, whenever the CLI promt is detected, will re-enter in XML mode # unless the expected string is the prompt self._unlock_xml_agent() self._enter_xml_mode() # however, the command could not be executed properly, so we need to raise the # XMLCLIError exception raise XMLCLIError( "Could not properly execute the command. Re-entering XML mode...", self, ) if ( not output.strip() ): # empty output, means that the device did not start delivering the output # but for sure is still in XML mode as netmiko did not throw error if not self._timeout_exceeded(start=start): return self._send_command( command, receive=True, start=start ) # let's try receiving more raise XMLCLIError(output.strip(), self) self._unlock_xml_agent() return str(output.replace("XML>", "").strip())
https://github.com/napalm-automation/napalm/issues/1312
root@1dab935bca2b:/opt/peering-manager# napalm --debug --user root --password xxxx --vendor iosxr --optional_args "ssh_config_file='/root/.ssh/config'" router1 call get_facts 2020-10-28 20:41:43,221 - napalm - DEBUG - Starting napalm's debugging tool 2020-10-28 20:41:43,221 - napalm - DEBUG - Gathering napalm packages 2020-10-28 20:41:43,221 - napalm - DEBUG - napalm==3.1.0 2020-10-28 20:41:43,221 - napalm - DEBUG - get_network_driver - Calling with args: ('iosxr',), {} 2020-10-28 20:41:43,221 - napalm - DEBUG - get_network_driver - Successful 2020-10-28 20:41:43,221 - napalm - DEBUG - __init__ - Calling with args: (<class 'napalm.iosxr.iosxr.IOSXRDriver'>, 'router1', 'root'), {'password': '*******', 'timeout': 60, 'optional_args': {'ssh_config_file': '/root/.ssh/config'}} 2020-10-28 20:41:43,222 - napalm - DEBUG - __init__ - Successful 2020-10-28 20:41:43,222 - napalm - DEBUG - pre_connection_tests - Calling with args: (<napalm.iosxr.iosxr.IOSXRDriver object at 0x7f8c067282e0>,), {} 2020-10-28 20:41:43,222 - napalm - DEBUG - open - Calling with args: (<napalm.iosxr.iosxr.IOSXRDriver object at 0x7f8c067282e0>,), {} 2020-10-28 20:41:46,145 - napalm - DEBUG - open - Successful 2020-10-28 20:41:46,145 - napalm - DEBUG - connection_tests - Calling with args: (<napalm.iosxr.iosxr.IOSXRDriver object at 0x7f8c067282e0>,), {} 2020-10-28 20:41:46,146 - napalm - DEBUG - get_facts - Calling with args: (<napalm.iosxr.iosxr.IOSXRDriver object at 0x7f8c067282e0>,), {} 2020-10-28 20:41:57,415 - napalm.pyIOSXR.iosxr - DEBUG - <Element Response at 0x7f8c0670e440> 2020-10-28 20:42:00,274 - napalm.pyIOSXR.iosxr - DEBUG - <Element Response at 0x7f8c0672c400> 2020-10-28 20:42:00,301 - napalm - DEBUG - Gathered facts: { "vendor": "Cisco", "os_version": "6.1.2", "hostname": "router1", "uptime": 19475078, "serial_number": "FOX24000", "fqdn": "router1", "model": "ASR-9006-AC-V2", "interface_list": [ "BVI1", "Bundle-Ether1", "GigabitEthernet0/1/0/0", (...) 
] } { "vendor": "Cisco", "os_version": "6.1.2", "hostname": "router1", "uptime": 19475078, "serial_number": "FOX24000", "fqdn": "router1", "model": "ASR-9006-AC-V2", "interface_list": [ "BVI1", "Bundle-Ether1", "GigabitEthernet0/1/0/0", (...) ] } 2020-10-28 20:42:00,301 - napalm - DEBUG - get_facts - Successful 2020-10-28 20:42:00,301 - napalm - DEBUG - method - Calling with args: (<napalm.iosxr.iosxr.IOSXRDriver object at 0x7f8c067282e0>, 'get_facts'), {} 2020-10-28 20:42:00,301 - napalm - DEBUG - get_facts - Attempting to resolve method 2020-10-28 20:42:00,301 - napalm - DEBUG - get_facts - Attempting to call method with kwargs: {} 2020-10-28 20:42:12,707 - napalm - ERROR - method - Failed: 'CiscoXrSSH' object has no attribute 'receive_data_generator' ================= Traceback ================= Traceback (most recent call last): File "/usr/local/bin/napalm", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.9/site-packages/napalm/base/clitools/cl_napalm.py", line 308, in main run_tests(args) File "/usr/local/lib/python3.9/site-packages/napalm/base/clitools/cl_napalm.py", line 291, in run_tests call_getter(device, args.method, **method_kwargs) File "/usr/local/lib/python3.9/site-packages/napalm/base/clitools/cl_napalm.py", line 27, in wrapper r = func(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/napalm/base/clitools/cl_napalm.py", line 255, in call_getter r = func(**kwargs) File "/usr/local/lib/python3.9/site-packages/napalm/iosxr/iosxr.py", line 166, in get_facts facts_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(facts_rpc_request)) File "/usr/local/lib/python3.9/site-packages/napalm/pyIOSXR/iosxr.py", line 172, in make_rpc_call result = self._execute_rpc(rpc_command) File "/usr/local/lib/python3.9/site-packages/napalm/pyIOSXR/iosxr.py", line 426, in _execute_rpc response = self._send_command(xml_rpc_command, delay_factor=delay_factor) File "/usr/local/lib/python3.9/site-packages/napalm/pyIOSXR/iosxr.py", line 399, in 
_send_command return self._send_command( File "/usr/local/lib/python3.9/site-packages/napalm/pyIOSXR/iosxr.py", line 351, in _send_command output += self._netmiko_recv() # try to read some more File "/usr/local/lib/python3.9/site-packages/napalm/pyIOSXR/iosxr.py", line 412, in _netmiko_recv for tmp_output in self.device.receive_data_generator(): AttributeError: 'CiscoXrSSH' object has no attribute 'receive_data_generator'
AttributeError
def get_interfaces_counters(self): """ Return interface counters and errors. 'tx_errors': int, 'rx_errors': int, 'tx_discards': int, 'rx_discards': int, 'tx_octets': int, 'rx_octets': int, 'tx_unicast_packets': int, 'rx_unicast_packets': int, 'tx_multicast_packets': int, 'rx_multicast_packets': int, 'tx_broadcast_packets': int, 'rx_broadcast_packets': int, Currently doesn't determine output broadcasts, multicasts """ counters = {} command = "show interfaces" output = self._send_command(command) sh_int_sum_cmd = "show interface summary" sh_int_sum_cmd_out = self._send_command(sh_int_sum_cmd) # Break output into per-interface sections interface_strings = re.split(r".* line protocol is .*", output, flags=re.M) header_strings = re.findall(r".* line protocol is .*", output, flags=re.M) empty = interface_strings.pop(0).strip() if empty: raise ValueError("Unexpected output from: {}".format(command)) # Parse out the interface names intf = [] for intf_line in header_strings: interface, _ = re.split(r" is .* line protocol is ", intf_line) intf.append(interface.strip()) if len(intf) != len(interface_strings): raise ValueError("Unexpected output from: {}".format(command)) # Re-join interface names with interface strings for interface, interface_str in zip(intf, interface_strings): counters.setdefault(interface, {}) for line in interface_str.splitlines(): if "packets input" in line: # '0 packets input, 0 bytes, 0 no buffer' match = re.search(r"(\d+) packets input.* (\d+) bytes", line) counters[interface]["rx_unicast_packets"] = int(match.group(1)) counters[interface]["rx_octets"] = int(match.group(2)) elif "broadcast" in line: # 'Received 0 broadcasts (0 multicasts)' # 'Received 264071 broadcasts (39327 IP multicasts)' # 'Received 338 broadcasts, 0 runts, 0 giants, 0 throttles' match = re.search(r"Received (\d+) broadcasts.*(\d+).*multicasts", line) alt_match = re.search(r"Received (\d+) broadcasts.*", line) if match: counters[interface]["rx_broadcast_packets"] = 
int(match.group(1)) counters[interface]["rx_multicast_packets"] = int(match.group(2)) elif alt_match: counters[interface]["rx_broadcast_packets"] = int( alt_match.group(1) ) counters[interface]["rx_multicast_packets"] = -1 else: counters[interface]["rx_broadcast_packets"] = -1 counters[interface]["rx_multicast_packets"] = -1 elif "packets output" in line: # '0 packets output, 0 bytes, 0 underruns' match = re.search(r"(\d+) packets output.* (\d+) bytes", line) counters[interface]["tx_unicast_packets"] = int(match.group(1)) counters[interface]["tx_octets"] = int(match.group(2)) counters[interface]["tx_broadcast_packets"] = -1 counters[interface]["tx_multicast_packets"] = -1 elif "input errors" in line: # '0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored' match = re.search(r"(\d+) input errors", line) counters[interface]["rx_errors"] = int(match.group(1)) counters[interface]["rx_discards"] = -1 elif "output errors" in line: # '0 output errors, 0 collisions, 1 interface resets' match = re.search(r"(\d+) output errors", line) counters[interface]["tx_errors"] = int(match.group(1)) counters[interface]["tx_discards"] = -1 interface_type, interface_number = split_interface(interface) if interface_type in [ "HundredGigabitEthernet", "FortyGigabitEthernet", "TenGigabitEthernet", ]: interface = abbreviated_interface_name(interface) for line in sh_int_sum_cmd_out.splitlines(): if interface in line: # Line is tabular output with columns # Interface IHQ IQD OHQ OQD RXBS RXPS TXBS TXPS TRTL # where columns (excluding interface) are integers regex = ( r"\b" + interface + r"\b\s+(\d+)\s+(?P<IQD>\d+)\s+(\d+)" + r"\s+(?P<OQD>\d+)\s+(\d+)\s+(\d+)" + r"\s+(\d+)\s+(\d+)\s+(\d+)" ) match = re.search(regex, line) if match: can_interface = canonical_interface_name(interface) try: counters[can_interface]["rx_discards"] = int(match.group("IQD")) counters[can_interface]["tx_discards"] = int(match.group("OQD")) except KeyError: counters[interface]["rx_discards"] = int(match.group("IQD")) 
counters[interface]["tx_discards"] = int(match.group("OQD")) return counters
def get_interfaces_counters(self): """ Return interface counters and errors. 'tx_errors': int, 'rx_errors': int, 'tx_discards': int, 'rx_discards': int, 'tx_octets': int, 'rx_octets': int, 'tx_unicast_packets': int, 'rx_unicast_packets': int, 'tx_multicast_packets': int, 'rx_multicast_packets': int, 'tx_broadcast_packets': int, 'rx_broadcast_packets': int, Currently doesn't determine output broadcasts, multicasts """ counters = {} command = "show interfaces" output = self._send_command(command) sh_int_sum_cmd = "show interface summary" sh_int_sum_cmd_out = self._send_command(sh_int_sum_cmd) # Break output into per-interface sections interface_strings = re.split(r".* line protocol is .*", output, flags=re.M) header_strings = re.findall(r".* line protocol is .*", output, flags=re.M) empty = interface_strings.pop(0).strip() if empty: raise ValueError("Unexpected output from: {}".format(command)) # Parse out the interface names intf = [] for intf_line in header_strings: interface, _ = re.split(r" is .* line protocol is ", intf_line) intf.append(interface.strip()) if len(intf) != len(interface_strings): raise ValueError("Unexpected output from: {}".format(command)) # Re-join interface names with interface strings for interface, interface_str in zip(intf, interface_strings): counters.setdefault(interface, {}) for line in interface_str.splitlines(): if "packets input" in line: # '0 packets input, 0 bytes, 0 no buffer' match = re.search(r"(\d+) packets input.* (\d+) bytes", line) counters[interface]["rx_unicast_packets"] = int(match.group(1)) counters[interface]["rx_octets"] = int(match.group(2)) elif "broadcast" in line: # 'Received 0 broadcasts (0 multicasts)' # 'Received 264071 broadcasts (39327 IP multicasts)' # 'Received 338 broadcasts, 0 runts, 0 giants, 0 throttles' match = re.search(r"Received (\d+) broadcasts.*(\d+).*multicasts", line) alt_match = re.search(r"Received (\d+) broadcasts.*", line) if match: counters[interface]["rx_broadcast_packets"] = 
int(match.group(1)) counters[interface]["rx_multicast_packets"] = int(match.group(2)) elif alt_match: counters[interface]["rx_broadcast_packets"] = int( alt_match.group(1) ) counters[interface]["rx_multicast_packets"] = -1 else: counters[interface]["rx_broadcast_packets"] = -1 counters[interface]["rx_multicast_packets"] = -1 elif "packets output" in line: # '0 packets output, 0 bytes, 0 underruns' match = re.search(r"(\d+) packets output.* (\d+) bytes", line) counters[interface]["tx_unicast_packets"] = int(match.group(1)) counters[interface]["tx_octets"] = int(match.group(2)) counters[interface]["tx_broadcast_packets"] = -1 counters[interface]["tx_multicast_packets"] = -1 elif "input errors" in line: # '0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored' match = re.search(r"(\d+) input errors", line) counters[interface]["rx_errors"] = int(match.group(1)) counters[interface]["rx_discards"] = -1 elif "output errors" in line: # '0 output errors, 0 collisions, 1 interface resets' match = re.search(r"(\d+) output errors", line) counters[interface]["tx_errors"] = int(match.group(1)) counters[interface]["tx_discards"] = -1 interface_type, interface_number = split_interface(interface) if interface_type in [ "HundredGigabitEthernet", "FortyGigabitEthernet", "TenGigabitEthernet", ]: interface = abbreviated_interface_name(interface) for line in sh_int_sum_cmd_out.splitlines(): if interface in line: # Line is tabular output with columns # Interface IHQ IQD OHQ OQD RXBS RXPS TXBS TXPS TRTL # where columns (excluding interface) are integers regex = ( r"\b" + interface + r"\b\s+(\d+)\s+(?P<IQD>\d+)\s+(\d+)" + r"\s+(?P<OQD>\d+)\s+(\d+)\s+(\d+)" + r"\s+(\d+)\s+(\d+)\s+(\d+)" ) match = re.search(regex, line) if match: interface = canonical_interface_name(interface) counters[interface]["rx_discards"] = int(match.group("IQD")) counters[interface]["tx_discards"] = int(match.group("OQD")) return counters
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def _process_optional_args(self, optional_args): # Define locking method self.lock_disable = optional_args.get("lock_disable", False) self.enablepwd = optional_args.pop("enable_password", "") self.eos_autoComplete = optional_args.pop("eos_autoComplete", None) # eos_transport is there for backwards compatibility, transport is the preferred method transport = optional_args.get( "transport", optional_args.get("eos_transport", "https") ) self.fn0039_config = optional_args.pop("eos_fn0039_config", False) try: self.transport_class = pyeapi.client.TRANSPORTS[transport] except KeyError: raise ConnectionException("Unknown transport: {}".format(self.transport)) init_args = inspect.getfullargspec(self.transport_class.__init__)[0] init_args.pop(0) # Remove "self" init_args.append("enforce_verification") # Not an arg for unknown reason filter_args = ["host", "username", "password", "timeout", "lock_disable"] self.eapi_kwargs = { k: v for k, v in optional_args.items() if k in init_args and k not in filter_args }
def _process_optional_args(self, optional_args):
    """Extract NAPALM optional arguments and derive pyeapi transport kwargs.

    Fix: also consume ``eos_fn0039_config`` so configuration loads can
    control FN0039 CLI command rewriting; previously the option was
    silently ignored and never removed from ``optional_args``.
    """
    # Locking behaviour and enable secret.
    self.lock_disable = optional_args.get("lock_disable", False)
    self.enablepwd = optional_args.pop("enable_password", "")
    self.eos_autoComplete = optional_args.pop("eos_autoComplete", None)
    # eos_transport is there for backwards compatibility, transport is the
    # preferred method
    transport = optional_args.get(
        "transport", optional_args.get("eos_transport", "https")
    )
    # Whether FN0039 CLI translation should be applied when loading config.
    self.fn0039_config = optional_args.pop("eos_fn0039_config", False)
    try:
        self.transport_class = pyeapi.client.TRANSPORTS[transport]
    except KeyError:
        raise ConnectionException("Unknown transport: {}".format(self.transport))

    init_args = inspect.getfullargspec(self.transport_class.__init__)[0]
    init_args.pop(0)  # Remove "self"
    init_args.append("enforce_verification")  # Not an arg for unknown reason

    # Handled by the driver itself; never forwarded to the transport.
    filter_args = ["host", "username", "password", "timeout", "lock_disable"]

    self.eapi_kwargs = {
        k: v
        for k, v in optional_args.items()
        if k in init_args and k not in filter_args
    }
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def _load_config(self, filename=None, config=None, replace=True):
    """Stage configuration into an EOS config session.

    :param filename: path of a configuration file to load; wins over ``config``.
    :param config: configuration as a single string or a list of lines.
    :param replace: when True, roll back to a clean config first (replace
        semantics); when False, merge on top of the running config.
    :raises ReplaceConfigException: device rejected a command (replace mode).
    :raises MergeConfigException: device rejected a command (merge mode).
    """
    if self.config_session is None:
        # One session per driver instance; microsecond suffix avoids collisions.
        self.config_session = "napalm_{}".format(datetime.now().microsecond)
    commands = []
    commands.append("configure session {}".format(self.config_session))
    if replace:
        commands.append("rollback clean-config")
    if filename is not None:
        with open(filename, "r") as f:
            lines = f.readlines()
    else:
        if isinstance(config, list):
            lines = config
        else:
            lines = config.splitlines()
    for line in lines:
        line = line.strip()
        if line == "":
            continue
        # Drop plain comment lines, but keep "!!"-prefixed lines.
        if line.startswith("!") and not line.startswith("!!"):
            continue
        commands.append(line)
    # Convert heredoc-style commands (banners etc.) to the API's multiline form.
    for start, depth in [(s, d) for (s, d) in self.HEREDOC_COMMANDS if s in commands]:
        commands = self._multiline_convert(commands, start=start, depth=depth)
    commands = self._mode_comment_convert(commands)
    try:
        # fn0039_transform threads the eos_fn0039_config option through to
        # Node.run_commands, controlling FN0039 CLI command rewriting.
        if self.eos_autoComplete is not None:
            self.device.run_commands(
                commands,
                autoComplete=self.eos_autoComplete,
                fn0039_transform=self.fn0039_config,
            )
        else:
            self.device.run_commands(commands, fn0039_transform=self.fn0039_config)
    except pyeapi.eapilib.CommandError as e:
        # Any failure aborts the whole session before re-raising.
        self.discard_config()
        msg = str(e)
        if replace:
            raise ReplaceConfigException(msg)
        else:
            raise MergeConfigException(msg)
def _load_config(self, filename=None, config=None, replace=True):
    """Stage configuration into an EOS config session.

    Fix: forward the ``eos_fn0039_config`` setting as ``fn0039_transform``
    to ``run_commands`` so FN0039 CLI rewriting of configuration lines can
    be disabled; previously conversion was always applied.

    :param filename: path of a configuration file to load; wins over ``config``.
    :param config: configuration as a single string or a list of lines.
    :param replace: True rolls back to a clean config first (replace
        semantics); False merges on top of the running config.
    :raises ReplaceConfigException: device rejected a command (replace mode).
    :raises MergeConfigException: device rejected a command (merge mode).
    """
    if self.config_session is None:
        # Microsecond suffix keeps concurrent driver instances apart.
        self.config_session = "napalm_{}".format(datetime.now().microsecond)

    commands = ["configure session {}".format(self.config_session)]
    if replace:
        commands.append("rollback clean-config")

    if filename is not None:
        with open(filename, "r") as f:
            lines = f.readlines()
    else:
        lines = config if isinstance(config, list) else config.splitlines()

    for line in lines:
        line = line.strip()
        if line == "":
            continue
        # Drop plain comment lines, but keep "!!"-prefixed lines.
        if line.startswith("!") and not line.startswith("!!"):
            continue
        commands.append(line)

    # Convert heredoc-style commands (banners etc.) to the API's multiline form.
    for start, depth in [(s, d) for (s, d) in self.HEREDOC_COMMANDS if s in commands]:
        commands = self._multiline_convert(commands, start=start, depth=depth)

    commands = self._mode_comment_convert(commands)

    try:
        # getattr keeps this safe even if the option was never processed.
        fn0039 = getattr(self, "fn0039_config", False)
        if self.eos_autoComplete is not None:
            self.device.run_commands(
                commands,
                autoComplete=self.eos_autoComplete,
                fn0039_transform=fn0039,
            )
        else:
            self.device.run_commands(commands, fn0039_transform=fn0039)
    except pyeapi.eapilib.CommandError as e:
        # Any failure aborts the whole session before re-raising.
        self.discard_config()
        msg = str(e)
        if replace:
            raise ReplaceConfigException(msg)
        else:
            raise MergeConfigException(msg)
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def run_commands(self, commands, **kwargs):
    """Translate commands for the device's CLI version, then delegate upward.

    :param commands: a single command string or a list of commands
    :param kwargs: forwarded to the parent implementation;
        ``fn0039_transform`` (default True) controls whether the FN0039
        CLI translation is applied first
    :return: list of outputs
    """
    do_transform = kwargs.pop("fn0039_transform", True)
    if do_transform:
        source = [commands] if isinstance(commands, str) else commands
        commands = [cli_convert(item, self.cli_version) for item in source]
    return super(Node, self).run_commands(commands, **kwargs)
def run_commands(self, commands, **kwargs):
    """
    Run commands wrapper.

    Fix: add an ``fn0039_transform`` kwarg (default True, backward
    compatible) so callers can opt out of the FN0039 CLI rewriting;
    previously every command was unconditionally converted, which breaks
    configuration loads that must be sent verbatim.

    :param commands: a single command string or a list of commands
    :param kwargs: other args forwarded to the parent implementation
    :return: list of outputs
    """
    fn0039_transform = kwargs.pop("fn0039_transform", True)
    if fn0039_transform:
        if isinstance(commands, str):
            commands = [cli_convert(commands, self.cli_version)]
        else:
            commands = [cli_convert(cmd, self.cli_version) for cmd in commands]
    return super(Node, self).run_commands(commands, **kwargs)
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def get_optics(self):
    """Return transceiver DOM readings keyed by canonical interface name.

    Parses 'show interfaces transceiver'; on VSS chassis (where the bare
    command is incomplete) it queries each switch member separately.
    Returns {} when the platform does not support the command or no
    table body is present.
    """
    command = "show interfaces transceiver"
    output = self._send_command(command)
    is_vss = False

    # Check if router supports the command
    if "% Invalid input" in output:
        return {}
    elif "% Incomplete command" in output:
        # VSS needs an explicit switch member; non-VSS incomplete output
        # falls through and returns {} via the IndexError branch below.
        if self._is_vss():
            is_vss = True
            command1 = "show interfaces transceiver switch 1"
            command2 = "show interfaces transceiver switch 2"
            output1 = self._send_command(command1)
            output2 = self._send_command(command2)

    # Formatting data into return data structure
    optics_detail = {}

    # The table body starts after a dashed separator line.
    if is_vss:
        try:
            split_output = re.split(r"^---------.*$", output1, flags=re.M)[1]
            split_output += re.split(r"^---------.*$", output2, flags=re.M)[1]
        except IndexError:
            return {}
    else:
        try:
            split_output = re.split(r"^---------.*$", output, flags=re.M)[1]
        except IndexError:
            return {}

    split_output = split_output.strip()

    for optics_entry in split_output.splitlines():
        # Example, Te1/0/1 34.6 3.29 -2.0 -3.5
        # NOTE(review): str.split() never raises ValueError; this guard
        # looks vestigial — confirm before removing.
        try:
            split_list = optics_entry.split()
        except ValueError:
            return {}

        int_brief = split_list[0]
        output_power = split_list[3]
        input_power = split_list[4]

        port = canonical_interface_name(int_brief)

        port_detail = {"physical_channels": {"channel": []}}

        # If interface is shutdown it returns "N/A" as output power
        # or "N/A" as input power
        # Converting that to -100.0 float
        try:
            float(output_power)
        except ValueError:
            output_power = -100.0
        try:
            float(input_power)
        except ValueError:
            input_power = -100.0

        # Defaulting avg, min, max values to -100.0 since device does not
        # return these values
        # NOTE(review): `if "input_power"` / `if "output_power"` test a
        # non-empty string literal and are always true — the -100.0 arms
        # below are dead code; the real fallback is the try/except above.
        optic_states = {
            "index": 0,
            "state": {
                "input_power": {
                    "instant": (float(input_power) if "input_power" else -100.0),
                    "avg": -100.0,
                    "min": -100.0,
                    "max": -100.0,
                },
                "output_power": {
                    "instant": (float(output_power) if "output_power" else -100.0),
                    "avg": -100.0,
                    "min": -100.0,
                    "max": -100.0,
                },
                "laser_bias_current": {
                    "instant": 0.0,
                    "avg": 0.0,
                    "min": 0.0,
                    "max": 0.0,
                },
            },
        }

        port_detail["physical_channels"]["channel"].append(optic_states)
        optics_detail[port] = port_detail

    return optics_detail
def get_optics(self):
    """Return transceiver DOM readings keyed by canonical interface name.

    Fix: interfaces that are shut down report "N/A" for *input* power as
    well as output power; previously only output power was sanitized, so
    ``float(input_power)`` raised ValueError. Both values are now coerced
    to the -100.0 sentinel.
    """
    command = "show interfaces transceiver"
    output = self._send_command(command)

    # Platform does not support the command at all.
    if "% Invalid input" in output:
        return {}

    # Formatting data into return data structure
    optics_detail = {}

    # The table body starts after a dashed separator line.
    try:
        split_output = re.split(r"^---------.*$", output, flags=re.M)[1]
    except IndexError:
        return {}

    split_output = split_output.strip()

    for optics_entry in split_output.splitlines():
        # Example, Te1/0/1 34.6 3.29 -2.0 -3.5
        try:
            split_list = optics_entry.split()
        except ValueError:
            return {}

        int_brief = split_list[0]
        output_power = split_list[3]
        input_power = split_list[4]

        port = canonical_interface_name(int_brief)

        port_detail = {"physical_channels": {"channel": []}}

        # If the interface is shutdown the device reports "N/A" for either
        # power value; convert that to the -100.0 sentinel.
        try:
            float(output_power)
        except ValueError:
            output_power = -100.0
        try:
            float(input_power)
        except ValueError:
            input_power = -100.0

        # Defaulting avg, min, max values to -100.0 since device does not
        # return these values
        optic_states = {
            "index": 0,
            "state": {
                "input_power": {
                    "instant": (float(input_power) if "input_power" else -100.0),
                    "avg": -100.0,
                    "min": -100.0,
                    "max": -100.0,
                },
                "output_power": {
                    "instant": (float(output_power) if "output_power" else -100.0),
                    "avg": -100.0,
                    "min": -100.0,
                    "max": -100.0,
                },
                "laser_bias_current": {
                    "instant": 0.0,
                    "avg": 0.0,
                    "min": 0.0,
                    "max": 0.0,
                },
            },
        }

        port_detail["physical_channels"]["channel"].append(optic_states)
        optics_detail[port] = port_detail

    return optics_detail
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def commit_config(self, message=""):
    """Commit the candidate configuration, unlock, and close any private session.

    :param message: optional commit comment recorded on the device.
    """
    extra = {}
    if message:
        extra["comment"] = message
    self.device.cu.commit(ignore_warning=self.ignore_warning, **extra)
    unlock_needed = not (self.lock_disable or self.session_config_lock)
    if unlock_needed:
        self._unlock()
    if self.config_private:
        self.device.rpc.close_configuration()
def commit_config(self, message=""):
    """Commit configuration.

    Fix: when a private configuration session was opened, close it after
    committing; previously the private candidate database was left open
    on the device.

    :param message: optional commit comment recorded on the device.
    """
    commit_args = {"comment": message} if message else {}
    self.device.cu.commit(ignore_warning=self.ignore_warning, **commit_args)
    if not self.lock_disable and not self.session_config_lock:
        self._unlock()
    # getattr keeps this backward-safe if the attribute was never set.
    if getattr(self, "config_private", False):
        self.device.rpc.close_configuration()
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def discard_config(self):
    """Throw away pending changes by rolling back to rollback 0."""
    self.device.cu.rollback(rb_id=0)
    should_unlock = not (self.lock_disable or self.session_config_lock)
    if should_unlock:
        self._unlock()
    if self.config_private:
        self.device.rpc.close_configuration()
def discard_config(self):
    """Discard changes (rollback 0).

    Fix: when a private configuration session was opened, close it after
    rolling back; previously the private candidate database was left open
    on the device.
    """
    self.device.cu.rollback(rb_id=0)
    if not self.lock_disable and not self.session_config_lock:
        self._unlock()
    # getattr keeps this backward-safe if the attribute was never set.
    if getattr(self, "config_private", False):
        self.device.rpc.close_configuration()
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def _send_command(self, command, raw_text=False, cmd_verify=True): """ Wrapper for Netmiko's send_command method. raw_text argument is not used and is for code sharing with NX-API. """ return self.device.send_command(command, cmd_verify=cmd_verify)
def _send_command(self, command, raw_text=False): """ Wrapper for Netmiko's send_command method. raw_text argument is not used and is for code sharing with NX-API. """ return self.device.send_command(command)
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def is_alive(self):
    """Return whether the underlying SSH session is still usable."""
    keepalive = chr(0)  # ASCII NUL probes the channel without producing output
    if self.device is None:
        return {"is_alive": False}
    try:
        # Skip Netmiko's echo verification — a NUL byte has no echo.
        self._send_command(keepalive, cmd_verify=False)
    except (socket.error, EOFError):
        # Failure to send proves the channel is dead.
        return {"is_alive": False}
    return {"is_alive": self.device.remote_conn.transport.is_active()}
def is_alive(self):
    """Returns a flag with the state of the SSH connection.

    Fix: send the keepalive NUL with ``cmd_verify=False`` — Netmiko
    cannot verify the echo of a null byte, so leaving verification on
    can hang or fail on perfectly healthy sessions.
    """
    null = chr(0)
    try:
        if self.device is None:
            return {"is_alive": False}
        else:
            # Try sending ASCII null byte to maintain the connection alive
            self._send_command(null, cmd_verify=False)
    except (socket.error, EOFError):
        # If unable to send, we can tell for sure that the connection is
        # unusable, hence return False.
        return {"is_alive": False}
    return {"is_alive": self.device.remote_conn.transport.is_active()}
https://github.com/napalm-automation/napalm/issues/1200
Traceback (most recent call last): File "list-vlan2.py", line 19, in <module> print(yaml.dump(device.get_interfaces_counters())) File "/srv/home/adm-brin-f/Devel/pythonspace/test-napalm/.venv/lib/python3.6/site-packages/napalm/ios/ios.py", line 2133, in get_interfaces_counters counters[interface]["rx_discards"] = int(match.group("IQD")) KeyError: 'Management0'
KeyError
def get_interfaces_ip(self):
    """
    Get interface IP details. Returns a dictionary of dictionaries.

    Interfaces without an address ("IP address: none" / "IPv6 address:
    none", e.g. an unconfigured Management0) are still present with an
    empty address dict.

    Sample output:
    {
        "Ethernet2/3": {
            "ipv4": {"4.4.4.4": {"prefix_length": 16}},
            "ipv6": {
                "2001:db8::1": {"prefix_length": 10},
                "fe80::2ec2:60ff:fe4f:feb2": {"prefix_length": "128"}
            }
        },
        "Ethernet2/2": {
            "ipv4": {"2.2.2.2": {"prefix_length": 27}}
        }
    }
    """
    interfaces_ip = {}
    ipv4_command = "show ip interface vrf all"
    ipv6_command = "show ipv6 interface vrf all"
    output_v4 = self._send_command(ipv4_command)
    output_v6 = self._send_command(ipv6_command)

    v4_interfaces = {}
    for line in output_v4.splitlines():
        # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,
        # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0
        # IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "IP address" in line:
            ip_address = line.split(",")[0].split()[2]
            try:
                prefix_len = int(line.split()[5].split("/")[1])
            except (ValueError, IndexError):
                # "IP address: none" has no subnet field at token index 5.
                prefix_len = "N/A"
            if ip_address == "none":
                # Record the interface even though it has no IPv4 address.
                v4_interfaces.setdefault(interface, {})
            else:
                val = {"prefix_length": prefix_len}
                v4_interfaces.setdefault(interface, {})[ip_address] = val

    v6_interfaces = {}
    for line in output_v6.splitlines():
        # Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40
        # IPv6 address:
        #   2001:11:2233::a1/24 [VALID]
        #   2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID]
        # IPv6 subnet:  2001::/24
        # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
        # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "VALID" in line:
            line = line.strip()
            if "link-local address" in line:
                # IPv6 link-local address: fe80::... (default) [VALID]
                ip_address = line.split()[3]
                prefix_len = "64"
            elif "IPv6 address" in line:
                # IPv6 address: fe80::... [VALID]
                ip_address = line.split()[2]
                prefix_len = "64"
            else:
                # Bare "addr/len [VALID]" continuation line.
                ip_address, prefix_len = line.split()[0].split("/")
                prefix_len = int(prefix_len)
            val = {"prefix_length": prefix_len}
            v6_interfaces.setdefault(interface, {})[ip_address] = val
        else:
            # Covers "IPv6 address: none" (and any other non-address line):
            # ensure the interface is present even without addresses.
            v6_interfaces.setdefault(interface, {})

    # Join data from intermediate dictionaries.
    for interface, data in v4_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data
    for interface, data in v6_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data

    return interfaces_ip
def get_interfaces_ip(self):
    """
    Get interface IP details. Returns a dictionary of dictionaries.

    Fixes two parsing crashes (see IndexError on "IP address: none" for
    interfaces such as Management0):
    * the prefix-length extraction now tolerates a missing subnet field,
    * address-less interfaces ("IP address: none" / "IPv6 address: none")
      are recorded with an empty address dict instead of crashing.

    Sample output:
    {
        "Ethernet2/3": {
            "ipv4": {"4.4.4.4": {"prefix_length": 16}},
            "ipv6": {
                "2001:db8::1": {"prefix_length": 10},
                "fe80::2ec2:60ff:fe4f:feb2": {"prefix_length": "128"}
            }
        },
        "Ethernet2/2": {
            "ipv4": {"2.2.2.2": {"prefix_length": 27}}
        }
    }
    """
    interfaces_ip = {}
    ipv4_command = "show ip interface vrf all"
    ipv6_command = "show ipv6 interface vrf all"
    output_v4 = self._send_command(ipv4_command)
    output_v6 = self._send_command(ipv6_command)

    v4_interfaces = {}
    for line in output_v4.splitlines():
        # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,
        # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "IP address" in line:
            ip_address = line.split(",")[0].split()[2]
            try:
                prefix_len = int(line.split()[5].split("/")[1])
            except (ValueError, IndexError):
                # "IP address: none" has no subnet field at token index 5.
                prefix_len = "N/A"
            if ip_address == "none":
                # Record the interface even though it has no IPv4 address.
                v4_interfaces.setdefault(interface, {})
            else:
                val = {"prefix_length": prefix_len}
                v4_interfaces.setdefault(interface, {})[ip_address] = val

    v6_interfaces = {}
    for line in output_v6.splitlines():
        # Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40
        # IPv6 address:
        #   2001:11:2233::a1/24 [VALID]
        # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]
        if "Interface status" in line:
            interface = line.split(",")[0]
            continue
        if "VALID" in line:
            line = line.strip()
            if "link-local address" in line:
                ip_address = line.split()[3]
                prefix_len = "64"
            elif "IPv6 address" in line:
                ip_address = line.split()[2]
                prefix_len = "64"
            else:
                ip_address, prefix_len = line.split()[0].split("/")
                prefix_len = int(prefix_len)
            val = {"prefix_length": prefix_len}
            v6_interfaces.setdefault(interface, {})[ip_address] = val
        else:
            # Covers "IPv6 address: none": keep the interface present
            # even without addresses.
            v6_interfaces.setdefault(interface, {})

    # Join data from intermediate dictionaries.
    for interface, data in v4_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data
    for interface, data in v6_interfaces.items():
        interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data

    return interfaces_ip
https://github.com/napalm-automation/napalm/issues/1020
$ napalm -v nxos_ssh --user itifous --password ***** n9k-hostname call get_interfaces_ip 2019-07-16 13:51:56,644 - napalm - ERROR - method - Failed: list index out of range ================= Traceback ================= Traceback (most recent call last): File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/bin/napalm", line 11, in <module> load_entry_point('napalm', 'console_scripts', 'napalm')() File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/lib/python3.7/site-packages/napalm-2.4.0-py3.7.egg/napalm/base/clitools/cl_napalm.py", line 309, in main run_tests(args) File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/lib/python3.7/site-packages/napalm-2.4.0-py3.7.egg/napalm/base/clitools/cl_napalm.py", line 292, in run_tests call_getter(device, args.method, **method_kwargs) File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/lib/python3.7/site-packages/napalm-2.4.0-py3.7.egg/napalm/base/clitools/cl_napalm.py", line 31, in wrapper r = func(*args, **kwargs) File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/lib/python3.7/site-packages/napalm-2.4.0-py3.7.egg/napalm/base/clitools/cl_napalm.py", line 256, in call_getter r = func(**kwargs) File "/home/itifous/.local/share/virtualenvs/netbox-netprod-importer-1U3vzfIV/lib/python3.7/site-packages/napalm-2.4.0-py3.7.egg/napalm/nxos_ssh/nxos_ssh.py", line 892, in get_interfaces_ip prefix_len = int(line.split()[5].split("/")[1]) IndexError: list index out of range
IndexError
def get_bgp_neighbors(self):
    """BGP neighbor information, keyed by VRF ("global" for the default
    table) and then by peer address.

    Supports both IPv4 and IPv6, vrf aware. Built in stages: parse the
    summary table, parse per-AFI neighbor detail, then merge the two.
    """
    # AFI/SAFI combinations we know how to parse; the "afi" variables
    # below hold both halves, e.g. "ipv4 unicast".
    supported_afi = [
        "ipv4 unicast",
        "ipv4 multicast",
        "ipv6 unicast",
        "ipv6 multicast",
        "vpnv4 unicast",
        "vpnv6 unicast",
        "ipv4 mdt",
    ]

    bgp_neighbor_data = dict()
    # vrfs where bgp is configured
    bgp_config_vrfs = []

    # get summary output from device
    cmd_bgp_all_sum = "show bgp all summary"
    summary_output = self._send_command(cmd_bgp_all_sum).strip()
    # BGP not running at all: return empty rather than raising.
    bgp_not_running = ["Invalid input", "BGP not active"]
    if any((s in summary_output for s in bgp_not_running)):
        return {}

    # get neighbor output from device; VPN/MDT families need the
    # "all" keyword in the show command.
    neighbor_output = ""
    for afi in supported_afi:
        if afi in [
            "ipv4 unicast",
            "ipv4 multicast",
            "ipv6 unicast",
            "ipv6 multicast",
        ]:
            cmd_bgp_neighbor = "show bgp %s neighbors" % afi
            neighbor_output += self._send_command(cmd_bgp_neighbor).strip()
            # trailing newline required for parsing
            neighbor_output += "\n"
        elif afi in ["vpnv4 unicast", "vpnv6 unicast", "ipv4 mdt"]:
            cmd_bgp_neighbor = "show bgp %s all neighbors" % afi
            neighbor_output += self._send_command(cmd_bgp_neighbor).strip()
            # trailing newline required for parsing
            neighbor_output += "\n"

    # Regular expressions used for parsing BGP summary.
    # Each pattern carries "record": True when a match completes a row.
    parse_summary = {
        "patterns": [
            # For address family: IPv4 Unicast
            # variable afi contains both afi and safi, i.e 'IPv4 Unicast'
            {
                "regexp": re.compile(r"^For address family: (?P<afi>[\S ]+)$"),
                "record": False,
            },
            # Capture router_id and local_as values, e.g.:
            # BGP router identifier 10.0.1.1, local AS number 65000
            {
                "regexp": re.compile(
                    r"^.* router identifier (?P<router_id>{}), "
                    r"local AS number (?P<local_as>{})".format(
                        IPV4_ADDR_REGEX, ASN_REGEX
                    )
                ),
                "record": False,
            },
            # Match neighbor summary row, capturing useful details and
            # discarding the 5 columns that we don't care about, e.g.:
            # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
            # 10.0.0.2 4 65000 1336020 64337701 1011343614 0 0 8w0d 3143
            {
                "regexp": re.compile(
                    r"^\*?(?P<remote_addr>({})|({}))"
                    r"\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+"
                    r"(?P<uptime>(never)|\d+\S+)"
                    r"\s+(?P<accepted_prefixes>\d+)".format(
                        IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                    )
                ),
                "record": True,
            },
            # Same as above, but for peer that are not Established, e.g.:
            # 192.168.0.2 4 65002 0 0 1 0 0 never Active
            {
                "regexp": re.compile(
                    r"^\*?(?P<remote_addr>({})|({}))"
                    r"\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+"
                    r"(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)".format(
                        IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                    )
                ),
                "record": True,
            },
            # ipv6 peers often break across rows because of the longer
            # peer address; the address-only row just primes the entry.
            {
                "regexp": re.compile(
                    r"^\*?(?P<remote_addr>({})|({}))".format(
                        IPV4_ADDR_REGEX, IPV6_ADDR_REGEX
                    )
                ),
                "record": False,
            },
            # Continuation row for an Established IPv6 peer.
            {
                "regexp": re.compile(
                    r"^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+"
                    r"(?P<uptime>(never)|\d+\S+)"
                    r"\s+(?P<accepted_prefixes>\d+)".format(ASN_REGEX)
                ),
                "record": True,
            },
            # Continuation row for a non-Established IPv6 peer, e.g.:
            # 4 65003 0 0 1 0 0 never Idle (Admin)
            {
                "regexp": re.compile(
                    r"^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+"
                    r"(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)".format(ASN_REGEX)
                ),
                "record": True,
            },
        ],
        # Fields that must not be carried ("filled down") into the next row.
        "no_fill_fields": [
            "accepted_prefixes",
            "state",
            "uptime",
            "remote_as",
            "remote_addr",
        ],
    }

    parse_neighbors = {
        "patterns": [
            # BGP neighbor is 10.0.0.2, remote AS 65000, internal link
            # (optionally with ", vrf NAME")
            {
                "regexp": re.compile(
                    r"^BGP neighbor is (?P<remote_addr>({})|({})),"
                    r"(\s+vrf (?P<vrf>\S+),)?"
                    r"\s+remote AS (?P<remote_as>{}).*".format(
                        IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                    )
                ),
                "record": False,
            },
            # Capture description
            {
                "regexp": re.compile(r"^\s+Description: (?P<description>.+)"),
                "record": False,
            },
            # Capture remote_id, e.g.:
            # BGP version 4, remote router ID 10.0.1.2
            {
                "regexp": re.compile(
                    r"^\s+BGP version \d+, remote router ID "
                    r"(?P<remote_id>{})".format(IPV4_ADDR_REGEX)
                ),
                "record": False,
            },
            # Capture state
            {
                "regexp": re.compile(r"^\s+BGP state = (?P<state>\w+)"),
                "record": False,
            },
            # Capture AFI and SAFI names, e.g.:
            # For address family: IPv4 Unicast
            {
                "regexp": re.compile(r"^\s+For address family: (?P<afi>[\S ]+)$"),
                "record": False,
            },
            # Capture current sent and accepted prefixes, e.g.:
            # Prefixes Current: 637213 3142 (Consumes 377040 bytes)
            {
                "regexp": re.compile(
                    r"^\s+Prefixes Current:\s+(?P<sent_prefixes>\d+)\s+"
                    r"(?P<accepted_prefixes>\d+).*"
                ),
                "record": False,
            },
            # Capture received_prefixes if soft-reconfig is enabled for the peer
            {
                "regexp": re.compile(
                    r"^\s+Saved (soft-reconfig):.+(?P<received_prefixes>\d+).*"
                ),
                "record": True,
            },
            # Otherwise, use the following as an end of row marker
            {
                "regexp": re.compile(r"^\s+Local Policy Denied Prefixes:.+"),
                "record": True,
            },
        ],
        # fields that should not be "filled down" across table rows
        "no_fill_fields": [
            "received_prefixes",
            "accepted_prefixes",
            "sent_prefixes",
        ],
    }

    # Parse the summary output into a list of dicts: each line is tried
    # against every pattern; "record" flushes the accumulated entry.
    summary_data = []
    summary_data_entry = {}
    for line in summary_output.splitlines():
        # check for matches against each pattern
        for item in parse_summary["patterns"]:
            match = item["regexp"].match(line)
            if match:
                # a match was found, so update the temp entry with the
                # match's groupdict
                summary_data_entry.update(match.groupdict())
                if item["record"]:
                    # Record indicates the last piece of data has been
                    # obtained; move on to next entry
                    summary_data.append(copy.deepcopy(summary_data_entry))
                    # remove keys that are listed in no_fill_fields before
                    # the next pass
                    for field in parse_summary["no_fill_fields"]:
                        try:
                            del summary_data_entry[field]
                        except KeyError:
                            pass
                break

    # Same state machine over the detailed neighbor output.
    neighbor_data = []
    neighbor_data_entry = {}
    for line in neighbor_output.splitlines():
        # check for matches against each pattern
        for item in parse_neighbors["patterns"]:
            match = item["regexp"].match(line)
            if match:
                neighbor_data_entry.update(match.groupdict())
                if item["record"]:
                    # update list of vrfs where bgp is configured
                    if not neighbor_data_entry["vrf"]:
                        vrf_to_add = "global"
                    else:
                        vrf_to_add = neighbor_data_entry["vrf"]
                    if vrf_to_add not in bgp_config_vrfs:
                        bgp_config_vrfs.append(vrf_to_add)
                    # Record indicates the last piece of data has been
                    # obtained; move on to next entry
                    neighbor_data.append(copy.deepcopy(neighbor_data_entry))
                    for field in parse_neighbors["no_fill_fields"]:
                        try:
                            del neighbor_data_entry[field]
                        except KeyError:
                            pass
                break

    # All AFIs must agree on the router ID.
    router_id = None
    for entry in summary_data:
        if not router_id:
            router_id = entry["router_id"]
        elif entry["router_id"] != router_id:
            raise ValueError

    # check the router_id looks like an ipv4 address
    router_id = napalm.base.helpers.ip(router_id, version=4)

    # create dict keys for vrfs where bgp is configured
    for vrf in bgp_config_vrfs:
        bgp_neighbor_data[vrf] = {}
        bgp_neighbor_data[vrf]["router_id"] = router_id
        bgp_neighbor_data[vrf]["peers"] = {}

    # add parsed data to output dict
    for entry in summary_data:
        remote_addr = napalm.base.helpers.ip(entry["remote_addr"])
        afi = entry["afi"].lower()
        # check that we're looking at a supported afi
        if afi not in supported_afi:
            continue
        # get neighbor_entry out of neighbor data
        neighbor_entry = None
        for neighbor in neighbor_data:
            if (
                neighbor["afi"].lower() == afi
                and napalm.base.helpers.ip(neighbor["remote_addr"]) == remote_addr
            ):
                neighbor_entry = neighbor
                break
        # check for proper session data for the afi
        if neighbor_entry is None:
            continue
        elif not isinstance(neighbor_entry, dict):
            # NOTE(review): ValueError does not accept a ``msg`` keyword;
            # if this branch ever fires it raises TypeError instead.
            raise ValueError(
                msg="Couldn't find neighbor data for %s in afi %s"
                % (remote_addr, afi)
            )

        # "(Admin)" in the summary state column marks an admin-shutdown peer.
        try:
            if "(Admin)" in entry["state"]:
                is_enabled = False
            else:
                is_enabled = True
        except KeyError:
            is_enabled = True

        # parse uptime value
        uptime = self.bgp_time_conversion(entry["uptime"])

        # BGP is up if state is Established
        is_up = "Established" in neighbor_entry["state"]

        # check whether session is up for address family and get prefix count
        try:
            accepted_prefixes = int(entry["accepted_prefixes"])
        except (ValueError, KeyError):
            accepted_prefixes = -1

        # Only parse neighbor detailed data if BGP session is-up
        if is_up:
            try:
                # override accepted_prefixes with neighbor data if possible
                # (since that's newer)
                accepted_prefixes = int(neighbor_entry["accepted_prefixes"])
            except (ValueError, KeyError):
                pass

            # try to get received prefix count, otherwise set to accepted_prefixes
            received_prefixes = neighbor_entry.get(
                "received_prefixes", accepted_prefixes
            )

            # try to get sent prefix count and convert to int, otherwise set to -1
            sent_prefixes = int(neighbor_entry.get("sent_prefixes", -1))
        else:
            received_prefixes = -1
            sent_prefixes = -1
            uptime = -1

        # get description
        try:
            description = str(neighbor_entry["description"])
        except KeyError:
            description = ""

        # check the remote router_id looks like an ipv4 address
        remote_id = napalm.base.helpers.ip(neighbor_entry["remote_id"], version=4)

        # get vrf name, if None use 'global'
        if neighbor_entry["vrf"]:
            vrf = neighbor_entry["vrf"]
        else:
            vrf = "global"

        if remote_addr not in bgp_neighbor_data[vrf]["peers"]:
            # First AFI seen for this peer: create its full record.
            bgp_neighbor_data[vrf]["peers"][remote_addr] = {
                "local_as": napalm.base.helpers.as_number(entry["local_as"]),
                "remote_as": napalm.base.helpers.as_number(entry["remote_as"]),
                "remote_id": remote_id,
                "is_up": is_up,
                "is_enabled": is_enabled,
                "description": description,
                "uptime": uptime,
                "address_family": {
                    afi: {
                        "received_prefixes": received_prefixes,
                        "accepted_prefixes": accepted_prefixes,
                        "sent_prefixes": sent_prefixes,
                    }
                },
            }
        else:
            # found previous data for matching remote_addr, but for
            # different afi
            existing = bgp_neighbor_data[vrf]["peers"][remote_addr]
            assert afi not in existing["address_family"]
            # compare with existing values and croak if they don't match
            assert existing["local_as"] == napalm.base.helpers.as_number(
                entry["local_as"]
            )
            assert existing["remote_as"] == napalm.base.helpers.as_number(
                entry["remote_as"]
            )
            assert existing["remote_id"] == remote_id
            assert existing["is_enabled"] == is_enabled
            assert existing["description"] == description
            # merge other values in a sane manner
            existing["is_up"] = existing["is_up"] or is_up
            existing["uptime"] = max(existing["uptime"], uptime)
            existing["address_family"][afi] = {
                "received_prefixes": received_prefixes,
                "accepted_prefixes": accepted_prefixes,
                "sent_prefixes": sent_prefixes,
            }

    return bgp_neighbor_data
def get_bgp_neighbors(self): """BGP neighbor information. Supports both IPv4 and IPv6. vrf aware """ supported_afi = [ "ipv4 unicast", "ipv4 multicast", "ipv6 unicast", "ipv6 multicast", "vpnv4 unicast", "vpnv6 unicast", "ipv4 mdt", ] bgp_neighbor_data = dict() # vrfs where bgp is configured bgp_config_vrfs = [] # get summary output from device cmd_bgp_all_sum = "show bgp all summary" summary_output = self._send_command(cmd_bgp_all_sum).strip() if "Invalid input detected" in summary_output: raise CommandErrorException("BGP is not running on this device") # get neighbor output from device neighbor_output = "" for afi in supported_afi: if afi in [ "ipv4 unicast", "ipv4 multicast", "ipv6 unicast", "ipv6 multicast", ]: cmd_bgp_neighbor = "show bgp %s neighbors" % afi neighbor_output += self._send_command(cmd_bgp_neighbor).strip() # trailing newline required for parsing neighbor_output += "\n" elif afi in ["vpnv4 unicast", "vpnv6 unicast", "ipv4 mdt"]: cmd_bgp_neighbor = "show bgp %s all neighbors" % afi neighbor_output += self._send_command(cmd_bgp_neighbor).strip() # trailing newline required for parsing neighbor_output += "\n" # Regular expressions used for parsing BGP summary parse_summary = { "patterns": [ # For address family: IPv4 Unicast # variable afi contains both afi and safi, i.e 'IPv4 Unicast' { "regexp": re.compile(r"^For address family: (?P<afi>[\S ]+)$"), "record": False, }, # Capture router_id and local_as values, e.g.: # BGP router identifier 10.0.1.1, local AS number 65000 { "regexp": re.compile( r"^.* router identifier (?P<router_id>{}), " r"local AS number (?P<local_as>{})".format( IPV4_ADDR_REGEX, ASN_REGEX ) ), "record": False, }, # Match neighbor summary row, capturing useful details and # discarding the 5 columns that we don't care about, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 10.0.0.2 4 65000 1336020 64337701 1011343614 0 0 8w0d 3143 { "regexp": re.compile( r"^\*?(?P<remote_addr>({})|({}))" 
r"\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+" r"(?P<uptime>(never)|\d+\S+)" r"\s+(?P<accepted_prefixes>\d+)".format( IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX ) ), "record": True, }, # Same as above, but for peer that are not Established, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 192.168.0.2 4 65002 0 0 1 0 0 never Active { "regexp": re.compile( r"^\*?(?P<remote_addr>({})|({}))" r"\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+" r"(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)".format( IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX ) ), "record": True, }, # ipv6 peers often break accross rows because of the longer peer address, # match as above, but in separate expressions, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 2001:DB8::4 # 4 65004 9900690 612449 155362939 0 0 26w6d 36391 { "regexp": re.compile( r"^\*?(?P<remote_addr>({})|({}))".format( IPV4_ADDR_REGEX, IPV6_ADDR_REGEX ) ), "record": False, }, { "regexp": re.compile( r"^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+" r"(?P<uptime>(never)|\d+\S+)" r"\s+(?P<accepted_prefixes>\d+)".format(ASN_REGEX) ), "record": True, }, # Same as above, but for peers that are not Established, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 2001:DB8::3 # 4 65003 0 0 1 0 0 never Idle (Admin) { "regexp": re.compile( r"^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+" r"(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)".format(ASN_REGEX) ), "record": True, }, ], "no_fill_fields": [ "accepted_prefixes", "state", "uptime", "remote_as", "remote_addr", ], } parse_neighbors = { "patterns": [ # Capture BGP neighbor is 10.0.0.2, remote AS 65000, internal link { "regexp": re.compile( r"^BGP neighbor is (?P<remote_addr>({})|({}))," r"(\s+vrf (?P<vrf>\S+),)?" 
r"\s+remote AS (?P<remote_as>{}).*".format( IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX ) ), "record": False, }, # Capture description { "regexp": re.compile(r"^\s+Description: (?P<description>.+)"), "record": False, }, # Capture remote_id, e.g.: # BGP version 4, remote router ID 10.0.1.2 { "regexp": re.compile( r"^\s+BGP version \d+, remote router ID " r"(?P<remote_id>{})".format(IPV4_ADDR_REGEX) ), "record": False, }, # Capture state { "regexp": re.compile(r"^\s+BGP state = (?P<state>\w+)"), "record": False, }, # Capture AFI and SAFI names, e.g.: # For address family: IPv4 Unicast { "regexp": re.compile(r"^\s+For address family: (?P<afi>[\S ]+)$"), "record": False, }, # Capture current sent and accepted prefixes, e.g.: # Prefixes Current: 637213 3142 (Consumes 377040 bytes) { "regexp": re.compile( r"^\s+Prefixes Current:\s+(?P<sent_prefixes>\d+)\s+" r"(?P<accepted_prefixes>\d+).*" ), "record": False, }, # Capture received_prefixes if soft-reconfig is enabled for the peer { "regexp": re.compile( r"^\s+Saved (soft-reconfig):.+(?P<received_prefixes>\d+).*" ), "record": True, }, # Otherwise, use the following as an end of row marker { "regexp": re.compile(r"^\s+Local Policy Denied Prefixes:.+"), "record": True, }, ], # fields that should not be "filled down" across table rows "no_fill_fields": [ "received_prefixes", "accepted_prefixes", "sent_prefixes", ], } # Parse outputs into a list of dicts summary_data = [] summary_data_entry = {} for line in summary_output.splitlines(): # check for matches against each pattern for item in parse_summary["patterns"]: match = item["regexp"].match(line) if match: # a match was found, so update the temp entry with the match's groupdict summary_data_entry.update(match.groupdict()) if item["record"]: # Record indicates the last piece of data has been obtained; move # on to next entry summary_data.append(copy.deepcopy(summary_data_entry)) # remove keys that are listed in no_fill_fields before the next pass for field in 
parse_summary["no_fill_fields"]: try: del summary_data_entry[field] except KeyError: pass break neighbor_data = [] neighbor_data_entry = {} for line in neighbor_output.splitlines(): # check for matches against each pattern for item in parse_neighbors["patterns"]: match = item["regexp"].match(line) if match: # a match was found, so update the temp entry with the match's groupdict neighbor_data_entry.update(match.groupdict()) if item["record"]: # update list of vrfs where bgp is configured if not neighbor_data_entry["vrf"]: vrf_to_add = "global" else: vrf_to_add = neighbor_data_entry["vrf"] if vrf_to_add not in bgp_config_vrfs: bgp_config_vrfs.append(vrf_to_add) # Record indicates the last piece of data has been obtained; move # on to next entry neighbor_data.append(copy.deepcopy(neighbor_data_entry)) # remove keys that are listed in no_fill_fields before the next pass for field in parse_neighbors["no_fill_fields"]: try: del neighbor_data_entry[field] except KeyError: pass break router_id = None for entry in summary_data: if not router_id: router_id = entry["router_id"] elif entry["router_id"] != router_id: raise ValueError # check the router_id looks like an ipv4 address router_id = napalm.base.helpers.ip(router_id, version=4) # create dict keys for vrfs where bgp is configured for vrf in bgp_config_vrfs: bgp_neighbor_data[vrf] = {} bgp_neighbor_data[vrf]["router_id"] = router_id bgp_neighbor_data[vrf]["peers"] = {} # add parsed data to output dict for entry in summary_data: remote_addr = napalm.base.helpers.ip(entry["remote_addr"]) afi = entry["afi"].lower() # check that we're looking at a supported afi if afi not in supported_afi: continue # get neighbor_entry out of neighbor data neighbor_entry = None for neighbor in neighbor_data: if ( neighbor["afi"].lower() == afi and napalm.base.helpers.ip(neighbor["remote_addr"]) == remote_addr ): neighbor_entry = neighbor break # check for proper session data for the afi if neighbor_entry is None: continue elif not 
isinstance(neighbor_entry, dict): raise ValueError( msg="Couldn't find neighbor data for %s in afi %s" % (remote_addr, afi) ) # check for admin down state try: if "(Admin)" in entry["state"]: is_enabled = False else: is_enabled = True except KeyError: is_enabled = True # parse uptime value uptime = self.bgp_time_conversion(entry["uptime"]) # BGP is up if state is Established is_up = "Established" in neighbor_entry["state"] # check whether session is up for address family and get prefix count try: accepted_prefixes = int(entry["accepted_prefixes"]) except (ValueError, KeyError): accepted_prefixes = -1 # Only parse neighbor detailed data if BGP session is-up if is_up: try: # overide accepted_prefixes with neighbor data if possible (since that's newer) accepted_prefixes = int(neighbor_entry["accepted_prefixes"]) except (ValueError, KeyError): pass # try to get received prefix count, otherwise set to accepted_prefixes received_prefixes = neighbor_entry.get( "received_prefixes", accepted_prefixes ) # try to get sent prefix count and convert to int, otherwise set to -1 sent_prefixes = int(neighbor_entry.get("sent_prefixes", -1)) else: received_prefixes = -1 sent_prefixes = -1 uptime = -1 # get description try: description = str(neighbor_entry["description"]) except KeyError: description = "" # check the remote router_id looks like an ipv4 address remote_id = napalm.base.helpers.ip(neighbor_entry["remote_id"], version=4) # get vrf name, if None use 'global' if neighbor_entry["vrf"]: vrf = neighbor_entry["vrf"] else: vrf = "global" if remote_addr not in bgp_neighbor_data[vrf]["peers"]: bgp_neighbor_data[vrf]["peers"][remote_addr] = { "local_as": napalm.base.helpers.as_number(entry["local_as"]), "remote_as": napalm.base.helpers.as_number(entry["remote_as"]), "remote_id": remote_id, "is_up": is_up, "is_enabled": is_enabled, "description": description, "uptime": uptime, "address_family": { afi: { "received_prefixes": received_prefixes, "accepted_prefixes": accepted_prefixes, 
"sent_prefixes": sent_prefixes, } }, } else: # found previous data for matching remote_addr, but for different afi existing = bgp_neighbor_data[vrf]["peers"][remote_addr] assert afi not in existing["address_family"] # compare with existing values and croak if they don't match assert existing["local_as"] == napalm.base.helpers.as_number( entry["local_as"] ) assert existing["remote_as"] == napalm.base.helpers.as_number( entry["remote_as"] ) assert existing["remote_id"] == remote_id assert existing["is_enabled"] == is_enabled assert existing["description"] == description # merge other values in a sane manner existing["is_up"] = existing["is_up"] or is_up existing["uptime"] = max(existing["uptime"], uptime) existing["address_family"][afi] = { "received_prefixes": received_prefixes, "accepted_prefixes": accepted_prefixes, "sent_prefixes": sent_prefixes, } return bgp_neighbor_data
https://github.com/napalm-automation/napalm/issues/987
In [3]: with IOSDriver("ios01", os.environ.get("NAPALM_USERNAME"), os.environ.get("NAPALM_PASSWORD")) as d: ...: d.get_bgp_neighbors() ...: NAPALM didn't catch this exception. Please, fill a bugfix on https://github.com/napalm-automation/napalm/issues Don't forget to include this traceback. --------------------------------------------------------------------------- AddrFormatError Traceback (most recent call last) <ipython-input-3-fc2e1223ed9d> in <module> 1 with IOSDriver("ios01", os.environ.get("NAPALM_USERNAME"), os.environ.get("NAPALM_PASSWORD")) as d: ----> 2 d.get_bgp_neighbors() 3 /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/ios/ios.py in get_bgp_neighbors(self) 1697 1698 # check the router_id looks like an ipv4 address -> 1699 router_id = napalm.base.helpers.ip(router_id, version=4) 1700 1701 # add parsed data to output dict /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/base/helpers.py in ip(addr, version) 328 u'2001:db8:85a3::8a2e:370:7334' 329 """ --> 330 addr_obj = IPAddress(addr) 331 if version and addr_obj.version != version: 332 raise ValueError("{} is not an ipv{} address".format(addr, version)) /mnt/c/Users/bewing/.pyenv/versions/3.6.6/envs/napalm/lib/python3.6/site-packages/netaddr/ip/__init__.py in __init__(self, addr, version, flags) 304 if self._module is None: 305 raise AddrFormatError('failed to detect a valid IP ' \ --> 306 'address from %r' % addr) 307 else: 308 # IP version is explicit. AddrFormatError: failed to detect a valid IP address from None
AddrFormatError
def build_help(): parser = argparse.ArgumentParser( description="Command line tool to handle configuration on devices using NAPALM." "The script will print the diff on the screen", epilog="Automate all the things!!!", ) parser.add_argument( dest="hostname", action="store", help="Host where you want to deploy the configuration.", ) parser.add_argument( "--user", "-u", dest="user", action="store", default=getpass.getuser(), help="User for authenticating to the host. Default: user running the script.", ) parser.add_argument( "--password", "-p", dest="password", action="store", help="Password for authenticating to the host." "If you do not provide a password in the CLI you will be prompted.", ) parser.add_argument( "--vendor", "-v", dest="vendor", action="store", required=True, help="Host Operating System.", ) parser.add_argument( "--optional_args", "-o", dest="optional_args", action="store", help="String with comma separated key=value pairs passed via optional_args to the driver.", ) parser.add_argument( "--debug", dest="debug", action="store_true", help="Enables debug mode; more verbosity.", ) subparser = parser.add_subparsers(title="actions") config = subparser.add_parser("configure", help="Perform a configuration operation") config.set_defaults(which="config") config.add_argument( dest="config_file", action="store", help="File containing the configuration you want to deploy.", ) config.add_argument( "--strategy", "-s", dest="strategy", action="store", choices=["replace", "merge"], default="replace", help="Strategy to use to deploy configuration. 
Default: replace.", ) config.add_argument( "--dry-run", "-d", dest="dry_run", action="store_true", default=None, help="Only returns diff, it does not deploy the configuration.", ) call = subparser.add_parser("call", help="Call a napalm method") call.set_defaults(which="call") call.add_argument(dest="method", action="store", help="Run this method") call.add_argument( "--method-kwargs", "-k", dest="method_kwargs", action="store", help='kwargs to pass to the method. For example: "destination=1.1.1.1,protocol=bgp"', ) validate = subparser.add_parser("validate", help="Validate configuration/state") validate.set_defaults(which="validate") validate.add_argument( dest="validation_file", action="store", help="Validation file containing resources derised states", ) args = parser.parse_args() if not hasattr(args, "which"): args.which = None if args.password is None: password = getpass.getpass("Enter password: ") setattr(args, "password", password) return args
def build_help(): parser = argparse.ArgumentParser( description="Command line tool to handle configuration on devices using NAPALM." "The script will print the diff on the screen", epilog="Automate all the things!!!", ) parser.add_argument( dest="hostname", action="store", help="Host where you want to deploy the configuration.", ) parser.add_argument( "--user", "-u", dest="user", action="store", default=getpass.getuser(), help="User for authenticating to the host. Default: user running the script.", ) parser.add_argument( "--password", "-p", dest="password", action="store", help="Password for authenticating to the host." "If you do not provide a password in the CLI you will be prompted.", ) parser.add_argument( "--vendor", "-v", dest="vendor", action="store", required=True, help="Host Operating System.", ) parser.add_argument( "--optional_args", "-o", dest="optional_args", action="store", help="String with comma separated key=value pairs passed via optional_args to the driver.", ) parser.add_argument( "--debug", dest="debug", action="store_true", help="Enables debug mode; more verbosity.", ) subparser = parser.add_subparsers(title="actions") config = subparser.add_parser("configure", help="Perform a configuration operation") config.set_defaults(which="config") config.add_argument( dest="config_file", action="store", help="File containing the configuration you want to deploy.", ) config.add_argument( "--strategy", "-s", dest="strategy", action="store", choices=["replace", "merge"], default="replace", help="Strategy to use to deploy configuration. 
Default: replace.", ) config.add_argument( "--dry-run", "-d", dest="dry_run", action="store_true", default=None, help="Only returns diff, it does not deploy the configuration.", ) call = subparser.add_parser("call", help="Call a napalm method") call.set_defaults(which="call") call.add_argument(dest="method", action="store", help="Run this method") call.add_argument( "--method-kwargs", "-k", dest="method_kwargs", action="store", help='kwargs to pass to the method. For example: "destination=1.1.1.1,protocol=bgp"', ) validate = subparser.add_parser("validate", help="Validate configuration/state") validate.set_defaults(which="validate") validate.add_argument( dest="validation_file", action="store", help="Validation file containing resources derised states", ) args = parser.parse_args() if args.password is None: password = getpass.getpass("Enter password: ") setattr(args, "password", password) return args
https://github.com/napalm-automation/napalm/issues/936
user@host:/opt/netbox$ napalm --user username--password xxxxx --vendor nxos_ssh --debug 10.28.210.5 2019-02-28 15:07:06,834 - napalm - DEBUG - Starting napalm's debugging tool 2019-02-28 15:07:06,834 - napalm - DEBUG - Gathering napalm packages 2019-02-28 15:07:06,835 - napalm - DEBUG - napalm==2.4.0 2019-02-28 15:07:06,835 - napalm - DEBUG - get_network_driver - Calling with args: ('nxos_ssh',), {} 2019-02-28 15:07:06,971 - napalm - DEBUG - get_network_driver - Successful 2019-02-28 15:07:06,971 - napalm - DEBUG - __init__ - Calling with args: (<class 'napalm.nxos_ssh.nxos_ssh.NXOSSSHDriver'>, '10.28.210.5', 'magna'), {'password': '*******', 'timeout': 60, 'optional_args': {}} 2019-02-28 15:07:06,972 - napalm - DEBUG - __init__ - Successful 2019-02-28 15:07:06,972 - napalm - DEBUG - pre_connection_tests - Calling with args: (<napalm.nxos_ssh.nxos_ssh.NXOSSSHDriver object at 0x7f4ff9fe9ef0>,), {} 2019-02-28 15:07:06,972 - napalm - DEBUG - open - Calling with args: (<napalm.nxos_ssh.nxos_ssh.NXOSSSHDriver object at 0x7f4ff9fe9ef0>,), {} 2019-02-28 15:07:11,190 - napalm - DEBUG - open - Successful 2019-02-28 15:07:11,191 - napalm - DEBUG - connection_tests - Calling with args: (<napalm.nxos_ssh.nxos_ssh.NXOSSSHDriver object at 0x7f4ff9fe9ef0>,), {} 2019-02-28 15:07:11,191 - napalm - DEBUG - get_facts - Calling with args: (<napalm.nxos_ssh.nxos_ssh.NXOSSSHDriver object at 0x7f4ff9fe9ef0>,), {} 2019-02-28 15:07:14,198 - napalm - DEBUG - Gathered facts: { "uptime": 7532976, "vendor": "Cisco", "os_version": "7.0(3)I2(3)", "serial_number": "FOCXXX080WJ", "model": "Nexus 3172T Chassis", "hostname": "XXXXXXX", "fqdn": "xxx.xxx.xxxl", "interface_list": [ "mgmt0", "Ethernet1/1", "Ethernet1/2", "Ethernet1/3", "Ethernet1/4", "Ethernet1/5", "Ethernet1/6", "Ethernet1/7", "Ethernet1/8", "Ethernet1/9", "Ethernet1/10", "Ethernet1/11", "Ethernet1/12", "Ethernet1/13", "Ethernet1/14", "Ethernet1/15", "Ethernet1/16", "Ethernet1/17", "Ethernet1/18", "Ethernet1/19", "Ethernet1/20", 
"Ethernet1/21", "Ethernet1/22", "Ethernet1/23", "Ethernet1/24", "Ethernet1/25", "Ethernet1/26", "Ethernet1/27", "Ethernet1/28", "Ethernet1/29", "Ethernet1/30", "Ethernet1/31", "Ethernet1/32", "Ethernet1/33", "Ethernet1/34", "Ethernet1/35", "Ethernet1/36", "Ethernet1/37", "Ethernet1/38", "Ethernet1/39", "Ethernet1/40", "Ethernet1/41", "Ethernet1/42", "Ethernet1/43", "Ethernet1/44", "Ethernet1/45", "Ethernet1/46", "Ethernet1/47", "Ethernet1/48", "Ethernet1/49", "Ethernet1/50", "Ethernet1/51/1", "Ethernet1/51/2", "Ethernet1/51/3", "Ethernet1/51/4", "Ethernet1/52/1", "Ethernet1/52/2", "Ethernet1/52/3", "Ethernet1/52/4", "Ethernet1/53", "Ethernet1/54", "Port-channel1", "Port-channel100", "Port-channel101", "Port-channel102", "Port-channel103", "Vlan1", "Vlan888", "Vlan889", "Vlan890", "Vlan891" ] } { "uptime": 7532976, "vendor": "Cisco", "os_version": "7.0(3)I2(3)", "serial_number": "FOC202080WJ", "model": "Nexus 3172T Chassis", "hostname": "SAISW_NEX005CIS", "fqdn": "SAISW_NEX005CIS.magna.global", "interface_list": [ "mgmt0", "Ethernet1/1", "Ethernet1/2", "Ethernet1/3", "Ethernet1/4", "Ethernet1/5", "Ethernet1/6", "Ethernet1/7", "Ethernet1/8", "Ethernet1/9", "Ethernet1/10", "Ethernet1/11", "Ethernet1/12", "Ethernet1/13", "Ethernet1/14", "Ethernet1/15", "Ethernet1/16", "Ethernet1/17", "Ethernet1/18", "Ethernet1/19", "Ethernet1/20", "Ethernet1/21", "Ethernet1/22", "Ethernet1/23", "Ethernet1/24", "Ethernet1/25", "Ethernet1/26", "Ethernet1/27", "Ethernet1/28", "Ethernet1/29", "Ethernet1/30", "Ethernet1/31", "Ethernet1/32", "Ethernet1/33", "Ethernet1/34", "Ethernet1/35", "Ethernet1/36", "Ethernet1/37", "Ethernet1/38", "Ethernet1/39", "Ethernet1/40", "Ethernet1/41", "Ethernet1/42", "Ethernet1/43", "Ethernet1/44", "Ethernet1/45", "Ethernet1/46", "Ethernet1/47", "Ethernet1/48", "Ethernet1/49", "Ethernet1/50", "Ethernet1/51/1", "Ethernet1/51/2", "Ethernet1/51/3", "Ethernet1/51/4", "Ethernet1/52/1", "Ethernet1/52/2", "Ethernet1/52/3", "Ethernet1/52/4", "Ethernet1/53", 
"Ethernet1/54", "Port-channel1", "Port-channel100", "Port-channel101", "Port-channel102", "Port-channel103", "Vlan1", "Vlan888", "Vlan889", "Vlan890", "Vlan891" ] } 2019-02-28 15:07:14,198 - napalm - DEBUG - get_facts - Successful Traceback (most recent call last): File "/usr/local/bin/napalm", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python3.6/dist-packages/napalm/base/clitools/cl_napalm.py", line 309, in main run_tests(args) File "/usr/local/lib/python3.6/dist-packages/napalm/base/clitools/cl_napalm.py", line 290, in run_tests if args.which == "call": AttributeError: 'Namespace' object has no attribute 'which' user@host:/opt/netbox$
AttributeError
def get_lldp_neighbors_detail(self, interface=""): """Detailed view of the LLDP neighbors.""" lldp_neighbors = {} lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device) try: lldp_table.get() except RpcError as rpcerr: # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening log.error("Unable to retrieve the LLDP neighbors information:") log.error(rpcerr.message) return {} interfaces = lldp_table.get().keys() # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series # and SRX Series is get-lldp-interface-neighbors-information, # and rpc for M, MX, and T Series is get-lldp-interface-neighbors # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later) # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa # Exceptions: # EX9208 personality = SWITCH RPC: <get-lldp-interface-neighbors><interface-device> lldp_table.GET_RPC = "get-lldp-interface-neighbors" if "EX9208" in self.device.facts.get("model"): pass elif self.device.facts.get("personality") not in ("MX", "M", "T"): lldp_table.GET_RPC = "get-lldp-interface-neighbors-information" for interface in interfaces: if "EX9208" in self.device.facts.get("model"): lldp_table.get(interface_device=interface) elif self.device.facts.get("personality") not in ("MX", "M", "T"): lldp_table.get(interface_name=interface) else: lldp_table.get(interface_device=interface) for item in lldp_table: if interface not in lldp_neighbors.keys(): lldp_neighbors[interface] = [] lldp_neighbors[interface].append( { "parent_interface": item.parent_interface, "remote_port": item.remote_port, "remote_chassis_id": napalm.base.helpers.convert( napalm.base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id, ), "remote_port_description": napalm.base.helpers.convert( py23_compat.text_type, item.remote_port_description ), 
"remote_system_name": item.remote_system_name, "remote_system_description": item.remote_system_description, "remote_system_capab": item.remote_system_capab, "remote_system_enable_capab": item.remote_system_enable_capab, } ) return lldp_neighbors
def get_lldp_neighbors_detail(self, interface=""): """Detailed view of the LLDP neighbors.""" lldp_neighbors = {} lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device) try: lldp_table.get() except RpcError as rpcerr: # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening log.error("Unable to retrieve the LLDP neighbors information:") log.error(rpcerr.message) return {} interfaces = lldp_table.get().keys() # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series # and SRX Series is get-lldp-interface-neighbors-information, # and rpc for M, MX, and T Series is get-lldp-interface-neighbors # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later) # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa lldp_table.GET_RPC = "get-lldp-interface-neighbors" if self.device.facts.get("personality") not in ("MX", "M", "T"): lldp_table.GET_RPC = "get-lldp-interface-neighbors-information" for interface in interfaces: if self.device.facts.get("personality") not in ("MX", "M", "T"): lldp_table.get(interface_name=interface) else: lldp_table.get(interface_device=interface) for item in lldp_table: if interface not in lldp_neighbors.keys(): lldp_neighbors[interface] = [] lldp_neighbors[interface].append( { "parent_interface": item.parent_interface, "remote_port": item.remote_port, "remote_chassis_id": napalm.base.helpers.convert( napalm.base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id, ), "remote_port_description": napalm.base.helpers.convert( py23_compat.text_type, item.remote_port_description ), "remote_system_name": item.remote_system_name, "remote_system_description": item.remote_system_description, "remote_system_capab": item.remote_system_capab, "remote_system_enable_capab": item.remote_system_enable_capab, } ) return lldp_neighbors
https://github.com/napalm-automation/napalm/issues/430
NAPALM didn't catch this exception. Please, fill a bugfix on https://github.com/napalm-automation/napalm/issues Don't forget to include this traceback. Traceback (most recent call last): File "./probe.py", line 42, in <module> main() File "./probe.py", line 17, in main lldp = dev.get_lldp_neighbors_detail() File "/Users/brian/Env/napalm/lib/python2.7/site-packages/napalm_junos/junos.py", line 633, in get_lldp_neighbors_detail lldp_table.get(interface_name=interface) File "/Users/brian/Env/napalm/lib/python2.7/site-packages/jnpr/junos/factory/optable.py", line 64, in get self.xml = getattr(self.RPC, self.GET_RPC)(**rpc_args) File "/Users/brian/Env/napalm/lib/python2.7/site-packages/jnpr/junos/rpcmeta.py", line 336, in _exec_rpc return self._junos.execute(rpc, **dec_args) File "/Users/brian/Env/napalm/lib/python2.7/site-packages/jnpr/junos/decorators.py", line 63, in wrapper result = function(*args, **kwargs) File "/Users/brian/Env/napalm/lib/python2.7/site-packages/jnpr/junos/decorators.py", line 31, in wrapper return function(*args, **kwargs) File "/Users/brian/Env/napalm/lib/python2.7/site-packages/jnpr/junos/device.py", line 736, in execute errs=ex) jnpr.junos.exception.RpcError: RpcError(severity: error, bad_element: get-lldp-interface-neighbors-information, message: error: syntax error error: syntax error)
jnpr.junos.exception.RpcError
def parse_intf_section(interface): """Parse a single entry from show interfaces output. Different cases: mgmt0 is up admin state is up Ethernet2/1 is up admin state is up, Dedicated Interface Vlan1 is down (Administratively down), line protocol is down, autostate enabled Ethernet154/1/48 is up (with no 'admin state') """ interface = interface.strip() re_protocol = ( r"^(?P<intf_name>\S+?)\s+is\s+(?P<status>.+?)" r",\s+line\s+protocol\s+is\s+(?P<protocol>\S+).*$" ) re_intf_name_state = r"^(?P<intf_name>\S+) is (?P<intf_state>\S+).*" re_is_enabled_1 = r"^admin state is (?P<is_enabled>\S+)$" re_is_enabled_2 = r"^admin state is (?P<is_enabled>\S+), " re_is_enabled_3 = r"^.* is down.*Administratively down.*$" re_mac = r"^\s+Hardware:\s+(?P<hardware>.*),\s+address:\s+(?P<mac_address>\S+) " re_speed = r"\s+MTU .*?,\s+BW\s+(?P<speed>\S+)\s+(?P<speed_unit>\S+).*$" re_description_1 = r"^\s+Description:\s+(?P<description>.*) (?:MTU|Internet)" re_description_2 = r"^\s+Description:\s+(?P<description>.*)$" re_hardware = r"^.* Hardware: (?P<hardware>\S+)$" # Check for 'protocol is ' lines match = re.search(re_protocol, interface, flags=re.M) if match: intf_name = match.group("intf_name") status = match.group("status") protocol = match.group("protocol") if "admin" in status.lower(): is_enabled = False else: is_enabled = True is_up = bool("up" in protocol) else: # More standard is up, next line admin state is lines match = re.search(re_intf_name_state, interface) intf_name = match.group("intf_name") intf_state = match.group("intf_state").strip() is_up = True if intf_state == "up" else False admin_state_present = re.search("admin state is", interface) if admin_state_present: # Parse cases where 'admin state' string exists for x_pattern in [re_is_enabled_1, re_is_enabled_2]: match = re.search(x_pattern, interface, flags=re.M) if match: is_enabled = match.group("is_enabled").strip() is_enabled = True if re.search("up", is_enabled) else False break else: msg = "Error parsing intf, 
'admin state' never detected:\n\n{}".format( interface ) raise ValueError(msg) else: # No 'admin state' should be 'is up' or 'is down' strings # If interface is up; it is enabled is_enabled = True if not is_up: match = re.search(re_is_enabled_3, interface, flags=re.M) if match: is_enabled = False match = re.search(re_mac, interface, flags=re.M) if match: mac_address = match.group("mac_address") mac_address = napalm.base.helpers.mac(mac_address) else: mac_address = "" match = re.search(re_hardware, interface, flags=re.M) speed_exist = True if match: if match.group("hardware") == "NVE": speed_exist = False if speed_exist: match = re.search(re_speed, interface, flags=re.M) speed = int(match.group("speed")) speed_unit = match.group("speed_unit") speed_unit = speed_unit.rstrip(",") # This was alway in Kbit (in the data I saw) if speed_unit != "Kbit": msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format( interface ) raise ValueError(msg) speed = int(round(speed / 1000.0)) else: speed = -1 description = "" for x_pattern in [re_description_1, re_description_2]: match = re.search(x_pattern, interface, flags=re.M) if match: description = match.group("description") break return { intf_name: { "description": description, "is_enabled": is_enabled, "is_up": is_up, "last_flapped": -1.0, "mac_address": mac_address, "speed": speed, } }
def parse_intf_section(interface): """Parse a single entry from show interfaces output. Different cases: mgmt0 is up admin state is up Ethernet2/1 is up admin state is up, Dedicated Interface Vlan1 is down (Administratively down), line protocol is down, autostate enabled Ethernet154/1/48 is up (with no 'admin state') """ interface = interface.strip() re_protocol = ( r"^(?P<intf_name>\S+?)\s+is\s+(?P<status>.+?)" r",\s+line\s+protocol\s+is\s+(?P<protocol>\S+).*$" ) re_intf_name_state = r"^(?P<intf_name>\S+) is (?P<intf_state>\S+).*" re_is_enabled_1 = r"^admin state is (?P<is_enabled>\S+)$" re_is_enabled_2 = r"^admin state is (?P<is_enabled>\S+), " re_is_enabled_3 = r"^.* is down.*Administratively down.*$" re_mac = r"^\s+Hardware:\s+(?P<hardware>.*),\s+address:\s+(?P<mac_address>\S+) " re_speed = r"\s+MTU .*,\s+BW\s+(?P<speed>\S+)\s+(?P<speed_unit>\S+), " re_description_1 = r"^\s+Description:\s+(?P<description>.*) (?:MTU|Internet)" re_description_2 = r"^\s+Description:\s+(?P<description>.*)$" re_hardware = r"^.* Hardware: (?P<hardware>\S+)$" # Check for 'protocol is ' lines match = re.search(re_protocol, interface, flags=re.M) if match: intf_name = match.group("intf_name") status = match.group("status") protocol = match.group("protocol") if "admin" in status.lower(): is_enabled = False else: is_enabled = True is_up = bool("up" in protocol) else: # More standard is up, next line admin state is lines match = re.search(re_intf_name_state, interface) intf_name = match.group("intf_name") intf_state = match.group("intf_state").strip() is_up = True if intf_state == "up" else False admin_state_present = re.search("admin state is", interface) if admin_state_present: # Parse cases where 'admin state' string exists for x_pattern in [re_is_enabled_1, re_is_enabled_2]: match = re.search(x_pattern, interface, flags=re.M) if match: is_enabled = match.group("is_enabled").strip() is_enabled = True if re.search("up", is_enabled) else False break else: msg = "Error parsing intf, 'admin 
state' never detected:\n\n{}".format( interface ) raise ValueError(msg) else: # No 'admin state' should be 'is up' or 'is down' strings # If interface is up; it is enabled is_enabled = True if not is_up: match = re.search(re_is_enabled_3, interface, flags=re.M) if match: is_enabled = False match = re.search(re_mac, interface, flags=re.M) if match: mac_address = match.group("mac_address") mac_address = napalm.base.helpers.mac(mac_address) else: mac_address = "" match = re.search(re_hardware, interface, flags=re.M) speed_exist = True if match: if match.group("hardware") == "NVE": speed_exist = False if speed_exist: match = re.search(re_speed, interface, flags=re.M) speed = int(match.group("speed")) speed_unit = match.group("speed_unit") # This was alway in Kbit (in the data I saw) if speed_unit != "Kbit": msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format( interface ) raise ValueError(msg) speed = int(round(speed / 1000.0)) else: speed = -1 description = "" for x_pattern in [re_description_1, re_description_2]: match = re.search(x_pattern, interface, flags=re.M) if match: description = match.group("description") break return { intf_name: { "description": description, "is_enabled": is_enabled, "is_up": is_up, "last_flapped": -1.0, "mac_address": mac_address, "speed": speed, } }
https://github.com/napalm-automation/napalm/issues/855
Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/salt/utils/napalm.py", line 167, in call out = getattr(napalm_device.get('DRIVER'), method)(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/napalm/nxos_ssh/nxos_ssh.py", line 836, in get_interfaces interfaces.update(parse_intf_section(entry)) File "/usr/local/lib/python2.7/dist-packages/napalm/nxos_ssh/nxos_ssh.py", line 154, in parse_intf_section speed = int(match.group('speed')) AttributeError: 'NoneType' object has no attribute 'group'
AttributeError
def get_environment(self): def extract_temperature_data(data): for s in data: temp = s["currentTemperature"] if "currentTemperature" in s else 0.0 name = s["name"] values = { "temperature": temp, "is_alert": temp > s["overheatThreshold"], "is_critical": temp > s["criticalThreshold"], } yield name, values sh_version_out = self.device.run_commands(["show version"]) is_veos = sh_version_out[0]["modelName"].lower() == "veos" commands = ["show environment cooling", "show environment temperature"] if not is_veos: commands.append("show environment power") fans_output, temp_output, power_output = self.device.run_commands(commands) else: fans_output, temp_output = self.device.run_commands(commands) environment_counters = {"fans": {}, "temperature": {}, "power": {}, "cpu": {}} cpu_output = self.device.run_commands(["show processes top once"], encoding="text")[ 0 ]["output"] for slot in fans_output["fanTraySlots"]: environment_counters["fans"][slot["label"]] = {"status": slot["status"] == "ok"} # First check FRU's for fru_type in ["cardSlots", "powerSupplySlots"]: for fru in temp_output[fru_type]: t = { name: value for name, value in extract_temperature_data(fru["tempSensors"]) } environment_counters["temperature"].update(t) # On board sensors parsed = {n: v for n, v in extract_temperature_data(temp_output["tempSensors"])} environment_counters["temperature"].update(parsed) if not is_veos: for psu, data in power_output["powerSupplies"].items(): environment_counters["power"][psu] = { "status": data.get("state", "ok") == "ok", "capacity": data.get("capacity", -1.0), "output": data.get("outputPower", -1.0), } cpu_lines = cpu_output.splitlines() # Matches either of # Cpu(s): 5.2%us, 1.4%sy, 0.0%ni, 92.2%id, 0.6%wa, 0.3%hi, 0.4%si, 0.0%st ( 4.16 > ) # %Cpu(s): 4.2 us, 0.9 sy, 0.0 ni, 94.6 id, 0.0 wa, 0.1 hi, 0.2 si, 0.0 st ( 4.16 < ) m = re.match(".*ni, (?P<idle>.*).id.*", cpu_lines[2]) environment_counters["cpu"][0] = {"%usage": round(100 - float(m.group("idle")), 1)} # Matches 
either of # Mem: 3844356k total, 3763184k used, 81172k free, 16732k buffers ( 4.16 > ) # KiB Mem: 32472080 total, 5697604 used, 26774476 free, 372052 buffers ( 4.16 < ) mem_regex = ( r"[^\d]*(?P<total>\d+)[k\s]+total," r"\s+(?P<used>\d+)[k\s]+used," r"\s+(?P<free>\d+)[k\s]+free,.*" ) m = re.match(mem_regex, cpu_lines[3]) environment_counters["memory"] = { "available_ram": int(m.group("total")), "used_ram": int(m.group("used")), } return environment_counters
def get_environment(self): def extract_temperature_data(data): for s in data: temp = s["currentTemperature"] if "currentTemperature" in s else 0.0 name = s["name"] values = { "temperature": temp, "is_alert": temp > s["overheatThreshold"], "is_critical": temp > s["criticalThreshold"], } yield name, values sh_version_out = self.device.run_commands(["show version"]) is_veos = sh_version_out[0]["modelName"].lower() == "veos" commands = ["show environment cooling", "show environment temperature"] if not is_veos: commands.append("show environment power") fans_output, temp_output, power_output = self.device.run_commands(commands) else: fans_output, temp_output = self.device.run_commands(commands) environment_counters = {"fans": {}, "temperature": {}, "power": {}, "cpu": {}} cpu_output = self.device.run_commands(["show processes top once"], encoding="text")[ 0 ]["output"] for slot in fans_output["fanTraySlots"]: environment_counters["fans"][slot["label"]] = {"status": slot["status"] == "ok"} # First check FRU's for fru_type in ["cardSlots", "powerSupplySlots"]: for fru in temp_output[fru_type]: t = { name: value for name, value in extract_temperature_data(fru["tempSensors"]) } environment_counters["temperature"].update(t) # On board sensors parsed = {n: v for n, v in extract_temperature_data(temp_output["tempSensors"])} environment_counters["temperature"].update(parsed) if not is_veos: for psu, data in power_output["powerSupplies"].items(): environment_counters["power"][psu] = { "status": data["state"] == "ok", "capacity": data["capacity"], "output": data["outputPower"], } cpu_lines = cpu_output.splitlines() # Matches either of # Cpu(s): 5.2%us, 1.4%sy, 0.0%ni, 92.2%id, 0.6%wa, 0.3%hi, 0.4%si, 0.0%st ( 4.16 > ) # %Cpu(s): 4.2 us, 0.9 sy, 0.0 ni, 94.6 id, 0.0 wa, 0.1 hi, 0.2 si, 0.0 st ( 4.16 < ) m = re.match(".*ni, (?P<idle>.*).id.*", cpu_lines[2]) environment_counters["cpu"][0] = {"%usage": round(100 - float(m.group("idle")), 1)} # Matches either of # Mem: 3844356k total, 
3763184k used, 81172k free, 16732k buffers ( 4.16 > ) # KiB Mem: 32472080 total, 5697604 used, 26774476 free, 372052 buffers ( 4.16 < ) mem_regex = ( r"[^\d]*(?P<total>\d+)[k\s]+total," r"\s+(?P<used>\d+)[k\s]+used," r"\s+(?P<free>\d+)[k\s]+free,.*" ) m = re.match(mem_regex, cpu_lines[3]) environment_counters["memory"] = { "available_ram": int(m.group("total")), "used_ram": int(m.group("used")), } return environment_counters
https://github.com/napalm-automation/napalm/issues/810
In [4]: d.get_environment() --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-4-b99851033f4f> in <module>() ----> 1 d.get_environment() ~/.pyenv/versions/3.7.0/lib/python3.7/site-packages/napalm/eos/eos.py in get_environment(self) 545 'status': data['state'] == 'ok', 546 'capacity': data['capacity'], --> 547 'output': data['outputPower'] 548 } 549 cpu_lines = cpu_output.splitlines() KeyError: 'outputPower'
KeyError
def get_route_to(self, destination="", protocol=""): routes = {} # Placeholder for vrf arg vrf = "" # Right not iterating through vrfs is necessary # show ipv6 route doesn't support vrf 'all' if vrf == "": vrfs = sorted(self._get_vrfs()) else: vrfs = [vrf] if protocol.lower() == "direct": protocol = "connected" try: ipv = "" if IPNetwork(destination).version == 6: ipv = "v6" except AddrFormatError: return "Please specify a valid destination!" commands = [] for _vrf in vrfs: commands.append( "show ip{ipv} route vrf {_vrf} {destination} {protocol} detail".format( ipv=ipv, _vrf=_vrf, destination=destination, protocol=protocol, ) ) commands_output = self.device.run_commands(commands) for _vrf, command_output in zip(vrfs, commands_output): if ipv == "v6": routes_out = command_output.get("routes", {}) else: routes_out = command_output.get("vrfs", {}).get(_vrf, {}).get("routes", {}) for prefix, route_details in routes_out.items(): if prefix not in routes.keys(): routes[prefix] = [] route_protocol = route_details.get("routeType") preference = route_details.get("preference", 0) route = { "current_active": True, "last_active": True, "age": 0, "next_hop": "", "protocol": route_protocol, "outgoing_interface": "", "preference": preference, "inactive_reason": "", "routing_table": _vrf, "selected_next_hop": True, "protocol_attributes": {}, } if protocol == "bgp" or route_protocol.lower() in ("ebgp", "ibgp"): nexthop_interface_map = {} for next_hop in route_details.get("vias"): nexthop_ip = napalm.base.helpers.ip(next_hop.get("nexthopAddr")) nexthop_interface_map[nexthop_ip] = next_hop.get("interface") metric = route_details.get("metric") command = "show ip{ipv} bgp {destination} detail vrf {_vrf}".format( ipv=ipv, destination=prefix, _vrf=_vrf ) vrf_details = ( self.device.run_commands([command])[0].get("vrfs", {}).get(_vrf, {}) ) local_as = vrf_details.get("asn") bgp_routes = ( vrf_details.get("bgpRouteEntries", {}) .get(prefix, {}) .get("bgpRoutePaths", []) ) for 
bgp_route_details in bgp_routes: bgp_route = route.copy() as_path = bgp_route_details.get("asPathEntry", {}).get("asPath", "") remote_as = int(as_path.strip("()").split()[-1]) remote_address = napalm.base.helpers.ip( bgp_route_details.get("routeDetail", {}) .get("peerEntry", {}) .get("peerAddr", "") ) local_preference = bgp_route_details.get("localPreference") next_hop = napalm.base.helpers.ip(bgp_route_details.get("nextHop")) active_route = bgp_route_details.get("routeType", {}).get( "active", False ) last_active = active_route # should find smth better communities = bgp_route_details.get("routeDetail", {}).get( "communityList", [] ) preference2 = bgp_route_details.get("weight") inactive_reason = bgp_route_details.get("reasonNotBestpath", "") bgp_route.update( { "current_active": active_route, "inactive_reason": inactive_reason, "last_active": last_active, "next_hop": next_hop, "outgoing_interface": nexthop_interface_map.get(next_hop), "selected_next_hop": active_route, "protocol_attributes": { "metric": metric, "as_path": as_path, "local_preference": local_preference, "local_as": local_as, "remote_as": remote_as, "remote_address": remote_address, "preference2": preference2, "communities": communities, }, } ) routes[prefix].append(bgp_route) else: if route_details.get("routeAction") in ("drop",): route["next_hop"] = "NULL" if route_details.get("routingDisabled") is True: route["last_active"] = False route["current_active"] = False for next_hop in route_details.get("vias"): route_next_hop = route.copy() if next_hop.get("nexthopAddr") is None: route_next_hop.update( { "next_hop": "", "outgoing_interface": next_hop.get("interface"), } ) else: route_next_hop.update( { "next_hop": napalm.base.helpers.ip( next_hop.get("nexthopAddr") ), "outgoing_interface": next_hop.get("interface"), } ) routes[prefix].append(route_next_hop) if route_details.get("vias") == []: # empty list routes[prefix].append(route) return routes
def get_route_to(self, destination="", protocol=""): routes = {} # Placeholder for vrf arg vrf = "" # Right not iterating through vrfs is necessary # show ipv6 route doesn't support vrf 'all' if vrf == "": vrfs = sorted(self._get_vrfs()) else: vrfs = [vrf] if protocol.lower() == "direct": protocol = "connected" try: ipv = "" if IPNetwork(destination).version == 6: ipv = "v6" except AddrFormatError: return "Please specify a valid destination!" commands = [] for _vrf in vrfs: commands.append( "show ip{ipv} route vrf {_vrf} {destination} {protocol} detail".format( ipv=ipv, _vrf=_vrf, destination=destination, protocol=protocol, ) ) commands_output = self.device.run_commands(commands) for _vrf, command_output in zip(vrfs, commands_output): if ipv == "v6": routes_out = command_output.get("routes", {}) else: routes_out = command_output.get("vrfs", {}).get(_vrf, {}).get("routes", {}) for prefix, route_details in routes_out.items(): if prefix not in routes.keys(): routes[prefix] = [] route_protocol = route_details.get("routeType") preference = route_details.get("preference", 0) route = { "current_active": True, "last_active": True, "age": 0, "next_hop": "", "protocol": route_protocol, "outgoing_interface": "", "preference": preference, "inactive_reason": "", "routing_table": _vrf, "selected_next_hop": True, "protocol_attributes": {}, } if protocol == "bgp" or route_protocol.lower() in ("ebgp", "ibgp"): nexthop_interface_map = {} for next_hop in route_details.get("vias"): nexthop_ip = napalm.base.helpers.ip(next_hop.get("nexthopAddr")) nexthop_interface_map[nexthop_ip] = next_hop.get("interface") metric = route_details.get("metric") command = "show ip{ipv} bgp {destination} detail vrf {_vrf}".format( ipv=ipv, destination=prefix, _vrf=_vrf ) vrf_details = ( self.device.run_commands([command])[0].get("vrfs", {}).get(_vrf, {}) ) local_as = vrf_details.get("asn") bgp_routes = ( vrf_details.get("bgpRouteEntries", {}) .get(prefix, {}) .get("bgpRoutePaths", []) ) for 
bgp_route_details in bgp_routes: bgp_route = route.copy() as_path = bgp_route_details.get("asPathEntry", {}).get("asPath", "") remote_as = int(as_path.split()[-1]) remote_address = napalm.base.helpers.ip( bgp_route_details.get("routeDetail", {}) .get("peerEntry", {}) .get("peerAddr", "") ) local_preference = bgp_route_details.get("localPreference") next_hop = napalm.base.helpers.ip(bgp_route_details.get("nextHop")) active_route = bgp_route_details.get("routeType", {}).get( "active", False ) last_active = active_route # should find smth better communities = bgp_route_details.get("routeDetail", {}).get( "communityList", [] ) preference2 = bgp_route_details.get("weight") inactive_reason = bgp_route_details.get("reasonNotBestpath", "") bgp_route.update( { "current_active": active_route, "inactive_reason": inactive_reason, "last_active": last_active, "next_hop": next_hop, "outgoing_interface": nexthop_interface_map.get(next_hop), "selected_next_hop": active_route, "protocol_attributes": { "metric": metric, "as_path": as_path, "local_preference": local_preference, "local_as": local_as, "remote_as": remote_as, "remote_address": remote_address, "preference2": preference2, "communities": communities, }, } ) routes[prefix].append(bgp_route) else: if route_details.get("routeAction") in ("drop",): route["next_hop"] = "NULL" if route_details.get("routingDisabled") is True: route["last_active"] = False route["current_active"] = False for next_hop in route_details.get("vias"): route_next_hop = route.copy() if next_hop.get("nexthopAddr") is None: route_next_hop.update( { "next_hop": "", "outgoing_interface": next_hop.get("interface"), } ) else: route_next_hop.update( { "next_hop": napalm.base.helpers.ip( next_hop.get("nexthopAddr") ), "outgoing_interface": next_hop.get("interface"), } ) routes[prefix].append(route_next_hop) if route_details.get("vias") == []: # empty list routes[prefix].append(route) return routes
https://github.com/napalm-automation/napalm/issues/736
ValueError Traceback (most recent call last) <ipython-input-2-c5a9e185b0d9> in <module>() 5 6 with EOSDriver("router", "bewing", getpass()) as d: ----> 7 pprint(d.get_route_to("192.0.2.0/24")) 8 9 /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in get_route_to(self, destination, protocol) 1130 bgp_route = route.copy() 1131 as_path = bgp_route_details.get('asPathEntry', {}).get('asPath', u'') -> 1132 remote_as = int(as_path.split()[-1]) 1133 remote_address = napalm.base.helpers.ip(bgp_route_details.get( 1134 'routeDetail', {}).get('peerEntry', {}).get('peerAddr', '')) ValueError: invalid literal for int() with base 10: '21149)'
ValueError
def parse_intf_section(interface): """Parse a single entry from show interfaces output. Different cases: mgmt0 is up admin state is up Ethernet2/1 is up admin state is up, Dedicated Interface Vlan1 is down (Administratively down), line protocol is down, autostate enabled Ethernet154/1/48 is up (with no 'admin state') """ interface = interface.strip() re_protocol = ( r"^(?P<intf_name>\S+?)\s+is\s+(?P<status>.+?)" r",\s+line\s+protocol\s+is\s+(?P<protocol>\S+).*$" ) re_intf_name_state = r"^(?P<intf_name>\S+) is (?P<intf_state>\S+).*" re_is_enabled_1 = r"^admin state is (?P<is_enabled>\S+)$" re_is_enabled_2 = r"^admin state is (?P<is_enabled>\S+), " re_is_enabled_3 = r"^.* is down.*Administratively down.*$" re_mac = r"^\s+Hardware:\s+(?P<hardware>.*), address:\s+(?P<mac_address>\S+) " re_speed = r"\s+MTU .*, BW (?P<speed>\S+) (?P<speed_unit>\S+), " re_description_1 = r"^\s+Description:\s+(?P<description>.*) (?:MTU|Internet)" re_description_2 = r"^\s+Description:\s+(?P<description>.*)$" re_hardware = r"^.* Hardware: (?P<hardware>\S+)$" # Check for 'protocol is ' lines match = re.search(re_protocol, interface, flags=re.M) if match: intf_name = match.group("intf_name") status = match.group("status") protocol = match.group("protocol") if "admin" in status.lower(): is_enabled = False else: is_enabled = True is_up = bool("up" in protocol) else: # More standard is up, next line admin state is lines match = re.search(re_intf_name_state, interface) intf_name = match.group("intf_name") intf_state = match.group("intf_state").strip() is_up = True if intf_state == "up" else False admin_state_present = re.search("admin state is", interface) if admin_state_present: # Parse cases where 'admin state' string exists for x_pattern in [re_is_enabled_1, re_is_enabled_2]: match = re.search(x_pattern, interface, flags=re.M) if match: is_enabled = match.group("is_enabled").strip() is_enabled = True if re.search("up", is_enabled) else False break else: msg = "Error parsing intf, 'admin state' 
never detected:\n\n{}".format( interface ) raise ValueError(msg) else: # No 'admin state' should be 'is up' or 'is down' strings # If interface is up; it is enabled is_enabled = True if not is_up: match = re.search(re_is_enabled_3, interface, flags=re.M) if match: is_enabled = False match = re.search(re_mac, interface, flags=re.M) if match: mac_address = match.group("mac_address") mac_address = napalm.base.helpers.mac(mac_address) else: mac_address = "" match = re.search(re_hardware, interface, flags=re.M) speed_exist = True if match: if match.group("hardware") == "NVE": speed_exist = False if speed_exist: match = re.search(re_speed, interface, flags=re.M) speed = int(match.group("speed")) speed_unit = match.group("speed_unit") # This was alway in Kbit (in the data I saw) if speed_unit != "Kbit": msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format( interface ) raise ValueError(msg) speed = int(round(speed / 1000.0)) else: speed = -1 description = "" for x_pattern in [re_description_1, re_description_2]: match = re.search(x_pattern, interface, flags=re.M) if match: description = match.group("description") break return { intf_name: { "description": description, "is_enabled": is_enabled, "is_up": is_up, "last_flapped": -1.0, "mac_address": mac_address, "speed": speed, } }
def parse_intf_section(interface): """Parse a single entry from show interfaces output. Different cases: mgmt0 is up admin state is up Ethernet2/1 is up admin state is up, Dedicated Interface Vlan1 is down (Administratively down), line protocol is down, autostate enabled Ethernet154/1/48 is up (with no 'admin state') """ interface = interface.strip() re_protocol = ( r"^(?P<intf_name>\S+?)\s+is\s+(?P<status>.+?)" r",\s+line\s+protocol\s+is\s+(?P<protocol>\S+).*$" ) re_intf_name_state = r"^(?P<intf_name>\S+) is (?P<intf_state>\S+).*" re_is_enabled_1 = r"^admin state is (?P<is_enabled>\S+)$" re_is_enabled_2 = r"^admin state is (?P<is_enabled>\S+), " re_is_enabled_3 = r"^.* is down.*Administratively down.*$" re_mac = r"^\s+Hardware.*address:\s+(?P<mac_address>\S+) " re_speed = r"^\s+MTU .*, BW (?P<speed>\S+) (?P<speed_unit>\S+), " re_description = r"^\s+Description:\s+(?P<description>.*)$" # Check for 'protocol is ' lines match = re.search(re_protocol, interface, flags=re.M) if match: intf_name = match.group("intf_name") status = match.group("status") protocol = match.group("protocol") if "admin" in status.lower(): is_enabled = False else: is_enabled = True is_up = bool("up" in protocol) else: # More standard is up, next line admin state is lines match = re.search(re_intf_name_state, interface) intf_name = match.group("intf_name") intf_state = match.group("intf_state").strip() is_up = True if intf_state == "up" else False admin_state_present = re.search("admin state is", interface) if admin_state_present: # Parse cases where 'admin state' string exists for x_pattern in [re_is_enabled_1, re_is_enabled_2]: match = re.search(x_pattern, interface, flags=re.M) if match: is_enabled = match.group("is_enabled").strip() is_enabled = True if is_enabled == "up" else False break else: msg = "Error parsing intf, 'admin state' never detected:\n\n{}".format( interface ) raise ValueError(msg) else: # No 'admin state' should be 'is up' or 'is down' strings # If interface is up; it is 
enabled is_enabled = True if not is_up: match = re.search(re_is_enabled_3, interface, flags=re.M) if match: is_enabled = False match = re.search(re_mac, interface, flags=re.M) if match: mac_address = match.group("mac_address") mac_address = napalm.base.helpers.mac(mac_address) else: mac_address = "" match = re.search(re_speed, interface, flags=re.M) speed = int(match.group("speed")) speed_unit = match.group("speed_unit") # This was alway in Kbit (in the data I saw) if speed_unit != "Kbit": msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format( interface ) raise ValueError(msg) speed = int(round(speed / 1000.0)) description = "" match = re.search(re_description, interface, flags=re.M) if match: description = match.group("description") return { intf_name: { "description": description, "is_enabled": is_enabled, "is_up": is_up, "last_flapped": -1.0, "mac_address": mac_address, "speed": speed, } }
https://github.com/napalm-automation/napalm/issues/672
Traceback (most recent call last): File "script_switch.py", line 308, in <module> main(sys.argv[1:]) File "script_switch.py", line 303, in main test_migration(ip, login, pwd, driver, fex) File "script_switch.py", line 206, in test_migration new_intf_table = device.get_interfaces() File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 862, in get_interfaces interfaces.update(parse_intf_section(entry)) File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 149, in parse_intf_section speed = int(match.group('speed')) AttributeError: 'NoneType' object has no attribute 'group'
AttributeError
def get_facts(self): """Return a set of facts from the devices.""" # default values. vendor = "Cisco" uptime = -1 serial_number, fqdn, os_version, hostname, domain_name, model = ("",) * 6 # obtain output from device show_ver = self.device.send_command("show version") show_hosts = self.device.send_command("show hosts") show_int_status = self.device.send_command("show interface status") show_hostname = self.device.send_command("show hostname") # uptime/serial_number/IOS version for line in show_ver.splitlines(): if " uptime is " in line: _, uptime_str = line.split(" uptime is ") uptime = self.parse_uptime(uptime_str) if "Processor Board ID" in line: _, serial_number = line.split("Processor Board ID ") serial_number = serial_number.strip() if "system: " in line or "NXOS: " in line: line = line.strip() os_version = line.split()[2] os_version = os_version.strip() if "cisco" in line and "hassis" in line: match = re.search(r".cisco (.*) \(", line) if match: model = match.group(1).strip() match = re.search(r".cisco (.* [cC]hassis)", line) if match: model = match.group(1).strip() hostname = show_hostname.strip() # Determine domain_name and fqdn for line in show_hosts.splitlines(): if "Default domain" in line: _, domain_name = re.split(r".*Default domain.*is ", line) domain_name = domain_name.strip() break if hostname.count(".") >= 2: fqdn = hostname # Remove domain name from hostname if domain_name: hostname = re.sub(re.escape(domain_name) + "$", "", hostname) hostname = hostname.strip(".") elif domain_name: fqdn = "{}.{}".format(hostname, domain_name) # interface_list filter interface_list = [] show_int_status = show_int_status.strip() # Remove the header information show_int_status = re.sub( r"(?:^---------+$|^Port .*$|^ .*$)", "", show_int_status, flags=re.M ) for line in show_int_status.splitlines(): if not line: continue interface = line.split()[0] # Return canonical interface name interface_list.append(canonical_interface_name(interface)) return { "uptime": 
int(uptime), "vendor": vendor, "os_version": py23_compat.text_type(os_version), "serial_number": py23_compat.text_type(serial_number), "model": py23_compat.text_type(model), "hostname": py23_compat.text_type(hostname), "fqdn": fqdn, "interface_list": interface_list, }
def get_facts(self): """Return a set of facts from the devices.""" # default values. vendor = "Cisco" uptime = -1 serial_number, fqdn, os_version, hostname, domain_name, model = ("",) * 6 # obtain output from device show_ver = self.device.send_command("show version") show_hosts = self.device.send_command("show hosts") show_int_status = self.device.send_command("show interface status") show_hostname = self.device.send_command("show hostname") # uptime/serial_number/IOS version for line in show_ver.splitlines(): if " uptime is " in line: _, uptime_str = line.split(" uptime is ") uptime = self.parse_uptime(uptime_str) if "Processor Board ID" in line: _, serial_number = line.split("Processor Board ID ") serial_number = serial_number.strip() if "system: " in line or "NXOS: " in line: line = line.strip() os_version = line.split()[2] os_version = os_version.strip() if "cisco" in line and "hassis" in line: match = re.search(r".cisco (.*) \(", line) if match: model = match.group(1).strip() match = re.search(r".cisco (.* [cC]hassis)", line) if match: model = match.group(1).strip() hostname = show_hostname.strip() # Determine domain_name and fqdn for line in show_hosts.splitlines(): if "Default domain" in line: _, domain_name = re.split(r".*Default domain.*is ", line) domain_name = domain_name.strip() break if hostname.count(".") >= 2: fqdn = hostname # Remove domain name from hostname if domain_name: hostname = re.sub(re.escape(domain_name) + "$", "", hostname) hostname = hostname.strip(".") elif domain_name: fqdn = "{}.{}".format(hostname, domain_name) # interface_list filter interface_list = [] show_int_status = show_int_status.strip() # Remove the header information show_int_status = re.split(r"^---------+", show_int_status, flags=re.M)[-1] for line in show_int_status.splitlines(): if not line: continue interface = line.split()[0] # Return canonical interface name interface_list.append(canonical_interface_name(interface)) return { "uptime": int(uptime), "vendor": vendor, 
"os_version": py23_compat.text_type(os_version), "serial_number": py23_compat.text_type(serial_number), "model": py23_compat.text_type(model), "hostname": py23_compat.text_type(hostname), "fqdn": fqdn, "interface_list": interface_list, }
https://github.com/napalm-automation/napalm/issues/672
Traceback (most recent call last): File "script_switch.py", line 308, in <module> main(sys.argv[1:]) File "script_switch.py", line 303, in main test_migration(ip, login, pwd, driver, fex) File "script_switch.py", line 206, in test_migration new_intf_table = device.get_interfaces() File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 862, in get_interfaces interfaces.update(parse_intf_section(entry)) File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 149, in parse_intf_section speed = int(match.group('speed')) AttributeError: 'NoneType' object has no attribute 'group'
AttributeError
def get_mac_address_table(self): """ Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address Table, having the following keys * mac (string) * interface (string) * vlan (int) * active (boolean) * static (boolean) * moves (int) * last_move (float) Format1: Legend: * - primary entry, G - Gateway MAC, (R) - Routed MAC, O - Overlay MAC age - seconds since last seen,+ - primary entry using vPC Peer-Link, (T) - True, (F) - False VLAN MAC Address Type age Secure NTFY Ports/SWID.SSID.LID ---------+-----------------+--------+---------+------+----+------------------ * 27 0026.f064.0000 dynamic - F F po1 * 27 001b.54c2.2644 dynamic - F F po1 * 27 0000.0c9f.f2bc dynamic - F F po1 * 27 0026.980a.df44 dynamic - F F po1 * 16 0050.56bb.0164 dynamic - F F po2 * 13 90e2.ba5a.9f30 dynamic - F F eth1/2 * 13 90e2.ba4b.fc78 dynamic - F F eth1/1 39 0100.5e00.4b4b igmp 0 F F Po1 Po2 Po22 110 0100.5e00.0118 igmp 0 F F Po1 Po2 Eth142/1/3 Eth112/1/5 Eth112/1/6 Eth122/1/5 """ # The '*' is stripped out later RE_MACTABLE_FORMAT1 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( VLAN_REGEX, MAC_REGEX ) RE_MACTABLE_FORMAT2 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( "-", MAC_REGEX ) # REGEX dedicated for lines with only interfaces (suite of the previous MAC address) RE_MACTABLE_FORMAT3 = r"^\s+\S+" mac_address_table = [] command = "show mac address-table" output = self.device.send_command(command) # noqa def remove_prefix(s, prefix): return s[len(prefix) :] if s.startswith(prefix) else s def process_mac_fields(vlan, mac, mac_type, interface): """Return proper data for mac address fields.""" if mac_type.lower() in ["self", "static", "system"]: static = True if vlan.lower() == "all": vlan = 0 elif vlan == "-": vlan = 0 if ( interface.lower() == "cpu" or re.search(r"router", interface.lower()) or re.search(r"switch", interface.lower()) ): interface = "" else: static = False if mac_type.lower() in ["dynamic"]: active = True else: active = False return { 
"mac": napalm.base.helpers.mac(mac), "interface": interface, "vlan": int(vlan), "static": static, "active": active, "moves": -1, "last_move": -1.0, } # Skip the header lines output = re.split(r"^----.*", output, flags=re.M)[1:] output = "\n".join(output).strip() # Strip any leading characters output = re.sub(r"^[\*\+GOCE]", "", output, flags=re.M) output = re.sub(r"^\(R\)", "", output, flags=re.M) output = re.sub(r"^\(T\)", "", output, flags=re.M) output = re.sub(r"^\(F\)", "", output, flags=re.M) output = re.sub(r"vPC Peer-Link", "vPC-Peer-Link", output, flags=re.M) for line in output.splitlines(): # Every 500 Mac's Legend is reprinted, regardless of terminal length if re.search(r"^Legend", line): continue elif re.search(r"^\s+\* \- primary entry", line): continue elif re.search(r"^\s+age \-", line): continue elif re.search(r"^\s+VLAN", line): continue elif re.search(r"^------", line): continue elif re.search(r"^\s*$", line): continue for pattern in [RE_MACTABLE_FORMAT1, RE_MACTABLE_FORMAT2, RE_MACTABLE_FORMAT3]: if re.search(pattern, line): fields = line.split() if len(fields) >= 7: vlan, mac, mac_type, _, _, _, interface = fields[:7] mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) # there can be multiples interfaces for the same MAC on the same line for interface in fields[7:]: mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) break # interfaces can overhang to the next line (line only contains interfaces) elif len(fields) < 7: for interface in fields: mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) break else: raise ValueError("Unexpected output from: {}".format(repr(line))) return mac_address_table
def get_mac_address_table(self): """ Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address Table, having the following keys * mac (string) * interface (string) * vlan (int) * active (boolean) * static (boolean) * moves (int) * last_move (float) Format1: Legend: * - primary entry, G - Gateway MAC, (R) - Routed MAC, O - Overlay MAC age - seconds since last seen,+ - primary entry using vPC Peer-Link, (T) - True, (F) - False VLAN MAC Address Type age Secure NTFY Ports/SWID.SSID.LID ---------+-----------------+--------+---------+------+----+------------------ * 27 0026.f064.0000 dynamic - F F po1 * 27 001b.54c2.2644 dynamic - F F po1 * 27 0000.0c9f.f2bc dynamic - F F po1 * 27 0026.980a.df44 dynamic - F F po1 * 16 0050.56bb.0164 dynamic - F F po2 * 13 90e2.ba5a.9f30 dynamic - F F eth1/2 * 13 90e2.ba4b.fc78 dynamic - F F eth1/1 39 0100.5e00.4b4b igmp 0 F F Po1 Po2 Po22 110 0100.5e00.0118 igmp 0 F F Po1 Po2 Eth142/1/3 Eth112/1/5 Eth112/1/6 Eth122/1/5 """ # The '*' is stripped out later RE_MACTABLE_FORMAT1 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( VLAN_REGEX, MAC_REGEX ) RE_MACTABLE_FORMAT2 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( "-", MAC_REGEX ) # REGEX dedicated for lines with only interfaces (suite of the previous MAC address) RE_MACTABLE_FORMAT3 = r"^\s+\S+" mac_address_table = [] command = "show mac address-table" output = self.device.send_command(command) # noqa def remove_prefix(s, prefix): return s[len(prefix) :] if s.startswith(prefix) else s def process_mac_fields(vlan, mac, mac_type, interface): """Return proper data for mac address fields.""" if mac_type.lower() in ["self", "static", "system"]: static = True if vlan.lower() == "all": vlan = 0 elif vlan == "-": vlan = 0 if ( interface.lower() == "cpu" or re.search(r"router", interface.lower()) or re.search(r"switch", interface.lower()) ): interface = "" else: static = False if mac_type.lower() in ["dynamic"]: active = True else: active = False return { 
"mac": napalm.base.helpers.mac(mac), "interface": interface, "vlan": int(vlan), "static": static, "active": active, "moves": -1, "last_move": -1.0, } # Skip the header lines output = re.split(r"^----.*", output, flags=re.M)[1:] output = "\n".join(output).strip() # Strip any leading characters output = re.sub(r"^[\*\+GO]", "", output, flags=re.M) output = re.sub(r"^\(R\)", "", output, flags=re.M) output = re.sub(r"^\(T\)", "", output, flags=re.M) output = re.sub(r"^\(F\)", "", output, flags=re.M) output = re.sub(r"vPC Peer-Link", "vPC-Peer-Link", output, flags=re.M) for line in output.splitlines(): # Every 500 Mac's Legend is reprinted, regardless of terminal length if re.search(r"^Legend", line): continue elif re.search(r"^\s+\* \- primary entry", line): continue elif re.search(r"^\s+age \-", line): continue elif re.search(r"^\s+VLAN", line): continue elif re.search(r"^------", line): continue elif re.search(r"^\s*$", line): continue for pattern in [RE_MACTABLE_FORMAT1, RE_MACTABLE_FORMAT2, RE_MACTABLE_FORMAT3]: if re.search(pattern, line): fields = line.split() if len(fields) >= 7: vlan, mac, mac_type, _, _, _, interface = fields[:7] mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) # there can be multiples interfaces for the same MAC on the same line for interface in fields[7:]: mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) break # interfaces can overhang to the next line (line only contains interfaces) elif len(fields) < 7: for interface in fields: mac_address_table.append( process_mac_fields(vlan, mac, mac_type, interface) ) break else: raise ValueError("Unexpected output from: {}".format(repr(line))) return mac_address_table
https://github.com/napalm-automation/napalm/issues/672
Traceback (most recent call last): File "script_switch.py", line 308, in <module> main(sys.argv[1:]) File "script_switch.py", line 303, in main test_migration(ip, login, pwd, driver, fex) File "script_switch.py", line 206, in test_migration new_intf_table = device.get_interfaces() File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 862, in get_interfaces interfaces.update(parse_intf_section(entry)) File "/home/florian_lacommare/GitHub/napalm/napalm/nxos_ssh/nxos_ssh.py", line 149, in parse_intf_section speed = int(match.group('speed')) AttributeError: 'NoneType' object has no attribute 'group'
AttributeError
def get_lldp_neighbors_detail(self, interface=""): """Detailed view of the LLDP neighbors.""" lldp_neighbors = {} lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device) try: lldp_table.get() except RpcError as rpcerr: # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening log.error("Unable to retrieve the LLDP neighbors information:") log.error(rpcerr.message) return {} interfaces = lldp_table.get().keys() # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series # and SRX Series is get-lldp-interface-neighbors-information, # and rpc for M, MX, and T Series is get-lldp-interface-neighbors # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later) # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa # Exceptions: # EX9208 personality = SWITCH RPC: <get-lldp-interface-neighbors><interface-device> lldp_table.GET_RPC = "get-lldp-interface-neighbors" if self.device.facts.get("personality") not in ( "MX", "M", "PTX", "T", ) and self.device.facts.get("model") not in ("EX9208", "QFX10008"): # Still need to confirm for QFX10002 and other EX series lldp_table.GET_RPC = "get-lldp-interface-neighbors-information" for interface in interfaces: if "EX9208" in self.device.facts.get("model"): lldp_table.get(interface_device=interface) elif self.device.facts.get("personality") not in ("MX", "M", "PTX", "T"): lldp_table.get(interface_name=interface) else: lldp_table.get(interface_device=interface) for item in lldp_table: if interface not in lldp_neighbors.keys(): lldp_neighbors[interface] = [] lldp_neighbors[interface].append( { "parent_interface": item.parent_interface, "remote_port": item.remote_port, "remote_chassis_id": napalm.base.helpers.convert( napalm.base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id, ), "remote_port_description": 
napalm.base.helpers.convert( py23_compat.text_type, item.remote_port_description ), "remote_system_name": item.remote_system_name, "remote_system_description": item.remote_system_description, "remote_system_capab": item.remote_system_capab, "remote_system_enable_capab": item.remote_system_enable_capab, } ) return lldp_neighbors
def get_lldp_neighbors_detail(self, interface=""): """Detailed view of the LLDP neighbors.""" lldp_neighbors = {} lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device) try: lldp_table.get() except RpcError as rpcerr: # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening log.error("Unable to retrieve the LLDP neighbors information:") log.error(rpcerr.message) return {} interfaces = lldp_table.get().keys() # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series # and SRX Series is get-lldp-interface-neighbors-information, # and rpc for M, MX, and T Series is get-lldp-interface-neighbors # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later) # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa # Exceptions: # EX9208 personality = SWITCH RPC: <get-lldp-interface-neighbors><interface-device> lldp_table.GET_RPC = "get-lldp-interface-neighbors" if self.device.facts.get("personality") not in ( "MX", "M", "T", ) and self.device.facts.get("model") not in ("EX9208", "QFX10008"): # Still need to confirm for QFX10002 and other EX series lldp_table.GET_RPC = "get-lldp-interface-neighbors-information" for interface in interfaces: if "EX9208" in self.device.facts.get("model"): lldp_table.get(interface_device=interface) elif self.device.facts.get("personality") not in ("MX", "M", "T"): lldp_table.get(interface_name=interface) else: lldp_table.get(interface_device=interface) for item in lldp_table: if interface not in lldp_neighbors.keys(): lldp_neighbors[interface] = [] lldp_neighbors[interface].append( { "parent_interface": item.parent_interface, "remote_port": item.remote_port, "remote_chassis_id": napalm.base.helpers.convert( napalm.base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id, ), "remote_port_description": napalm.base.helpers.convert( 
py23_compat.text_type, item.remote_port_description ), "remote_system_name": item.remote_system_name, "remote_system_description": item.remote_system_description, "remote_system_capab": item.remote_system_capab, "remote_system_enable_capab": item.remote_system_enable_capab, } ) return lldp_neighbors
https://github.com/napalm-automation/napalm/issues/646
2018-02-09 14:10:53,449 - napalm - ERROR - method - Failed: RpcError(severity: error, bad_element: get-lldp-interface-neighbors-information, message: error: syntax error error: syntax error) ================= Traceback ================= Traceback (most recent call last): File "/usr/local/bin/napalm", line 11, in <module> load_entry_point('napalm==2.3.0', 'console_scripts', 'napalm')() File "/usr/local/lib/python2.7/dist-packages/napalm/base/clitools/cl_napalm.py", line 285, in main run_tests(args) File "/usr/local/lib/python2.7/dist-packages/napalm/base/clitools/cl_napalm.py", line 268, in run_tests call_getter(device, args.method, **method_kwargs) File "/usr/local/lib/python2.7/dist-packages/napalm/base/clitools/cl_napalm.py", line 27, in wrapper r = func(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/napalm/base/clitools/cl_napalm.py", line 238, in call_getter r = func(**kwargs) File "/usr/local/lib/python2.7/dist-packages/napalm/junos/junos.py", line 718, in get_lldp_neighbors_detail lldp_table.get(interface_name=interface) File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/factory/optable.py", line 64, in get self.xml = getattr(self.RPC, self.GET_RPC)(**rpc_args) File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/rpcmeta.py", line 345, in _exec_rpc return self._junos.execute(rpc, **dec_args) File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/decorators.py", line 63, in wrapper result = function(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/decorators.py", line 31, in wrapper return function(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/device.py", line 790, in execute errs=ex) jnpr.junos.exception.RpcError: RpcError(severity: error, bad_element: get-lldp-interface-neighbors-information, message: error: syntax error error: syntax error)
jnpr.junos.exception.RpcError
def get_lldp_neighbors(self): """IOS implementation of get_lldp_neighbors.""" lldp = {} command = "show lldp neighbors" output = self._send_command(command) # Check if router supports the command if "% Invalid input" in output: return {} # Process the output to obtain just the LLDP entries try: split_output = re.split(r"^Device ID.*$", output, flags=re.M)[1] split_output = re.split( r"^Total entries displayed.*$", split_output, flags=re.M )[0] except IndexError: return {} split_output = split_output.strip() for lldp_entry in split_output.splitlines(): # Example, twb-sf-hpsw1 Fa4 120 B 17 try: device_id, local_int_brief, hold_time, capability, remote_port = ( lldp_entry.split() ) except ValueError: if len(lldp_entry.split()) == 4: # Four fields might be long_name or missing capability capability_missing = True if lldp_entry[46] == " " else False if capability_missing: device_id, local_int_brief, hold_time, remote_port = ( lldp_entry.split() ) else: # Might be long_name issue tmp_field, hold_time, capability, remote_port = lldp_entry.split() device_id = tmp_field[:20] local_int_brief = tmp_field[20:] # device_id might be abbreviated, try to get full name lldp_tmp = self._lldp_detail_parser(local_int_brief) device_id_new = lldp_tmp[3][0] # Verify abbreviated and full name are consistent if device_id_new[:20] == device_id: device_id = device_id_new else: raise ValueError("Unable to obtain remote device name") elif len(lldp_entry.split()) > 5: # Assuming spaces only in device_id device_id = " ".join(lldp_entry.split()[: len(lldp_entry.split()) - 4]) local_int_brief = lldp_entry.split()[-4] remote_port = lldp_entry.split()[-1] local_port = self._expand_interface_name(local_int_brief) entry = {"port": remote_port, "hostname": device_id} lldp.setdefault(local_port, []) lldp[local_port].append(entry) return lldp
def get_lldp_neighbors(self): """IOS implementation of get_lldp_neighbors.""" lldp = {} command = "show lldp neighbors" output = self._send_command(command) # Check if router supports the command if "% Invalid input" in output: return {} # Process the output to obtain just the LLDP entries try: split_output = re.split(r"^Device ID.*$", output, flags=re.M)[1] split_output = re.split( r"^Total entries displayed.*$", split_output, flags=re.M )[0] except IndexError: return {} split_output = split_output.strip() for lldp_entry in split_output.splitlines(): # Example, twb-sf-hpsw1 Fa4 120 B 17 try: device_id, local_int_brief, hold_time, capability, remote_port = ( lldp_entry.split() ) except ValueError: if len(lldp_entry.split()) == 4: # Four fields might be long_name or missing capability capability_missing = True if lldp_entry[46] == " " else False if capability_missing: device_id, local_int_brief, hold_time, remote_port = ( lldp_entry.split() ) else: # Might be long_name issue tmp_field, hold_time, capability, remote_port = lldp_entry.split() device_id = tmp_field[:20] local_int_brief = tmp_field[20:] # device_id might be abbreviated, try to get full name lldp_tmp = self._lldp_detail_parser(local_int_brief) device_id_new = lldp_tmp[3][0] # Verify abbreviated and full name are consistent if device_id_new[:20] == device_id: device_id = device_id_new else: raise ValueError("Unable to obtain remote device name") local_port = self._expand_interface_name(local_int_brief) entry = {"port": remote_port, "hostname": device_id} lldp.setdefault(local_port, []) lldp[local_port].append(entry) return lldp
https://github.com/napalm-automation/napalm/issues/456
Traceback (most recent call last): File "hello.py", line 57, in <module> print (local_int_brief) NameError: name 'local_int_brief' is not defined
NameError
def get_optics(self): """Return optics information.""" optics_table = junos_views.junos_intf_optics_table(self.device) optics_table.get() optics_items = optics_table.items() # optics_items has no lane information, so we need to re-format data # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G # but the information for 40G/100G is incorrect at this point # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ]) optics_items_with_lane = [] for intf_optic_item in optics_items: temp_list = list(intf_optic_item) temp_list.insert(1, "0") new_intf_optic_item = tuple(temp_list) optics_items_with_lane.append(new_intf_optic_item) # Now optics_items_with_lane has all optics with lane 0 included # Example: ('xe-0/0/0', u'0', [ optical_values ]) # Get optical information for 40G/100G optics optics_table40G = junos_views.junos_intf_40Goptics_table(self.device) optics_table40G.get() optics_40Gitems = optics_table40G.items() # Re-format data as before inserting lane value new_optics_40Gitems = [] for item in optics_40Gitems: lane = item[0] iface = item[1].pop(0) new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1])) # New_optics_40Gitems contains 40G/100G optics only: # ('et-0/0/49', u'0', [ optical_values ]), # ('et-0/0/49', u'1', [ optical_values ]), # ('et-0/0/49', u'2', [ optical_values ]) # Remove 40G/100G optics entries with wrong information returned # from junos_intf_optics_table() iface_40G = [item[0] for item in new_optics_40Gitems] for intf_optic_item in optics_items_with_lane: iface_name = intf_optic_item[0] if iface_name not in iface_40G: new_optics_40Gitems.append(intf_optic_item) # New_optics_40Gitems contains all optics 10G/40G/100G with the lane optics_detail = {} for intf_optic_item in new_optics_40Gitems: lane = intf_optic_item[1] interface_name = py23_compat.text_type(intf_optic_item[0]) optics = dict(intf_optic_item[2]) if interface_name not in optics_detail: optics_detail[interface_name] = {} 
optics_detail[interface_name]["physical_channels"] = {} optics_detail[interface_name]["physical_channels"]["channel"] = [] INVALID_LIGHT_LEVEL = [None, C.OPTICS_NULL_LEVEL, C.OPTICS_NULL_LEVEL_SPC] # Defaulting avg, min, max values to 0.0 since device does not # return these values intf_optics = { "index": int(lane), "state": { "input_power": { "instant": ( float(optics["input_power"]) if optics["input_power"] not in INVALID_LIGHT_LEVEL else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, "output_power": { "instant": ( float(optics["output_power"]) if optics["output_power"] not in INVALID_LIGHT_LEVEL else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, "laser_bias_current": { "instant": ( float(optics["laser_bias_current"]) if optics["laser_bias_current"] not in INVALID_LIGHT_LEVEL else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, }, } optics_detail[interface_name]["physical_channels"]["channel"].append( intf_optics ) return optics_detail
def get_optics(self): """Return optics information.""" optics_table = junos_views.junos_intf_optics_table(self.device) optics_table.get() optics_items = optics_table.items() # optics_items has no lane information, so we need to re-format data # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G # but the information for 40G/100G is incorrect at this point # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ]) optics_items_with_lane = [] for intf_optic_item in optics_items: temp_list = list(intf_optic_item) temp_list.insert(1, "0") new_intf_optic_item = tuple(temp_list) optics_items_with_lane.append(new_intf_optic_item) # Now optics_items_with_lane has all optics with lane 0 included # Example: ('xe-0/0/0', u'0', [ optical_values ]) # Get optical information for 40G/100G optics optics_table40G = junos_views.junos_intf_40Goptics_table(self.device) optics_table40G.get() optics_40Gitems = optics_table40G.items() # Re-format data as before inserting lane value new_optics_40Gitems = [] for item in optics_40Gitems: lane = item[0] iface = item[1].pop(0) new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1])) # New_optics_40Gitems contains 40G/100G optics only: # ('et-0/0/49', u'0', [ optical_values ]), # ('et-0/0/49', u'1', [ optical_values ]), # ('et-0/0/49', u'2', [ optical_values ]) # Remove 40G/100G optics entries with wrong information returned # from junos_intf_optics_table() iface_40G = [item[0] for item in new_optics_40Gitems] for intf_optic_item in optics_items_with_lane: iface_name = intf_optic_item[0] if iface_name not in iface_40G: new_optics_40Gitems.append(intf_optic_item) # New_optics_40Gitems contains all optics 10G/40G/100G with the lane optics_detail = {} for intf_optic_item in new_optics_40Gitems: lane = intf_optic_item[1] interface_name = py23_compat.text_type(intf_optic_item[0]) optics = dict(intf_optic_item[2]) if interface_name not in optics_detail: optics_detail[interface_name] = {} 
optics_detail[interface_name]["physical_channels"] = {} optics_detail[interface_name]["physical_channels"]["channel"] = [] # Defaulting avg, min, max values to 0.0 since device does not # return these values intf_optics = { "index": int(lane), "state": { "input_power": { "instant": ( float(optics["input_power"]) if optics["input_power"] not in [None, C.OPTICS_NULL_LEVEL] else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, "output_power": { "instant": ( float(optics["output_power"]) if optics["output_power"] not in [None, C.OPTICS_NULL_LEVEL] else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, "laser_bias_current": { "instant": ( float(optics["laser_bias_current"]) if optics["laser_bias_current"] not in [None, C.OPTICS_NULL_LEVEL] else 0.0 ), "avg": 0.0, "max": 0.0, "min": 0.0, }, }, } optics_detail[interface_name]["physical_channels"]["channel"].append( intf_optics ) return optics_detail
https://github.com/napalm-automation/napalm/issues/491
Traceback (most recent call last): File "nuke.py", line 32, in <module> optics = device.get_optics() File "/Users/jboswell/.virtualenvs/nuke/lib/python2.7/site-packages/napalm_junos/junos.py", line 1785, in get_optics [None, C.OPTICS_NULL_LEVEL] ValueError: could not convert string to float: - Inf
ValueError
def run(args): import os import sys import numpy as np import evo.core.lie_algebra as lie from evo.core import trajectory from evo.core.trajectory import PoseTrajectory3D from evo.tools import file_interface, log from evo.tools.settings import SETTINGS log.configure_logging( verbose=args.verbose, silent=args.silent, debug=args.debug, local_logfile=args.logfile, ) if args.debug: import pprint logger.debug( "main_parser config:\n" + pprint.pformat({arg: getattr(args, arg) for arg in vars(args)}) + "\n" ) logger.debug(SEP) trajectories, ref_traj = load_trajectories(args) if args.merge: if args.subcommand == "kitti": die("Can't merge KITTI files.") if len(trajectories) == 0: die("No trajectories to merge (excluding --ref).") trajectories = {"merged_trajectory": trajectory.merge(trajectories.values())} if args.transform_left or args.transform_right: tf_type = "left" if args.transform_left else "right" tf_path = args.transform_left if args.transform_left else args.transform_right transform = file_interface.load_transform_json(tf_path) logger.debug(SEP) if not lie.is_se3(transform): logger.warning("Not a valid SE(3) transformation!") if args.invert_transform: transform = lie.se3_inverse(transform) logger.debug( "Applying a {}-multiplicative transformation:\n{}".format( tf_type, transform ) ) for traj in trajectories.values(): traj.transform( transform, right_mul=args.transform_right, propagate=args.propagate_transform, ) if args.t_offset: logger.debug(SEP) for name, traj in trajectories.items(): if type(traj) is trajectory.PosePath3D: die("{} doesn't have timestamps - can't add time offset.".format(name)) logger.info("Adding time offset to {}: {} (s)".format(name, args.t_offset)) traj.timestamps += args.t_offset if args.n_to_align != -1 and not (args.align or args.correct_scale): die("--n_to_align is useless without --align or/and --correct_scale") if args.sync or args.align or args.correct_scale or args.align_origin: from evo.core import sync if not args.ref: 
logger.debug(SEP) die("Can't align or sync without a reference! (--ref) *grunt*") for name, traj in trajectories.items(): if args.subcommand == "kitti": ref_traj_tmp = ref_traj else: logger.debug(SEP) ref_traj_tmp, trajectories[name] = sync.associate_trajectories( ref_traj, traj, max_diff=args.t_max_diff, first_name="reference", snd_name=name, ) if args.align or args.correct_scale: logger.debug(SEP) logger.debug("Aligning {} to reference.".format(name)) trajectories[name] = trajectory.align_trajectory( trajectories[name], ref_traj_tmp, correct_scale=args.correct_scale, correct_only_scale=args.correct_scale and not args.align, n=args.n_to_align, ) if args.align_origin: logger.debug(SEP) logger.debug("Aligning {}'s origin to reference.".format(name)) trajectories[name] = trajectory.align_trajectory_origin( trajectories[name], ref_traj_tmp ) print_compact_name = not args.subcommand == "bag" for name, traj in trajectories.items(): print_traj_info(name, traj, args.verbose, args.full_check, print_compact_name) if args.ref: print_traj_info( args.ref, ref_traj, args.verbose, args.full_check, print_compact_name ) if args.plot or args.save_plot or args.serialize_plot: import numpy as np from evo.tools import plot import matplotlib.pyplot as plt import matplotlib.cm as cm plot_collection = plot.PlotCollection("evo_traj - trajectory plot") fig_xyz, axarr_xyz = plt.subplots( 3, sharex="col", figsize=tuple(SETTINGS.plot_figsize) ) fig_rpy, axarr_rpy = plt.subplots( 3, sharex="col", figsize=tuple(SETTINGS.plot_figsize) ) fig_traj = plt.figure(figsize=tuple(SETTINGS.plot_figsize)) plot_mode = plot.PlotMode[args.plot_mode] ax_traj = plot.prepare_axis(fig_traj, plot_mode) if args.ref: short_traj_name = os.path.splitext(os.path.basename(args.ref))[0] if SETTINGS.plot_usetex: short_traj_name = short_traj_name.replace("_", "\\_") plot.traj( ax_traj, plot_mode, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, 
alpha=SETTINGS.plot_reference_alpha, ) plot.draw_coordinate_axes( ax_traj, ref_traj, plot_mode, SETTINGS.plot_axis_marker_scale ) plot.traj_xyz( axarr_xyz, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, alpha=SETTINGS.plot_reference_alpha, ) plot.traj_rpy( axarr_rpy, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, alpha=SETTINGS.plot_reference_alpha, ) if args.ros_map_yaml: plot.ros_map(ax_traj, args.ros_map_yaml, plot_mode) cmap_colors = None if SETTINGS.plot_multi_cmap.lower() != "none": cmap = getattr(cm, SETTINGS.plot_multi_cmap) cmap_colors = iter(cmap(np.linspace(0, 1, len(trajectories)))) for name, traj in trajectories.items(): if cmap_colors is None: color = next(ax_traj._get_lines.prop_cycler)["color"] else: color = next(cmap_colors) if print_compact_name: short_traj_name = os.path.splitext(os.path.basename(name))[0] else: short_traj_name = name if SETTINGS.plot_usetex: short_traj_name = short_traj_name.replace("_", "\\_") plot.traj( ax_traj, plot_mode, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, ) plot.draw_coordinate_axes( ax_traj, traj, plot_mode, SETTINGS.plot_axis_marker_scale ) if args.ref and isinstance(ref_traj, trajectory.PoseTrajectory3D): start_time = ref_traj.timestamps[0] else: start_time = None plot.traj_xyz( axarr_xyz, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, start_timestamp=start_time, ) plot.traj_rpy( axarr_rpy, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, start_timestamp=start_time, ) if not SETTINGS.plot_usetex: fig_rpy.text( 0.0, 0.005, "euler_angle_sequence: {}".format(SETTINGS.euler_angle_sequence), fontsize=6, ) plot_collection.add_figure("trajectories", fig_traj) plot_collection.add_figure("xyz_view", fig_xyz) 
plot_collection.add_figure("rpy_view", fig_rpy) if args.plot: plot_collection.show() if args.save_plot: logger.info(SEP) plot_collection.export( args.save_plot, confirm_overwrite=not args.no_warnings ) if args.serialize_plot: logger.info(SEP) plot_collection.serialize( args.serialize_plot, confirm_overwrite=not args.no_warnings ) if args.save_as_tum: logger.info(SEP) for name, traj in trajectories.items(): dest = os.path.splitext(os.path.basename(name))[0] + ".tum" file_interface.write_tum_trajectory_file( dest, traj, confirm_overwrite=not args.no_warnings ) if args.ref: dest = os.path.splitext(os.path.basename(args.ref))[0] + ".tum" file_interface.write_tum_trajectory_file( dest, ref_traj, confirm_overwrite=not args.no_warnings ) if args.save_as_kitti: logger.info(SEP) for name, traj in trajectories.items(): dest = os.path.splitext(os.path.basename(name))[0] + ".kitti" file_interface.write_kitti_poses_file( dest, traj, confirm_overwrite=not args.no_warnings ) if args.ref: dest = os.path.splitext(os.path.basename(args.ref))[0] + ".kitti" file_interface.write_kitti_poses_file( dest, ref_traj, confirm_overwrite=not args.no_warnings ) if args.save_as_bag: import datetime import rosbag dest_bag_path = ( str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".bag" ) logger.info(SEP) logger.info("Saving trajectories to " + dest_bag_path + "...") bag = rosbag.Bag(dest_bag_path, "w") try: for name, traj in trajectories.items(): dest_topic = os.path.splitext(os.path.basename(name))[0] frame_id = traj.meta["frame_id"] if "frame_id" in traj.meta else "" file_interface.write_bag_trajectory(bag, traj, dest_topic, frame_id) if args.ref: dest_topic = os.path.splitext(os.path.basename(args.ref))[0] frame_id = ( ref_traj.meta["frame_id"] if "frame_id" in ref_traj.meta else "" ) file_interface.write_bag_trajectory(bag, ref_traj, dest_topic, frame_id) finally: bag.close()
def run(args): import os import sys import numpy as np import evo.core.lie_algebra as lie from evo.core import trajectory from evo.core.trajectory import PoseTrajectory3D from evo.tools import file_interface, log from evo.tools.settings import SETTINGS log.configure_logging( verbose=args.verbose, silent=args.silent, debug=args.debug, local_logfile=args.logfile, ) if args.debug: import pprint logger.debug( "main_parser config:\n" + pprint.pformat({arg: getattr(args, arg) for arg in vars(args)}) + "\n" ) logger.debug(SEP) trajectories, ref_traj = load_trajectories(args) if args.merge: if args.subcommand == "kitti": die("Can't merge KITTI files.") if len(trajectories) == 0: die("No trajectories to merge (excluding --ref).") trajectories = {"merged_trajectory": trajectory.merge(trajectories.values())} if args.transform_left or args.transform_right: tf_type = "left" if args.transform_left else "right" tf_path = args.transform_left if args.transform_left else args.transform_right transform = file_interface.load_transform_json(tf_path) logger.debug(SEP) if not lie.is_se3(transform): logger.warning("Not a valid SE(3) transformation!") if args.invert_transform: transform = lie.se3_inverse(transform) logger.debug( "Applying a {}-multiplicative transformation:\n{}".format( tf_type, transform ) ) for traj in trajectories.values(): traj.transform( transform, right_mul=args.transform_right, propagate=args.propagate_transform, ) if args.t_offset: logger.debug(SEP) for name, traj in trajectories.items(): if type(traj) is trajectory.PosePath3D: die("{} doesn't have timestamps - can't add time offset.".format(name)) logger.info("Adding time offset to {}: {} (s)".format(name, args.t_offset)) traj.timestamps += args.t_offset if args.n_to_align != -1 and not (args.align or args.correct_scale): die("--n_to_align is useless without --align or/and --correct_scale") if args.sync or args.align or args.correct_scale or args.align_origin: from evo.core import sync if not args.ref: 
logger.debug(SEP) die("Can't align or sync without a reference! (--ref) *grunt*") for name, traj in trajectories.items(): if args.subcommand == "kitti": ref_traj_tmp = ref_traj else: logger.debug(SEP) ref_traj_tmp, trajectories[name] = sync.associate_trajectories( ref_traj, traj, max_diff=args.t_max_diff, first_name="reference", snd_name=name, ) if args.align or args.correct_scale: logger.debug(SEP) logger.debug("Aligning {} to reference.".format(name)) trajectories[name] = trajectory.align_trajectory( trajectories[name], ref_traj_tmp, correct_scale=args.correct_scale, correct_only_scale=args.correct_scale and not args.align, n=args.n_to_align, ) if args.align_origin: logger.debug(SEP) logger.debug("Aligning {}'s origin to reference.".format(name)) trajectories[name] = trajectory.align_trajectory_origin( trajectories[name], ref_traj_tmp ) print_compact_name = not args.subcommand == "bag" for name, traj in trajectories.items(): print_traj_info(name, traj, args.verbose, args.full_check, print_compact_name) if args.ref: print_traj_info( args.ref, ref_traj, args.verbose, args.full_check, print_compact_name ) if args.plot or args.save_plot or args.serialize_plot: import numpy as np from evo.tools import plot import matplotlib.pyplot as plt import matplotlib.cm as cm plot_collection = plot.PlotCollection("evo_traj - trajectory plot") fig_xyz, axarr_xyz = plt.subplots( 3, sharex="col", figsize=tuple(SETTINGS.plot_figsize) ) fig_rpy, axarr_rpy = plt.subplots( 3, sharex="col", figsize=tuple(SETTINGS.plot_figsize) ) fig_traj = plt.figure(figsize=tuple(SETTINGS.plot_figsize)) plot_mode = plot.PlotMode[args.plot_mode] ax_traj = plot.prepare_axis(fig_traj, plot_mode) if args.ref: short_traj_name = os.path.splitext(os.path.basename(args.ref))[0] if SETTINGS.plot_usetex: short_traj_name = short_traj_name.replace("_", "\\_") plot.traj( ax_traj, plot_mode, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, 
alpha=SETTINGS.plot_reference_alpha, ) plot.draw_coordinate_axes( ax_traj, ref_traj, plot_mode, SETTINGS.plot_axis_marker_scale ) plot.traj_xyz( axarr_xyz, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, alpha=SETTINGS.plot_reference_alpha, ) plot.traj_rpy( axarr_rpy, ref_traj, style=SETTINGS.plot_reference_linestyle, color=SETTINGS.plot_reference_color, label=short_traj_name, alpha=SETTINGS.plot_reference_alpha, ) if args.ros_map_yaml: plot.ros_map(ax_traj, args.ros_map_yaml, plot_mode) cmap_colors = None if SETTINGS.plot_multi_cmap.lower() != "none": cmap = getattr(cm, SETTINGS.plot_multi_cmap) cmap_colors = iter(cmap(np.linspace(0, 1, len(trajectories)))) for name, traj in trajectories.items(): if cmap_colors is None: color = next(ax_traj._get_lines.prop_cycler)["color"] else: color = next(cmap_colors) if print_compact_name: short_traj_name = os.path.splitext(os.path.basename(name))[0] else: short_traj_name = name if SETTINGS.plot_usetex: short_traj_name = short_traj_name.replace("_", "\\_") plot.traj( ax_traj, plot_mode, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, ) plot.draw_coordinate_axes( ax_traj, traj, plot_mode, SETTINGS.plot_axis_marker_scale ) if args.ref and isinstance(ref_traj, trajectory.PoseTrajectory3D): start_time = ref_traj.timestamps[0] else: start_time = None plot.traj_xyz( axarr_xyz, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, start_timestamp=start_time, ) plot.traj_rpy( axarr_rpy, traj, SETTINGS.plot_trajectory_linestyle, color, short_traj_name, alpha=SETTINGS.plot_trajectory_alpha, start_timestamp=start_time, ) fig_rpy.text( 0.0, 0.005, "euler_angle_sequence: {}".format(SETTINGS.euler_angle_sequence), fontsize=6, ) plot_collection.add_figure("trajectories", fig_traj) plot_collection.add_figure("xyz_view", fig_xyz) plot_collection.add_figure("rpy_view", fig_rpy) 
if args.plot: plot_collection.show() if args.save_plot: logger.info(SEP) plot_collection.export( args.save_plot, confirm_overwrite=not args.no_warnings ) if args.serialize_plot: logger.info(SEP) plot_collection.serialize( args.serialize_plot, confirm_overwrite=not args.no_warnings ) if args.save_as_tum: logger.info(SEP) for name, traj in trajectories.items(): dest = os.path.splitext(os.path.basename(name))[0] + ".tum" file_interface.write_tum_trajectory_file( dest, traj, confirm_overwrite=not args.no_warnings ) if args.ref: dest = os.path.splitext(os.path.basename(args.ref))[0] + ".tum" file_interface.write_tum_trajectory_file( dest, ref_traj, confirm_overwrite=not args.no_warnings ) if args.save_as_kitti: logger.info(SEP) for name, traj in trajectories.items(): dest = os.path.splitext(os.path.basename(name))[0] + ".kitti" file_interface.write_kitti_poses_file( dest, traj, confirm_overwrite=not args.no_warnings ) if args.ref: dest = os.path.splitext(os.path.basename(args.ref))[0] + ".kitti" file_interface.write_kitti_poses_file( dest, ref_traj, confirm_overwrite=not args.no_warnings ) if args.save_as_bag: import datetime import rosbag dest_bag_path = ( str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".bag" ) logger.info(SEP) logger.info("Saving trajectories to " + dest_bag_path + "...") bag = rosbag.Bag(dest_bag_path, "w") try: for name, traj in trajectories.items(): dest_topic = os.path.splitext(os.path.basename(name))[0] frame_id = traj.meta["frame_id"] if "frame_id" in traj.meta else "" file_interface.write_bag_trajectory(bag, traj, dest_topic, frame_id) if args.ref: dest_topic = os.path.splitext(os.path.basename(args.ref))[0] frame_id = ( ref_traj.meta["frame_id"] if "frame_id" in ref_traj.meta else "" ) file_interface.write_bag_trajectory(bag, ref_traj, dest_topic, frame_id) finally: bag.close()
https://github.com/MichaelGrupp/evo/issues/237
[DEBUG][2020-01-30 17:55:20,837][main_traj.run():284] -------------------------------------------------------------------------------- [DEBUG][2020-01-30 17:55:21,225][file_interface.read_bag_trajectory():271] Loaded 1762 nav_msgs/Odometry messages of topic: /odom [DEBUG][2020-01-30 17:55:21,832][file_interface.read_bag_trajectory():271] Loaded 18871 geometry_msgs/PoseStamped messages of topic: /ground_truth [DEBUG][2020-01-30 17:55:21,853][main_traj.run():335] -------------------------------------------------------------------------------- [DEBUG][2020-01-30 17:55:21,930][sync.associate_trajectories():106] Found 314 of max. 18871 possible matching timestamps between... reference and: /odom ..with max. time diff.: 0.01 (s) and time offset: 0.0 (s). [DEBUG][2020-01-30 17:55:21,930][main_traj.run():340] -------------------------------------------------------------------------------- [DEBUG][2020-01-30 17:55:21,930][main_traj.run():341] Aligning /odom to reference. [DEBUG][2020-01-30 17:55:21,930][trajectory.align_trajectory():394] Aligning using Umeyama's method... [DEBUG][2020-01-30 17:55:21,933][trajectory.align_trajectory():404] Rotation of alignment: [[ 0.04979001 -0.99868181 -0.01247348] [ 0.99874792 0.04972487 0.00547931] [-0.00485184 -0.01273067 0.99990719]] Translation of alignment: [-0.31561156 2.12629718 -0.04271305] [DEBUG][2020-01-30 17:55:21,933][trajectory.align_trajectory():405] Scale correction: 1.0 [INFO][2020-01-30 17:55:21,952][main_traj.print_traj_info():231] -------------------------------------------------------------------------------- [INFO][2020-01-30 17:55:21,952][main_traj.print_traj_info():235] name: /odom [INFO][2020-01-30 17:55:21,953][main_traj.print_traj_info():242] infos: duration (s) 176.05091094970703 nr. 
of poses 314 path length (m) 23.9148423636 pos_end (m) [-0.25087702 2.17551795 1.6117157 ] pos_start (m) [-0.28801754 2.18658985 1.58796842] t_end (s) 1576166447.401164 t_start (s) 1576166271.350253 [INFO][2020-01-30 17:55:21,985][main_traj.print_traj_info():248] checks: SE(3) conform yes array shapes ok nr. of stamps ok quaternions ok timestamps ok [INFO][2020-01-30 17:55:21,991][main_traj.print_traj_info():260] stats: v_avg (km/h) 0.500316 v_avg (m/s) 0.138977 v_max (km/h) 1.310963 v_max (m/s) 0.364156 v_min (km/h) 0.001935 v_min (m/s) 0.000538 [INFO][2020-01-30 17:55:21,992][main_traj.print_traj_info():231] -------------------------------------------------------------------------------- [INFO][2020-01-30 17:55:21,992][main_traj.print_traj_info():235] name: /ground_truth [INFO][2020-01-30 17:55:21,994][main_traj.print_traj_info():242] infos: duration (s) 176.06113266944885 nr. of poses 18871 path length (m) 26.072393337 pos_end (m) [-0.26233882 2.16755581 1.5922972 ] pos_start (m) [-0.29931393 2.17557931 1.59043825] t_end (s) 1576166447.4128075 t_start (s) 1576166271.3516748 [INFO][2020-01-30 17:55:24,239][main_traj.print_traj_info():248] checks: SE(3) conform yes array shapes ok nr. 
of stamps ok quaternions ok timestamps ok [INFO][2020-01-30 17:55:24,355][main_traj.print_traj_info():260] stats: v_avg (km/h) 5.076679 v_avg (m/s) 1.410189 v_max (km/h) 1248.546319 v_max (m/s) 346.818422 v_min (km/h) 0.000102 v_min (m/s) 0.000028 [ERROR][2020-01-30 17:55:26,409][entry_points.launch():99] Unhandled error in evo.main_traj Traceback (most recent call last): File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/evo/entry_points.py", line 90, in launch main_module.run(args) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/evo/main_traj.py", line 436, in run plot_collection.show() File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/evo/tools/plot.py", line 193, in show self.tabbed_tk_window() File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/evo/tools/plot.py", line 168, in tabbed_tk_window canvas.draw() File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/backends/backend_tkagg.py", line 12, in draw super(FigureCanvasTkAgg, self).draw() File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 437, in draw self.figure.draw(self.renderer) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/artist.py", line 55, in draw_wrapper return draw(artist, renderer, *args, **kwargs) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/figure.py", line 1493, in draw renderer, self, artists, self.suppressComposite) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/image.py", line 141, in _draw_list_compositing_images a.draw(renderer) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/artist.py", line 55, in draw_wrapper return draw(artist, renderer, *args, **kwargs) File 
"/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/text.py", line 706, in draw bbox, info, descent = textobj._get_layout(renderer) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/text.py", line 309, in _get_layout ismath=ismath) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 236, in get_text_width_height_descent s, fontsize, renderer=self) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/texmanager.py", line 501, in get_text_width_height_descent dvifile = self.make_dvi(tex, fontsize) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/texmanager.py", line 365, in make_dvi texfile], tex) File "/home/riccardo/.virtualenvs/evaluation/local/lib/python2.7/site-packages/matplotlib/texmanager.py", line 344, in _run_checked_subprocess exc=exc.output.decode('utf-8'))) RuntimeError: latex was not able to process the following string: 'euler_angle_sequence: sxyz' Here is the full report generated by latex: This is pdfTeX, Version 3.14159265-2.6-1.40.16 (TeX Live 2015/Debian) (preloaded format=latex) restricted \write18 enabled. entering extended mode (/home/riccardo/.cache/matplotlib/tex.cache/28d4ede8ae4c43d4c3576b57c32bd235.te x LaTeX2e <2016/02/01> Babel <3.9q> and hyphenation patterns for 6 language(s) loaded. 
(/usr/share/texlive/texmf-dist/tex/latex/base/article.cls Document Class: article 2014/09/29 v1.4h Standard LaTeX document class (/usr/share/texlive/texmf-dist/tex/latex/base/size10.clo)) (/usr/share/texlive/texmf-dist/tex/latex/type1cm/type1cm.sty) (/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty (/usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.def)) (/usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty (/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty) (/usr/share/texlive/texmf-dist/tex/generic/oberdiek/ifpdf.sty) (/usr/share/texlive/texmf-dist/tex/generic/oberdiek/ifvtex.sty) (/usr/share/texlive/texmf-dist/tex/generic/ifxetex/ifxetex.sty) Package geometry Warning: Over-specification in `h'-direction. `width' (5058.9pt) is ignored. Package geometry Warning: Over-specification in `v'-direction. `height' (5058.9pt) is ignored. ) (./28d4ede8ae4c43d4c3576b57c32bd235.aux) (/usr/share/texlive/texmf-dist/tex/latex/base/ts1cmr.fd) *geometry* driver: auto-detecting *geometry* detected driver: dvips ! Missing $ inserted. <inserted text> $ l.13 ...tsize{6.000000}{7.500000}{\rmfamily euler_ angle_sequence: sxyz} No pages of output. Transcript written on 28d4ede8ae4c43d4c3576b57c32bd235.log. [ERROR][2020-01-30 17:55:26,411][entry_points.launch():106] evo module evo.main_traj crashed - see /home/riccardo/.evo/evo.log
RuntimeError
def get_delta_unit(args): from evo.core.metrics import Unit delta_unit = Unit.none if args.delta_unit == "f": delta_unit = Unit.frames elif args.delta_unit == "d": delta_unit = Unit.degrees elif args.delta_unit == "r": delta_unit = Unit.radians elif args.delta_unit == "m": delta_unit = Unit.meters return delta_unit
def get_delta_unit(args): from evo.core.metrics import Unit delta_unit = None if args.delta_unit == "f": delta_unit = Unit.frames elif args.delta_unit == "d": delta_unit = Unit.degrees elif args.delta_unit == "r": delta_unit = Unit.radians elif args.delta_unit == "m": delta_unit = Unit.meters return delta_unit
https://github.com/MichaelGrupp/evo/issues/145
[ERROR] Unhandled error in evo.main_rpe Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/evo/entry_points.py", line 90, in launch main_module.run(args) File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 244, in run est_name=est_name, File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 176, in rpe title = str(rpe_metric) File "/usr/local/lib/python2.7/dist-packages/evo/core/metrics.py", line 207, in __str__ self.pose_relation.value, self.unit.value, self.delta, AttributeError: 'NoneType' object has no attribute 'value' [ERROR] evo module evo.main_rpe crashed - no logfile written (disabled)
AttributeError
def get_result(self, ref_name="reference", est_name="estimate"): """ Wrap the result in Result object. :param ref_name: optional, label of the reference data :param est_name: optional, label of the estimated data :return: """ result = Result() metric_name = self.__class__.__name__ result.add_info( { "title": str(self), "ref_name": ref_name, "est_name": est_name, "label": "{} {}".format(metric_name, "({})".format(self.unit.value)), } ) result.add_stats(self.get_all_statistics()) if hasattr(self, "error"): result.add_np_array("error_array", self.error) return result
def get_result(self, ref_name="reference", est_name="estimate"): """ Wrap the result in Result object. :param ref_name: optional, label of the reference data :param est_name: optional, label of the estimated data :return: """ result = Result() metric_name = self.__class__.__name__ unit_name = self.unit.value if self.unit is not None else "" result.add_info( { "title": str(self), "ref_name": ref_name, "est_name": est_name, "label": "{} {}".format( metric_name, "({})".format(unit_name) if unit_name else "" ), } ) result.add_stats(self.get_all_statistics()) if hasattr(self, "error"): result.add_np_array("error_array", self.error) return result
https://github.com/MichaelGrupp/evo/issues/145
[ERROR] Unhandled error in evo.main_rpe Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/evo/entry_points.py", line 90, in launch main_module.run(args) File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 244, in run est_name=est_name, File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 176, in rpe title = str(rpe_metric) File "/usr/local/lib/python2.7/dist-packages/evo/core/metrics.py", line 207, in __str__ self.pose_relation.value, self.unit.value, self.delta, AttributeError: 'NoneType' object has no attribute 'value' [ERROR] evo module evo.main_rpe crashed - no logfile written (disabled)
AttributeError
def __init__( self, pose_relation=PoseRelation.translation_part, delta=1.0, delta_unit=Unit.frames, rel_delta_tol=0.1, all_pairs=False, ): if delta < 0: raise MetricsException("delta must be a positive number") if ( delta_unit == Unit.frames and not isinstance(delta, int) and not delta.is_integer() ): raise MetricsException( "delta must be integer for delta unit {}".format(delta_unit) ) self.delta = int(delta) if delta_unit == Unit.frames else delta self.delta_unit = delta_unit self.rel_delta_tol = rel_delta_tol self.pose_relation = pose_relation self.all_pairs = all_pairs self.E = [] self.error = [] self.delta_ids = [] if pose_relation == PoseRelation.translation_part: self.unit = Unit.meters elif pose_relation == PoseRelation.rotation_angle_deg: self.unit = Unit.degrees elif pose_relation == PoseRelation.rotation_angle_rad: self.unit = Unit.radians else: # dimension-less self.unit = Unit.none
def __init__( self, pose_relation=PoseRelation.translation_part, delta=1.0, delta_unit=Unit.frames, rel_delta_tol=0.1, all_pairs=False, ): if delta < 0: raise MetricsException("delta must be a positive number") if ( delta_unit == Unit.frames and not isinstance(delta, int) and not delta.is_integer() ): raise MetricsException( "delta must be integer for delta unit {}".format(delta_unit) ) self.delta = int(delta) if delta_unit == Unit.frames else delta self.delta_unit = delta_unit self.rel_delta_tol = rel_delta_tol self.pose_relation = pose_relation self.all_pairs = all_pairs self.E = [] self.error = [] self.delta_ids = [] if pose_relation == PoseRelation.translation_part: self.unit = Unit.meters elif pose_relation == PoseRelation.rotation_angle_deg: self.unit = Unit.degrees elif pose_relation == PoseRelation.rotation_angle_rad: self.unit = Unit.radians else: # dimension-less self.unit = None
https://github.com/MichaelGrupp/evo/issues/145
[ERROR] Unhandled error in evo.main_rpe Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/evo/entry_points.py", line 90, in launch main_module.run(args) File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 244, in run est_name=est_name, File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 176, in rpe title = str(rpe_metric) File "/usr/local/lib/python2.7/dist-packages/evo/core/metrics.py", line 207, in __str__ self.pose_relation.value, self.unit.value, self.delta, AttributeError: 'NoneType' object has no attribute 'value' [ERROR] evo module evo.main_rpe crashed - no logfile written (disabled)
AttributeError
def __init__(self, pose_relation=PoseRelation.translation_part): self.pose_relation = pose_relation self.E = [] self.error = [] if pose_relation == PoseRelation.translation_part: self.unit = Unit.meters elif pose_relation == PoseRelation.rotation_angle_deg: self.unit = Unit.degrees elif pose_relation == PoseRelation.rotation_angle_rad: self.unit = Unit.radians else: self.unit = Unit.none # dimension-less
def __init__(self, pose_relation=PoseRelation.translation_part): self.pose_relation = pose_relation self.E = [] self.error = [] if pose_relation == PoseRelation.translation_part: self.unit = Unit.meters elif pose_relation == PoseRelation.rotation_angle_deg: self.unit = Unit.degrees elif pose_relation == PoseRelation.rotation_angle_rad: self.unit = Unit.radians else: self.unit = None # dimension-less
https://github.com/MichaelGrupp/evo/issues/145
[ERROR] Unhandled error in evo.main_rpe Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/evo/entry_points.py", line 90, in launch main_module.run(args) File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 244, in run est_name=est_name, File "/usr/local/lib/python2.7/dist-packages/evo/main_rpe.py", line 176, in rpe title = str(rpe_metric) File "/usr/local/lib/python2.7/dist-packages/evo/core/metrics.py", line 207, in __str__ self.pose_relation.value, self.unit.value, self.delta, AttributeError: 'NoneType' object has no attribute 'value' [ERROR] evo module evo.main_rpe crashed - no logfile written (disabled)
AttributeError
def matching_time_indices(stamps_1, stamps_2, max_diff=0.01, offset_2=0.0): """ searches for the best matching timestamps of 2 lists and returns their list indices :param stamps_1: first vector of timestamps :param stamps_2: second vector of timestamps :param max_diff: max. allowed absolute time difference :param offset_2: optional offset of second vector :return: the indices of the matching stamps in stamps_1 """ matching_indices = [] stamps_2 = copy.deepcopy(stamps_2) stamps_2 += offset_2 for stamp in stamps_1: diffs = np.abs(stamps_2 - stamp) argmin = np.argmin(diffs) if diffs[argmin] <= max_diff: matching_indices.append(argmin) return matching_indices
def matching_time_indices(stamps_1, stamps_2, max_diff=0.01, offset_2=0.0): """ searches for the best matching timestamps of 2 lists and returns their list indices :param stamps_1: first vector of timestamps :param stamps_2: second vector of timestamps :param max_diff: max. allowed absolute time difference :param offset_2: optional offset of second vector :return: the indices of the matching stamps in stamps_1 """ matching_indices = [] stamps_2 += offset_2 for stamp in stamps_1: diffs = np.abs(stamps_2 - stamp) argmin = np.argmin(diffs) if diffs[argmin] <= max_diff: matching_indices.append(argmin) return matching_indices
https://github.com/MichaelGrupp/evo/issues/74
[ERROR] Unhandled error in evo.main_ape Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/evo/entry_points.py", line 87, in launch main_module.run(args) File "/usr/local/lib/python2.7/dist-packages/evo/main_ape.py", line 215, in run est_name=est_name, File "/usr/local/lib/python2.7/dist-packages/evo/main_ape.py", line 164, in ape ape_metric.process_data(data) File "/usr/local/lib/python2.7/dist-packages/evo/core/metrics.py", line 343, in process_data raise MetricsException("trajectories must have same number of poses") MetricsException: trajectories must have same number of poses [ERROR] evo module evo.main_ape crashed - no logfile written (disabled)
MetricsException
def parse_object_fields( self, obj: JsonSchemaObject, path: List[str] ) -> List[DataModelFieldBase]: properties: Dict[str, JsonSchemaObject] = ( obj.properties if obj.properties is not None else {} ) requires: Set[str] = {*obj.required} if obj.required is not None else {*()} fields: List[DataModelFieldBase] = [] for field_name, field in properties.items(): is_list: bool = False is_union: bool = False field_types: List[DataType] original_field_name: str = field_name constraints: Optional[Mapping[str, Any]] = None field_name, alias = self.model_resolver.get_valid_field_name_and_alias( field_name ) if field.ref: field_types = [ self.data_type( type=self.model_resolver.add_ref(field.ref).name, ref=True, version_compatible=True, ) ] elif field.is_array: array_field, array_field_classes = self.parse_array_fields( field_name, field, [*path, field_name] ) field_types = array_field.data_types is_list = True is_union = True elif field.anyOf: field_types = self.parse_any_of(field_name, field, [*path, field_name]) elif field.oneOf: field_types = self.parse_one_of(field_name, field, [*path, field_name]) elif field.allOf: field_types = self.parse_all_of(field_name, field, [*path, field_name]) elif field.is_object: if field.properties: field_types = [ self.data_type( type=self.parse_object( field_name, field, [*path, field_name], unique=True ).name, ref=True, version_compatible=True, ) ] elif isinstance(field.additionalProperties, JsonSchemaObject): field_class_name = self.model_resolver.add( [*path, field_name], field_name, class_name=True ).name # TODO: supports other type if field.additionalProperties.is_array: additional_properties_type = self.parse_array( field_class_name, field.additionalProperties, [*path, field_name], ).name else: additional_properties_type = self.parse_object( field_class_name, field.additionalProperties, [*path, field_name], additional_properties=None if field.additionalProperties.ref or field.additionalProperties.is_object else field, ).name 
field_types = [ self.data_type( type=f"Dict[str, {additional_properties_type}]", imports_=[IMPORT_DICT], unresolved_types=[additional_properties_type], ) ] else: field_types = [ self.data_type( type="Dict[str, Any]", imports_=[IMPORT_ANY, IMPORT_DICT], ) ] elif field.enum: enum = self.parse_enum(field_name, field, [*path, field_name], unique=True) field_types = [ self.data_type(type=enum.name, ref=True, version_compatible=True) ] else: field_types = self.get_data_type(field) if self.field_constraints: constraints = field.dict() required: bool = original_field_name in requires fields.append( self.data_model_field_type( name=field_name, example=field.example, examples=field.examples, description=field.description, default=field.default, title=field.title, data_types=field_types, required=required, is_list=is_list, is_union=is_union, alias=alias, constraints=constraints, ) ) return fields
def parse_object_fields( self, obj: JsonSchemaObject, path: List[str] ) -> List[DataModelFieldBase]: properties: Dict[str, JsonSchemaObject] = ( obj.properties if obj.properties is not None else {} ) requires: Set[str] = {*obj.required} if obj.required is not None else {*()} fields: List[DataModelFieldBase] = [] for field_name, field in properties.items(): is_list: bool = False is_union: bool = False field_types: List[DataType] original_field_name: str = field_name constraints: Optional[Mapping[str, Any]] = None field_name, alias = self.model_resolver.get_valid_field_name_and_alias( field_name ) if field.ref: field_types = [ self.data_type( type=self.model_resolver.add_ref(field.ref).name, ref=True, version_compatible=True, ) ] elif field.is_array: array_field, array_field_classes = self.parse_array_fields( field_name, field, [*path, field_name] ) field_types = array_field.data_types is_list = True is_union = True elif field.anyOf: field_types = self.parse_any_of(field_name, field, [*path, field_name]) elif field.oneOf: field_types = self.parse_one_of(field_name, field, [*path, field_name]) elif field.allOf: field_types = self.parse_all_of(field_name, field, [*path, field_name]) elif field.is_object: if field.properties: field_types = [ self.data_type( type=self.parse_object( field_name, field, [*path, field_name], unique=True ).name, ref=True, version_compatible=True, ) ] elif isinstance(field.additionalProperties, JsonSchemaObject): field_class_name = self.model_resolver.add( [*path, field_name], field_name, class_name=True ).name # TODO: supports other types if field.additionalProperties.is_array: parse_method = self.parse_array else: parse_method = self.parse_object additional_properties_type = parse_method( field_class_name, field.additionalProperties, [*path, field_name], ).name field_types = [ self.data_type( type=f"Dict[str, {additional_properties_type}]", imports_=[IMPORT_DICT], unresolved_types=[additional_properties_type], ) ] else: field_types = [ 
self.data_type( type="Dict[str, Any]", imports_=[IMPORT_ANY, IMPORT_DICT], ) ] elif field.enum: enum = self.parse_enum(field_name, field, [*path, field_name], unique=True) field_types = [ self.data_type(type=enum.name, ref=True, version_compatible=True) ] else: field_types = self.get_data_type(field) if self.field_constraints: constraints = field.dict() required: bool = original_field_name in requires fields.append( self.data_model_field_type( name=field_name, example=field.example, examples=field.examples, description=field.description, default=field.default, title=field.title, data_types=field_types, required=required, is_list=is_list, is_union=is_union, alias=alias, constraints=constraints, ) ) return fields
https://github.com/koxudaxi/datamodel-code-generator/issues/225
Traceback (most recent call last): File "test.py", line 20, in <module> print(Test(failing={"foo": "bar"}).to_json()) File "pydantic/main.py", line 346, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 1 validation error for Test failing -> foo value is not a valid dict (type=type_error.dict)
pydantic.error_wrappers.ValidationError
def parse_object( self, name: str, obj: JsonSchemaObject, path: List[str], singular_name: bool = False, unique: bool = False, additional_properties: Optional[JsonSchemaObject] = None, ) -> DataModel: class_name = self.model_resolver.add( path, name, class_name=True, singular_name=singular_name, unique=unique ).name fields = self.parse_object_fields(obj, path) self.set_title(class_name, obj) self.set_additional_properties(class_name, additional_properties or obj) data_model_type = self.data_model_type( class_name, fields=fields, custom_base_class=self.base_class, custom_template_dir=self.custom_template_dir, extra_template_data=self.extra_template_data, ) self.append_result(data_model_type) return data_model_type
def parse_object( self, name: str, obj: JsonSchemaObject, path: List[str], singular_name: bool = False, unique: bool = False, ) -> DataModel: class_name = self.model_resolver.add( path, name, class_name=True, singular_name=singular_name, unique=unique ).name fields = self.parse_object_fields(obj, path) self.set_title(class_name, obj) self.set_additional_properties(class_name, obj) data_model_type = self.data_model_type( class_name, fields=fields, custom_base_class=self.base_class, custom_template_dir=self.custom_template_dir, extra_template_data=self.extra_template_data, ) self.append_result(data_model_type) return data_model_type
https://github.com/koxudaxi/datamodel-code-generator/issues/225
Traceback (most recent call last): File "test.py", line 20, in <module> print(Test(failing={"foo": "bar"}).to_json()) File "pydantic/main.py", line 346, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 1 validation error for Test failing -> foo value is not a valid dict (type=type_error.dict)
pydantic.error_wrappers.ValidationError
def parse_object_fields( self, obj: JsonSchemaObject, path: List[str] ) -> List[DataModelFieldBase]: properties: Dict[str, JsonSchemaObject] = ( obj.properties if obj.properties is not None else {} ) requires: Set[str] = {*obj.required} if obj.required is not None else {*()} fields: List[DataModelFieldBase] = [] for field_name, field in properties.items(): is_list: bool = False is_union: bool = False field_types: List[DataType] original_field_name: str = field_name constraints: Optional[Mapping[str, Any]] = None field_name, alias = self.model_resolver.get_valid_field_name_and_alias( field_name ) if field.ref: field_types = [ self.data_type( type=self.model_resolver.add_ref(field.ref).name, ref=True, version_compatible=True, ) ] elif field.is_array: array_field, array_field_classes = self.parse_array_fields( field_name, field, [*path, field_name] ) field_types = array_field.data_types is_list = True is_union = True elif field.anyOf: field_types = self.parse_any_of(field_name, field, [*path, field_name]) elif field.oneOf: field_types = self.parse_one_of(field_name, field, [*path, field_name]) elif field.allOf: field_types = self.parse_all_of(field_name, field, [*path, field_name]) elif field.is_object: if field.properties: field_types = [ self.data_type( type=self.parse_object( field_name, field, [*path, field_name], unique=True ).name, ref=True, version_compatible=True, ) ] elif isinstance(field.additionalProperties, JsonSchemaObject): field_class_name = self.model_resolver.add( [*path, field_name], field_name, class_name=True ).name # TODO: supports other types if field.additionalProperties.is_array: parse_method = self.parse_array else: parse_method = self.parse_object additional_properties_type = parse_method( field_class_name, field.additionalProperties, [*path, field_name], ).name field_types = [ self.data_type( type=f"Dict[str, {additional_properties_type}]", imports_=[IMPORT_DICT], unresolved_types=[additional_properties_type], ) ] else: field_types = [ 
self.data_type( type="Dict[str, Any]", imports_=[IMPORT_ANY, IMPORT_DICT], ) ] elif field.enum: enum = self.parse_enum(field_name, field, [*path, field_name], unique=True) field_types = [ self.data_type(type=enum.name, ref=True, version_compatible=True) ] else: field_types = self.get_data_type(field) if self.field_constraints: constraints = field.dict() required: bool = original_field_name in requires fields.append( self.data_model_field_type( name=field_name, example=field.example, examples=field.examples, description=field.description, default=field.default, title=field.title, data_types=field_types, required=required, is_list=is_list, is_union=is_union, alias=alias, constraints=constraints, ) ) return fields
def parse_object_fields( self, obj: JsonSchemaObject, path: List[str] ) -> List[DataModelFieldBase]: properties: Dict[str, JsonSchemaObject] = ( obj.properties if obj.properties is not None else {} ) requires: Set[str] = {*obj.required} if obj.required is not None else {*()} fields: List[DataModelFieldBase] = [] for field_name, field in properties.items(): is_list: bool = False is_union: bool = False field_types: List[DataType] original_field_name: str = field_name constraints: Optional[Mapping[str, Any]] = None field_name, alias = self.model_resolver.get_valid_field_name_and_alias( field_name ) if field.ref: field_types = [ self.data_type( type=self.model_resolver.add_ref(field.ref).name, ref=True, version_compatible=True, ) ] elif field.is_array: array_field, array_field_classes = self.parse_array_fields( field_name, field, [*path, field_name] ) field_types = array_field.data_types is_list = True is_union = True elif field.anyOf: field_types = self.parse_any_of(field_name, field, [*path, field_name]) elif field.oneOf: field_types = self.parse_one_of(field_name, field, [*path, field_name]) elif field.allOf: field_types = self.parse_all_of(field_name, field, [*path, field_name]) elif field.is_object: if field.properties: field_types = [ self.data_type( type=self.parse_object( field_name, field, [*path, field_name], unique=True ).name, ref=True, version_compatible=True, ) ] elif isinstance(field.additionalProperties, JsonSchemaObject): additional_properties_type = self.parse_object( field_name, field.additionalProperties, [*path, field_name], unique=True, ).name field_types = [ self.data_type( type=f"Dict[str, {additional_properties_type}]", imports_=[IMPORT_DICT], unresolved_types=[additional_properties_type], ) ] else: field_types = [ self.data_type( type="Dict[str, Any]", imports_=[IMPORT_ANY, IMPORT_DICT], ) ] elif field.enum: enum = self.parse_enum(field_name, field, [*path, field_name], unique=True) field_types = [ self.data_type(type=enum.name, ref=True, 
version_compatible=True) ] else: field_types = self.get_data_type(field) if self.field_constraints: constraints = field.dict() required: bool = original_field_name in requires fields.append( self.data_model_field_type( name=field_name, example=field.example, examples=field.examples, description=field.description, default=field.default, title=field.title, data_types=field_types, required=required, is_list=is_list, is_union=is_union, alias=alias, constraints=constraints, ) ) return fields
https://github.com/koxudaxi/datamodel-code-generator/issues/216
Traceback (most recent call last): File "<input>", line 1, in <module> File "pydantic\main.py", line 346, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 2 validation errors for FileSetUpload tags -> tag1 value is not a valid dict (type=type_error.dict) tags -> tag2 value is not a valid dict (type=type_error.dict)
pydantic.error_wrappers.ValidationError