code stringlengths 281 23.7M |
|---|
class VirtualExp():
    """AST-like node wrapping a named, indexed sub-expression.

    ``exp`` is either a single child node or a list of child nodes; the
    remaining attributes (``elm``, ``blk``, ``pc``) are opaque context
    carried along from the caller.
    """

    def __init__(self, *args, **kwargs):
        self.name = kwargs['name']
        self.index = kwargs['index']
        self.exp = kwargs['exp']    # child node or list of child nodes
        self.elm = kwargs['elm']
        self.blk = kwargs['blk']
        self.pc = kwargs['pc']

    def __repr__(self):
        # BUG FIX: the original used ', '.format(...), which ignores its
        # arguments and always yields ', ' — list children were never
        # rendered.  ', '.join(...) is what was intended.
        if isinstance(self.exp, list):
            return '({}{} {})'.format(self.name, self.index, ', '.join(map(repr, self.exp)))
        return '({}{} {})'.format(self.name, self.index, repr(self.exp))

    def __str__(self):
        # Same join-vs-format fix as __repr__.
        if isinstance(self.exp, list):
            return '({}{} {})'.format(self.name, self.index, ', '.join(map(str, self.exp)))
        return '({}{} {})'.format(self.name, self.index, str(self.exp))

    def str_noindex(self):
        """Render only the child expression(s), without the name/index prefix."""
        if isinstance(self.exp, list):
            return ', '.join(map((lambda e: e.str_noindex()), self.exp))
        return self.exp.str_noindex()
def get_solar_capacity_au_nt(target_datetime: datetime) -> (float | None):
    """Return installed solar capacity for AU-NT at target_datetime (rounded),
    or None when no capacity data is available."""
    session = Session()
    capacity_df = get_opennem_capacity_data(session)
    capacity_df = filter_capacity_data_by_datetime(capacity_df, target_datetime)
    # OpenNEM labels the Northern Territory zone 'NT1'; rename to our key.
    capacity_df = capacity_df.loc[(capacity_df['zone_key'] == 'NT1')]
    capacity_df['zone_key'] = 'AU-NT'
    capacity_df['mode'] = capacity_df['mode'].map(FUEL_MAPPING)
    capacity_df = capacity_df.groupby(['zone_key', 'mode'])[['value']].sum().reset_index()
    solar_capacity = capacity_df.get('value')
    # BUG FIX: DataFrame.get returns a (possibly empty) Series whenever the
    # column exists, so the original `.values[0]` raised IndexError when the
    # datetime/zone filter left no rows.  Require actual data instead.
    if (solar_capacity is not None) and (len(solar_capacity) > 0):
        return round(solar_capacity.values[0], 0)
    logger.error(f'No capacity data for AU-NT in {target_datetime.date()}')
    return None
def gen_methods_text_str(model_obj=None):
    """Generate methods-section text describing parameterization settings.

    When `model_obj` is given, its settings fill the template; otherwise
    (or when individual data is missing) 'XX' placeholders are left for
    the user to fill in.
    """
    template = "The periodic & aperiodic spectral parameterization algorithm (version {}) was used to parameterize neural power spectra. Settings for the algorithm were set as: peak width limits : {}; max number of peaks : {}; minimum peak height : {}; peak threshold : {}; and aperiodic mode : '{}'. Power spectra were parameterized across the frequency range {} to {} Hz."

    def setting(attr):
        # Pull one setting off the model, or the placeholder when absent.
        return getattr(model_obj, attr) if model_obj else 'XX'

    if model_obj and model_obj.has_data:
        freq_range = model_obj.freq_range
    else:
        freq_range = ('XX', 'XX')
    return template.format(
        MODULE_VERSION,
        setting('peak_width_limits'),
        setting('max_n_peaks'),
        setting('min_peak_height'),
        setting('peak_threshold'),
        setting('aperiodic_mode'),
        *freq_range,
    )
class Data(models.Model):
    """One measurement submitted by a Device: up to ten free-form string
    values plus request metadata (API key, source IP, timestamp)."""
    # Owning device; deleting the device cascades to its data rows.
    device = models.ForeignKey(Device, related_name='device_data', on_delete=models.CASCADE)
    # Ten generic value slots ("Deger" — Turkish for "value"); stored as
    # short strings so any sensor payload fits.
    field_1 = models.CharField(_('Deger 1'), max_length=10, null=True, blank=False)
    field_2 = models.CharField(_('Deger 2'), max_length=10, null=True, blank=False)
    field_3 = models.CharField(_('Deger 3'), max_length=10, null=True, blank=False)
    field_4 = models.CharField(_('Deger 4'), max_length=10, null=True, blank=False)
    field_5 = models.CharField(_('Deger 5'), max_length=10, null=True, blank=False)
    field_6 = models.CharField(_('Deger 6'), max_length=10, null=True, blank=False)
    field_7 = models.CharField(_('Deger 7'), max_length=10, null=True, blank=False)
    field_8 = models.CharField(_('Deger 8'), max_length=10, null=True, blank=False)
    field_9 = models.CharField(_('Deger 9'), max_length=10, null=True, blank=False)
    field_10 = models.CharField(_('Deger 10'), max_length=10, null=True, blank=False)
    # Metadata about the submitting request.
    api_key = models.CharField(_('Api key'), max_length=200, null=True, blank=True)
    remote_address = models.CharField(_('Ip adres'), max_length=255)
    # auto_now: refreshed on every save, not only on creation.
    pub_date = models.DateTimeField(_('Yayin tarihi'), auto_now=True)
    class Meta():
        # Newest rows first.
        ordering = ['-pub_date']
    def __str__(self):
        # Represent a row by the device it belongs to.
        return self.device.name
def test_span_labelling(elasticapm_client):
    """Labels passed to capture_span and added via span.label() must be
    merged, with dots in keys sanitized to underscores."""
    elasticapm_client.begin_transaction('test')
    with elasticapm.capture_span('test', labels={'foo': 'bar', 'ba.z': 'baz.zinga'}) as span:
        span.label(lorem='ipsum')
    elasticapm_client.end_transaction('test', 'OK')
    captured = elasticapm_client.events[SPAN][0]
    expected = {'foo': 'bar', 'ba_z': 'baz.zinga', 'lorem': 'ipsum'}
    assert captured['context']['tags'] == expected
def offset_to_line(source_code, bytecode_offset, source_mapping):
    """Map a bytecode offset to a source line number.

    `source_mapping` is a pair of ';'-separated solc source maps
    (runtime map first, creation map second).  Returns 0 whenever the
    offset cannot be resolved to a valid source position.
    """
    srcmap_runtime_mappings = source_mapping[0].split(';')
    srcmap_mappings = source_mapping[1].split(';')
    # Prefer the creation map; fall back to the runtime map when the
    # offset is only in range for the latter (mirrors the original logic).
    if 0 <= bytecode_offset < len(srcmap_mappings):
        mappings = srcmap_mappings
    elif 0 <= bytecode_offset < len(srcmap_runtime_mappings):
        mappings = srcmap_runtime_mappings
    else:
        logging.debug('Bytecode offset is wrong!')
        return 0
    # Walk backwards: an empty or negative source offset means "same as the
    # previous instruction" in solc's compressed srcmap encoding.
    while bytecode_offset >= 0:
        src_offset = mappings[bytecode_offset].split(':')[0]
        if (src_offset != '') and (int(src_offset) >= 0):
            return get_source_line_from_offset(source_code, int(src_offset))
        bytecode_offset -= 1
    # BUG FIX: the original fell off the end and implicitly returned None
    # when no valid source offset was found; return 0 like the other
    # failure path so callers always get an int.
    return 0
@pytest.fixture(scope='class', autouse=True)
def aea_testcase_teardown_check(request):
    """Class-scoped autouse fixture verifying that BaseAEATestCase
    subclasses actually ran their teardown_class.

    NOTE(review): the decorator line was mangled in the source to
    `(scope='class', autouse=True)`; `@pytest.fixture` is the callable
    matching that signature — confirm against the original file.
    """
    from aea.test_tools.test_cases import BaseAEATestCase
    yield
    # Only enforce for non-skipped BaseAEATestCase subclasses.
    if (request.cls and issubclass(request.cls, BaseAEATestCase) and (getattr(request.cls, '_skipped', False) is False)):
        assert getattr(request.cls, '_is_teardown_class_called', None), 'No BaseAEATestCase.teardown_class was called!'
def test_unconditional_node():
    """A two-block CFG joined by an unconditional edge must translate into
    two TransitionBlocks connected by a 'true', non-loop edge and leave
    the condition handler empty."""
    blocks = [BasicBlock(0, []), BasicBlock(1, [])]
    cfg = ControlFlowGraph()
    cfg.add_nodes_from(blocks)
    cfg.add_edges_from([UnconditionalEdge(blocks[0], blocks[1])])
    t_cfg = TransitionCFG.generate(cfg)
    always_true = LogicCondition.initialize_true(LogicCondition.generate_new_context())
    expected = [
        TransitionBlock(0, CodeNode([], always_true.copy())),
        TransitionBlock(1, CodeNode([], always_true.copy())),
    ]
    assert set(t_cfg.nodes) == {expected[0], expected[1]}
    edge = t_cfg.get_edge(expected[0], expected[1])
    assert edge.tag.is_true
    assert edge.property == EdgeProperty.non_loop
    assert len(t_cfg.condition_handler) == 0
class CmdCdestroy(CmdChannel):
    """Destroy a channel the caller controls.

    Usage: cdestroy <channelname>
    """

    key = 'cdestroy'
    aliases = []
    help_category = 'Comms'
    locks = 'cmd: not pperm(channel_banned)'
    account_caller = True

    def func(self):
        """Locate the channel, check control access, then destroy it."""
        caller = self.caller
        # Guard: an argument is required.
        if not self.args:
            self.msg('Usage: cdestroy <channelname>')
            return
        # Guard: the channel must exist.
        channel = self.search_channel(self.args)
        if not channel:
            self.msg('Could not find channel %s.' % self.args)
            return
        # Guard: only controllers may destroy it.
        if not channel.access(caller, 'control'):
            self.msg('You are not allowed to do that.')
            return
        channel_key = channel.key
        message = f'{channel.key} is being destroyed. Make sure to change your aliases.'
        self.destroy_channel(channel, message)
        self.msg("Channel '%s' was destroyed." % channel_key)
        logger.log_sec('Channel Deleted: %s (Caller: %s, IP: %s).' % (channel_key, caller, self.session.address))
()
_context
_type_argument
_key_argument
_analytics
def delete(ctx: click.Context, resource_type: str, fides_key: str) -> None:
    """CLI handler: delete the server-side resource of `resource_type`
    identified by `fides_key`, then print a success message.

    NOTE(review): the lines above this function were mangled in extraction;
    they presumably carried click command/argument decorators — confirm
    against the original file.
    """
    config = ctx.obj['CONFIG']  # CLI config injected via the click context
    handle_cli_response(_api.delete(url=config.cli.server_url, resource_type=resource_type, resource_id=fides_key, headers=config.user.auth_header), verbose=False)
    echo_green(f"{resource_type.capitalize()} with fides_key '{fides_key}' successfully deleted.")
class BaseFixedEncoder(NumberEncoder):
    """Number encoder for fixed-point values with `frac_places` fractional
    decimal digits (must be configured by the subclass)."""

    frac_places = None  # number of fractional digits; set by subclass

    @staticmethod
    def type_check_fn(value):
        # BUG FIX: restored @staticmethod (lost in extraction) — without it,
        # an instance call would bind the instance to `value`.
        # Floats are rejected: fixed-point requires exact decimal input.
        return (is_number(value) and (not isinstance(value, float)))

    @staticmethod
    def illegal_value_fn(value):
        # NaN / infinity cannot be represented as fixed-point.
        if isinstance(value, decimal.Decimal):
            return (value.is_nan() or value.is_infinite())
        return False

    def validate_value(self, value):
        """Reject values with more fractional precision than frac_places."""
        super().validate_value(value)
        with decimal.localcontext(abi_decimal_context):
            residue = (value % (TEN ** (- self.frac_places)))
            if (residue > 0):
                self.invalidate_value(value, exc=IllegalValue, msg=f'residue {repr(residue)} outside allowed fractional precision of {self.frac_places}')

    def validate(self):
        """Check the encoder is fully configured."""
        super().validate()
        if (self.frac_places is None):
            raise ValueError('must specify `frac_places`')
        if ((self.frac_places <= 0) or (self.frac_places > 80)):
            raise ValueError('`frac_places` must be in range (0, 80]')
class _Date(PythonDataType):
    """Date data type that additionally accepts date strings."""

    def cast_from(self, obj):
        """Parse strings as dates; defer every other type to the parent."""
        if not isinstance(obj, str):
            return super().cast_from(obj)
        try:
            return datetime_parse.parse_date(obj)
        except datetime_parse.DateTimeError:
            # Unparseable string: report a type mismatch for this type.
            raise TypeMismatchError(obj, self)
def _fuse_single_source_parallel_gemms(sorted_graph: List[Tensor]) -> Tuple[(bool, List[Tensor])]:
    """Fuse groups of >=2 parallel gemm_rcr(_bias) ops that share a single
    source tensor into one concatenated gemm followed by a split.

    Returns (True, new_graph) after performing ONE fusion — the graph is
    re-toposorted and the caller is expected to re-invoke until no change —
    or (False, original_graph) when nothing could be fused.
    """
    _fusing_ops = {'gemm_rcr', 'gemm_rcr_bias'}
    for tensor in sorted_graph:
        # Bucket this tensor's consumers by op type; only same-typed ops fuse.
        fusion_groups = {}
        for dst in tensor.dst_ops():
            op_type = dst._attrs['op']
            if (op_type in _fusing_ops):
                # Skip gemms whose result is a graph output.
                if dst._attrs['outputs'][0]._attrs['is_output']:
                    continue
                # Weight (inputs[1]) must be a producer-less non-input leaf
                # distinct from the shared source; same for bias (inputs[2]).
                if ((tensor == dst._attrs['inputs'][1]) or dst._attrs['inputs'][1].src_ops() or dst._attrs['inputs'][1]._attrs['is_input']):
                    continue
                elif ((len(dst._attrs['inputs']) > 2) and ((tensor == dst._attrs['inputs'][2]) or dst._attrs['inputs'][2].src_ops() or dst._attrs['inputs'][2]._attrs['is_input'])):
                    continue
                if (op_type in fusion_groups):
                    fusion_groups[op_type].append(dst)
                else:
                    fusion_groups[op_type] = [dst]
        for (op_type, fusion_group) in fusion_groups.items():
            if (len(fusion_group) < 2):
                continue  # a single gemm gains nothing from fusion
            bias = ('bias' in op_type)
            W = []  # per-gemm weight tensors
            B = []  # per-gemm bias tensors (bias variant only)
            N = []  # output width of each gemm, for the final split
            for gemm_op in fusion_group:
                w = gemm_op._attrs['inputs'][1]
                W.append(w)
                if bias:
                    B.append(gemm_op._attrs['inputs'][2])
                N.append(w.shape()[0].value())
            # One large gemm over the concatenated weights/biases...
            W_concat = ops.concatenate()(W, dim=0)
            if bias:
                B_concat = ops.concatenate()(B)
                fused_gemm = ops.gemm_rcr_bias()(tensor, W_concat, B_concat)
            else:
                fused_gemm = ops.gemm_rcr()(tensor, W_concat)
            # ...then split back into the original per-gemm outputs and
            # rewire every consumer onto the split results.
            split_result = ops.split()(fused_gemm, N, dim=(- 1))
            for (old_op, new_tensor) in zip(fusion_group, split_result):
                transform_utils.replace_tensor(old_op._attrs['outputs'][0], new_tensor)
            sorted_graph = toposort(sorted_graph)
            return (True, transform_utils.sanitize_sorted_graph(sorted_graph))
    return (False, sorted_graph)
def extractWhitedovetranslationsWordpressCom(item):
    """Feed parser for 'White Dove Translations' release items.

    Returns a release message for recognized tags, None for previews or
    items without chapter/volume info, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class _DefaultWorkerManagerFactory(WorkerManagerFactory):
    """Factory that always hands back one pre-built WorkerManager."""

    def __init__(self, system_app: (SystemApp | None)=None, worker_manager: (WorkerManager | None)=None):
        super().__init__(system_app)
        # The instance returned by every create() call.
        self.worker_manager = worker_manager

    def create(self) -> WorkerManager:
        """Return the stored manager (may be None if none was supplied)."""
        return self.worker_manager
class EngineState(Entity):
    """Base entity for engine states: subclasses override `initialize` and
    `reset` to hook into the simulator."""

    INFO = {'make': [], 'initialize': [], 'reset': []}

    def __init__(self, ns, name, simulator, backend, params):
        self.ns = ns
        self.name = name
        self.color = 'grey'
        self.backend = backend
        # Deferred import to avoid a circular dependency with eagerx.core.
        from eagerx.core.specs import EngineStateSpec
        self.initialize(EngineStateSpec(params), simulator)

    @classmethod
    def pre_make(cls, entity_id, entity_type):
        # BUG FIX: restored @classmethod (lost in extraction) — the method
        # takes `cls` and chains through super().pre_make.
        spec = super().pre_make(entity_id, entity_type)
        params = spec.params
        # Rename the generic entity_type key to this entity's state_type.
        params['state_type'] = params.pop('entity_type')
        params.pop('entity_id')
        from eagerx.core.specs import EngineStateSpec
        return EngineStateSpec(params)

    @classmethod
    def check_spec(cls, spec):
        # BUG FIX: restored @classmethod, matching pre_make.
        super().check_spec(spec)
        return

    def initialize(self, spec: 'EngineStateSpec', simulator: Any) -> None:
        """Hook: set up the engine state from its spec.  Default: no-op."""
        pass

    def reset(self, state: Any) -> None:
        """Hook: reset the simulated state.  Default: no-op."""
        pass
class SearchFilterLayer(SqlalchemyDataLayer):
    """Data layer that strips full-text 'search' operators out of the
    filter list before delegating to the SQLAlchemy layer."""

    def filter_query(self, query, filter_info, model):
        remaining = [flt for flt in filter_info if flt['op'] != 'search']
        if remaining:
            return super().filter_query(query, remaining, model)
        # Nothing but full-text filters: leave the query untouched.
        return query
def faba_with_object_class_and_two_awards(award_count_sub_schedule, award_count_submission, defc_codes):
    """Test fixture: create two FinancialAccountsByAwards rows (one per
    award) sharing the same object class, submission and DEFC code 'M'.

    NOTE(review): presumably a pytest fixture — any decorator was lost in
    extraction; confirm against the original file.
    """
    basic_object_class = major_object_class_with_children('001', [1])
    award1 = _normal_award(156)
    award2 = _normal_award(212)
    # First row carries explicit zero downward adjustments; second omits them.
    baker.make('awards.FinancialAccountsByAwards', parent_award_id='basic award 1', award=award1, disaster_emergency_fund=DisasterEmergencyFundCode.objects.filter(code='M').first(), submission=SubmissionAttributes.objects.all().first(), object_class=basic_object_class[0], transaction_obligated_amount=1, ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe=0, ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe=0)
    baker.make('awards.FinancialAccountsByAwards', parent_award_id='basic award 2', award=award2, disaster_emergency_fund=DisasterEmergencyFundCode.objects.filter(code='M').first(), submission=SubmissionAttributes.objects.all().first(), object_class=basic_object_class[0], transaction_obligated_amount=1)
class OptionSeriesPieSonificationTracksMappingTremoloSpeed(Options):
    """Highcharts option mapping for sonification tremolo speed.

    BUG FIX: the @property/@x.setter decorators (lost in extraction) are
    restored — without them each second def silently shadowed the getter.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class reiterable(_coconut_has_iter):
    """Coconut runtime wrapper that makes a one-shot iterator re-iterable.

    Each traversal tees the underlying iterator so the stored one is never
    consumed; inputs that are already reiterable are returned unchanged.
    """
    __slots__ = ()
    def __new__(cls, iterable):
        # Idempotent construction: don't re-wrap reiterable inputs.
        if _coconut.isinstance(iterable, _coconut.reiterables):
            return iterable
        return _coconut.super(reiterable, cls).__new__(cls, iterable)
    def get_new_iter(self):
        # tee() replaces self.iter with a fresh copy, handing out the twin,
        # so the stored iterator is preserved for later traversals.
        (self.iter, new_iter) = tee(self.iter)
        return new_iter
    def __iter__(self):
        return _coconut.iter(self.get_new_iter())
    def __repr__(self):
        return ('reiterable(%s)' % (_coconut.repr(self.get_new_iter()),))
    def __reduce__(self):
        # Pickle by re-wrapping the underlying iterator.
        return (self.__class__, (self.iter,))
    def __copy__(self):
        return self.__class__(self.get_new_iter())
    def __getitem__(self, index):
        return _coconut_iter_getitem(self.get_new_iter(), index)
    def __reversed__(self):
        return reversed(self.get_new_iter())
    def __len__(self):
        # len() is only supported when the wrapped iterable is Sized.
        if (not _coconut.isinstance(self.iter, _coconut.abc.Sized)):
            return _coconut.NotImplemented
        return _coconut.len(self.get_new_iter())
    def __contains__(self, elem):
        return (elem in self.get_new_iter())
    def count(self, elem):
        return self.get_new_iter().count(elem)
    def index(self, elem):
        return self.get_new_iter().index(elem)
def test_slave_chat_update_member(bot_group, slave, channel):
    """After a member-update event the chat cache must hold the added and
    edited members and must have dropped the removed one."""
    added, edited, removed = slave.send_member_update_status()
    manager = channel.chat_manager
    cache_key = manager.get_cache_key(added.chat)
    assert cache_key in manager.cache
    cached_group = manager.cache[cache_key]
    # Removed members are gone from the cache entirely.
    with raises(KeyError):
        cached_group.get_member(removed.uid)
    # Added/edited members match what the slave reported.
    compare_members(added, cached_group.get_member(added.uid))
    compare_members(edited, cached_group.get_member(edited.uid))
def markTightParagraphs(state: StateBlock, idx: int) -> None:
    """Hide paragraph_open/close token pairs at the list-item level so the
    list renders 'tight' (without <p> wrappers)."""
    level = state.level + 2
    i = idx + 2
    last = len(state.tokens) - 2
    while i < last:
        token = state.tokens[i]
        if token.level == level and token.type == 'paragraph_open':
            # Hide the open token and its matching close two tokens later,
            # then jump past the pair.
            state.tokens[i + 2].hidden = True
            token.hidden = True
            i += 3
        else:
            i += 1
class Filters(Html.Html):
    """Chip-style filter component: a text input plus a panel of removable
    'chip' selections, rendered client-side by the ChipAdd JS constructor.

    BUG FIX: @property restored on `options` and `dom` (lost in extraction) —
    the body itself reads `self.options.category` / `self.options.visible`
    and assigns `self.options.draggable`, which only works on properties.
    """
    name = 'Filters'
    requirements = (cssDefaults.ICON_FAMILY,)
    _option_cls = OptList.OptionsTagItems

    def __init__(self, page: primitives.PageModel, items, width, height, html_code, helper, options, profile, verbose: bool=False):
        super(Filters, self).__init__(page, items, html_code=html_code, profile=profile, options=options, css_attrs={'width': width, 'min-height': height}, verbose=verbose)
        # Free-text input used to type new chips.
        self.input = self.page.ui.input()
        self.input.style.css.text_align = 'left'
        self.input.style.css.padding = '0 5px'
        self.input.options.managed = False
        # Panel receiving the chip elements.
        self.selections = self.page.ui.div()
        self.selections.options.managed = False
        self.selections.attr['name'] = 'panel'
        self.selections.css({'min-height': '30px', 'padding': '5px 2px'})
        self.add_helper(helper)
        self.__enter_def = False  # tracks whether enter() was wired up

    @property
    def options(self) -> OptList.OptionsTagItems:
        """Component options."""
        return super().options

    def enter(self, js_funcs: types.JS_FUNCS_TYPES, profile: types.PROFILE_TYPE=None):
        """Add a chip from the input when Enter is pressed."""
        self.__enter_def = True
        if (not isinstance(js_funcs, list)):
            js_funcs = [js_funcs]
        self.keydown.enter((([JsUtils.jsConvertFncs(js_funcs, toStr=True), self.dom.add(self.dom.input)] + js_funcs) + [self.input.dom.empty()]), profile)
        return self

    def drop(self, js_funcs: types.JS_FUNCS_TYPES, prevent_default: bool=True, profile: types.PROFILE_TYPE=None):
        """Allow values to be dropped onto the component."""
        self.style.css.border = '1px dashed black'
        self.tooltip('Drag and drop values here')
        return super(Filters, self).drop(js_funcs, prevent_default, profile)

    def delete(self, js_funcs: types.JS_FUNCS_TYPES, profile: types.PROFILE_TYPE=None):
        """Attach JS run when a chip's delete icon is clicked.

        Must be called before enter() — the delete handler is baked into
        the chip template used by enter().
        """
        if self.__enter_def:
            raise ValueError('delete on chip must be triggered before enter')
        if (not isinstance(js_funcs, list)):
            js_funcs = [js_funcs]
        self._jsStyles['delete'] = JsUtils.jsConvertFncs((['this.parentNode.remove()'] + js_funcs), toStr=True, profile=profile)
        return self

    def append(self, value: Any, category: Optional[str]=None, name: Optional[str]=None, disabled: bool=False, fixed: bool=False):
        """Append a chip record; category/name default off each other."""
        rec = {'value': value, 'disabled': disabled, 'fixed': fixed, 'category': category, 'name': name}
        if (category is None):
            rec['category'] = (name or self.options.category)
        rec['name'] = (name or rec['category'])
        self._vals.append(rec)

    def draggable(self, js_funcs: types.JS_FUNCS_TYPES=None, options: dict=None, profile: types.PROFILE_TYPE=None, source_event: str=None):
        """Make chips draggable, exporting their value via dataTransfer."""
        js_funcs = (js_funcs or [])
        if (not isinstance(js_funcs, list)):
            js_funcs = [js_funcs]
        js_funcs.append('event.dataTransfer.setData("text", value)')
        self.options.draggable = ('function(event, value){%s} ' % JsUtils.jsConvertFncs(js_funcs, toStr=True, profile=profile))
        return self

    @property
    def dom(self) -> JsHtmlList.Tags:
        """Lazily-built JS DOM wrapper."""
        if (self._dom is None):
            self._dom = JsHtmlList.Tags(self, page=self.page)
        return self._dom

    def __str__(self):
        self.page.properties.js.add_builders(self.refresh())
        # Client-side chip factory shared by all Filters instances.
        self.page.properties.js.add_constructor('ChipAdd', 'function chipAdd(panel, record, options){\nif(typeof(record.category !== "undefined")){options.category = record.category}\nvar div = document.createElement("div"); \nfor (var key in options.item_css){div.style[key] = options.item_css[key]};\ndiv.setAttribute(\'data-category\', record.category);\nif(typeof(record.css !== "undefined")){\n for (var key in record.css){ div.style[key] = record.css[key]}};\nvar content = document.createElement("span"); \nfor (var key in options.value_css){ content.style[key] = options.value_css[key]};\ncontent.setAttribute(\'name\', \'chip_value\'); content.innerHTML = record.value; \nif(options.visible){\n var p = document.createElement("p"); \n for (var key in options.category_css){p.style[key] = options.category_css[key]};\n p.innerHTML = record.name; div.appendChild(p)}\ndiv.appendChild(content);\nif(!record.fixed && options.delete){\n var icon = document.createElement("i"); \n for (var key in options.icon_css){icon.style[key] = options.icon_css[key] };\n icon.classList.add(\'fas\'); icon.classList.add(\'fa-times\'); \n icon.addEventListener(\'click\', function(){eval(options.delete)});\n div.appendChild(icon)}\nif(typeof options.draggable !== \'undefined\'){\n div.setAttribute(\'draggable\', true); div.style.cursor = \'grab\';\n div.ondragstart = function(event){ var value = this.innerHTML; options.draggable(event, value) }\n}\npanel.appendChild(div);\n\nconst maxHeight = options.max_height;\nif(maxHeight > 0){\n panel.style.maxHeight = ""+ maxHeight + "px";\n panel.style.overflow = "hidden"; panel.style.position = "relative";\n var div = document.createElement("div"); div.style.color = "#3366BB";\n div.innerHTML = "Show all"; div.style.position = "absolute"; \n div.style.bottom = 0; div.style.cursor = "pointer";\n div.addEventListener("click", function(event){ \n var targetElement = event.target || event.srcElement;\n if (targetElement.innerHTML != "reduce"){panel.style.maxHeight = null; targetElement.innerHTML = "reduce"} \n else {panel.style.maxHeight = ""+ maxHeight + "px"; targetElement.innerHTML = "Show all"}});\n div.style.right = "5px"; panel.appendChild(div)\n}}')
        if (not self.options.visible):
            self.input.style.css.display = False
        return ('<div %(attrs)s>%(input)s%(selections)s</div>%(helper)s' % {'attrs': self.get_attrs(css_class_names=self.style.get_classes()), 'input': self.input.html(), 'selections': self.selections.html(), 'helper': self.helper})
def _init_4bit_linear(source: Module, config: Union[(_8BitConfig, _4BitConfig)], device: torch.device) -> 'bnb.nn.Linear4bit':
    """Build a bitsandbytes 4-bit quantized replacement for `source` (an
    nn.Linear-like module), mirroring its in/out features and bias.

    Raises AssertionError when `config` is not a _4BitConfig.
    """
    assert isinstance(config, _4BitConfig)
    import bitsandbytes as bnb  # deferred: optional heavy dependency
    quantized_module = bnb.nn.Linear4bit(input_features=source.in_features, output_features=source.out_features, bias=(source.bias is not None), compute_dtype=config.compute_dtype, compress_statistics=config.double_quantization, quant_type=config.quantization_dtype.value, device=device)
    return quantized_module
def test_augmented_assignment_broadcast():
    """Augmented assignments with Constant scalar expressions must broadcast
    over a Function's data (a=1, b=2 throughout)."""
    mesh = UnitSquareMesh(1, 1)
    V = FunctionSpace(mesh, 'BDM', 1)
    u = Function(V)
    a = Constant(1)
    b = Constant(2)
    u.assign(a)
    assert np.allclose(u.dat.data_ro, 1)
    u *= (- (a + b))   # 1 * -(1+2) = -3
    assert np.allclose(u.dat.data_ro, (- 3))
    u += (b * 2)       # -3 + 4 = 1
    assert np.allclose(u.dat.data_ro, 1)
    u /= (- (b + a))   # 1 / -3
    assert np.allclose(u.dat.data_ro, ((- 1) / 3))
    u -= ((2 + a) + b) # -1/3 - 5 = -16/3
    assert np.allclose(u.dat.data_ro, ((- 16) / 3))
class OptionSeriesScatter3dData(Options):
    """Highcharts scatter3d per-point data options.

    BUG FIX: @property/@x.setter decorators (lost in extraction) restored —
    without them, each setter def silently shadowed its getter.
    """

    @property
    def accessibility(self) -> 'OptionSeriesScatter3dDataAccessibility':
        return self._config_sub_data('accessibility', OptionSeriesScatter3dDataAccessibility)

    @property
    def className(self):
        return self._config_get(None)

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        return self._config_get(None)

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def colorIndex(self):
        return self._config_get(None)

    @colorIndex.setter
    def colorIndex(self, num: float):
        self._config(num, js_type=False)

    @property
    def custom(self):
        return self._config_get(None)

    @custom.setter
    def custom(self, value: Any):
        self._config(value, js_type=False)

    @property
    def dataLabels(self) -> 'OptionSeriesScatter3dDataDatalabels':
        return self._config_sub_data('dataLabels', OptionSeriesScatter3dDataDatalabels)

    @property
    def description(self):
        return self._config_get(None)

    @description.setter
    def description(self, text: str):
        self._config(text, js_type=False)

    @property
    def dragDrop(self) -> 'OptionSeriesScatter3dDataDragdrop':
        return self._config_sub_data('dragDrop', OptionSeriesScatter3dDataDragdrop)

    @property
    def drilldown(self):
        return self._config_get(None)

    @drilldown.setter
    def drilldown(self, text: str):
        self._config(text, js_type=False)

    @property
    def events(self) -> 'OptionSeriesScatter3dDataEvents':
        return self._config_sub_data('events', OptionSeriesScatter3dDataEvents)

    @property
    def id(self):
        return self._config_get(None)

    @id.setter
    def id(self, text: str):
        self._config(text, js_type=False)

    @property
    def labelrank(self):
        return self._config_get(None)

    @labelrank.setter
    def labelrank(self, num: float):
        self._config(num, js_type=False)

    @property
    def marker(self) -> 'OptionSeriesScatter3dDataMarker':
        return self._config_sub_data('marker', OptionSeriesScatter3dDataMarker)

    @property
    def name(self):
        return self._config_get(None)

    @name.setter
    def name(self, text: str):
        self._config(text, js_type=False)

    @property
    def selected(self):
        return self._config_get(False)

    @selected.setter
    def selected(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def x(self):
        return self._config_get(None)

    @x.setter
    def x(self, num: float):
        self._config(num, js_type=False)

    @property
    def y(self):
        return self._config_get(None)

    @y.setter
    def y(self, num: float):
        self._config(num, js_type=False)

    @property
    def z(self):
        return self._config_get(None)

    @z.setter
    def z(self, num: float):
        self._config(num, js_type=False)
class OptionPlotoptionsVariablepieSonificationDefaultspeechoptionsMappingVolume(Options):
    """Highcharts variable-pie sonification volume mapping options.

    BUG FIX: @property/@x.setter decorators (lost in extraction) restored —
    without them, each setter def silently shadowed its getter.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
@common.requires_os(*metadata.platforms)
def main():
    """RTA: mimic apple-script execution from a masqueraded binary.

    NOTE(review): the decorator was truncated to `_os(*metadata.platforms)`
    in extraction; `common.requires_os` matches that residue — confirm
    against the original file.
    """
    masquerade = '/tmp/bash'
    common.copy_macos_masquerade(masquerade)
    common.log('Launching fake osascript commands to mimic apple script execution')
    command = 'osascript with administrator privileges'
    common.execute([masquerade, 'childprocess', command], shell=True, timeout=5, kill=True)
    common.remove_file(masquerade)
(PRIVACY_REQUEST_TRANSFER_TO_PARENT, status_code=HTTP_200_OK, dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_TRANSFER])], response_model=Dict[(str, Optional[List[Row]])])
def privacy_request_data_transfer(*, privacy_request_id: str, rule_key: str, db: Session=Depends(deps.get_db), cache: FidesopsRedis=Depends(deps.get_cache)) -> Dict[(str, Optional[List[Row]])]:
    """Endpoint body: fetch a privacy request's cached access results and
    filter them down to the data categories targeted by `rule_key`.

    Raises HTTP 404 when the request, rule, cached results, datasets or
    filtered results cannot be found.

    NOTE(review): the router decorator above this function was mangled in
    extraction (path/status_code/scopes residue) — confirm against the
    original file.
    """
    privacy_request = PrivacyRequest.get(db=db, object_id=privacy_request_id)
    if (not privacy_request):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No privacy request with id {privacy_request_id} found')
    rule = Rule.filter(db=db, conditions=(Rule.key == rule_key)).first()
    if (not rule):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'Rule key {rule_key} not found')
    # Cached access results are keyed with a '<request id>__access_request' prefix.
    value_dict: Dict[(str, Optional[List[Row]])] = cache.get_encoded_objects_by_prefix(f'{privacy_request_id}__access_request')
    if (not value_dict):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No access request information found for privacy request id {privacy_request_id}')
    # Keep only the trailing key segment after the cache prefix.
    access_result = {k.split('__')[(- 1)]: v for (k, v) in value_dict.items()}
    datasets = DatasetConfig.all(db=db)
    if (not datasets):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No datasets found for privacy request {privacy_request_id}')
    dataset_graphs = [dataset_config.get_graph() for dataset_config in datasets]
    if (not dataset_graphs):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f'No dataset graphs found for privacy request {privacy_request_id}')
    dataset_graph = DatasetGraph(*dataset_graphs)
    # Release only the data categories this rule targets.
    target_categories = {target.data_category for target in rule.targets}
    filtered_results: Optional[Dict[(str, Optional[List[Row]])]] = filter_data_categories(access_result, target_categories, dataset_graph.data_category_field_mapping)
    if (filtered_results is None):
        raise HTTPException(status_code=404, detail=f'No results found for privacy request {privacy_request_id}')
    return filtered_results
def mark_item_unwatched(item_id):
    """Tell the Emby server to mark `item_id` unwatched, then refresh the
    Kodi container, bypassing the cache for the last content URL."""
    log.debug('Mark Item UnWatched: {0}', item_id)
    url = '{server}/emby/Users/{userid}/PlayedItems/' + item_id
    # DELETE on PlayedItems clears the watched flag server-side.
    downloadUtils.download_url(url, method='DELETE')
    check_for_new_content()
    home_window = HomeWindow()
    last_url = home_window.get_property('last_content_url')
    if last_url:
        log.debug('markUnwatched_lastUrl: {0}', last_url)
        # Force the next load of that URL to skip the local cache.
        home_window.set_property('skip_cache_for_' + last_url, 'true')
    xbmc.executebuiltin('Container.Refresh')
def test_to_python_value_and_literal():
    """Round-trip a numpy array through NumpyArrayTransformer: the literal
    must be a single-dimensional numpy-format blob, and converting back
    must reproduce the original array."""
    ctx = context_manager.FlyteContext.current_context()
    tf = NumpyArrayTransformer()
    python_val = np.array([1, 2, 3])
    lt = tf.get_literal_type(np.ndarray)
    lv = tf.to_literal(ctx, python_val, type(python_val), lt)
    # Literal side: blob metadata advertises the numpy format and a URI.
    assert (lv.scalar.blob.metadata == BlobMetadata(type=BlobType(format=NumpyArrayTransformer.NUMPY_ARRAY_FORMAT, dimensionality=BlobType.BlobDimensionality.SINGLE)))
    assert (lv.scalar.blob.uri is not None)
    # Python side: values survive the round trip.
    output = tf.to_python_value(ctx, lv, np.ndarray)
    assert_array_equal(output, python_val)
def gen_subfile(pkt_bits, note='x10 command', repeat=1):
    """Build the contents of a Flipper SubGhz RAW .sub file for X10 packets.

    pkt_bits: iterable of bit strings, one per packet.
    repeat: total number of times the RAW data section is emitted.
    Reads the module-global `rf_freq` for the Frequency header.
    """
    datalines = []
    for bits in pkt_bits:
        # 9ms mark / 4.5ms space header, then 562us marks with short/long
        # spaces encoding 0/1, and a long trailing inter-packet gap.
        data = [9000, -4500]
        for bit in bits:
            if bit == '1':
                data.extend((562, -1688))
            else:
                data.extend((562, -563))
        data.extend((562, -40000))
        # Chunk to 510 samples per RAW_Data line.
        for i in range(0, len(data), 510):
            batch = map(str, data[i:(i + 510)])
            datalines.append(f"RAW_Data: {' '.join(batch)}")
    bb = pkt_bits[0]
    bin_dat = ' '.join([bb[i:(i + 8)] for i in range(0, len(bb), 8)])
    # Header lines must start at column 0 for a valid .sub file, hence the
    # unindented triple-quoted literal.
    hdr = f'''Filetype: Flipper SubGhz RAW File
Version: 1
# {note} {bin_dat}
# Generated with subghz_x10.py
# {time.ctime()}
Frequency: {rf_freq}
Preset: FuriHalSubGhzPresetOok650Async
Protocol: RAW
'''
    body = '\n'.join(datalines) + '\n'
    # BUG FIX: the original emitted the data once and then `repeat` MORE
    # times when repeat > 1 (repeat+1 copies total); emit exactly `repeat`.
    return hdr + body * max(repeat, 1)
class DQN():
def __init__(self, config, create_env, create_agent):
self.config = config
self.logger = TFLogger(log_dir=self.config['logdir'], hps=self.config, save_every=self.config['save_every'])
self._create_env = create_env
self._create_agent = create_agent
def _state_dict(self, model, device):
sd = model.state_dict()
for (k, v) in sd.items():
sd[k] = v.to(device)
return sd
def run(self):
env = self._create_env(self.config['n_envs'], seed=0, **{k: self.config[k] for k in self.config if k.startswith('environment/')})
self.n_actions = env.action_space.n
self.obs_shape = env.reset()[0]['frame'].size()
del env
self.learning_model = self._create_model()
self.target_model = copy.deepcopy(self.learning_model)
self.agent = self._create_agent(n_actions=self.n_actions, model=self.learning_model)
model = copy.deepcopy(self.learning_model)
self.train_batcher = RL_Batcher(n_timesteps=self.config['batch_timesteps'], create_agent=self._create_agent, create_env=self._create_env, env_args={'mode': 'train', 'n_envs': self.config['n_envs'], 'max_episode_steps': self.config['max_episode_steps'], **{k: self.config[k] for k in self.config if k.startswith('environment/')}}, agent_args={'n_actions': self.n_actions, 'model': model}, n_processes=self.config['n_processes'], seeds=[(self.config['env_seed'] + (k * 10)) for k in range(self.config['n_processes'])], agent_info=DictTensor({'epsilon': torch.zeros(1)}), env_info=DictTensor({}))
model = copy.deepcopy(self.learning_model)
self.evaluation_batcher = RL_Batcher(n_timesteps=self.config['max_episode_steps'], create_agent=self._create_agent, create_env=self._create_env, env_args={'mode': 'evaluation', 'max_episode_steps': self.config['max_episode_steps'], 'n_envs': self.config['n_evaluation_envs'], **{k: self.config[k] for k in self.config if k.startswith('environment/')}}, agent_args={'n_actions': self.n_actions, 'model': model}, n_processes=self.config['n_evaluation_processes'], seeds=[((self.config['env_seed'] * 10) + (k * 10)) for k in range(self.config['n_evaluation_processes'])], agent_info=DictTensor({'epsilon': torch.zeros(1)}), env_info=DictTensor({}))
self.replay_buffer = ReplayBuffer(self.config['replay_buffer_size'])
device = torch.device(self.config['learner_device'])
self.learning_model.to(device)
self.target_model.to(device)
optimizer = getattr(torch.optim, self.config['optim'])(self.learning_model.parameters(), lr=self.config['lr'])
self.evaluation_batcher.update(self._state_dict(self.learning_model, torch.device('cpu')))
n_episodes = (self.config['n_envs'] * self.config['n_processes'])
agent_info = DictTensor({'epsilon': torch.ones(n_episodes).float()})
self.train_batcher.reset(agent_info=agent_info)
logging.info('Sampling initial transitions')
for k in range(self.config['initial_buffer_epochs']):
self.train_batcher.execute()
(trajectories, n) = self.train_batcher.get(blocking=True)
assert (not (n == 0))
self.replay_buffer.push(trajectories.trajectories)
print(k, '/', self.config['initial_buffer_epochs'])
self.iteration = 0
n_episodes = (self.config['n_evaluation_envs'] * self.config['n_evaluation_processes'])
self.evaluation_batcher.reset(agent_info=DictTensor({'epsilon': torch.zeros(n_episodes).float()}))
self.evaluation_batcher.execute()
logging.info('Starting Learning')
_start_time = time.time()
self.target_model.load_state_dict(self.learning_model.state_dict())
cumulated_reward = torch.zeros((self.config['n_envs'] * self.config['n_processes']))
while ((time.time() - _start_time) < self.config['time_limit']):
n_episodes = (self.config['n_envs'] * self.config['n_processes'])
self.train_batcher.update(self._state_dict(self.learning_model, torch.device('cpu')))
self.train_batcher.execute(agent_info=DictTensor({'epsilon': torch.tensor([self.config['epsilon_greedy']]).repeat(n_episodes).float()}))
(trajectories, n) = self.train_batcher.get(blocking=True)
assert (n == (self.config['n_envs'] * self.config['n_processes']))
self.replay_buffer.push(trajectories.trajectories)
self.logger.add_scalar('stats/replay_buffer_size', self.replay_buffer.size(), self.iteration)
assert (self.config['qvalue_epochs'] > 0)
for k in range(self.config['qvalue_epochs']):
optimizer.zero_grad()
transitions = self.replay_buffer.sample(n=self.config['n_batches'])
dt = self.get_loss(transitions, device)
_loss = dt['q_loss'].to(self.config['learner_device']).mean()
self.logger.add_scalar('q_loss', _loss.item(), self.iteration)
_loss.backward()
if (self.config['clip_grad'] > 0):
n = torch.nn.utils.clip_grad_norm_(self.learning_model.parameters(), self.config['clip_grad'])
self.logger.add_scalar('grad_norm', n.item(), self.iteration)
self.iteration += 1
optimizer.step()
tau = self.config['update_target_tau']
self.soft_update_params(self.learning_model, self.target_model, tau)
if (((time.time() - _start_time) > 600) and ((self.iteration % 1000) == 0)):
self.logger.update_csv()
self.evaluate()
self.logger.update_csv()
(trajectories, n) = self.train_batcher.get()
self.train_batcher.close()
self.evaluation_batcher.get()
self.evaluation_batcher.close()
self.logger.close()
def soft_update_params(self, net, target_net, tau):
for (param, target_param) in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(((tau * param.data) + ((1 - tau) * target_param.data)))
    def evaluate(self, relaunch=True):
        """Collect finished evaluation episodes and log the average reward.

        Non-blocking: returns None when the evaluation batcher has no
        completed trajectories yet. When `relaunch` is True, the evaluation
        batcher is re-armed with the current learning-model weights so
        evaluation runs continuously in the background.
        """
        (evaluation_trajectories, n) = self.evaluation_batcher.get(blocking=False)
        if (evaluation_trajectories is None):
            return
        # Mask zeroes out padding beyond each episode's end before summing
        # rewards over time, then averages across episodes.
        avg_reward = (evaluation_trajectories.trajectories['_observation/reward'] * evaluation_trajectories.trajectories.mask()).sum(1).mean().item()
        self.logger.add_scalar('avg_reward', avg_reward, self.iteration)
        if self.config['verbose']:
            print(((((('Iteration ' + str(self.iteration)) + ', Reward = ') + str(avg_reward)) + ', Buffer size = ') + str(self.replay_buffer.size())))
        if relaunch:
            # Push fresh weights (moved to CPU) and restart evaluation
            # episodes with a greedy policy (epsilon = 0).
            self.evaluation_batcher.update(self._state_dict(self.learning_model, torch.device('cpu')))
            n_episodes = (self.config['n_evaluation_envs'] * self.config['n_evaluation_processes'])
            self.evaluation_batcher.reset(agent_info=DictTensor({'epsilon': torch.zeros(n_episodes).float()}))
            self.evaluation_batcher.execute()
        return avg_reward
    def get_loss(self, transitions, device):
        """Compute the per-transition squared TD error for Q-learning.

        :param transitions: batch of transitions (keys use the convention
            'observation/...' for the current step and '_observation/...'
            for the next step).
        :param device: device to run the computation on.
        :return: DictTensor with key 'q_loss' holding one squared TD error
            per transition.
        """
        transitions = transitions.to(device)
        B = transitions.n_elems()
        Bv = torch.arange(B).to(device)
        action = transitions['action/action']
        reward = transitions['_observation/reward']
        frame = transitions['observation/frame']
        _frame = transitions['_observation/frame']
        _done = transitions['_observation/done'].float()
        # Q-value of the action actually taken, from the online network.
        q = self.learning_model(frame)
        qa = q[(Bv, action)]
        # Bootstrap target from the frozen target network: greedy action's
        # value at the next state (detached so no gradient flows into it).
        _q_target = self.target_model(_frame).detach()
        _q_target_a = None
        actionp = _q_target.max(1)[1]
        _q_target_a = _q_target[(Bv, actionp)]
        # Terminal transitions (_done == 1) drop the bootstrap term.
        _target_value = (((_q_target_a * (1 - _done)) * self.config['discount_factor']) + reward)
        td = ((_target_value - qa) ** 2)
        dt = DictTensor({'q_loss': td})
        return dt
def type_text(data):
    """Send `data` as simulated keystrokes via the configured typing backend.

    When clipboard mode is active, the text goes to the clipboard instead.
    Otherwise the backend named by the `type_library` config option is used
    (xdotool / ydotool / wtype / dotool), defaulting to the pynput library.
    """
    if (keepmenu.CLIPBOARD is True):
        type_clipboard(data)
        return
    if keepmenu.CONF.has_option('database', 'type_library'):
        library = keepmenu.CONF.get('database', 'type_library')
    else:
        library = 'pynput'
    # Command-line backends share the same call pattern.
    commands = {
        'xdotool': ['xdotool', 'type', '--', data],
        'ydotool': ['ydotool', 'type', '-e', '0', '--', data],
        'wtype': ['wtype', '--', data],
    }
    if library in commands:
        call(commands[library])
    elif (library == 'dotool'):
        run(['dotool'], check=True, encoding=keepmenu.ENC, input=f'type {data}')
    else:
        try:
            from pynput import keyboard
        except ModuleNotFoundError:
            # pynput missing: silently give up, matching existing behavior.
            return
        kbd = keyboard.Controller()
        try:
            kbd.type(data)
        except kbd.InvalidCharacterException:
            dmenu_err('Unable to type string...bad character.\nTry setting `type_library = xdotool` in config.ini')
class limits_property():
    """Descriptor exposing a (minimum, maximum) pair as one attribute.

    Reading returns the two named attributes of the instance as a tuple.
    Writing unpacks a two-element value into the corresponding
    underscore-prefixed attributes on the instance.
    """

    def __init__(self, minimum_attribute_name, maximum_attribute_name):
        super().__init__()
        self._minimum_attribute_name = minimum_attribute_name
        self._maximum_attribute_name = maximum_attribute_name

    def __get__(self, instance, owner):
        # Read through the public attribute names (which may themselves be
        # properties on the owning class).
        low = getattr(instance, self._minimum_attribute_name)
        high = getattr(instance, self._maximum_attribute_name)
        return (low, high)

    def __set__(self, instance, value):
        # Store into the private backing attributes.
        setattr(instance, '_' + self._minimum_attribute_name, value[0])
        setattr(instance, '_' + self._maximum_attribute_name, value[1])
@pytest.mark.LinearSolvers
def test_step_noslip_FullRun():
    """Full no-slip ('ns') run: check KSP iteration counts for step2d solves.

    Loads the simulation with the no-slip boundary condition, runs it, and
    compares the linear-solver iteration counts of the first three step2d
    solves at t=1.0 against known-good values.
    """
    # NOTE(review): assigned but never called or used -- looks like a missing
    # `()` call (or a leftover fixture reference); confirm intent.
    petsc_options = initialize_petsc_options
    context_options_str = "boundary_condition_type='ns'"
    ns = load_simulation(context_options_str)
    actual_log = runTest(ns, 'test_2')
    # Each query is (event name, time, level, solve index).
    L1 = actual_log.get_ksp_resid_it_info([(' step2d ', 1.0, 0, 0)])
    L2 = actual_log.get_ksp_resid_it_info([(' step2d ', 1.0, 0, 1)])
    L3 = actual_log.get_ksp_resid_it_info([(' step2d ', 1.0, 0, 2)])
    print(L1, L2, L3)
    assert (L1[0][1] == 2)
    assert (L2[0][1] == 20)
    assert (L3[0][1] == 23)
class OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMapping(Options):
    """Generated accessors for the networkgraph sonification default-instrument mapping options.

    Each method lazily creates and returns the typed sub-configuration
    object bound to the corresponding Highcharts option key.
    """
    def frequency(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingFrequency':
        return self._config_sub_data('frequency', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingFrequency)
    def gapBetweenNotes(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingGapbetweennotes)
    def highpass(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingHighpass':
        return self._config_sub_data('highpass', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingHighpass)
    def lowpass(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingLowpass':
        return self._config_sub_data('lowpass', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingLowpass)
    def noteDuration(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingNoteduration)
    def pan(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPan':
        return self._config_sub_data('pan', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPan)
    def pitch(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPitch)
    def playDelay(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingPlaydelay)
    def time(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingTime)
    def tremolo(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingTremolo':
        return self._config_sub_data('tremolo', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingTremolo)
    def volume(self) -> 'OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesNetworkgraphSonificationDefaultinstrumentoptionsMappingVolume)
class StarSubmissionResult(object):
    """Parsed result of an EDSC star-distance submission response.

    Collects per-system status codes and calculated coordinates, accepted
    distances, error statuses, and distances flagged for re-checking.

    Attributes:
        valid: True when the submission as a whole was accepted.
        summary: human-readable overall status.
        systems: system name -> (status code, [x, y, z] or None).
        distances: lhs system name -> {rhs system name: distance}.
        errors: list of Status entries for anything not accepted.
        recheck: systems whose distance from `star` looks wrong.
    """

    # Human-readable descriptions for EDSC status codes.
    codeMap = {201: 'New Entry', 202: 'System CR increased', 203: 'Coordinates calculated', 301: 'Distance added', 302: 'Distance CR increased', 303: 'Added verification', 304: 'OK: Needs more data', 305: 'Distance appears to be wrong', 401: 'CALCULATED', 402: 'No solution found, more data needed'}

    def __init__(self, star, response):
        """Parse `response` (decoded EDSC JSON) for a submission about `star`."""
        self.star = star
        self.valid = False
        self.summary = 'No Data'
        self.systems = {}
        self.distances = {}
        self.errors = []
        self.recheck = {}
        # Overall submission status: a non-zero code rejects the whole batch.
        summary = response['status']['input'][0]['status']
        (code, msg) = (int(summary['statusnum']), summary['msg'])
        if (code != 0):
            self.valid = False
            self.summary = 'Error #{}: {}'.format(code, msg)
            self.errors.append(Status('status', code, msg, None, None))
            return
        self.valid = True
        self.summary = 'OK'
        # Per-system acceptance codes: 201/202/203 are successes.
        sysArray = response['status']['system']
        for ent in sysArray:
            sysName = ent['system'].upper()
            code = int(ent['status']['statusnum'])
            msg = ent['status']['msg']
            if (code in [201, 202, 203]):
                self.systems[sysName] = (code, None)
            else:
                self.errors.append(Status('system', code, msg, sysName, None))
        # Per-distance results: 301-304 are acceptance codes. Errors for the
        # same (lhs, rhs, code) pair are reported only once per direction.
        distArray = response['status']['dist']
        errPairs = set()
        for ent in distArray:
            lhsName = ent['system1'].upper()
            rhsName = ent['system2'].upper()
            code = int(ent['status']['statusnum'])
            msg = ent['status']['msg']
            if (code in [301, 302, 303, 304]):
                if (not (lhsName in self.distances)):
                    self.distances[lhsName] = {}
                try:
                    rhsDists = self.distances[rhsName]
                    if (lhsName in rhsDists):
                        # Already recorded in the opposite direction.
                        continue
                except KeyError:
                    pass
                dist = float(ent['dist'])
                self.distances[lhsName][rhsName] = dist
                if (not (lhsName in self.systems)):
                    self.systems[lhsName] = (code, None)
            else:
                if ((lhsName, rhsName, code) in errPairs):
                    continue
                if ((rhsName, lhsName, code) in errPairs):
                    continue
                errPairs.add((lhsName, rhsName, code))
                errPairs.add((rhsName, lhsName, code))
                self.errors.append(Status('dist', code, msg, lhsName, rhsName))
                if (code == 305):
                    # Distance disagrees with the database: queue the other
                    # endpoint for re-measurement.
                    if (lhsName == star):
                        self.recheck[rhsName] = ent['dist']
                    elif (rhsName == star):
                        self.recheck[lhsName] = ent['dist']
        # Trilateration results: 401 carries calculated coordinates.
        triArray = response['status']['trilat']
        for ent in triArray:
            sysName = ent['system'].upper()
            code = int(ent['status']['statusnum'])
            if (code == 401):
                try:
                    system = self.systems[sysName]
                    if (system[1] is not None):
                        # Coordinates already recorded; keep the first ones.
                        continue
                except KeyError:
                    system = (code, None)
                assert (system[1] is None)
                coord = ent['coord']
                (x, y, z) = (coord['x'], coord['y'], coord['z'])
                self.systems[sysName] = (system[0], [x, y, z])
            elif (code == 402):
                # No solution yet: more reference distances needed.
                pass
            else:
                # BUG FIX: previously reported the stale `msg` left over from
                # the dist loop; use this trilat entry's own message.
                self.errors.append(Status('trilat', code, ent['status']['msg'], sysName, None))

    def __str__(self):
        """Render a human-readable summary of updates and problems."""
        if (not self.valid):
            return 'ERROR: {}'.format(self.summary)
        text = ''
        if (not self.errors):
            text += 'Success.\n'
        if self.systems:
            text += '+Updates:\n'
            sysNames = list(self.systems.keys())
            # Group output by status code.
            sysNames.sort(key=(lambda s: self.systems[s][0]))
            for sysName in sysNames:
                (code, coords) = self.systems[sysName]
                sysText = self.translateCode(code)
                if (coords is not None):
                    sysText += str(coords)
                text += '|- {:.<30s} {}\n'.format(sysName, sysText)
            text += '\n'
        if self.errors:
            text += '+Problems:\n'
            # Stable ordering: by code, then lhs, then rhs.
            errors = sorted(self.errors, key=(lambda e: (e.rhs or '')))
            errors.sort(key=(lambda e: (e.lhs or '')))
            errors.sort(key=(lambda e: e.code))
            for err in errors:
                text += '|- {:.<30s} #{} {}'.format(err.lhs, err.code, self.translateCode(err.code))
                if err.rhs:
                    text += (' <-> ' + err.rhs)
                text += '\n'
        return text

    def translateCode(self, code):
        """Return the human-readable description for an EDSC status code."""
        try:
            return self.codeMap[code]
        except KeyError:
            return 'Error #{} (unknown)'.format(code)
def test_get_surfaces_from_3dgrid(tmpdir):
    """Extract horizon surfaces from a 3D grid and spot-check their statistics.

    Builds a surface set from TESTSETG1 with refinement factor 2, checks
    mean/min/max of the deepest (last) surface and the mean of the
    shallowest (first), then round-trips every surface to disk.
    """
    mygrid = xtgeo.grid_from_file(TESTSETG1)
    surfs = xtgeo.surface.surfaces.surfaces_from_grid(mygrid, rfactor=2)
    surfs.describe()
    # Deepest surface statistics (depth values in meters).
    assert (surfs.surfaces[(- 1)].values.mean() == pytest.approx(1742.28, abs=0.04))
    assert (surfs.surfaces[(- 1)].values.min() == pytest.approx(1589.58, abs=0.04))
    assert (surfs.surfaces[(- 1)].values.max() == pytest.approx(1977.29, abs=0.04))
    # Shallowest surface mean.
    assert (surfs.surfaces[0].values.mean() == pytest.approx(1697.02, abs=0.04))
    for srf in surfs.surfaces:
        srf.to_file(join(tmpdir, (srf.name + '.gri')))
def validate_model(model, val_loader):
    """Score the model on (up to) the first 15 validation batches and print recall.

    Evaluation only: the model is put in eval mode and gradients are disabled.
    Assumes CUDA is available (inputs are moved with .cuda()).
    """
    print('Validating the model')
    model.eval()
    y_true = []
    y_pred = []
    with torch.no_grad():
        for (step, (x, mel)) in enumerate(val_loader):
            # Only the first 15 batches are scored; the loop still iterates
            # the remainder of the loader without doing work.
            if (step < 15):
                # NOTE(review): Variable is a deprecated no-op wrapper in
                # modern PyTorch -- confirm the targeted torch version.
                (x, mel) = (Variable(x).cuda(), Variable(mel).cuda())
                logits = model.forward_eval(mel)
                targets = x.cpu().view((- 1)).numpy()
                y_true += targets.tolist()
                predictions = return_classes(logits)
                y_pred += predictions.tolist()
    recall = get_metrics(y_pred, y_true)
    print('Unweighted Recall for the validation set: ', recall)
    print('\n')
class LocalFilePreferredNamesPreference(widgets.Preference, widgets.CheckConditional):
    """Preference widget for the comma-separated list of preferred cover-art file names.

    Only enabled when the 'use local file covers' option is checked.
    """
    # Default cover-art base names to search for.
    default = ['front', 'cover', 'album']
    name = 'covers/localfile/preferred_names'
    condition_preference_name = 'covers/use_localfile'

    def __init__(self, preferences, widget):
        widgets.Preference.__init__(self, preferences, widget)
        widgets.CheckConditional.__init__(self)

    def _get_value(self):
        """Return the widget text split on commas, whitespace stripped."""
        return [v.strip() for v in widgets.Preference._get_value(self).split(',')]

    def _set_value(self):
        """Populate the widget text from the stored option (or the default)."""
        self.widget.set_text(', '.join(settings.get_option(self.name, self.default)))
class OptionPlotoptionsCylinderSonificationContexttracksMappingLowpassResonance(Options):
    """Generated accessors for the cylinder sonification lowpass-resonance mapping options.

    NOTE(review): each option appears as two same-named plain methods (a
    getter then a setter); as written the second def shadows the first, so
    this rendering presumably lost @property / @x.setter decorators --
    confirm against the code generator's output before relying on the
    getters.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class GetNodeDataTracker(BasePerformanceTracker[(GetNodeDataV65, NodeDataBundles)]):
    """Performance tracker for GetNodeData requests (ETH wire protocol v65)."""

    def _get_request_size(self, request: GetNodeDataV65) -> Optional[int]:
        # Number of node hashes requested.
        return len(request.payload)

    def _get_result_size(self, result: NodeDataBundles) -> int:
        # Number of node-data bundles returned.
        return len(result)

    def _get_result_item_count(self, result: NodeDataBundles) -> int:
        # One item per returned bundle.
        return len(result)
class ElementConstantNewton(proteus.NonlinearSolvers.NonlinearSolver):
    """Nonlinear 'solver' that delegates to the model's element-constant solve.

    Mirrors the Newton-solver constructor interface so it can be swapped in
    for a Newton solver, but `solve` simply forwards to
    `F.elementConstantSolve` and always reports success.
    """

    def __init__(self, linearSolver, F, J=None, du=None, par_du=None, rtol_r=0.0001, atol_r=1e-16, rtol_du=0.0001, atol_du=1e-16, maxIts=100, norm=l2Norm, convergenceTest='r', computeRates=True, printInfo=True, fullNewton=True, directSolver=False, EWtol=True, maxLSits=100):
        import copy
        self.par_du = par_du
        if (par_du is not None):
            # Propagate the parallel dimension to the model.
            F.dim_proc = par_du.dim_proc
        # NOTE(review): `NonlinearSolver` is referenced unqualified here while
        # the base class above is `proteus.NonlinearSolvers.NonlinearSolver`
        # -- presumably imported elsewhere in this module; confirm.
        NonlinearSolver.__init__(self, F, J, du, rtol_r, atol_r, rtol_du, atol_du, maxIts, norm, convergenceTest, computeRates, printInfo)
        self.updateJacobian = True
        self.fullNewton = fullNewton
        self.linearSolver = linearSolver
        self.directSolver = directSolver
        self.lineSearch = True
        self.EWtol = EWtol
        self.maxLSits = maxLSits
        if self.linearSolver.computeEigenvalues:
            # Keep Jacobian copies for eigenvalue diagnostics.
            self.JLast = copy.deepcopy(self.J)
            self.J_t_J = copy.deepcopy(self.J)
            self.dJ_t_dJ = copy.deepcopy(self.J)
            self.JLsolver = LU(self.J_t_J, computeEigenvalues=True)
            self.dJLsolver = LU(self.dJ_t_dJ, computeEigenvalues=True)
            self.u0 = np.zeros(self.F.dim, 'd')

    def info(self):
        """Return a placeholder description string."""
        return 'Not Implemented'

    def solve(self, u, r=None, b=None, par_u=None, par_r=None):
        """Solve by delegating to the model's element-constant solve.

        `b`, `par_u` and `par_r` are accepted for interface compatibility but
        unused. Always returns False (never reports failure).
        """
        self.F.maxIts = self.maxIts
        self.F.maxLSits = self.maxLSits
        self.F.atol = self.atol_r
        self.F.elementConstantSolve(u, r)
        self.failedFlag = False
        return self.failedFlag
class TestQCSampleForSolid(unittest.TestCase):
    """QC sample verification against a synthetic SOLiD run directory."""

    def setUp(self):
        # Build a fake run directory with primary data files plus a qc/
        # subdirectory of QC output files (contents don't matter).
        self.d = TestUtils.make_dir()
        for f in SOLID_FILES.split():
            TestUtils.make_file(f, 'lorem ipsum', basedir=self.d)
        self.qc_dir = TestUtils.make_sub_dir(self.d, 'qc')
        for f in SOLID_QC_FILES.split():
            TestUtils.make_file(f, 'lorem ipsum', basedir=self.qc_dir)

    def tearDown(self):
        TestUtils.remove_dir(self.d)

    def test_qcsample_with_solid_data(self):
        """Every known SOLiD sample name should verify against the QC outputs."""
        for name in SOLID_SAMPLE_NAMES:
            solid_qc_sample = SolidQCSample(name, self.qc_dir, False)
            self.assertTrue(solid_qc_sample.verify(), ('Verify failed for %s' % name))
def find_app_module():
    """Guess the application's entry module in the current directory.

    Preference order: 'app.py', an 'app/' directory, '__init__.py', the only
    .py file, the only candidate directory, then the only package directory.
    Returns the chosen name, or None when nothing stands out.
    """
    py_files = []
    candidate_dirs = []
    for entry in os.listdir():
        # Hidden entries and anything test-related never qualify.
        if entry.startswith('.') or entry.startswith('test'):
            continue
        if os.path.isdir(entry):
            # Private directories (leading underscore) are skipped.
            if not entry.startswith('_'):
                candidate_dirs.append(entry)
        elif os.path.splitext(entry)[1] == '.py':
            py_files.append(entry)
    if 'app.py' in py_files:
        return 'app.py'
    if 'app' in candidate_dirs:
        return 'app'
    if '__init__.py' in py_files:
        return '__init__.py'
    if len(py_files) == 1:
        return py_files[0]
    if len(candidate_dirs) == 1:
        return candidate_dirs[0]
    # Last resort: a single directory that is a Python package.
    packages = [d for d in candidate_dirs
                if os.path.exists(os.path.join(d, '__init__.py'))]
    if len(packages) == 1:
        return packages[0]
    return None
class HtmlStates():
    """Mixin providing loading/error display states for an HTML component."""

    def loading(self, status: bool=True, label: str=None, data: types.JS_DATA_TYPES=None):
        """Show or clear the component's loading state.

        :param status: True to display the loading template; False to restore
            content.
        :param label: Optional loading-template override (stored in options).
        :param data: Content used when restoring; when None, the DOM's
            'data-content' attribute is used instead.
        """
        if (label is not None):
            self.options.templateLoading = label
        if (self.options.templateLoading is None):
            # Fall back to the default one-line loading template.
            self.options.templateLoading = Default_html.TEMPLATE_LOADING_ONE_LINE
        if status:
            return self.build(data, options={'templateMode': 'loading'})
        if (data is None):
            return self.build(self.dom.getAttribute('data-content'))
        return self.build(data)

    def error(self, status: bool=True, label: str=None, data: types.JS_DATA_TYPES=None):
        """Show or clear the component's error state (mirrors `loading`)."""
        if (label is not None):
            self.options.templateError = label
        if (self.options.templateError is None):
            # Fall back to the default one-line error template.
            self.options.templateError = Default_html.TEMPLATE_ERROR_ONE_LINE
        if status:
            return self.build(data, options={'templateMode': 'error'})
        if (data is None):
            return self.build(self.dom.getAttribute('data-content'))
        return self.build(data)
def test_get_final_config_bibtex(data_regression):
    """Supplying bibtex_bibfiles should auto-enable the sphinxcontrib.bibtex extension."""
    cli_config = {'latex_individualpages': False}
    user_config = {'bibtex_bibfiles': ['tmp.bib']}
    # NOTE(review): `metadata` and the `data_regression` fixture are unused here.
    (final_config, metadata) = get_final_config(user_yaml=user_config, cli_config=cli_config, validate=True, raise_on_invalid=True)
    assert ('sphinxcontrib.bibtex' in final_config['extensions'])
def flag_cutpaste_candidates(insertion_from_signature_clusters, deletion_signature_clusters, options):
    """Create interspersed-duplication candidates, flagging likely cut&paste events.

    An insertion cluster whose closest deletion cluster lies within
    `options.del_ins_dup_max_distance` is marked cutpaste=True; otherwise
    cutpaste=False. Returns the list of candidates.
    """
    candidates = []
    for ins_cluster in insertion_from_signature_clusters:
        # Position distance from this insertion to every deletion cluster.
        distances = [(del_index, span_position_distance_clusters(del_cluster, ins_cluster, options.position_distance_normalizer)) for (del_index, del_cluster) in enumerate(deletion_signature_clusters)]
        closest_distance = sorted(distances, key=(lambda pair: pair[1]))[0][1]
        (source_contig, source_start, source_end) = ins_cluster.get_source()
        (dest_contig, dest_start, dest_end) = ins_cluster.get_destination()
        # The two original branches differed only in the cutpaste flag.
        is_cutpaste = (closest_distance <= options.del_ins_dup_max_distance)
        candidates.append(CandidateDuplicationInterspersed(source_contig, source_start, source_end, dest_contig, dest_start, dest_end, ins_cluster.members, ins_cluster.score, ins_cluster.std_span, ins_cluster.std_pos, cutpaste=is_cutpaste))
    return candidates
class OptionSeriesScatterDataDragdrop(Options):
    """Generated accessors for per-point drag/drop options of a scatter series.

    NOTE(review): each scalar option appears as two same-named plain methods
    (getter then setter); as written the second def shadows the first, so
    this rendering presumably lost @property / @x.setter decorators --
    confirm against the code generator's output before relying on the
    getters.
    """
    def draggableX(self):
        return self._config_get(None)
    def draggableX(self, flag: bool):
        self._config(flag, js_type=False)
    def draggableY(self):
        return self._config_get(None)
    def draggableY(self, flag: bool):
        self._config(flag, js_type=False)
    def dragHandle(self) -> 'OptionSeriesScatterDataDragdropDraghandle':
        return self._config_sub_data('dragHandle', OptionSeriesScatterDataDragdropDraghandle)
    def dragMaxX(self):
        return self._config_get(None)
    def dragMaxX(self, num: float):
        self._config(num, js_type=False)
    def dragMaxY(self):
        return self._config_get(None)
    def dragMaxY(self, num: float):
        self._config(num, js_type=False)
    def dragMinX(self):
        return self._config_get(None)
    def dragMinX(self, num: float):
        self._config(num, js_type=False)
    def dragMinY(self):
        return self._config_get(None)
    def dragMinY(self, num: float):
        self._config(num, js_type=False)
    def dragPrecisionX(self):
        return self._config_get(0)
    def dragPrecisionX(self, num: float):
        self._config(num, js_type=False)
    def dragPrecisionY(self):
        return self._config_get(0)
    def dragPrecisionY(self, num: float):
        self._config(num, js_type=False)
    def dragSensitivity(self):
        return self._config_get(2)
    def dragSensitivity(self, num: float):
        self._config(num, js_type=False)
    def groupBy(self):
        return self._config_get(None)
    def groupBy(self, text: str):
        self._config(text, js_type=False)
    def guideBox(self) -> 'OptionSeriesScatterDataDragdropGuidebox':
        return self._config_sub_data('guideBox', OptionSeriesScatterDataDragdropGuidebox)
    def liveRedraw(self):
        return self._config_get(True)
    def liveRedraw(self, flag: bool):
        self._config(flag, js_type=False)
class NCSOConcessionBookmark(models.Model):
    """A user's subscription to price-concession alerts for a CCG, a practice, or all of England.

    NOTE(review): `entity`, `entity_type` and `name` are written as plain
    methods but are read below as attributes (`self.entity is None`,
    `self.entity_type == 'CCG'`, `format(self.entity_cased_name)`), which as
    plain methods would never be None/equal -- this rendering presumably
    lost @property (or cached-property) decorators; confirm against the
    original module before changing callers.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # At most one of pct/practice is set; neither means all of England.
    pct = models.ForeignKey(PCT, null=True, blank=True, on_delete=models.PROTECT)
    practice = models.ForeignKey(Practice, null=True, blank=True, on_delete=models.PROTECT)
    created_at = models.DateTimeField(auto_now_add=True)

    def entity(self):
        """Return the bookmarked organisation (PCT or practice), or None for all-England."""
        if (self.pct is not None):
            return self.pct
        elif (self.practice is not None):
            return self.practice
        else:
            return None

    def entity_type(self):
        """Return 'CCG', 'practice' or 'all_england' for the bookmarked scope."""
        if (self.pct is not None):
            return 'CCG'
        elif (self.practice is not None):
            return 'practice'
        else:
            return 'all_england'

    def entity_cased_name(self):
        """Return a display name for the bookmarked scope."""
        if (self.entity is None):
            return 'the NHS in England'
        else:
            return self.entity.cased_name

    def name(self):
        """Return a human-readable bookmark name."""
        return 'price concessions for {}'.format(self.entity_cased_name)

    def dashboard_url(self):
        """Return the spending-dashboard URL for the bookmarked scope."""
        if (self.entity_type == 'CCG'):
            kwargs = {'entity_code': self.entity.code}
            return reverse('spending_for_one_ccg', kwargs=kwargs)
        elif (self.entity_type == 'practice'):
            kwargs = {'entity_code': self.entity.code}
            return reverse('spending_for_one_practice', kwargs=kwargs)
        else:
            return reverse('spending_for_all_england')

    def topic(self):
        """Alias used by alert e-mail templates."""
        return self.name
class EditForumForm(ForumForm):
    """Form for editing an existing forum."""
    # Carries the forum id through the form round-trip.
    id = HiddenField()

    def __init__(self, forum, *args, **kwargs):
        """Pre-populate the form fields from the forum being edited."""
        self.forum = forum
        kwargs['obj'] = self.forum
        ForumForm.__init__(self, *args, **kwargs)

    def save(self):
        """Build a Forum from the submitted data and persist it.

        The instance is made transient then detached so SQLAlchemy treats it
        as an existing row (issuing an UPDATE keyed on the hidden id) rather
        than inserting a new one.
        """
        data = self.data
        # Strip non-model form fields before constructing the Forum.
        data.pop('submit', None)
        data.pop('csrf_token', None)
        forum = Forum(**data)
        make_transient(forum)
        make_transient_to_detached(forum)
        return forum.save()
class FipaHandler(Handler):
    """Handles FIPA negotiation messages: accepts PROPOSE, rejects the rest."""
    SUPPORTED_PROTOCOL = FipaMessage.protocol_id

    def setup(self) -> None:
        """Set up the handler (nothing to do)."""

    def handle(self, message: Message) -> None:
        """Dispatch an incoming FIPA message to the appropriate sub-handler."""
        fipa_msg = cast(FipaMessage, message)
        fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
        fipa_dialogue = cast(FipaDialogue, fipa_dialogues.update(fipa_msg))
        if (fipa_dialogue is None):
            # Message does not belong to any known dialogue.
            self._handle_unidentified_dialogue(fipa_msg)
            return
        if (fipa_msg.performative == FipaMessage.Performative.PROPOSE):
            self._handle_propose(fipa_msg, fipa_dialogue)
        else:
            self._handle_invalid(fipa_msg, fipa_dialogue)

    def teardown(self) -> None:
        """Tear down the handler (nothing to do)."""

    def _handle_unidentified_dialogue(self, fipa_msg: FipaMessage) -> None:
        """Reply with a default ERROR message for an unidentified dialogue."""
        self.context.logger.info('unidentified dialogue for message={}.'.format(fipa_msg))
        default_dialogues = cast(DefaultDialogues, self.context.default_dialogues)
        (default_msg, _) = default_dialogues.create(counterparty=fipa_msg.sender, performative=DefaultMessage.Performative.ERROR, error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE, error_msg='Invalid dialogue.', error_data={'fipa_message': fipa_msg.encode()})
        self.context.outbox.put_message(message=default_msg)

    def _handle_propose(self, fipa_msg: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """Validate a PROPOSE and request the trade hash from the contract API.

        On a valid proposal, builds the Terms of the trade and asks the
        contract API for the single-swap hash to sign.
        """
        # NOTE(review): this checks every proposal key is *among* the
        # expected keys, not that all expected keys are present -- a
        # proposal missing keys would pass; confirm that is intended.
        if all(((key in ['contract_address', 'from_supply', 'to_supply', 'value', 'trade_nonce', 'token_id']) for key in fipa_msg.proposal.values.keys())):
            self.context.logger.info('received valid PROPOSE from sender={}: proposal={}'.format(fipa_msg.sender[(- 5):], fipa_msg.proposal.values))
            strategy = cast(Strategy, self.context.strategy)
            contract_api_dialogues = cast(ContractApiDialogues, self.context.contract_api_dialogues)
            (contract_api_msg, contract_api_dialogue) = contract_api_dialogues.create(counterparty=LEDGER_API_ADDRESS, performative=ContractApiMessage.Performative.GET_RAW_MESSAGE, ledger_id=strategy.ledger_id, contract_id=strategy.contract_id, contract_address=fipa_msg.proposal.values['contract_address'], callable='get_hash_single', kwargs=ContractApiMessage.Kwargs({'from_address': fipa_msg.sender, 'to_address': self.context.agent_address, 'token_id': int(fipa_msg.proposal.values['token_id']), 'from_supply': int(fipa_msg.proposal.values['from_supply']), 'to_supply': int(fipa_msg.proposal.values['to_supply']), 'value': int(fipa_msg.proposal.values['value']), 'trade_nonce': int(fipa_msg.proposal.values['trade_nonce'])}))
            terms = Terms(ledger_id=strategy.ledger_id, sender_address=self.context.agent_address, counterparty_address=fipa_msg.sender, amount_by_currency_id={}, quantities_by_good_id={str(fipa_msg.proposal.values['token_id']): (int(fipa_msg.proposal.values['from_supply']) - int(fipa_msg.proposal.values['to_supply']))}, is_sender_payable_tx_fee=False, nonce=str(fipa_msg.proposal.values['trade_nonce']))
            contract_api_dialogue = cast(ContractApiDialogue, contract_api_dialogue)
            # Stash the terms and originating dialogue for the reply path.
            contract_api_dialogue.terms = terms
            contract_api_dialogue.associated_fipa_dialogue = fipa_dialogue
            self.context.outbox.put_message(message=contract_api_msg)
            self.context.logger.info('requesting single hash message from contract api...')
        else:
            self.context.logger.info('received invalid PROPOSE from sender={}: proposal={}'.format(fipa_msg.sender[(- 5):], fipa_msg.proposal.values))

    def _handle_invalid(self, fipa_msg: FipaMessage, fipa_dialogue: FipaDialogue) -> None:
        """Log any performative this skill cannot handle."""
        self.context.logger.warning('cannot handle fipa message of performative={} in dialogue={}.'.format(fipa_msg.performative, fipa_dialogue))
class TestMappedUnion(unittest.TestCase):
    """Tests that Union/Optional of mapped traits expose mapped behavior.

    A mapped trait provides a shadow attribute (name + '_') kept in sync via
    post_setattr; these tests verify the shadow exists exactly when one of
    the union's members is mapped.
    """

    def test_no_mapped(self):
        # Union of unmapped traits: no shadow machinery.
        no_mapped = MappedUnion(Int, Float)
        self.assertFalse(no_mapped.is_mapped)
        self.assertIsNone(no_mapped.post_setattr)

    def test_mapped(self):
        # Union including a Map member is itself mapped.
        mapped = MappedUnion(None, Str, Map({'yes': True, 'no': False}))
        self.assertTrue(mapped.is_mapped)
        self.assertIsNotNone(mapped.post_setattr)

    def test_optional(self):
        mapped = Optional(ColorTrait)
        self.assertTrue(mapped.is_mapped)
        self.assertIsNotNone(mapped.post_setattr)

    def test_no_mapped_class(self):
        # On a HasTraits class, an unmapped union never grows a shadow attr.
        mapped_union = UsesMappedUnion()
        no_mapped = mapped_union.trait('no_mapped')
        self.assertFalse(no_mapped.is_mapped)
        self.assertIsNone(no_mapped.post_setattr)
        self.assertFalse(hasattr(mapped_union, 'no_mapped_'))
        mapped_union.no_mapped = 1
        self.assertFalse(hasattr(mapped_union, 'no_mapped_'))

    def test_mapped_class(self):
        # The shadow attribute tracks the mapped value of each assignment.
        mapped_union = UsesMappedUnion()
        mapped = mapped_union.trait('mapped')
        self.assertTrue(mapped.is_mapped)
        self.assertIsNotNone(mapped.post_setattr)
        self.assertIsNone(mapped_union.mapped_)
        mapped_union.mapped = 'yes'
        self.assertTrue(mapped_union.mapped_)
        mapped_union.mapped = 'red'
        self.assertEqual(mapped_union.mapped_, (1.0, 0.0, 0.0, 1.0))
        # Values no member can map pass through unchanged.
        mapped_union.mapped = 'notacolor'
        self.assertEqual(mapped_union.mapped_, 'notacolor')

    def test_optional_class(self):
        mapped_union = UsesMappedUnion()
        optional = mapped_union.trait('optional')
        self.assertTrue(optional.is_mapped)
        self.assertIsNotNone(optional.post_setattr)
        self.assertIsNone(mapped_union.optional_)
        mapped_union.optional = 'red'
        self.assertEqual(mapped_union.optional_, (1.0, 0.0, 0.0, 1.0))
        mapped_union.optional = None
        self.assertIsNone(mapped_union.optional_)

    def test_optional_default_class(self):
        # The shadow reflects the default value before any assignment.
        mapped_union = UsesMappedUnion()
        self.assertEqual(mapped_union.optional_default_, (1.0, 0.0, 0.0, 1.0))
class SimpleValuesBuilderTest(unittest.TestCase):
    """Tests for build_value(string, expected_type, literal_eval).

    The third argument selects the parsing mode: False parses a plain
    human-typed string (comma/semicolon separators), True parses a Python
    literal expression; in both modes values are cast to `expected_type`.
    """

    def test_build_string(self):
        value = build_value('some string', str, False)
        self.assertEqual(value, 'some string')
        value = build_value('"some string"', str, True)
        self.assertEqual(value, 'some string')

    def test_build_int(self):
        value = build_value('1', int, False)
        self.assertEqual(value, 1)
        value = build_value('1', int, True)
        self.assertEqual(value, 1)

    def test_build_custom_type(self):
        # Any callable can serve as the target "type" (acts as a parser).
        def parser(string):
            return string.split('#')
        value = build_value('special#string', parser, False)
        self.assertEqual(value, ['special', 'string'])
        value = build_value('"special#string"', parser, True)
        self.assertEqual(value, ['special', 'string'])

    def test_build_tuple(self):
        value = build_value('foo bar,1,0.5', typing.Tuple[(str, int, float)], False)
        self.assertEqual(value, ('foo bar', 1, 0.5))
        value = build_value('("foo bar",1,0.5)', typing.Tuple[(str, int, float)], True)
        self.assertEqual(value, ('foo bar', 1, 0.5))

    def test_build_tuple_partially_typed(self):
        # typing.Any slots keep the raw (mode-dependent) element type.
        value = build_value('foo bar,1,0.5', typing.Tuple[(str, typing.Any, float)], False)
        self.assertEqual(value, ('foo bar', '1', 0.5))
        value = build_value('("foo bar",1,0.5)', typing.Tuple[(str, typing.Any, float)], True)
        self.assertEqual(value, (str('foo bar'), 1, 0.5))

    def test_build_tuple_untyped(self):
        value = build_value('foo bar,1,0.5', typing.Tuple, False)
        self.assertEqual(value, ('foo bar', '1', '0.5'))
        value = build_value('("foo bar",1,0.5)', typing.Tuple, True)
        self.assertEqual(value, (str('foo bar'), 1, 0.5))

    def test_build_tuple_single_element(self):
        value = build_value('foo bar', typing.Tuple[str], False)
        self.assertEqual(value, ('foo bar',))
        value = build_value('("foo bar",)', typing.Tuple[str], True)
        self.assertEqual(value, (str('foo bar'),))

    def test_build_typed_dict(self):
        value = build_value('a:1;b:2', typing.Mapping[(str, int)], False)
        self.assertEqual(value, {'a': 1, 'b': 2})
        value = build_value('{"a": "1", "b": 2, "c": 3.2}', typing.Mapping[(str, int)], True)
        self.assertEqual(value, {'a': 1, 'b': 2, 'c': 3})

    def test_build_typed_dict_mixed(self):
        # Both ':' and '=' work as key/value separators in plain mode.
        value = build_value('a=1;b=2', typing.Mapping[(str, int)], False)
        self.assertEqual(value, {'a': 1, 'b': 2})
        value = build_value('a:1;b=2', typing.Mapping[(str, int)], False)
        self.assertEqual(value, {'a': 1, 'b': 2})

    def test_build_typed_dict_with_list(self):
        value = build_value('a=1,2,3;b=2', typing.Mapping[(str, str)], False)
        self.assertEqual(value, {'a': '1,2,3', 'b': '2'})
        value = build_value('a=1,2,3;b=2', typing.Mapping[(str, typing.List[int])], False)
        self.assertEqual(value, {'a': [1, 2, 3], 'b': [2]})

    def test_build_partially_typed_dict(self):
        value = build_value('a:1;b:2', typing.Mapping[(typing.Any, int)], False)
        self.assertEqual(value, {'a': 1, 'b': 2})
        value = build_value('{"a": "1", "b": 2, 0: 3}', typing.Mapping[(typing.Any, int)], True)
        self.assertEqual(value, {'a': 1, 'b': 2, 0: 3})

    def test_build_untyped_dict(self):
        value = build_value('a:1;b:2', typing.Mapping, False)
        self.assertEqual(value, {'a': '1', 'b': '2'})
        value = build_value('{"a": 1, "b": 2.5}', typing.Mapping, True)
        self.assertEqual(value, {'a': 1, 'b': 2.5})

    def test_build_typed_list(self):
        value = build_value('1,2,3', typing.List[int], False)
        self.assertEqual(value, [1, 2, 3])
        value = build_value('hello,world,test', typing.List[str], False)
        self.assertEqual(value, ['hello', 'world', 'test'])
        value = build_value('hello', typing.List[str], False)
        self.assertEqual(value, ['hello'])
        value = build_value('["1",2,3.2]', typing.List[int], True)
        self.assertEqual(value, [1, 2, 3])

    def test_build_untyped_list(self):
        value = build_value('1,2,3', typing.List, False)
        self.assertEqual(value, ['1', '2', '3'])
        value = build_value('["1",2,3.5]', typing.List, True)
        self.assertEqual(value, ['1', 2, 3.5])

    def test_build_any_typed_list(self):
        value = build_value('1,2,3', typing.List[typing.Any], False)
        self.assertEqual(value, ['1', '2', '3'])
        value = build_value('["1",2,3.5]', typing.List[typing.Any], True)
        self.assertEqual(value, ['1', 2, 3.5])

    def test_build_whitespaces(self):
        # Surrounding whitespace is ignored in both modes.
        value = build_value(' a : 1 ; b : 2 ', typing.Mapping[(str, int)], False)
        self.assertEqual(value, {'a': 1, 'b': 2})
        value = build_value('{ "a" : 1 , "b" : 2 }', typing.Mapping[(str, int)], True)
        self.assertEqual(value, {'a': 1, 'b': 2})
        value = build_value(' 1 , 2 , 3 ', typing.List[int], False)
        self.assertEqual(value, [1, 2, 3])
        value = build_value('[ 1 , 2 , 3 ]', typing.List[int], True)
        self.assertEqual(value, [1, 2, 3])
        value = build_value(' 1 , 2 , 3 ', typing.Tuple[(int, int, int)], False)
        self.assertEqual(value, (1, 2, 3))
        value = build_value('( 1 , 2 , 3 )', typing.Tuple[(int, int, int)], True)
        self.assertEqual(value, (1, 2, 3))

    def test_build_with_casting(self):
        # The literal_eval argument defaults when omitted.
        value = build_value('a:1;b:2;c:3', typing.Mapping[(str, float)])
        self.assertEqual(value, {'a': 1.0, 'b': 2.0, 'c': 3.0})
        value = build_value('a:1;b:2;c:3', typing.Mapping[(str, str)])
        self.assertEqual(value, {'a': '1', 'b': '2', 'c': '3'})
        self.assertRaises(ValueError, build_value, 'a:1;b:2;c:3', typing.Mapping[(int, int)])

    def test_build_nested_structures(self):
        inpt = '{\n "a": 1,\n "b": {\n "c": [2, 3, 4, [5, 6]]\n }\n }'
        expected = {'a': 1, 'b': {'c': [2, 3, 4, [5, 6]]}}
        expected_type = typing.Any
        self.assertEqual(build_value(inpt, expected_type, True), expected)
        inpt = '{\n "a": [ [1, 2], [3, 4] ],\n "b": [ [10, 20, 30], [40] ]\n }'
        expected = {'a': [[1, 2], [3, 4]], 'b': [[10, 20, 30], [40]]}
        expected_type = typing.Mapping[(str, typing.List[typing.List[int]])]
        self.assertEqual(build_value(inpt, expected_type, True), expected)

    def test_build_tuple_error(self):
        # Arity mismatches against a fixed-size Tuple raise ValueError.
        self.assertRaises(ValueError, build_value, 'foo bar,1,0.5,extra!', typing.Tuple[(str, int, float)], False)
        self.assertRaises(ValueError, build_value, '("foo bar", 1, 0.5, "extra!")', typing.Tuple[(str, int, float)], True)
        self.assertRaises(ValueError, build_value, 'foo bar', typing.Tuple[(str, int, float)], False)
        self.assertRaises(ValueError, build_value, '("foo bar",)', typing.Tuple[(str, int, float)], True)
class MicrosoftGraphOAuth2(BaseOAuth2[Dict[(str, Any)]]):
    """OAuth2 client for Microsoft Graph / the Microsoft identity platform.

    Uses the tenant-scoped v2.0 authorize and token endpoints; the token
    endpoint also serves as the refresh endpoint.
    """
    display_name = 'Microsoft'
    logo_svg = LOGO_SVG

    def __init__(self, client_id: str, client_secret: str, tenant: str='common', scopes: Optional[List[str]]=BASE_SCOPES, name: str='microsoft'):
        """Configure endpoints for the given Azure AD `tenant` ('common' by default)."""
        access_token_endpoint = ACCESS_TOKEN_ENDPOINT.format(tenant=tenant)
        super().__init__(client_id, client_secret, AUTHORIZE_ENDPOINT.format(tenant=tenant), access_token_endpoint, access_token_endpoint, name=name, base_scopes=scopes)

    def get_authorization_url(self, redirect_uri, state=None, scope=None, extras_params=None):
        """Build the authorization URL, forcing response_mode=query.

        Microsoft requires the 'query' response mode for the auth-code flow.
        """
        if (extras_params is None):
            extras_params = {}
        extras_params['response_mode'] = 'query'
        return super().get_authorization_url(redirect_uri, state=state, scope=scope, extras_params=extras_params)

    async def get_id_email(self, token: str) -> Tuple[(str, Optional[str])]:
        """Fetch (id, userPrincipalName) for the user owning `token`.

        Raises:
            GetIdEmailError: when the Graph profile request fails (HTTP >= 400).
        """
        # BUG FIX: `self.get_` was a truncated reference; use the BaseOAuth2
        # helper that yields a configured async HTTP client.
        async with self.get_httpx_client() as client:
            response = (await client.get(PROFILE_ENDPOINT, headers={'Authorization': f'Bearer {token}'}))
            if (response.status_code >= 400):
                raise GetIdEmailError(response.json())
            data = cast(Dict[(str, Any)], response.json())
            return (data['id'], data['userPrincipalName'])
def get_remote_file_url(file_path: str, full_name: str, repo_url: str) -> str:
    """Build a browsable URL for *file_path* hosted on the Git service behind *repo_url*.

    Raises ValueError when no known service matches the repository URL.
    """
    # A repo URL that exists on disk is a local checkout; its template only
    # needs the file path.
    if Path(repo_url).exists():
        return GitService.LOCAL.file_url.format(file_path=file_path)
    # Otherwise match the remote host against each known service.
    matched = next((svc for svc in GitService if svc.host in repo_url), None)
    if matched is not None:
        return matched.file_url.format(full_name=full_name, file_path=file_path)
    raise ValueError('Unsupported Git service for URL generation.')
class TestDSLMemoizedBeforeAttribute(TestDSLBase):
    """Tests for the DSL's memoize-before attributes (created eagerly in a before hook).

    NOTE(review): the bare `_before` / `_context` expression statements below
    look like residue of stripped decorators (e.g. `@context.memoize_before`,
    `@context.sub_context`) — confirm against the original file before relying
    on this code as-is.
    """

    def test_memoize_before_attribute(self):
        """A memoized-before attribute is created once and reused across reads."""
        mock = Mock()

        def top(context):
            value = 1
            memoized = []  # records creator invocations
            _before
            def attribute_name(self):
                memoized.append(True)
                return (value + 1)

            def attribute_is_memoized(self):
                # The creator ran, and both reads see the same memoized value.
                assert memoized
                mock(self.attribute_name)
                mock(self.attribute_name)
        self.run_first_context_first_example()
        # Both reads observed the single memoized value (1 + 1).
        self.assertEqual(mock.mock_calls, [call(2), call(2)])

    def test_memoize_before_attribute_as_lambda(self):
        """memoize_before also accepts a plain callable instead of a decorated def."""
        mock = Mock()

        def top(context):
            value = 1
            memoized = []  # records creator invocations

            def creator(self):
                memoized.append(True)
                return (value + 1)
            context.memoize_before('attribute_name', creator)

            def attribute_is_memoized(self):
                assert memoized
                mock(self.attribute_name)
                mock(self.attribute_name)
        self.run_first_context_first_example()
        self.assertEqual(mock.mock_calls, [call(2), call(2)])

    def test_cant_call_memoize_before_functions_directly(self):
        """Calling the decorated creator directly from test code must fail."""
        with self.assertRaisesRegex(BaseException, 'This function should not be called outside test code.'):
            def top(context):
                _before
                def attribute(self, msg):
                    pass
                attribute()

    def test_cant_create_two_memoize_before_with_same_name(self):
        """Registering the same attribute name twice in one context is an error."""
        with self.assertRaisesRegex(AttributeError, 'Attribute "name" already set for context "top"'):
            def top(context):
                _before
                def name(self, msg):
                    pass
                _before
                def name(self, msg):
                    pass

    def test_cant_do_composition(self):
        """An inner context's attribute fully shadows the outer one (no composition)."""
        executed = []  # set only if the *outer* creator runs

        def top(context):
            _before
            async def attribute(self):
                executed.append(True)
                return 'top'
            _before
            async def trigger_attribute(self):
                trigger_attribute = {}
                trigger_attribute['attribute'] = self.attribute
                return trigger_attribute
            _context
            def inner(context):
                _before
                async def attribute(self):
                    return 'inner'

                async def can_compose(self):
                    # The inner definition wins entirely.
                    assert (self.trigger_attribute['attribute'] == 'inner')
        self.run_all_examples()
        # The outer creator never executed.
        self.assertFalse(bool(executed))
def div_cc_c(gen, t, srcs):
    """Emit IR for complex division srcs[0] / srcs[1].

    Uses the standard formula
    (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / |c+di|^2,
    emitting operations in the same order as the surrounding codegen expects.
    """
    num, den = srcs[0], srcs[1]
    # Shared denominator: squared magnitude of the divisor.
    mag2 = cmag_c_f(gen, 'mag', [den])
    prod_ac = gen.emit_binop('*', [num.re, den.re], Float)
    prod_bd = gen.emit_binop('*', [num.im, den.im], Float)
    prod_bc = gen.emit_binop('*', [num.im, den.re], Float)
    prod_ad = gen.emit_binop('*', [num.re, den.im], Float)
    real_num = gen.emit_binop('+', [prod_ac, prod_bd], Float)
    imag_num = gen.emit_binop('-', [prod_bc, prod_ad], Float)
    return ComplexArg(
        gen.emit_binop('/', [real_num, mag2], Float),
        gen.emit_binop('/', [imag_num, mag2], Float),
    )
class WatchDog(multiprocessing.Process):
    """Process that waits for periodic `ping` events and signals shutdown when
    they stop arriving within `timeout` seconds."""

    def __init__(self, timeout=30):
        multiprocessing.Process.__init__(self)
        self.timeout = timeout  # seconds to wait for each ping

    def run(self):
        try:
            # Keep consuming pings while they arrive in time; wait() returning
            # False means the client went silent.
            while ping.wait(self.timeout):
                ping.clear()
            log.info('client has gone', 'simul')
        except KeyboardInterrupt:
            pass
        finally:
            # Always signal shutdown, whatever ended the loop.
            shutdown.set()
class GameRunner(Greenlet):
    """Abstract driver for a running game session.

    Every method raises GameError('Abstract'); concrete runners must override
    all of them.
    """

    def _run(self) -> None:
        """Greenlet entry point: run the game loop."""
        raise GameError('Abstract')

    def user_input(self, entities: Sequence[Any], inputlet: Inputlet, timeout: int=25, type: str='single', trans: Optional[InputTransaction]=None):
        """Collect input from the given entities for one inputlet."""
        raise GameError('Abstract')

    def is_aborted(self) -> bool:
        """Whether the game was aborted."""
        raise GameError('Abstract')

    def is_dropped(self, p: Player) -> bool:
        """Whether player *p* has disconnected."""
        raise GameError('Abstract')

    def pause(self, time: float) -> None:
        """Pause the game for *time* seconds."""
        raise GameError('Abstract')

    def get_side(self) -> str:
        """Return which side (client/server) this runner executes on."""
        raise GameError('Abstract')
class GroupTaggerPlugin():
    """Exaile plugin: a panel plus menu items for managing grouping/category
    tags on tracks.

    Lifecycle: ``enable`` -> ``on_gui_loaded`` -> ... -> ``disable``.
    """

    def get_preferences_pane(self):
        """Return the module providing the plugin's preferences pane."""
        return gt_prefs

    def enable(self, exaile):
        """Plugin entry point; real setup is deferred to on_gui_loaded."""
        self.exaile = exaile

    def on_gui_loaded(self):
        """Build the panel, register providers/menus, and hook playback events."""
        self.track = None       # track currently shown in the panel
        self.tag_dialog = None  # lazily-created AllTagsDialog
        migrate_settings()
        self.panel = gt_widgets.GroupTaggerPanel(self.exaile)
        self.panel.show_all()
        self.setup_panel_font(False)
        self.panel.tagger.view.connect('category-changed', self.on_category_change)
        self.panel.tagger.view.connect('category-edited', self.on_category_edited)
        self.panel.tagger.view.connect('group-changed', self.on_group_change)
        providers.register('main-panel', self.panel)
        event.add_ui_callback(self.on_playback_track_start, 'playback_track_start')
        event.add_ui_callback(self.on_playlist_cursor_changed, 'playlist_cursor_changed')
        event.add_ui_callback(self.on_plugin_options_set, 'plugin_grouptagger_option_set')
        # Tools menu: collection-wide bulk operations.
        tools_submenu = menu.Menu(None, context_func=(lambda p: self.exaile))
        tools_submenu.add_item(menu.simple_menu_item('gt_get_tags', [], _('_Get all tags from collection'), callback=self.on_get_tags_menu))
        tools_submenu.add_item(menu.simple_menu_item('gt_import', [], _('_Import tags from directory'), callback=self.on_import_tags))
        tools_submenu.add_item(menu.simple_menu_item('gt_rename', [], _('_Mass rename/delete tags'), callback=self.on_mass_rename))
        tools_submenu.add_item(menu.simple_menu_item('gt_export', [], _('E_xport collection tags to JSON'), callback=self.on_export_tags))
        self.tools_menuitem = menu.simple_menu_item('grouptagger', ['plugin-sep'], _('_GroupTagger'), submenu=tools_submenu)
        providers.register('menubar-tools-menu', self.tools_menuitem)
        # Playlist context menu: operations on the current selection.
        self.provider_items = []
        track_subitem = menu.Menu(None, inherit_context=True)
        track_subitem.add_item(menu.simple_menu_item('gt_search_all', [], _('Show tracks with all tags'), callback=self.on_playlist_context_select_all_menu, callback_args=[self.exaile]))
        track_subitem.add_item(menu.simple_menu_item('gt_search_custom', ['gt_search_all'], _('Show tracks with tags (custom)'), callback=self.on_playlist_context_select_custom_menu, callback_args=[self.exaile]))
        # Multi-track add/remove only shown when more than one track is selected.
        tag_cond_fn = (lambda n, p, c: (c['selection-count'] > 1))
        track_subitem.add_item(menu.simple_menu_item('gt_tag_add_multi', ['gt_search_custom'], _('Add tags to all'), callback=self.on_add_tags, condition_fn=tag_cond_fn, callback_args=[self.exaile]))
        track_subitem.add_item(menu.simple_menu_item('gt_tag_rm_multi', ['gt_tag_add_multi'], _('Remove tags from all'), callback=self.on_rm_tags, condition_fn=tag_cond_fn, callback_args=[self.exaile]))
        self.provider_items.append(menu.simple_menu_item('grouptagger', ['rating'], _('GroupTagger'), submenu=track_subitem))
        for item in self.provider_items:
            providers.register('playlist-context-menu', item)
        # Seed the panel with the currently playing track, if any.
        if player.PLAYER.is_playing():
            self.set_display_track(player.PLAYER.current)
        else:
            self.panel.tagger.set_categories([], get_group_categories())

    def disable(self, exaile):
        """Tear down menus, dialogs, event callbacks and the panel.

        NOTE(review): assumes on_gui_loaded already ran (tools_menuitem, panel,
        etc. exist) — confirm enable/disable ordering guarantees.
        """
        if self.tools_menuitem:
            providers.unregister('menubar-tools-menu', self.tools_menuitem)
            for item in self.provider_items:
                providers.unregister('playlist-context-menu', item)
                providers.unregister('track-panel-menu', item)
            self.tools_menuitem = None
            self.provider_items = []
        if self.tag_dialog:
            self.tag_dialog.destroy()
            self.tag_dialog = None
        event.remove_callback(self.on_playback_track_start, 'playback_track_start')
        event.remove_callback(self.on_playlist_cursor_changed, 'playlist_cursor_changed')
        event.remove_callback(self.on_plugin_options_set, 'plugin_grouptagger_option_set')
        providers.unregister('main-panel', self.panel)

    def setup_panel_font(self, always_set):
        """Apply the configured panel font; with always_set, fall back to the
        system default font when no option is stored."""
        font = settings.get_option('plugin/grouptagger/panel_font', None)
        if (font is None):
            if (not always_set):
                return
            font = gt_prefs._get_system_default_font()
        else:
            font = Pango.FontDescription(font)
        self.panel.tagger.set_font(font)

    def on_export_tags(self, widget, name, parent, exaile):
        """Menu handler: export all collection tags to JSON."""
        gt_export.export_tags(exaile)

    def on_get_tags_menu(self, widget, name, parent, exaile):
        """Menu handler: show the (singleton) dialog listing all tags."""
        if (self.tag_dialog is None):
            self.tag_dialog = gt_widgets.AllTagsDialog(exaile, self.panel.tagger.add_groups)
            self.tag_dialog.connect('delete-event', self.on_get_tags_menu_window_deleted)
        self.tag_dialog.show_all()

    def on_get_tags_menu_window_deleted(self, *args):
        """Forget the dialog when the user closes it."""
        self.tag_dialog = None

    def on_import_tags(self, widget, name, parent, exaile):
        """Menu handler: import tags from a directory."""
        gt_import.import_tags(exaile)

    def on_mass_rename(self, widget, name, parent, exaile):
        """Menu handler: mass rename/delete tags."""
        gt_mass.mass_rename(exaile)

    def _add_rm_multi_tags(self, add, context, exaile):
        """Show the add/remove dialog and apply the chosen groups to each
        selected track (union when add is True, difference otherwise)."""
        tracks = context['selected-tracks']
        dialog = gt_widgets.GroupTaggerAddRemoveDialog(add, tracks, exaile)
        if add:
            dialog.tagger.set_categories([], get_group_categories())
        else:
            # Offer only groups actually present on the selection.
            groups = set()
            for track in tracks:
                groups |= get_track_groups(track)
            dialog.tagger.add_groups(groups)
        dialog.set_size_request(250, 500)
        retval = dialog.run()
        groups = {}
        if (retval == Gtk.ResponseType.APPLY):
            groups = set(dialog.get_active())
        dialog.destroy()
        if (len(groups) > 0):
            for track in tracks:
                existing = get_track_groups(track)
                if add:
                    set_track_groups(track, (existing | groups))
                else:
                    set_track_groups(track, (existing - groups))

    def on_add_tags(self, widget, name, parent, context, exaile):
        """Context-menu handler: add tags to all selected tracks."""
        self._add_rm_multi_tags(True, context, exaile)

    def on_rm_tags(self, widget, name, parent, context, exaile):
        """Context-menu handler: remove tags from all selected tracks."""
        self._add_rm_multi_tags(False, context, exaile)

    def on_playback_track_start(self, type, player, track):
        """Event handler: show the newly playing track in the panel."""
        self.set_display_track(track)

    def on_playlist_context_select_all_menu(self, menu, display_name, playlist_view, context, exaile):
        """Context-menu handler: playlist of tracks having ALL selected tags."""
        tracks = context['selected-tracks']
        groups = set()
        for track in tracks:
            groups |= get_track_groups(track)
        if (len(groups) > 0):
            create_all_search_playlist(groups, exaile)
        else:
            dialogs.error(None, _('No categorization tags found in selected tracks'))

    def on_playlist_context_select_custom_menu(self, menu, display_name, playlist_view, context, exaile):
        """Context-menu handler: playlist built from a custom tag selection."""
        tracks = context['selected-tracks']
        groups = set()
        for track in tracks:
            groups |= get_track_groups(track)
        if (len(groups) > 0):
            create_custom_search_playlist(groups, exaile)
        else:
            dialogs.error(None, _('No categorization tags found in selected tracks'))

    def on_playlist_cursor_changed(self, type, playlist_view, context):
        """Event handler: mirror a single-track selection into the panel."""
        tracks = context['selected-tracks']
        if (len(tracks) == 1):
            self.set_display_track(tracks[0])

    def set_display_track(self, track, force_update=False):
        """Show *track* in the panel; no-op when it is already shown unless forced."""
        if ((self.track == track) and (not force_update)):
            return
        self.track = track
        track_groups = get_track_groups(track)
        self.panel.tagger.view.show_click_column()
        self.panel.tagger.set_categories(track_groups, get_group_categories())
        self.panel.tagger.set_track_info(track)

    def on_category_change(self, view, action, category):
        """Persist category add/delete/collapse/expand/update from the view."""
        categories = get_group_categories()
        if (action == gt_widgets.category_change.added):
            categories.setdefault(category, [True, []])
        elif (action == gt_widgets.category_change.deleted):
            del categories[category]
        elif (action == gt_widgets.category_change.collapsed):
            categories[category][0] = False  # [expanded?, groups]
        elif (action == gt_widgets.category_change.expanded):
            categories[category][0] = True
        elif (action == gt_widgets.category_change.updated):
            v = categories.setdefault(category, [True, []])
            v[1] = view.get_model().get_category_groups(category)
        set_group_categories(categories)

    def on_category_edited(self, view, old_category, new_category):
        """Persist a category rename."""
        categories = get_group_categories()
        categories[new_category] = categories.pop(old_category)
        set_group_categories(categories)

    def on_group_change(self, view, action, value):
        """Write the active groups to the displayed track; refresh on failure."""
        if (self.track is not None):
            groups = view.get_model().iter_active()
            if (not set_track_groups(self.track, groups)):
                self.set_display_track(self.track, force_update=True)

    def on_plugin_options_set(self, evtype, settings, option):
        """React to option changes: font or the tag-name option."""
        if (option == 'plugin/grouptagger/panel_font'):
            self.setup_panel_font(True)
        elif (option == tagname_option):
            if (self.track is not None):
                self.set_display_track(self.track, True)
class ImageStoreView(APIView):
    """API view over remote image repositories: list them (GET) and refresh
    their cached image metadata (PUT)."""
    dc_bound = False
    HTTP_TIMEOUT = 20  # seconds for the metadata HTTP request
    # Maximum metadata response body size in bytes. The original assignment was
    # truncated in this file ("HTTP_MAX_SIZE ="), which is a syntax error;
    # 10 MiB is a conservative cap — TODO(review): confirm the historical value.
    HTTP_MAX_SIZE = 10 * 1024 * 1024
    LOCK_KEY = 'imagestore-update'  # task lock shared by all repository updates

    def __init__(self, request, name, data, many=False):
        """Resolve either one named repository or the collection of repositories.

        Raises ObjectNotFound when `name` is given but unknown.
        """
        super(ImageStoreView, self).__init__(request)
        self.data = data
        self.name = name
        self.many = many
        repositories = ImageStore.get_repositories(include_image_vm=request.user.is_staff)
        if name:
            assert (not many)
            try:
                self.repo = ImageStore(name, url=repositories[name])
            except KeyError:
                raise ObjectNotFound(model=ImageStore)
        else:
            assert many
            # PUT (and detailed GETs) need full ImageStore objects; a plain
            # listing only needs repository names.
            if ((request.method == 'PUT') or (self.full or self.extended)):
                self.repo = ImageStore.all(repositories)
            else:
                self.repo = repositories.keys()

    # Restored @classmethod: the method receives `cls`, not an instance, and is
    # invoked via cls._update() from update() below.
    @classmethod
    def _update(cls, task_id, repo):
        """Fetch image metadata for one repository and persist success/failure."""
        err = res = images = None
        repo_url = repo.get_images_url()
        logger.info('Downloading images from image repository %s (%s)', repo.name, repo_url)
        try:
            curl = HttpClient(repo_url)
            res = curl.get(timeout=cls.HTTP_TIMEOUT, max_size=cls.HTTP_MAX_SIZE, allow_redirects=True)
            images = res.json()
        except RequestException as exc:
            err = ('%s' % exc)
        except ValueError as exc:
            err = ('Image server response could not be decoded (%s)' % exc)
        else:
            if (not isinstance(images, list)):
                err = ('Unexpected output from image server (%s)' % type(images))
        if err:
            status = FAILURE
            msg = err
            logger.error(err)
            repo.error = err
            repo.save()
        else:
            status = SUCCESS
            img_count = len(images)
            # NOTE(review): elapsed.microseconds is only the sub-second part of
            # the timedelta although the message says seconds — confirm intent.
            msg = (u'Downloaded metadata for %d images from image repository %s in %d s' % (img_count, repo.name, res.elapsed.microseconds))
            logger.info(msg)
            repo.image_count = img_count
            repo.last_update = timezone.now()
            repo.error = None
            repo.save(images=images)
            del images
        task_log(task_id, LOG_IMAGE_STORE_UPDATE, obj=repo, task_status=status, detail=msg, update_user_tasks=False)
        return repo

    # Restored @classmethod: called as self.update(...) but only uses class state.
    @classmethod
    def update(cls, task_id, repo):
        """Run _update() for one repository, serialized behind a task lock.

        Raises TaskIsAlreadyRunning when another update holds the lock.
        """
        lock = TaskLock(cls.LOCK_KEY, desc=('Image repository %s update' % repo.name))
        if (not lock.acquire(task_id, timeout=60, save_reverse=False)):
            raise TaskIsAlreadyRunning
        try:
            return cls._update(task_id, repo)
        finally:
            lock.delete(fail_silently=True, delete_reverse=False)

    def get(self):
        """Return the repository (or listing) as a success response."""
        return SuccessTaskResponse(self.request, self.repo, dc_bound=self.dc_bound)

    def put(self):
        """Refresh one or all repositories; any repository error yields a
        failure response."""
        request = self.request
        task_id = task_id_from_request(request, dummy=True, tt=TT_DUMMY, tg=TG_DC_UNBOUND)
        if self.many:
            res = [self.update(task_id, repo) for repo in self.repo]
            err = any((bool(repo['error']) for repo in res))
        else:
            res = self.update(task_id, self.repo)
            err = bool(res.error)
        if err:
            response_class = FailureTaskResponse
        else:
            response_class = SuccessTaskResponse
        return response_class(self.request, res, task_id=task_id, dc_bound=self.dc_bound)
class TestSyncFedShuffleServers():
    """End-to-end tests for SyncFedShuffleServer under the four combinations of
    user sampling (uniform / importance) and aggregation (weighted average / sum).

    Expected model values are hard-coded regression targets for the fixed RNG
    seeds used below.
    """

    def _fake_data(self, num_batches=3, batch_size=2, rng: Optional[torch.Generator]=None):
        """Build a small random dataset wrapped in the test harness user-data type."""
        dataset = [torch.rand(batch_size, 2, generator=rng) for _ in range(num_batches)]
        dataset = utils.DatasetFromList(dataset)
        return utils.DummyUserData(dataset, utils.SampleNet(utils.TwoFC()))

    def _fake_client(self, dataset, client_lr):
        """Create a FedShuffle client (2 local epochs, fixed batch order)."""
        optim_config = LocalOptimizerSGDConfig(lr=client_lr)
        dataset = (dataset or self._fake_data())
        clnt = FedShuffleClient(dataset=dataset, **OmegaConf.structured(FedShuffleClientConfig(optimizer=optim_config, shuffle_batch_order=False, epochs=2)))
        return clnt

    def _perform_fedshuffle_training(self, server, expected_model, client_lr):
        """Run 5 FL rounds with 4 clients of unequal dataset sizes and assert the
        resulting global model matches *expected_model*."""
        rng = torch.Generator().manual_seed(1234)  # deterministic client data
        clients = []
        train_dataset = []
        num_batches = [3, 5, 4, 6]  # unequal sizes exercise the weighting logic
        for batches in num_batches:
            dataset = self._fake_data(batches, 2, rng)
            train_dataset.append(dataset.data.ds)
            clients.append(self._fake_client(dataset, client_lr))
        data_provider = FLDataProviderFromList(train_dataset, train_dataset, train_dataset, server.global_model)
        for _ in range(5):
            server.init_round()
            # Select 2 of the 4 clients each round.
            selected_clients = server.select_clients_for_training(len(num_batches), 2, data_provider)
            broadcast_message = server.broadcast_message_to_clients(selected_clients)
            for clnt in selected_clients:
                clnt = clients[clnt]
                (delta, weight) = clnt.generate_local_update(broadcast_message)
                server.receive_update_from_client(Message(delta, weight))
            server.step()
        error_msg = utils.verify_models_equivalent_after_training(server.global_model, expected_model)
        assertEmpty(error_msg, error_msg)

    def test_fedshuffle_uniform_sampling_weighted_average_training(self):
        """Uniform client sampling + weighted-average aggregation."""
        server_model = utils.SampleNet(utils.linear_model(4.0))
        server = instantiate(SyncFedShuffleServerConfig(server_optimizer=FedAvgWithLROptimizerConfig(lr=2.0, momentum=0.9), active_user_selector=UniformlyRandomActiveUserSelectorConfig(user_selector_seed=34), aggregation_type=AggregationType.WEIGHTED_AVERAGE), global_model=server_model)
        expected_model = utils.linear_model(0.0)
        expected_model.fc1.weight = nn.Parameter(torch.tensor([[3., 3.5928464]]))
        expected_model.fc1.bias = nn.Parameter(torch.tensor([3.]))
        self._perform_fedshuffle_training(server, expected_model, client_lr=0.03)

    def test_fedshuffle_uniform_sampling_weighted_sum_training(self):
        """Uniform client sampling + weighted-sum aggregation."""
        server_model = utils.SampleNet(utils.linear_model(4.0))
        server = instantiate(SyncFedShuffleServerConfig(server_optimizer=FedAvgWithLROptimizerConfig(lr=0.2, momentum=0.9), active_user_selector=UniformlyRandomActiveUserSelectorConfig(user_selector_seed=34), aggregation_type=AggregationType.WEIGHTED_SUM), global_model=server_model)
        expected_model = utils.linear_model(0.0)
        expected_model.fc1.weight = nn.Parameter(torch.tensor([[3., 3.]]))
        expected_model.fc1.bias = nn.Parameter(torch.tensor([2.3984]))
        self._perform_fedshuffle_training(server, expected_model, client_lr=0.03)

    def test_fedshuffle_importance_sampling_weighted_average_training(self):
        """Importance sampling + weighted-average aggregation."""
        server_model = utils.SampleNet(utils.linear_model(4.0))
        server = instantiate(SyncFedShuffleServerConfig(server_optimizer=FedAvgWithLROptimizerConfig(lr=0.2, momentum=0.9), active_user_selector=ImportanceSamplingActiveUserSelectorConfig(user_selector_seed=34), aggregation_type=AggregationType.WEIGHTED_AVERAGE), global_model=server_model)
        expected_model = utils.linear_model(0.0)
        expected_model.fc1.weight = nn.Parameter(torch.tensor([[3., 3.]]))
        expected_model.fc1.bias = nn.Parameter(torch.tensor([3.]))
        self._perform_fedshuffle_training(server, expected_model, client_lr=0.3)

    def test_fedshuffle_importance_sampling_weighted_sum_training(self):
        """Importance sampling + weighted-sum aggregation."""
        server_model = utils.SampleNet(utils.linear_model(4.0))
        server = instantiate(SyncFedShuffleServerConfig(server_optimizer=FedAvgWithLROptimizerConfig(lr=0.2, momentum=0.9), active_user_selector=ImportanceSamplingActiveUserSelectorConfig(user_selector_seed=34), aggregation_type=AggregationType.WEIGHTED_SUM), global_model=server_model)
        expected_model = utils.linear_model(0.0)
        expected_model.fc1.weight = nn.Parameter(torch.tensor([[2., 3.]]))
        expected_model.fc1.bias = nn.Parameter(torch.tensor([2.042368]))
        self._perform_fedshuffle_training(server, expected_model, client_lr=0.3)
class Perm021FCCCRTestCase(unittest.TestCase):
    """Tests for the perm021fc_ccr op across dtypes, checked against a PyTorch
    linear-layer reference."""

    def _test_perm021fc_ccr(self, test_name='perm021fc_ccr', dtype='float16'):
        """Compile Y = linear(permute021(X), W) and compare to torch.nn.functional.linear."""
        B = 1024
        M = 128
        K = 745
        N = 30
        target = detect_target()
        # X is laid out [B, K, M]; the op consumes it as if permuted to [B, M, K].
        X = Tensor(shape=[B, K, M], dtype=dtype, name='input_0', is_input=True)
        W = Tensor(shape=[1, N, K], dtype=dtype, name='input_1', is_input=True)
        OP = ops.perm021fc_ccr()
        Y = OP(X, W)
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        module = compile_model(Y, target, './tmp', test_name)
        # Reference: permute, flatten batch, apply linear, reshape back.
        X_pt = get_random_torch_tensor([B, K, M], dtype=dtype)
        W_pt = get_random_torch_tensor([N, K], dtype=dtype)
        XT = X_pt.permute(0, 2, 1)
        XT = torch.reshape(XT, ((- 1), K))
        Y_pt = torch.nn.functional.linear(XT, W_pt)
        Y_pt = torch.reshape(Y_pt, (B, M, N))
        y = torch.empty_like(Y_pt)
        module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt.unsqueeze(0)}, [y])
        self.assertTrue(torch.allclose(Y_pt, y, atol=0.1, rtol=0.1))

    def test_perm021fc_ccr_fp16(self):
        self._test_perm021fc_ccr(test_name='perm021fc_ccr_fp16', dtype='float16')

    # NOTE(review): the name suggests this requires SM80; a skip/filter
    # decorator may have been lost from this file — confirm against upstream.
    def test_perm021fc_ccr_float32_sm80(self):
        self._test_perm021fc_ccr(test_name='perm021fc_ccr_fp32', dtype='float32')

    def test_perm021fc_ccr_bf16(self):
        self._test_perm021fc_ccr(test_name='perm021fc_ccr_bf16', dtype='bfloat16')
class ResourceScanner(base_scanner.BaseScanner):
    """Scanner that evaluates inventory resources against resource rules."""

    def __init__(self, global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules):
        """Build the rules engine and its rule book from the given rules file."""
        super(ResourceScanner, self).__init__(global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules)
        self.rules_engine = resource_rules_engine.ResourceRulesEngine(rules_file_path=self.rules, snapshot_timestamp=self.snapshot_timestamp)
        self.rules_engine.build_rule_book(self.global_configs)

    def run(self):
        """Retrieve resources, find violations, and write results."""
        resources = self._retrieve()
        all_violations = self._find_violations(resources)
        self._output_results(all_violations)

    def _retrieve(self):
        """Load all resources of the types the rule book cares about."""
        resources = []
        resource_types = self.rules_engine.rule_book.get_applicable_resource_types()
        (scoped_session, data_access) = self.service_config.model_manager.get(self.model_name)
        with scoped_session as session:
            for resource_type in resource_types:
                for resource in data_access.scanner_iter(session, resource_type):
                    resources.append(resource_util.create_resource_from_db_row(resource))
        return resources

    def _find_violations(self, resources):
        """Run the rules engine over *resources* and return its violations."""
        LOGGER.info('Finding Resource violations...')
        violations = self.rules_engine.find_violations(resources)
        LOGGER.debug(violations)
        return violations

    def _output_results(self, all_violations):
        """Flatten violations into dicts and persist them."""
        all_violations = self._flatten_violations(all_violations)
        self._output_results_to_db(all_violations)

    # Restored @staticmethod: the method takes no `self` but is invoked as
    # self._flatten_violations(...); without the decorator that call binds the
    # instance to `violations` and raises TypeError.
    @staticmethod
    def _flatten_violations(violations):
        """Yield each violation as a flat dict suitable for DB storage."""
        for violation in violations:
            (yield {'resource_id': violation.resource_id, 'resource_type': violation.resource_type, 'full_name': violation.full_name, 'rule_index': violation.rule_index, 'rule_name': violation.rule_name, 'violation_type': violation.violation_type, 'violation_data': violation.violation_data, 'resource_data': violation.resource_data})
class MySource(Node):
    """Graph node that publishes NUM_MESSAGES messages on topic A, paced at
    roughly SAMPLE_RATE messages per second."""
    A = Topic(MyMessage1)

    def __init__(self) -> None:
        super(MySource, self).__init__()

    # NOTE(review): this bare `(A)` expression looks like residue of a stripped
    # decorator on `source` (e.g. `@publisher(A)`) — confirm against the
    # original file; as written it is a no-op attribute reference.
    (A)
    async def source(self) -> AsyncPublisher:
        # Yield (topic, message) pairs with increasing int_field values.
        for i in range(NUM_MESSAGES):
            (yield (self.A, MyMessage1(int_field=i)))
            (await asyncio.sleep((1 / SAMPLE_RATE)))
class WindowsEnableNewAdapterDisrupter(Disrupter):
    """Disrupter that disables the primary network adapter during setup and
    re-enables it as the disruption."""

    def __init__(self, device, parameters):
        super().__init__(device, parameters)
        self._restrict_parameters(must_disrupt=True, must_restore=False)
        self._primary_adapter = self._find_primary_adapter()

    def _find_primary_adapter(self):
        """Look up and log the device's primary network adapter."""
        adapter = self._device['network_tool'].primary_adapter()
        L.info('Primary network adapter is {}'.format(adapter.name()))
        return adapter

    def setup(self):
        """Take the primary adapter down before the test runs."""
        L.describe('Disable the primary network adapter')
        self._primary_adapter.disable()
        L.info('Disabled adapter {}'.format(self._primary_adapter.name()))

    def disrupt(self):
        """Bring the primary adapter back up."""
        L.describe('Re-enable primary network adapter')
        self._primary_adapter.enable()

    def teardown(self):
        """Ensure the adapter is re-enabled, then defer to the base teardown."""
        if self._primary_adapter:
            self.disrupt()
        super().teardown()
def db_migrate_speaker_doc(db):
    """Backfill the sessions.slides column with a JSON document list built from
    each session's title and slides_url.

    NOTE(review): the document list accumulates across rows, so row N is
    written with the docs of rows 1..N; this matches the original code —
    confirm the cumulative behavior is intended.
    """
    conn = db.engine.connect()
    cursor = conn.execute('SELECT * from sessions')
    slide_docs = []  # renamed from `dict`, which shadowed the builtin
    for row in cursor:
        slide_docs.append({'name': row['title'], 'link': row['slides_url']})
        # WARNING(review): values are interpolated directly into the SQL
        # string; a title containing a quote breaks the statement (or injects).
        # Prefer bound parameters via the DB layer.
        data = f"'{json.dumps(slide_docs)}'"
        row_id = row['id']  # renamed from `id`, which shadowed the builtin
        conn.execute(f'UPDATE sessions SET slides = {data} WHERE id = {row_id}')
class OptionPlotoptionsPyramid3dSonificationDefaultspeechoptionsMappingTime(Options):
    """Generated option wrapper for the pyramid3d sonification speech-mapping
    `time` settings.

    NOTE(review): every name below is defined twice — a getter-style form then a
    setter-style form. This matches generated accessor code whose `@property` /
    `@<name>.setter` decorators appear to have been stripped; as written, the
    second (setter) definition shadows the first. Confirm against the
    generator's original output.
    """

    def mapFunction(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store a raw (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: store a raw (non-JS) value.
        self._config(text, js_type=False)

    def max(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: store a raw (non-JS) value.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: store a raw (non-JS) value.
        self._config(num, js_type=False)

    def within(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: store a raw (non-JS) value.
        self._config(value, js_type=False)
# Fixed: the decorator line was missing its `@pytest.mark` prefix (the file had
# a bare `.parametrize(...)`, a syntax error); restored to a full parametrize.
@pytest.mark.parametrize('solver', [quartic.QuarticSolver.NUMERIC, quartic.QuarticSolver.HYBRID])
def test_almost_touching_ball_ball_collision(solver: quartic.QuarticSolver):
    """A rolling cue ball placed eps beyond contact must collide at the
    analytically known time.

    Sweeps eps over 11 orders of magnitude and checks the quartic solver
    against a closed-form quadratic solution to within 1e-11.
    """
    template = System(cue=Cue.default(), table=(table := Table.default()), balls={'1': (ball := Ball.create('1', xy=((table.w / 2), (table.l / 2)))), 'cue': Ball.create('cue', xy=(((table.w / 2) + (2 * ball.params.R)), (table.l / 2)))})

    def _apply_parameters(system: System, V0: float, eps: float) -> None:
        # Place the cue ball eps beyond touching distance, rolling toward the
        # target ball with speed V0.
        rx = (((table.w / 2) + (2 * ball.params.R)) + eps)
        v = np.array([(- V0), 0, 0])
        w = (ptmath.cross(np.array([0, 0, 1]), v) / ball.params.R)
        system.balls['cue'].state.rvw[(0, 0)] = rx
        system.balls['cue'].state.rvw[1] = v
        system.balls['cue'].state.rvw[2] = w
        system.balls['cue'].state.s = const.rolling
        _assert_rolling(system.balls['cue'].state.rvw, ball.params.R)

    def true_time_to_collision(eps, V0, mu_r, g):
        # Rolling deceleration gives distance(t) = V0*t - 0.5*mu_r*g*t^2; the
        # earliest non-negative root of distance(t) == eps is the contact time.
        collision_time = np.inf
        for t in quadratic.solve(((0.5 * mu_r) * g), (- V0), eps):
            if ((t >= 0) and (t < collision_time)):
                collision_time = t
        return collision_time

    V0 = 2
    for eps in np.logspace((- 12), (- 1), 20):
        system = template.copy()
        _apply_parameters(system, V0, eps)
        ball1 = system.balls['cue']
        ball2 = system.balls['1']
        coeffs = ball_ball_collision_coeffs(rvw1=ball1.state.rvw, rvw2=ball2.state.rvw, s1=ball1.state.s, s2=ball2.state.s, mu1=(ball1.params.u_s if (ball1.state.s == const.sliding) else ball1.params.u_r), mu2=(ball2.params.u_s if (ball2.state.s == const.sliding) else ball2.params.u_r), m1=ball1.params.m, m2=ball2.params.m, g1=ball1.params.g, g2=ball2.params.g, R=ball1.params.R)
        coeffs_array = np.array([coeffs], dtype=np.float64)
        truth = true_time_to_collision(eps, V0, ball1.params.u_r, ball1.params.g)
        calculated = quartic.minimum_quartic_root(coeffs_array, solver=solver)[0]
        diff = abs((calculated - truth))
        assert (diff < 1e-11)
def _ipython(local, banner):
    """Launch an embedded IPython shell seeded with the *local* namespace and
    the given *banner*."""
    from IPython.terminal.embed import InteractiveShellEmbed
    from IPython.terminal.ipapp import load_default_config
    # Drop any cached singleton so our banner/namespace take effect.
    InteractiveShellEmbed.clear_instance()
    embed_shell = InteractiveShellEmbed.instance(
        banner1=banner, user_ns=local, config=load_default_config()
    )
    embed_shell()
class TestImportHelpers(unittest.TestCase):
    """Unit tests for the import helper functions (_allowed_file,
    _available_path, make_error, _trim_id)."""

    def test_allowed_file(self):
        """A filename passes only when its extension is in the allowed list."""
        request_filename = 'test.pdf'
        request_extensions = ['pdf', 'zip']
        actual_response = _allowed_file(request_filename, request_extensions)
        self.assertTrue(actual_response)
        request_filename = 'test.pdf'
        request_extensions = ['zip']
        actual_response = _allowed_file(request_filename, request_extensions)
        self.assertFalse(actual_response)

    def test_available_path(self):
        """_available_path appends a counter until the path does not exist."""
        # No collision: the plain concatenation is returned.
        with patch('app.api.helpers.import_helpers.os.path.isfile', return_value=False):
            expected_response = 'testfile.pdf'
            actual_response = _available_path('test', 'file.pdf')
            self.assertEqual(expected_response, actual_response)
        # Two collisions, then free: suffix '2' is used.
        with patch('app.api.helpers.import_helpers.os.path.isfile', side_effect=[True, True, False]):
            expected_response = 'testfilename2'
            actual_response = _available_path('test', 'filename')
            self.assertEqual(expected_response, actual_response)

    def test_make_error(self):
        """make_error builds a ServerError whose title/status reflect the
        original error and optional id."""
        # Defaults: 500 / Internal Server Error.
        expected_response_title = 'File event, Internal Server Error'
        expected_response_status = 500
        actual_response = make_error('event')
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        # Wrapped error without a title keeps the default title.
        error = ServerError(source='Zip Upload', detail='Invalid json')
        expected_response_title = 'File event, Internal Server Error'
        expected_response_status = 500
        actual_response = make_error('event', er=error)
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        # Wrapped error with a title: title is propagated.
        error = ServerError(source='Zip Upload', detail='Invalid json', title='Error while uploading.')
        expected_response_title = 'File event, Error while uploading.'
        expected_response_status = 500
        actual_response = make_error('event', er=error)
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        # Wrapped error with a status: status is propagated.
        error = ServerError(source='Zip Upload', detail='Invalid json', status=404)
        expected_response_title = 'File event, Internal Server Error'
        expected_response_status = 404
        actual_response = make_error('event', er=error)
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        # Both title and status propagated.
        error = ServerError(source='Zip Upload', detail='Invalid json', title='Error while uploading.', status=403)
        expected_response_title = 'File event, Error while uploading.'
        expected_response_status = 403
        actual_response = make_error('event', er=error)
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        # id_ is included in the title, with or without a wrapped error.
        error = ServerError(source='{}', detail='Internal Server Error', title='Internal Server Error')
        expected_response_title = 'File event, ID ERR_255, Internal Server Error'
        expected_response_status = 500
        actual_response = make_error('event', er=error, id_='ERR_255')
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)
        actual_response = make_error('event', id_='ERR_255')
        expected_response_title = 'File event, ID ERR_255, Internal Server Error'
        expected_response_status = 500
        self.assertEqual(expected_response_status, actual_response.status)
        self.assertEqual(expected_response_title, actual_response.title)
        self.assertIsInstance(actual_response, ServerError)

    def test_trim_id(self):
        """_trim_id splits a payload into its id and the remaining fields."""
        data = {'id': 'e34234'}
        expected_response = ('e34234', {})
        actual_response = _trim_id(data)
        self.assertEqual(expected_response, actual_response)
        data = {'id': 'e34234', 'details': 'This is a test event', 'Venue': 'Fossasia'}
        expected_response = ('e34234', {'details': 'This is a test event', 'Venue': 'Fossasia'})
        actual_response = _trim_id(data)
        self.assertEqual(expected_response, actual_response)
class AdCampaignDeliveryStatsUnsupportedReasons(AbstractObject):
    """Graph API object describing why delivery stats are unsupported for a
    campaign (SDK-generated CRUD object)."""

    def __init__(self, api=None):
        super(AdCampaignDeliveryStatsUnsupportedReasons, self).__init__()
        self._isAdCampaignDeliveryStatsUnsupportedReasons = True
        self._api = api

    class Field(AbstractObject.Field):
        reason_data = 'reason_data'
        reason_type = 'reason_type'

    _field_types = {'reason_data': 'list<map<string, string>>', 'reason_type': 'string'}

    # Restored @classmethod: the method takes `cls` and is invoked on the class
    # by the SDK's request machinery.
    @classmethod
    def _get_field_enum_info(cls):
        """Return per-field enum metadata; this object defines none."""
        field_enum_info = {}
        return field_enum_info
def main(data_source):
    """Fit the FairMarketcapSF1 pipeline on filtered tickers and export the core."""
    pipeline = FairMarketcapSF1(pretrained=False, data_source=data_source)
    base_df = pipeline.data['base'].load()
    # Keep only tickers in the target currency and market-cap scales.
    mask = (base_df['currency'] == CURRENCY) & base_df['scalemarketcap'].apply(lambda s: s in SCALE_MARKETCAP)
    tickers = base_df[mask]['ticker'].values
    fit_result = pipeline.fit(tickers, median_absolute_relative_error)
    print(fit_result)
    export_path = '{}/{}'.format(config['models_path'], OUT_NAME)
    pipeline.export_core(export_path)
class OptionSeriesColumnSonificationContexttracksMappingPan(Options):
    """Generated option wrapper for the column-series sonification context-track
    `pan` mapping settings.

    NOTE(review): every name below is defined twice — a getter-style form then a
    setter-style form. This matches generated accessor code whose `@property` /
    `@<name>.setter` decorators appear to have been stripped; as written, the
    second (setter) definition shadows the first. Confirm against the
    generator's original output.
    """

    def mapFunction(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store a raw (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: store a raw (non-JS) value.
        self._config(text, js_type=False)

    def max(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: store a raw (non-JS) value.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: store a raw (non-JS) value.
        self._config(num, js_type=False)

    def within(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: store a raw (non-JS) value.
        self._config(value, js_type=False)
def create_extraction_chain(llm: BaseLanguageModel, node: Object, *, encoder_or_encoder_class: Union[(Type[Encoder], Encoder, str)]='csv', type_descriptor: Union[(TypeDescriptor, str)]='typescript', validator: Optional[Validator]=None, input_formatter: InputFormatter=None, instruction_template: Optional[PromptTemplate]=None, verbose: Optional[bool]=None, **encoder_kwargs: Any) -> LLMChain:
    """Assemble an LLMChain that extracts structured data matching `node`.

    The encoder/type-descriptor pair controls how the schema is rendered in the
    prompt and how the model's reply is decoded; an optional validator checks
    the parsed output. Raises ValueError if `node` is not an Object schema.
    """
    if not isinstance(node, Object):
        raise ValueError(f'node must be an Object got {type(node)}')
    encoder = initialize_encoder(encoder_or_encoder_class, node, **encoder_kwargs)
    descriptor = initialize_type_descriptors(type_descriptor)
    # Only pass `verbose` through when explicitly set, so LLMChain's own
    # default behavior is preserved otherwise.
    extra_kwargs = {'verbose': verbose} if verbose is not None else {}
    prompt = create_langchain_prompt(node, encoder, descriptor, validator=validator, instruction_template=instruction_template, input_formatter=input_formatter)
    parser = KorParser(encoder=encoder, validator=validator, schema_=node)
    return LLMChain(llm=llm, prompt=prompt, output_parser=parser, **extra_kwargs)
def _construct_shape(shape: List[List[int]], input_number: int) -> Tuple[(List[IntVar], List[Optional[str]])]:
    """Convert a nested shape spec into IntImm/IntVar dims plus their names.

    A one-element inner list is a static dimension (IntImm, name None); anything
    longer is dynamic and gets a generated name 'dynamic<input><ordinal>'.
    Returns (dims, dim_names), with one name entry per dimension.
    """
    dims = []
    names = []
    dynamic_count = 0
    for dim in shape:
        if len(dim) == 1:
            # Static dimension: exact value, no symbolic name.
            dims.append(IntImm(dim[0]))
            names.append(None)
        else:
            generated = f'dynamic{input_number}{dynamic_count}'
            dynamic_count += 1
            dims.append(IntVar(dim, name=generated))
            names.append(generated)
    return (dims, names)
class Compose():
    """Thin wrapper around `docker compose` for one named project.

    Calling the instance runs a compose subcommand with the project's standard
    override files; `exec`, `stop` and `bench` are convenience shortcuts.
    """

    def __init__(self, project_name: str, env_file: str):
        self.project_name = project_name
        # Common prefix shared by every compose invocation for this project.
        self.base_cmd = ('docker', 'compose', '-p', project_name, '--env-file', env_file)

    def __call__(self, *cmd: str) -> None:
        """Run a compose subcommand with the standard compose/override files."""
        overrides = ['-f', 'compose.yaml', '-f', 'overrides/compose.proxy.yaml', '-f', 'overrides/compose.mariadb.yaml', '-f', 'overrides/compose.redis.yaml']
        if CI:
            # CI-only override file.
            overrides += ('-f', 'tests/compose.ci.yaml')
        subprocess.check_call(self.base_cmd + tuple(overrides) + cmd)

    def exec(self, *cmd: str) -> None:
        """Exec into a service; disable TTY allocation when output is piped."""
        if not sys.stdout.isatty():
            self('exec', '-T', *cmd)
        else:
            self('exec', *cmd)

    def stop(self) -> None:
        """Tear the project down, ignoring failures (e.g. already stopped)."""
        with suppress(subprocess.CalledProcessError):
            subprocess.check_call(self.base_cmd + ('down', '-v', '--remove-orphans'))

    def bench(self, *cmd: str) -> None:
        """Run a `bench` command inside the backend service."""
        self.exec('backend', 'bench', *cmd)
def build_ordered_output_actions(acl_table, output_list, tunnel_rules=None, source_id=None):
    """Translate an ordered list of ACL output actions into OpenFlow artifacts.

    Args:
        acl_table: ACL table object providing set_field/set_vlan_vid builders.
        output_list: ordered list of single-key dicts, each describing one action.
        tunnel_rules: optional mapping used to resolve 'tunnel' actions.
        source_id: optional key into tunnel_rules[value] for 'tunnel' actions.

    Returns:
        Tuple (output_ports, output_actions, output_ofmsgs, output_inst).
    """
    output_actions = []
    output_ports = []
    output_ofmsgs = []
    output_inst = []
    # Actions are emitted in list order. The key checks are deliberately
    # independent `if`s (not elif): a dict carrying several keys emits all
    # matching actions in the fixed order below.
    for action in output_list:
        for (key, value) in action.items():
            if (key == 'pop_vlans'):
                # Pop `value` VLAN tags.
                for _ in range(value):
                    output_actions.append(valve_of.pop_vlan())
            if (key == 'vlan_vid'):
                output_actions.extend(push_vlan(acl_table, value))
            if (key == 'swap_vid'):
                output_actions.append(acl_table.set_vlan_vid(value))
            if (key == 'vlan_vids'):
                # Push a stack of VLAN tags, outermost last.
                for vlan_vid in value:
                    output_actions.extend(push_vlan(acl_table, vlan_vid))
            if (key == 'set_fields'):
                for set_field in value:
                    output_actions.append(acl_table.set_field(**set_field))
            if (key == 'port'):
                output_ports.append(value)
                output_actions.append(valve_of.output_port(value))
            if (key == 'ports'):
                for output_port in value:
                    output_ports.append(output_port)
                    output_actions.append(valve_of.output_port(output_port))
            if (key == 'failover'):
                # Fast-failover group: one bucket per port, watching that port.
                group_id = value['group_id']
                buckets = []
                for port in value['ports']:
                    buckets.append(valve_of.bucket(watch_port=port, actions=[valve_of.output_port(port)]))
                output_ofmsgs.extend(valve_of.groupadd_ff(group_id=group_id, buckets=buckets))
                output_actions.append(valve_of.group_act(group_id=group_id))
            if ((key == 'tunnel') and tunnel_rules and (source_id is not None)):
                # Resolve the tunnel via the per-source rule and inline its
                # actions/messages/instructions here.
                source_rule = tunnel_rules[value][source_id]
                (_, tunnel_actions, tunnel_ofmsgs, tunnel_inst) = build_output_actions(acl_table, source_rule)
                output_actions.extend(tunnel_actions)
                output_ofmsgs.extend(tunnel_ofmsgs)
                output_inst.extend(tunnel_inst)
            if (key == 'goto'):
                output_inst.append(valve_of.goto_table_id(value))
    return (output_ports, output_actions, output_ofmsgs, output_inst)
class OptionPlotoptionsBubbleSonificationTracksMappingPan(Options):
    """Pan-mapping options for bubble-series sonification tracks.

    NOTE(review): each name is defined twice (getter-style then setter-style);
    the second `def` shadows the first as written. These pairs presumably carry
    @property / @<name>.setter decorators stripped during extraction — confirm
    against the original generated source.
    """
    def mapFunction(self):
        # Getter: custom mapping function (no default).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: data property to map this parameter to.
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        # Getter: restrict mapping to a data scope.
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class paramTimer():
    """Accumulates elapsed time in nanoseconds with microsecond read-out."""

    # Running total in nanoseconds (class-level default of zero).
    elapsedTimeNS: float = 0.0

    def incrTimeNS(self, timeNS: float) -> None:
        """Add `timeNS` nanoseconds to the running total."""
        self.elapsedTimeNS = self.elapsedTimeNS + timeNS

    def reset(self, newTime: float=0.0) -> None:
        """Set the running total back to `newTime` (default zero)."""
        self.elapsedTimeNS = newTime

    def getTimeNS(self) -> float:
        """Return the accumulated time in nanoseconds."""
        return self.elapsedTimeNS

    def getTimeUS(self) -> float:
        """Return the accumulated time converted to microseconds."""
        return (self.elapsedTimeNS / 1000.0)
class IntroRoom(TutorialRoom):
    """First room of the tutorial; gives entering characters a health Attribute
    and auto-quells elevated permissions while in the tutorial."""

    def at_object_creation(self):
        super().at_object_creation()
        self.db.tutorial_info = 'The first room of the tutorial. This assigns the health Attribute to the account.'

    def at_object_receive(self, character, source_location, move_type='move', **kwargs):
        # Room-configurable starting health, defaulting to 20.
        health = self.db.char_health or 20
        if character.has_account:
            character.db.health = health
            character.db.health_max = health
        if character.is_superuser:
            # Superusers cannot be auto-quelled; show a framed warning instead.
            banner = ('-' * 78) + SUPERUSER_WARNING + ('-' * 78)
            character.msg('|r%s|n' % banner.format(name=character.key, quell='|wquell|r'))
        elif character.account:
            character.account.execute_cmd('quell')
            character.msg('(Auto-quelling while in tutorial-world)')
def breathing_led(mc, duration):
    """Pulse the LED's red channel in a smooth 'breathing' pattern.

    Runs for `duration` seconds, updating roughly every 20 ms, then turns the
    LED off and returns.

    Args:
        mc: controller exposing set_color(r, g, b) with 0-255 channel values.
        duration: total run time in seconds.

    Fixes over the original:
    - The outer `while True:` made the function never return; `duration` only
      bounded one cycle before the pattern restarted forever. Removed.
    - The brightness expression was mis-parenthesized as
      `1 - (cos(phase)/2) * range + min`, which clamps to 0 for half of every
      cycle and tops out around range/2. Replaced with the standard
      `(1 - cos(phase)) / 2 * range + min` easing.
    """
    min_brightness = 0
    max_brightness = 255
    speed = 0.02          # seconds between LED updates
    period = (2 * math.pi)  # seconds per full breath cycle
    start_time = time.time()
    while ((time.time() - start_time) < duration):
        elapsed_time = (time.time() - start_time)
        phase = ((((elapsed_time * 2) * math.pi) / period) % (2 * math.pi))
        # (1 - cos)/2 eases smoothly 0 -> 1 -> 0 over one period.
        brightness = int(((((1 - math.cos(phase)) / 2) * (max_brightness - min_brightness)) + min_brightness))
        brightness = max(min(brightness, max_brightness), min_brightness)
        print('color:', brightness)
        mc.set_color(brightness, 0, 0)
        time.sleep(speed)
    # Turn the LED off once the breathing run completes.
    mc.set_color(0, 0, 0)
class OptionSeriesAreaLabelStyle(Options):
    """CSS-style options for area-series labels.

    NOTE(review): each name is defined twice (getter then setter); the second
    `def` shadows the first as written — presumably @property / @<name>.setter
    decorators were stripped. Confirm against the original generated source.
    """
    def fontSize(self):
        # Getter: default font size is '0.8em'.
        return self._config_get('0.8em')
    def fontSize(self, num: float):
        self._config(num, js_type=False)
    def fontWeight(self):
        # Getter: default weight is 'bold'.
        return self._config_get('bold')
    def fontWeight(self, text: str):
        self._config(text, js_type=False)
def test_no_opts():
    """Invoking `pipeline` with no arguments prints the subcommand help and exits 0."""
    result = CliRunner().invoke(pipeline)
    expected_snippets = (
        'run Run a pipeline in your local environment',
        'submit Submit a pipeline to be executed on the server',
        'describe Display pipeline summary',
        'export Export a pipeline to a runtime-specific format',
        'validate Validate pipeline',
    )
    for snippet in expected_snippets:
        assert snippet in result.output
    assert result.exit_code == 0
def test_sa():
    """Check sa() against fixed key/challenge/address vectors for both roles."""
    hx = bytearray.fromhex
    K = hx('10f2e5d6c9a2630580a960856f66b029')
    # NOTE(review): this hex string has odd length, so bytearray.fromhex will
    # raise ValueError — verify the vector against the original test data.
    C_S = hx('b0bc5f422f00c64c38e')
    C_M = hx('a79060aa4f4638d907e261b4a5a3c612')
    BTADD_S = hx('404e36a8bf5f')
    BTADD_M = hx('20819A076931')
    SRES_S = hx('cca016d3')
    SRES_M = hx('8266e553')
    computed_m, computed_s = sa(K, C_M, C_S, BTADD_M, BTADD_S)
    assert computed_m == SRES_M
    assert computed_s == SRES_S
# NOTE(review): this decorator line appears truncated — presumably
# '@pytest.mark.parametrize'; confirm against the original source.
.parametrize('lang', wikiquote.supported_languages())
def test_qotd_author(lang):
    """Quote-of-the-day author must be a non-empty string for every language."""
    try:
        (_, author) = wikiquote.quote_of_the_day(lang=lang)
    except wikiquote.MissingQOTDException:
        # Some languages legitimately have no QOTD; skip rather than fail.
        pytest.skip('No QOTD for {lang}'.format(lang=lang))
    assert isinstance(author, str)
    assert (len(author) > 0)
# NOTE(review): this line appears to be a truncated decorator — presumably a
# singledispatch-style '@coarsen.register(firedrake.MixedVectorSpaceBasis)';
# confirm against the original source.
(firedrake.MixedVectorSpaceBasis)
def coarsen_mixedvectorspacebasis(mspbasis, self, coefficient_mapping=None):
    """Coarsen a MixedVectorSpaceBasis by coarsening its function space and
    each sub-basis (vector space bases are coarsened recursively; indexed
    function spaces are re-indexed into the coarse mixed space)."""
    coarse_V = self(mspbasis._function_space, self, coefficient_mapping=coefficient_mapping)
    coarse_bases = []
    for basis in mspbasis._bases:
        if isinstance(basis, firedrake.VectorSpaceBasis):
            coarse_bases.append(self(basis, self, coefficient_mapping=coefficient_mapping))
        elif (basis.index is not None):
            # An indexed sub-space: take the matching sub-space of the coarse mixed space.
            coarse_bases.append(coarse_V.sub(basis.index))
        else:
            raise RuntimeError('MixedVectorSpaceBasis can only contain vector space bases or indexed function spaces')
    return firedrake.MixedVectorSpaceBasis(coarse_V, coarse_bases)
def badge_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils role producing an inline badge node.

    Parses `text` as function-style inputs, derives the badge label and CSS
    classes, and returns the standard docutils role tuple (nodes, messages).
    On bad input, a problematic node plus an error message is returned instead.
    """
    try:
        args, kwargs = string_to_func_inputs(text)
        text, classes = get_badge_inputs(*args, **kwargs)
    except Exception as err:
        # Report the parse failure at the source line and mark the raw text.
        msg = inliner.reporter.error(f'badge input is invalid: {err}', line=lineno)
        return ([inliner.problematic(rawtext, rawtext, msg)], [msg])
    badge_classes = ['sphinx-bs', 'badge'] + classes
    node = nodes.inline(rawtext, unescape(text), classes=badge_classes)
    return ([node], [])
class RuleDecoratorMeta(type):
    """Metaclass that assembles a parsimonious Grammar from rule-decorated methods.

    Any function in the class namespace carrying a `_rule` attribute contributes
    one grammar production named after the method (minus any 'visit_' prefix),
    ordered by source-line position so the first-defined rule is the grammar root.
    """
    def __new__(metaclass, name, bases, namespace):
        def unvisit(name):
            # Strip the NodeVisitor 'visit_' prefix so the production name
            # matches the grammar symbol.
            return (name[6:] if name.startswith('visit_') else name)
        methods = [v for (k, v) in namespace.items() if (hasattr(v, '_rule') and isfunction(v))]
        if methods:
            from parsimonious.grammar import Grammar
            # Sort by definition order; Python 2 exposes the code object as
            # func_code, Python 3 as __code__.
            methods.sort(key=((lambda x: x.func_code.co_firstlineno) if (version_info[0] < 3) else (lambda x: x.__code__.co_firstlineno)))
            namespace['grammar'] = Grammar('\n'.join(('{name} = {expr}'.format(name=unvisit(m.__name__), expr=m._rule) for m in methods)))
        return super(RuleDecoratorMeta, metaclass).__new__(metaclass, name, bases, namespace)
def test_workflow_execution_data_response():
    """Round-trip WorkflowExecutionGetDataResponse through its flyte IDL form."""
    input_blob = _common_models.UrlBlob('in', 1)
    output_blob = _common_models.UrlBlob('out', 2)
    original = _execution.WorkflowExecutionGetDataResponse(input_blob, output_blob, _INPUT_MAP, _OUTPUT_MAP)
    restored = _execution.WorkflowExecutionGetDataResponse.from_flyte_idl(original.to_flyte_idl())
    assert original == restored
    # Every field must survive the serialization round-trip unchanged.
    assert restored.inputs == input_blob
    assert restored.outputs == output_blob
    assert restored.full_inputs == _INPUT_MAP
    assert restored.full_outputs == _OUTPUT_MAP
def guess_own_iface(match_ips):
    """Find the local interface whose IPv4 network contains any of `match_ips`.

    Args:
        match_ips: iterable of IPv4 address strings to match against local networks.

    Returns:
        Tuple (iface_name, ipv4_addr, netmask, mac) for the first matching
        interface/address, or None if `match_ips` is empty or nothing matches.
    """
    if not match_ips:
        return None
    for iface in ni.interfaces():
        ifa = ni.ifaddresses(iface)
        # Skip interfaces without a link-layer (MAC) address.
        if (ni.AF_LINK not in ifa) or (not ifa[ni.AF_LINK]):
            # Fixed garbled message ("is has no MAC address"); lazy %-style
            # logging args avoid formatting when the level is disabled.
            logging.debug('%s has no MAC address, skipped.', iface)
            continue
        if (ni.AF_INET not in ifa) or (not ifa[ni.AF_INET]):
            logging.warning('%s has no IPv4 address', iface)
            continue
        mac = ifa[ni.AF_LINK][0]['addr']
        for addr in ifa[ni.AF_INET]:
            if ('addr' not in addr) or ('netmask' not in addr):
                continue
            # strict=False allows host bits to be set in the address.
            net = IPv4Network(addr['addr'] + '/' + addr['netmask'], strict=False)
            if any(IPv4Address(ip) in net for ip in match_ips):
                return (iface, addr['addr'], addr['netmask'], mac)
    return None
class EngineGraph():
    def __init__(self, state: Dict):
        """Wrap a raw graph-state dict (keys: nodes, connects, backup, gui_state)."""
        self._state = state
    def __str__(self):
        """Render the full graph state as a YAML document."""
        return yaml.dump(self._state)
def create(cls, actuators: Optional[List[Dict]]=None, sensors: Optional[List[Dict]]=None):
nodes = []
from eagerx.core.entities import EngineNode
spec = EngineNode.pre_make(None, None)
spec.config.name = 'actuators'
spec.config.color = 'yellow'
nodes.append(spec)
for (cname, params) in actuators.items():
spec.add_output(cname, space=(params.space.to_dict() if (params.space is not None) else None), processor=(ProcessorSpec(params.processor.to_dict()) if (params.processor is not None) else None))
spec.add_input(cname, space=(params.space.to_dict() if (params.space is not None) else None), skip=params.skip, window=params.window, processor=(ProcessorSpec(params.processor.to_dict()) if (params.processor is not None) else None))
spec.config.outputs.append(cname)
spec = EngineNode.pre_make(None, None)
spec.config.name = 'sensors'
spec.config.color = 'yellow'
nodes.append(spec)
for (cname, params) in sensors.items():
spec.config.inputs.append(cname)
spec.add_input(cname, space=(params.space.to_dict() if (params.space is not None) else None), processor=(ProcessorSpec(params.processor.to_dict()) if (params.processor is not None) else None))
state = dict(nodes=dict(), connects=list(), backup=dict(), gui_state=dict())
graph = cls(state)
graph.add(nodes)
return graph
def add(self, nodes: Union[(NodeSpec, List[NodeSpec])]) -> None:
if (not isinstance(nodes, list)):
nodes = [nodes]
for node in nodes:
self._check_spec(node)
name = node.config.name
assert (name not in self._state['nodes']), ('There is already a node or object registered in this graph with name "%s".' % name)
self._state['nodes'][name] = node._params
self._state['backup'][name] = node.params
    def remove(self, names: Union[(Union[(str, EntitySpec)], List[Union[(str, EntitySpec)]])]) -> None:
        """Remove one or more nodes (by name or spec), disconnecting them first.

        Every connection touching a removed node is torn down via disconnect(),
        translating 'actuators'/'sensors' endpoints back into their cname form.
        """
        if (not isinstance(names, list)):
            names = [names]
        for name in names:
            if isinstance(name, EntitySpec):
                # Accept a spec object in place of a plain name.
                name = name.params['config']['name']
            assert (name in self._state['nodes']), f" No entity with name '{name}' in graph."
            # Iterate over a deepcopy because disconnect() mutates 'connects'.
            for (source, target) in deepcopy(self._state['connects']):
                if (name in [source[0], target[0]]):
                    if (source[0] == 'actuators'):
                        # Actuator endpoints are passed by cname, not as a view.
                        actuator = source[2]
                        source = None
                    else:
                        actuator = None
                        source = self.get_view(source[0], source[1:])
                    if (target[0] == 'sensors'):
                        # Sensor endpoints are likewise passed by cname.
                        sensor = target[2]
                        target = None
                    else:
                        sensor = None
                        target = self.get_view(target[0], target[1:])
                    self.disconnect(source, target, actuator, sensor)
            self._state['nodes'].pop(name)
def add_component(self, entry: SpecView) -> None:
(name, component, cname) = entry()
params = self._state['nodes'][name]
assert (cname not in params['config'][component]), f'"{cname}" already selected in "{name}" under {component}.'
params['config'][component].append(cname)
def remove_component(self, entry: SpecView) -> None:
self._is_selected(self._state, entry)
self._disconnect_component(entry)
(name, component, cname) = entry()
params = self._state['nodes'][name]
params['config'][component].remove(cname)
    def connect(self, source: Optional[SpecView]=None, target: Optional[SpecView]=None, actuator: str=None, sensor: str=None, window: Optional[int]=None, delay: Optional[float]=None, skip: Optional[bool]=None) -> None:
        """Connect a source to a target; an actuator/sensor name may stand in
        for the source/target respectively.

        window/delay/skip overrides are only valid for node-to-node
        connections — for actuators and sensors these are fixed by the
        agnostic object definition.
        """
        # Exactly one of (source, actuator) and one of (target, sensor) may be given.
        flag = ((not source) or (not actuator))
        assert flag, f'You cannot specify an actuator if you wish to connect actuator "{actuator}", as the actuator will act as the source.'
        flag = ((not target) or (not sensor))
        assert flag, f'You cannot specify a target if you wish to connect sensor "{sensor}", as the sensor will act as the target.'
        assert (not (actuator and sensor)), 'You cannot connect an actuator directly to a sensor.'
        if actuator:
            # Actuator acts as the source; its processor (if any) must be the only one.
            source = self.get_view('actuators', ['outputs', actuator])
            if (target.processor is not None):
                msg = f'Cannot specify a processor for actuator "{actuator}", because there is already one specified in the agnostic graph definition. You can only have one processor.'
                assert (source.processor is None), msg
            assert (window is None), f'Cannot specify a window when connecting actuator "{actuator}". You can only do that in the agnostic object definition.'
            assert (delay is None), f'Cannot specify a delay when connecting actuator "{actuator}". You can only do that in the agnostic object definition.'
            assert (skip is None), f'Cannot specify a skip when connecting actuator "{actuator}". You can only do that in the agnostic object definition.'
        elif sensor:
            # Sensor acts as the target; again at most one processor may exist.
            target = self.get_view('sensors', ['inputs', sensor])
            if (source.processor is not None):
                (src_name, src_comp, src_cname) = source()
                msg = f"Processor clash detected! You attempt to connect EngineNode output '{src_name}.{src_comp}.{src_cname}' to sensor '{sensor}'. There is a processor defined for '{src_name}.{src_comp}.{src_cname}', but sensor '{sensor}' also has a a processor. Only one can be used."
                assert (target.processor is None), msg
            assert (window is None), f'Cannot specify a window when connecting sensor "{sensor}". You can only do that in the agnostic object definition.'
            assert (delay is None), f'Cannot specify a delay when connecting sensor "{sensor}". You can only do that in the agnostic object definition.'
            assert (skip is None), f'Cannot specify a skip when connecting sensor "{sensor}". You can only do that in the agnostic object definition.'
        self._connect(source, target, window, delay, skip)
    def _connect(self, source: Optional[SpecView]=None, target: Optional[SpecView]=None, window: Optional[int]=None, delay: Optional[float]=None, skip: Optional[bool]=None):
        """Low-level connect: validate, apply overrides, record the connection.

        A target may have at most one incoming connection; both endpoints must
        be selected and type-compatible.
        """
        self._is_selected(self._state, source)
        self._is_selected(self._state, target)
        self._is_compatible(self._state, source, target)
        (target_name, target_comp, target_cname) = target()
        # Reject a second connection to the same target endpoint.
        for (s, t) in self._state['connects']:
            (t_name, t_comp, t_cname) = t
            flag = (not ((target_name == t_name) and (target_comp == t_comp) and (target_cname == t_cname)))
            assert flag, f'Target "{target}" is already connected to source "{s}"'
        # Optional per-connection overrides stored on the target.
        if (window is not None):
            self.set({'window': window}, target)
        if (delay is not None):
            self.set({'delay': delay}, target)
        if (skip is not None):
            self.set({'skip': skip}, target)
        connect = [list(source()), list(target())]
        self._state['connects'].append(connect)
    def disconnect(self, source: Optional[SpecView]=None, target: Optional[SpecView]=None, actuator: str=None, sensor: str=None) -> None:
        """Remove a connection; actuator/sensor names may stand in for source/target."""
        # NOTE(review): this assert's message is a tuple (note the comma), so a
        # failure prints the tuple rather than a single string — confirm intent.
        assert ((not source) or (not actuator)), ('You cannot specify a source if you wish to disconnect actuator ', f'"{actuator}", as the actuator will act as the source.')
        assert ((not target) or (not sensor)), f'You cannot specify a target if you wish to disconnect sensor "{sensor}", as the sensor will act as the target.'
        assert (not (sensor and actuator)), 'You cannot disconnect an actuator from a sensor, as such a connection cannot exist.'
        if actuator:
            source = self.get_view('actuators', ['outputs', actuator])
        if sensor:
            target = self.get_view('sensors', ['inputs', sensor])
        self._disconnect(source, target)
    def _disconnect(self, source: SpecView, target: SpecView):
        """Low-level disconnect: drop the connection entry and restore the
        target's parameters from the pristine 'backup' copy."""
        self._is_selected(self._state, target)
        self._is_selected(self._state, source)
        connect_exists = False
        idx_connect = None
        # Locate the exact (source, target) pair in the connection list.
        for (idx, c) in enumerate(self._state['connects']):
            if ((list(source()) == c[0]) and (list(target()) == c[1])):
                connect_exists = True
                idx_connect = idx
                break
        assert connect_exists, f'The connection with source={source()} and target={target()} cannot be removed, because it does not exist.'
        self._state['connects'].pop(idx_connect)
        (target_name, target_comp, target_cname) = target()
        # Undo any per-connection overrides (window/delay/skip) by restoring
        # the backed-up parameters.
        target_params = self._state['nodes'][target_name]
        target_params[target_comp][target_cname] = self._state['backup'][target_name][target_comp][target_cname]
    def _disconnect_component(self, entry: SpecView):
        """Disconnect every connection in which `entry` appears (as source or
        target). Returns True if at least one connection was removed."""
        was_connected = False
        (name, component, cname) = entry()
        # Iterate over a deepcopy because disconnect() mutates 'connects'.
        for (source, target) in deepcopy(self._state['connects']):
            source = self.get_view(source[0], source[1:])
            target = self.get_view(target[0], target[1:])
            self._is_selected(self._state, source)
            self._is_selected(self._state, target)
            (source_name, source_comp, source_cname) = source()
            (target_name, target_comp, target_cname) = target()
            if ((name == source_name) and (component == source_comp) and (cname == source_cname)):
                self.disconnect(source, target)
                was_connected = True
            elif ((name == target_name) and (component == target_comp) and (cname == target_cname)):
                self.disconnect(source, target)
                was_connected = True
        return was_connected
    def set(self, mapping: Any, entry: Optional[SpecView], parameter: Optional[str]=None) -> None:
        """Set one or more parameters on the node/component referenced by `entry`.

        `mapping` is either a dict (or SpecView) of parameter -> value, or a
        single value combined with `parameter`. Actuator/sensor parameters,
        component selections, node renames, and processors cannot be changed
        through this method.
        """
        assert (not (entry()[0] == 'actuators')), "Cannot change the actuator parameters here, in an engine specific implementation. That is only possible in the object's agnostic definition."
        assert (not (entry()[0] == 'sensors')), "Cannot change the sensor parameters here, in an engine specific implementation. That is only possible in the object's agnostic definition."
        if (parameter is None):
            if isinstance(mapping, SpecView):
                mapping = mapping.to_dict()
            assert isinstance(mapping, dict), "Can only set mappings of type dict. Else, also set 'parameter=<param_name>'."
        else:
            # Single-parameter form: wrap into a one-entry mapping.
            mapping = {parameter: mapping}
        for (parameter, value) in mapping.items():
            if parameter:
                # Raises AttributeError early if the parameter does not exist.
                getattr(entry, parameter)
            if (parameter == 'processor'):
                msg = 'Skipping processor. Cannot change the processor with this method. Add output processors before connecting, and input processors when making a connection.'
                log.logwarn(msg)
            else:
                t = entry()
                name = t[0]
                if (t[1] == 'config'):
                    # Config-level parameters: component lists and the node
                    # name are immutable through this method.
                    assert (parameter not in ['sensors', 'actuators', 'targets', 'states', 'inputs', 'outputs']), 'You cannot modify component parameters with this function. Use _add/remove_component(..) instead.'
                    assert (parameter not in ['name']), f"You cannot rename '{name}'."
                    p = self._state['nodes'][name]['config']
                else:
                    (name, component, cname) = entry()
                    p = self._state['nodes'][name][component][cname]
                self._set(p, {parameter: value})
    def get(self, entry: Optional[Union[(SpecView, EntitySpec)]]=None, actuator: Optional[str]=None, sensor: Optional[str]=None, parameter: Optional[str]=None) -> Any:
        """Look up a node's params, a component view, or a single parameter.

        Accepts either an entry (SpecView/EntitySpec) or exactly one of
        actuator/sensor by cname; with `parameter` set, returns that parameter's
        value, otherwise the resolved view (or raw params for an EntitySpec).
        """
        if isinstance(entry, EntitySpec):
            # EntitySpec: return the node's raw parameter dict directly.
            name = entry.params['config']['name']
            assert (name in self._state['nodes']), f" No entity with name '{name}' in graph."
            return self._state['nodes'][name]
        self._correct_signature(entry, actuator, sensor)
        if actuator:
            entry = self.get_view('actuators', ['outputs', actuator])
        if sensor:
            entry = self.get_view('sensors', ['inputs', sensor])
        if parameter:
            return getattr(entry, parameter)
        else:
            return entry
def get_spec(self, name: str) -> NodeSpec:
assert (name in self._state['nodes']), f" No entity with name '{name}' in graph."
params = self._state['nodes'][name]
spec = NodeSpec(params)
return spec
    def _node_dependencies(self, state):
        """Compute, per sensor and per actuator cname, the list of node names
        it transitively depends on (sensors: ancestors; actuators: descendants,
        with special handling for unsynchronized nodes)."""
        import networkx as nx
        state = deepcopy(state)
        G = self._generate_graph(state)
        # Reversed graph turns descendant queries into ancestor queries.
        G_rev = G.reverse(copy=True)
        def is_synced(node_name):
            # A node is synchronized with the engine iff it has a 'tick' input.
            return (True if ('tick' in state['nodes'][node_name]['config']['inputs']) else False)
        dependencies = dict(sensors=dict(), actuators=dict())
        for cname in state['nodes']['sensors']['inputs']:
            dependencies['sensors'][cname] = []
            target_name = f'sensors/{cname}'
            # All upstream producers feeding this sensor.
            descendants = nx.descendants(G_rev, target_name)
            for source in descendants:
                (node_name, source_cname) = source.split('/')
                if (node_name in ['actuators', 'sensors']):
                    continue
                dependencies['sensors'][cname].append(node_name)
        for cname in state['nodes']['actuators']['outputs']:
            dependencies['actuators'][cname] = []
            source_name = f'actuators/{cname}'
            # All downstream consumers of this actuator.
            descendants = nx.descendants(G, source_name)
            for target in list(descendants):
                (node_name, target_cname) = target.split('/')
                if (node_name in ['actuators', 'sensors']):
                    continue
                sync = is_synced(node_name)
                if sync:
                    dependencies['actuators'][cname].append(node_name)
                else:
                    # Unsynced nodes are excluded from the direct dependency set.
                    descendants.remove(target)
            for target in descendants:
                # NOTE(review): the result of this call is discarded, and the
                # loop below re-walks the same `descendants` set — this looks
                # like it intended `descendants = nx.descendants(G_rev, target)`
                # or similar; confirm against the original implementation.
                nx.descendants(G_rev, target)
            for source in descendants:
                (node_name, source_cname) = source.split('/')
                if (node_name in ['actuators', 'sensors']):
                    continue
                dependencies['actuators'][cname].append(node_name)
            # Also count directly-connected targets of the actuator itself.
            for (source, target) in state['connects']:
                (source_name, source_comp, source_cname) = source
                (target_name, target_comp, target_cname) = target
                if ((source_name == 'actuators') and (cname == source_cname)):
                    dependencies['actuators'][cname].append(target_name)
            # Deduplicate while rebuilding the entry.
            dependencies['actuators'][cname] = list(set(dependencies['actuators'].pop(cname)))
        return dependencies
    def register(self):
        """Validate the graph and compile it into registration artifacts.

        Returns:
            (nodes, actuators, sensors, connects) where `nodes` maps namespaced
            node names to substituted params, `actuators`/`sensors` map cnames
            to lists of endpoint dicts (with dependency lists), and `connects`
            holds the namespaced connection entries.
        """
        assert self.is_valid(plot=False), 'Graph not valid.'
        dependencies = self._node_dependencies(self._state)
        # Work on a copy; registration must not mutate the live graph state.
        state = deepcopy(self._state)
        actuators = dict()
        sensors = dict()
        connects = state['connects']
        for (source, target) in connects:
            (source_name, source_comp, source_cname) = source
            (target_name, target_comp, target_cname) = target
            if (source_name == 'actuators'):
                # Actuator endpoint: record the target plus its dependency list.
                dependency = [f'$(ns obj_name)/{d}' for d in dependencies['actuators'][source_cname]]
                if (source_cname not in actuators):
                    actuators[source_cname] = []
                entry = {'name': f'$(ns obj_name)/{target_name}', 'component': target_comp, 'cname': target_cname, 'dependency': dependency}
                actuators[source_cname].append(entry)
                continue
            if (target_name == 'sensors'):
                # Sensor endpoint: record the source plus its dependency list.
                dependency = [f'$(ns obj_name)/{d}' for d in dependencies['sensors'][target_cname]]
                if (target_cname not in sensors):
                    sensors[target_cname] = []
                entry = {'name': f'$(ns obj_name)/{source_name}', 'component': source_comp, 'cname': source_cname, 'dependency': dependency}
                sensors[target_cname].append(entry)
                continue
            # Node-to-node: re-check compatibility and propagate the dtype.
            s = self.get_view(source[0], source[1:])
            t = self.get_view(target[0], target[1:])
            self._is_compatible(state, s, t)
            source_dtype = state['nodes'][source_name][source_comp][source_cname]['space']['dtype']
            state['nodes'][target_name][target_comp][target_cname]['dtype'] = source_dtype
        # Namespace all non-actuator/sensor endpoints under the object name.
        for (source, target) in connects:
            if (source[0] != 'actuators'):
                source[0] = f'$(ns obj_name)/{source[0]}'
            if (target[0] != 'sensors'):
                target[0] = f'$(ns obj_name)/{target[0]}'
        nodes = dict()
        from eagerx.core.specs import NodeSpec
        for (name, params) in state['nodes'].items():
            if ('node_type' in params):
                if (name == 'actuators'):
                    # Dummy nodes are not registered.
                    pass
                elif (name == 'sensors'):
                    pass
                else:
                    spec = NodeSpec(params)
                    # A tick-synchronized node must produce at least one output,
                    # otherwise the engine cannot observe its progress.
                    flag = (True if ('tick' not in spec.config.inputs) else (len(spec.config.outputs) > 0))
                    assert flag, f'If Node `{spec.config.name}` must run synchronized with the Engine, it must have at least 1 (dummy?) output.'
                    name = f'$(ns obj_name)/{spec.config.name}'
                    spec.config.name = name
                    params = spec.params
                    # Substitute namespace placeholders into the params.
                    context = {'ns': {'node_name': name}, 'config': params['config']}
                    substitute_args(params, context, only=['config', 'ns'])
                    nodes[name] = params
        return (nodes, actuators, sensors, connects)
def gui(self, interactive: Optional[bool]=True, resolution: Optional[List[int]]=None, filename: Optional[str]=None) -> Union[(None, np.ndarray)]:
try:
from eagerx_gui import launch_gui, render_gui
except ImportError as e:
log.logwarn(f'{e}. You will likely have to install eagerx-gui. It can be installed by running: pip install eagerx-gui')
return
if interactive:
self._state = launch_gui(deepcopy(self._state), is_engine=True)
else:
return render_gui(deepcopy(self._state), resolution=resolution, filename=filename, is_engine=True)
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def _get_address(source: Tuple[(str, str, str)], target: Tuple[(str, str, str)]):
        """Derive the namespaced topic address for a (source, target) pair.

        Actuator sources and sensor targets get their own short address form;
        node-to-node connections are addressed by the full source triple.
        Direct sensor<->actuator links are rejected.
        """
        (source_name, source_comp, source_cname) = source
        (target_name, target_comp, target_cname) = target
        if (source_name == 'actuators'):
            assert (not (target_name == 'sensors')), f'A direct connection between a sensor "{target_cname}" and actuator "{source_cname}" cannot exist.'
            address = f'$(ns obj_name)/{source_name}/{source_cname}'
        elif (target_name == 'sensors'):
            assert (not (source_name == 'actuators')), f'A direct connection between a sensor "{target_cname}" and actuator "{source_cname}" cannot exist.'
            address = f'$(ns obj_name)/{target_name}/{target_cname}'
        else:
            address = f'$(ns obj_name)/{source_name}/{source_comp}/{source_cname}'
        return address
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def _is_selected(state: Dict, entry: SpecView):
        """Assert that the component referenced by `entry` is selected in its
        node's config (feedthroughs are checked against 'outputs')."""
        (name, component, cname) = entry()
        params = state['nodes'][name]
        component = ('outputs' if (component == 'feedthroughs') else component)
        assert (cname in params['config'][component]), f'"{cname}" not selected in "{name}" under "config" in {component}.'
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def _is_compatible(state: Dict, source: SpecView, target: SpecView):
        """Assert that `source` may connect to `target`: valid component roles
        on both ends, both ends declare a space, and the space dtypes match."""
        targets = ['inputs']
        sources = ['outputs']
        (target_name, target_component, target_cname) = target()
        (source_name, source_component, source_cname) = source()
        base_msg = f"'{target_name}.{target_component}.{target_cname}' cannot be connected with '{source_name}.{source_component}.{source_cname}')."
        assert (target_component in targets), f"{base_msg} '{target_component}' cannot be a target."
        assert (source_component in sources), f"{base_msg} '{source_component}' cannot be a source."
        if (source_component in ['outputs', 'sensors']):
            valid = ['inputs', 'feedthroughs', 'actuators']
            msg = f"{base_msg} '{source_component}' can only be connected to any of the components in '{valid}'."
            assert (target_component in valid), msg
        else:
            # Only Object states may drive 'targets'.
            valid = ['targets']
            msg = f"{base_msg} '{source_component}' can only be connected to any of the components in '{valid}'."
            assert (target_component in valid), msg
            msg = f"{base_msg} Only '{source_component}' of Objects can be connected to targets. '{source_name}' is not of type Object."
            assert ('node_type' not in state['nodes'][source_name]), msg
        source_params = state['nodes'][source_name][source_component][source_cname]
        target_params = state['nodes'][target_name][target_component][target_cname]
        assert (source_params['space'] is not None), f'source={source_name}.{source_component}.{source_cname} does not have a space defined.'
        assert (target_params['space'] is not None), f'target={target_name}.{target_component}.{target_cname} does not have a space defined.'
        source_dtype = source_params['space']['dtype']
        target_dtype = target_params['space']['dtype']
        try:
            is_compatible(source_dtype, target_dtype)
        except AssertionError as e:
            # Re-raise dtype mismatches as IOError with full endpoint context.
            msg = f'Incorrect connection of (source={source_name}.{source_component}.{source_cname}) with (target={target_name}.{target_component}.{target_cname}): {e}'
            raise IOError(msg)
def _correct_signature(entry: Optional[SpecView]=None, actuator: Optional[str]=None, sensor: Optional[str]=None):
if entry:
assert (actuator is None), "If 'entry' is specified, actuator argument cannot be specified."
assert (sensor is None), "If 'entry' is specified, sensor argument cannot be specified."
if actuator:
assert (sensor is None), 'If actuator is specified, sensor must be None.'
assert (entry is None), "If actuator is specified, the 'entry' argument cannot be specified."
if sensor:
assert (actuator is None), 'If sensor is specified, action must be None.'
assert (entry is None), "If actuator is specified, the 'entry' argument cannot be specified."
    def is_valid(self, plot=True) -> bool:
        """Validate the graph; optionally plot it. See _is_valid for the checks."""
        return self._is_valid(self._state, plot=plot)
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def _is_valid(state, plot=True):
        """Check addresses and acyclicity on a copy of `state`; asserts on failure."""
        state = deepcopy(state)
        EngineGraph.check_inputs_have_address(state)
        EngineGraph.check_graph_is_acyclic(state, plot=plot)
        return True
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def check_inputs_have_address(state):
        """Assert that every selected input/target/feedthrough is either
        connected (i.e. has a derived address) or has an implementation."""
        state = deepcopy(state)
        # First, materialize addresses from the connection list.
        for (source, target) in state['connects']:
            address = EngineGraph._get_address(source, target)
            (target_name, target_comp, target_cname) = target
            state['nodes'][target_name][target_comp][target_cname]['address'] = address
        for (name, params) in state['nodes'].items():
            for component in params['config']:
                if (component not in ['inputs', 'outputs', 'targets', 'feedthroughs', 'states']):
                    continue
                for cname in params['config'][component]:
                    flag = (cname in params[component])
                    assert flag, f'"{cname}" was selected in {component} of "{name}", but has no implementation.'
                    # Address check applies only to consuming components, and
                    # is skipped for the dummy nodes and the 'tick' input.
                    if (component not in ['inputs', 'targets', 'feedthroughs']):
                        continue
                    if (name in ['sensors', 'actuators']):
                        continue
                    if ((cname == 'tick') and (component == 'inputs')):
                        continue
                    flag = (params[component][cname]['address'] is not None)
                    assert flag, f'"{cname}" was selected in {component} of "{name}", but no address was specified. Either deselect it, or connect it.'
        return True
    # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod
    # whose decorator was stripped; confirm against the original source.
    def check_graph_is_acyclic(state, plot=True):
        """Assert the episode graph is a DAG and no required node is stale.

        On cycle detection, the assertion message renders each loop as an
        ASCII chain of its connections for debugging.
        """
        G = EngineGraph._generate_graph(state)
        not_active = is_stale(G, exclude_skip=True)
        color_nodes(G)
        color_edges(G)
        (H, cycles) = episode_graph(G)
        is_dag = nx.is_directed_acyclic_graph(H)
        if plot:
            (fig_env, ax_env) = plt.subplots(nrows=1, ncols=1)
            ax_env.set_title('Engine-specific graph')
            (_, _, _, pos) = plot_graph(G, k=2, ax=ax_env)
            plt.show()
        # Build a human-readable rendering of each detected cycle.
        cycle_strs = ['Circular loops detected: ']
        for (idx, connect) in enumerate(cycles):
            # Temporarily close the loop so consecutive pairs cover the cycle.
            connect.append(connect[0])
            s = (' Loop %s: ' % idx)
            n = (('\n' + ''.join(([' '] * len(s)))) + '...-->')
            s = (('\n\n' + s) + '...-->')
            for idx in range((len(connect) - 1)):
                (tmp, target) = connect[idx]
                (source, tmp2) = connect[(idx + 1)]
                (source_name, source_comp, source_cname) = source
                (target_name, target_comp, target_cname) = target
                assert (source_name == target_name), ('Source and target not equal: %s, %s' % (source, target))
                connect_name = ('%s/%s/%s][%s/%s/%s' % tuple((list(source) + list(target))))
                node_name = ('Node: ' + source_name).center(len(connect_name), ' ')
                s += ('[%s]-->' % connect_name)
                n += ('[%s]-->' % node_name)
            s += '...'
            n += '...'
            cycle_strs.append(s)
            cycle_strs.append(n)
            # Undo the temporary loop closure.
            connect.pop((- 1))
        assert is_dag, ''.join(cycle_strs)
        # Sensors may legitimately be stale; everything else must be active.
        not_active_excl_sensors = [n for n in not_active if (not (n.split('/')[0] == 'sensors'))]
        assert (len(not_active_excl_sensors) == 0), ('Stale episode graph detected. Nodes "%s" will be stale, while they must be active (i.e. connected) in order for the graph to resolve (i.e. not deadlock).' % not_active)
        return True
def _generate_graph(state):
    """Build a ``nx.MultiDiGraph`` of output->input connections from *state*.

    One graph node is created per ``'<node>/<cname>'`` output (plus sensor
    inputs); one edge per connection in ``state['connects']`` between
    ``outputs`` and ``inputs`` components.

    :param state: Graph state dict with ``'nodes'`` and ``'connects'`` entries.
    :return: The constructed ``nx.MultiDiGraph``.
    """
    G = nx.MultiDiGraph()
    for (node, params) in state['nodes'].items():
        default = params['config']
        if ('outputs' in default):
            has_tick = (True if ('tick' in default['inputs']) else False)
            for cname in default['outputs']:
                name = ('%s/%s' % (node, cname))
                # Actuator outputs must stay active for the graph to resolve.
                remain_active = (True if (node == 'actuators') else False)
                always_active = (True if (node == 'actuators') else False)
                G.add_node(name, remain_active=remain_active, always_active=always_active, is_stale=False, has_tick=has_tick)
        if (node == 'sensors'):
            # Sensor inputs are terminal targets and must remain active.
            for cname in default['inputs']:
                name = ('%s/%s' % (node, cname))
                G.add_node(name, remain_active=True, always_active=False, is_stale=False, has_tick=False)
    target_comps = ['inputs']
    source_comps = ['outputs']
    for (source, target) in state['connects']:
        (source_name, source_comp, source_cname) = source
        (target_name, target_comp, target_cname) = target
        if ((source_comp in source_comps) and (target_comp in target_comps)):
            source_edge = ('%s/%s' % (source_name, source_cname))
            # Fan out: an edge is added towards every output of the target
            # node (plus the sensor input itself when targeting sensors).
            target_edges = []
            target_default = state['nodes'][target_name]['config']
            for cname in target_default['outputs']:
                target_edge = ('%s/%s' % (target_name, cname))
                target_edges.append(target_edge)
            if (target_name == 'sensors'):
                target_edge = ('%s/%s' % (target_name, target_cname))
                target_edges.append(target_edge)
            # The 'skip' flag lives on the actuator input for actuator
            # sources, otherwise on the target's input component.
            if (source_name == 'actuators'):
                skip = state['nodes'][source_name]['inputs'][source_cname]['skip']
            else:
                skip = state['nodes'][target_name][target_comp][target_cname]['skip']
            # Skipped connections are drawn green/dotted.
            color = ('green' if skip else 'black')
            style = ('dotted' if skip else 'solid')
            for target_edge in target_edges:
                G.add_edge(source_edge, target_edge, color=color, feedthrough=False, style=style, alpha=1.0, is_stale=False, skip=skip, source=source, target=target)
    color_nodes(G)
    color_edges(G)
    return G
def _get_view(spec, name: str, depth: Optional[List[str]]=None):
    """Wrap *spec* in a :class:`SpecView` rooted at *name*.

    A falsy *depth* (``None`` or empty) is normalized to a fresh empty list.
    """
    if not depth:
        depth = []
    return SpecView(spec, depth=depth, name=name)
def get_view(self, name: str, depth: Optional[List[str]]=None):
    """Return a :class:`SpecView` over the spec registered under *name*."""
    spec = self.get_spec(name)
    return self._get_view(spec, name, depth)
_types(str, int, list, float, bool, dict, EntitySpec, SpecView, None)
def _set(state, mapping):
    """Merge *mapping* into *state* in place (delegates to ``merge``)."""
    merge(state, mapping)
def _check_spec(spec):
    """Validate an engine-node spec; 'sensors' and 'actuators' are exempt."""
    name = spec.config.name
    if name in ('sensors', 'actuators'):
        return
    # Imported lazily, after the guard, to avoid a circular import and to
    # match the original's import-on-demand behavior.
    from eagerx.core.entities import EngineNode
    EngineNode.check_spec(spec)
def exec_composed_command(command: str, line_objs: List[LineMatch]) -> None:
    """Compose *command* against the matched lines and stage it for execution.

    With an empty/falsy command, fall back to opening the matched files in
    the editor. Otherwise the command is expanded over the line matches and
    the script fragments (alias expansion, invalid-line guard, the command
    itself, and an exit) are appended in order.

    :param command: Raw command template entered by the user (may be empty).
    :param line_objs: Matched lines the command should operate on.
    """
    if (not command):
        # No command given: just open the matched files.
        edit_files(line_objs)
        return
    logger.add_event('command_on_num_files', len(line_objs))
    command = compose_command(command, line_objs)
    append_alias_expansion()
    # Guard against invalid line objects before the user command runs.
    append_if_invalid(line_objs)
    append_friendly_command(command)
    append_exit()
class Node_Monitoring(PrometheusNodeMetrics):
def __init__(self):
    """Initialize metrics access and register the available dashboards."""
    super().__init__()
    # Dashboard names accepted by display_dashboard().
    self.dashboards = ['default', 'pvc']
def list_dashboards(self):
    """Print the available dashboard names and return them.

    Returns:
        list[str]: The registered dashboard names.
    """
    print(self.dashboards)
    # Fix: previously this returned None, so callers writing
    # ``print(self.list_dashboards())`` printed the literal string 'None'.
    # Returning the list is backward compatible (existing callers ignore
    # the return value) and makes the method usable programmatically.
    return self.dashboards
def display_dashboard(self, dashboard, node_name):
    """Render *dashboard* for *node_name*, or report the available ones.

    Args:
        dashboard: Name of the dashboard to show ('default' or 'pvc').
        node_name: Kubernetes node name to monitor.
    """
    if (dashboard not in self.dashboards):
        print(f"ERROR -- Dashboard '{dashboard}' not found")
        Logging.log.error(f"ERROR -- Dashboard '{dashboard}' not found")
        print('Available dashboards:')
        # Fix: was ``print(self.list_dashboards())``, which printed the
        # method's None return value on a separate line.
        self.list_dashboards()
        # Fix: return early — previously execution fell through to the
        # dispatch below (harmless only because no branch matched).
        return
    if (dashboard == 'default'):
        self.node_monitor_dashboard_default(node_name)
    elif (dashboard == 'pvc'):
        self.node_monitor_dashboard_pvc(node_name)
def node_monitor_dashboard_default(self, node_name):
    """Render the live 'default' monitoring dashboard for *node_name*.

    Builds a Rich layout (header; CPU/memory/FS progress panels; top-pods,
    network-IO and disk-IO panels), starts daemon threads that refresh the
    metrics from Prometheus, then loops forever inside ``rich.live.Live``
    until Ctrl-C (clean exit 0) or an unexpected exception (exit 1).

    :param node_name: Kubernetes node name whose metrics are displayed.
    """
    rich.print('[blink]Loading ...', end='\r')

    def make_layout() -> Layout:
        # Screen split: 3-row header on top; body1 (left, fixed width 45)
        # holds the progress panels, body2 (right) the pod table and IO graphs.
        layout = Layout(name='root')
        layout.split(Layout(name='header', size=3), Layout(name='main', ratio=1))
        layout['main'].split_row(Layout(name='body', ratio=3, minimum_size=100))
        layout['body'].split_row(Layout(name='body1', size=45), Layout(name='body2'))
        layout['body1'].split_column(Layout(name='body1_a'), Layout(name='body1_b', size=11))
        layout['body2'].split(Layout(name='body2_a', ratio=1), Layout(name='body2_b', ratio=1))
        layout['body2_b'].split_row(Layout(name='body2_b_a', ratio=1), Layout(name='body2_b_b', ratio=1))
        return layout

    class Header():
        # Top bar: node name plus a wall-clock with blinking colons.
        def __rich__(self) -> Panel:
            grid = Table.grid(expand=True)
            grid.add_column(justify='center', ratio=1)
            grid.add_column(justify='right')
            grid.add_row(f'[b]Node: [/b] {node_name} ', datetime.now().ctime().replace(':', '[blink]:[/]'))
            return Panel(grid, style='green')

    class Node_Resources_Progress(PrometheusNodeMetrics):
        # Owns the Rich Progress widgets and the daemon threads that keep
        # them in sync with Prometheus. Captures *node_name* by closure.
        def __init__(self):
            super().__init__()
            self.progress_start()

        def progress_start(self):
            # Create every Progress bar/task up front with 'Loading'
            # placeholders; update() fills in real values later.
            self.progress_threads_status = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
            self.task_thread_refresh = self.progress_threads_status.add_task(description=f'[white]Interval Refresh', status=f'unknown')
            self.task_prometheus_server_connection = self.progress_threads_status.add_task(description=f'[white]Prometheus', status=f'unknown')
            self.progress_mem_total = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
            self.task_mem_total = self.progress_mem_total.add_task(description=f'[white]Mem Total ', status='Loading')
            self.progress_mem = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
            self.task_mem_used = self.progress_mem.add_task(completed=0, description=f'[white]Mem used', total=100, status='Loading')
            self.task_mem_free = self.progress_mem.add_task(completed=0, description=f'[white]Mem free', total=100, status='Loading')
            self.task_mem_cached = self.progress_mem.add_task(completed=0, description=f'[white]Mem cached ', total=100, status='Loading')
            self.task_mem_buffer = self.progress_mem.add_task(completed=0, description=f'[white]Mem buffer ', total=100, status='Loading')
            self.progress_swap = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
            self.task_swap_total = self.progress_swap.add_task(completed=0, description=f'[white]Swap Total ', total=100, status='Loading')
            self.task_swap_free = self.progress_swap.add_task(completed=0, description=f'[white]Swap free ', total=100, status='Loading')
            self.task_swap_cached = self.progress_swap.add_task(completed=0, description=f'[white]Swap cached ', total=100, status='Loading')
            self.progress_cpu_used_avg = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
            self.task_cpu_used_avg = self.progress_cpu_used_avg.add_task(description='CPU used AVG[10m]', completed=0, total=100, status='Loading')
            self.progress_cpu = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
            self.task_cpu_load1avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 1m ', status='Loading')
            self.task_cpu_load5avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 5m ', status='Loading')
            self.task_cpu_load15avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 15m ', status='Loading')
            self.progress_fs_total = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
            self.task_fs_size_total = self.progress_fs_total.add_task(description=f'[white]FS Total ', status='Loading')
            self.progress_fs = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
            self.task_fs_used = self.progress_fs.add_task(completed=0, description=f'[white]FS used ', total=100, status='Loading')
            self.task_fs_available = self.progress_fs.add_task(completed=0, description=f'[white]FS available ', total=100, status='Loading')
            # Groups rendered inside the CPU / Memory / FS panels below.
            self.group_memory = Group(self.progress_mem_total, self.progress_mem, Rule(style='#AAAAAA'), self.progress_swap)
            self.group_cpu = Group(self.progress_cpu_used_avg, self.progress_cpu)
            self.group_fs = Group(self.progress_fs_total, self.progress_fs)

        def update(self):
            # Runs on a daemon thread: refresh all progress tasks forever.
            time.sleep(3)
            while True:
                Logging.log.info('Getting node metrics to update the dashboard')
                node_metrics_json = self.nodeMetrics(node=node_name)
                Logging.log.debug('Node metrics Json:')
                Logging.log.debug(node_metrics_json)
                node_mem_metrics_json = node_metrics_json.get('memory')
                node_cpu_metrics_json = node_metrics_json.get('cpu')
                node_fs_metrics_json = node_metrics_json.get('fs')
                # Mem used = total - (free + buffers + cached).
                self.progress_mem_total.update(self.task_mem_total, description=f'[white]Mem Total ', status=f" {helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result'))}")
                self.progress_mem.update(self.task_mem_used, completed=(node_mem_metrics_json.get('MemTotalBytes').get('result') - ((node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result')) + node_mem_metrics_json.get('MemCachedBytes').get('result'))), description=f'[white]Mem used', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb((node_mem_metrics_json.get('MemTotalBytes').get('result') - ((node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result')) + node_mem_metrics_json.get('MemCachedBytes').get('result'))))}")
                self.progress_mem.update(self.task_mem_free, completed=node_mem_metrics_json.get('MemFreeBytes').get('result'), description=f'[white]Mem free', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemFreeBytes').get('result'))}")
                self.progress_mem.update(self.task_mem_cached, completed=node_mem_metrics_json.get('MemCachedBytes').get('result'), description=f'[white]Mem cached ', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemCachedBytes').get('result'))}")
                self.progress_mem.update(self.task_mem_buffer, completed=node_mem_metrics_json.get('MemBuffersBytes').get('result'), description=f'[white]Mem buffer ', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemBuffersBytes').get('result'))}")
                self.progress_swap.update(self.task_swap_total, completed=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), description=f'[white]Swap Total ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapTotalBytes').get('result'))}")
                self.progress_swap.update(self.task_swap_free, completed=node_mem_metrics_json.get('MemSwapFreeBytes').get('result'), description=f'[white]Swap free ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapFreeBytes').get('result'))}")
                self.progress_swap.update(self.task_swap_cached, completed=node_mem_metrics_json.get('MemSwapCachedBytes').get('result'), description=f'[white]Swap cached ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapCachedBytes').get('result'))}")
                # NOTE(review): cpuUsageAVG is divided by 2 against a total of
                # 100 — presumably the raw value spans 0-200; confirm the
                # Prometheus query's units.
                self.progress_cpu_used_avg.update(self.task_cpu_used_avg, completed=(node_cpu_metrics_json.get('cpuUsageAVG').get('result') / 2), description=f'[white]CPU used AVG[10m] ', total=100, status='')
                self.progress_cpu.update(self.task_cpu_load1avg, description=f'[white]CPU load avg 1m ', status=node_cpu_metrics_json.get('cpuLoadAvg1m').get('result'))
                self.progress_cpu.update(self.task_cpu_load5avg, description=f'[white]CPU load avg 5m ', status=node_cpu_metrics_json.get('cpuLoadAvg5m').get('result'))
                self.progress_cpu.update(self.task_cpu_load15avg, description=f'[white]CPU load avg 15m ', status=node_cpu_metrics_json.get('cpuLoadAvg15m').get('result'))
                self.progress_fs_total.update(self.task_fs_size_total, description=f'[white]FS Total ', status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsSize').get('result')))
                self.progress_fs.update(self.task_fs_used, completed=node_fs_metrics_json.get('nodeFsUsed').get('result'), description=f'[white]FS used ', total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsUsed').get('result')))
                self.progress_fs.update(self.task_fs_available, completed=node_fs_metrics_json.get('nodeFsAvailable').get('result'), description=f'[white]FS available ', total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsAvailable').get('result')))
                if GlobalAttrs.debug:
                    Logging.log.debug(f"Waiting for interval '{GlobalAttrs.live_update_interval}' before the next update")
                time.sleep(GlobalAttrs.live_update_interval)

        def check_thread_node_resources(self, restart=True):
            # Watchdog: report the refresher thread's liveness and restart
            # it (when *restart* is True) if it died.
            while True:
                def thread_status():
                    status = ''
                    if self.thread_node_resources.is_alive():
                        status = f'alive [green]'
                    else:
                        status = 'dead [red]'
                        if restart:
                            self.start_threads()
                    return status
                self.progress_threads_status.update(task_id=self.task_thread_refresh, status=thread_status())
                time.sleep(5)

        class ValidatePrometheuesConnection(PrometheusNodeMetrics):
            # Polls the Prometheus server every 5s; last outcome in .result.
            def __init__(self):
                super().__init__()
                self.result = {}

            def run(self):
                while True:
                    time.sleep(5)
                    self.result = self.verify_prometheus_connection()
                    if GlobalAttrs.debug:
                        print('DEBUG -- Function: ValidatePrometheuesConnection')
                        Logging.log.info('Function: ValidatePrometheuesConnection')
                    Logging.log.info("Function: ValidatePrometheuesConnection, waiting for internal '5s' ")

        def check_thread_prometheus_server_connection(self):
            # Mirror the latest Prometheus-connection result into its
            # status task. NOTE(review): 'vlaidate' is a typo kept for
            # consistency with start_threads() below.
            while True:
                def thread_status():
                    result = self.vlaidate_prometheus_server.result
                    if (result.get('connected') is None):
                        status = f'waiting [green]'
                    elif result.get('connected'):
                        status = f'connected [green]'
                    else:
                        status = f"{result.get('reason')} [red]"
                    return status
                self.progress_threads_status.update(task_id=self.task_prometheus_server_connection, status=f"{thread_status()} ({self.vlaidate_prometheus_server.result.get('status_code')})")
                time.sleep(5)

        def start_threads(self):
            # Daemon threads: metrics refresher + Prometheus-connection poller.
            self.thread_node_resources = threading.Thread(target=self.update)
            self.thread_node_resources.daemon = True
            self.thread_node_resources.start()
            Logging.log.debug('Started Thread: thread_node_resources')
            self.vlaidate_prometheus_server = self.ValidatePrometheuesConnection()
            self.thread_prometheus_server_connection = threading.Thread(target=self.vlaidate_prometheus_server.run)
            self.thread_prometheus_server_connection.daemon = True
            self.thread_prometheus_server_connection.start()
            Logging.log.debug('Started Thread: thread_prometheus_server_connection')

        def watch_threads(self):
            # Daemon watchdog threads for the two workers started above.
            self.thread_check_thread_node_resources = threading.Thread(target=self.check_thread_node_resources)
            self.thread_check_thread_node_resources.daemon = True
            self.thread_check_thread_node_resources.start()
            self.thread_check_thread_prometheus_server_connection = threading.Thread(target=self.check_thread_prometheus_server_connection)
            self.thread_check_thread_prometheus_server_connection.daemon = True
            self.thread_check_thread_prometheus_server_connection.start()

    try:
        node_metrics = PrometheusNodeMetrics()
        node_resources_progress = Node_Resources_Progress()
        # Left column: CPU / Memory / FS panels plus thread status.
        progress_table = Table.grid(expand=True)
        progress_table.add_row(Panel(node_resources_progress.group_cpu, title='[b]CPU', padding=(1, 2)))
        progress_table.add_row(Panel(node_resources_progress.group_memory, title='[b]Memory', padding=(1, 2)))
        progress_table.add_row(Panel(node_resources_progress.group_fs, title='[b]FS "/"', padding=(1, 2)))
        progress_table.add_row(Panel(node_resources_progress.progress_threads_status, title='[b]Threads Status', padding=(1, 2), subtitle=''))
        layout = make_layout()
        layout['header'].update(Header())
        layout['body1_a'].update(progress_table)
        layout['body1_b'].update(Panel('Made with [red][/red]', title='[b]Unused Space', padding=(1, 2)))
        layout['body2_a'].update(Panel('Loading ...', title='[b]Top Pods in Memory Usage', padding=(1, 1)))
        node_resources_progress.start_threads()
        node_resources_progress.watch_threads()
        # For each IO metric: seed an ASCII graph; on failure show the
        # reason and disable further updates via the corresponding flag.
        update_disk_read_bytes_graph = True
        disk_read_bytes_graph = AsciiGraph()
        disk_read_bytes = self.nodeDiskReadBytes(node_name)
        if GlobalAttrs.debug:
            Logging.log.debug(f'''Getting Pod 'disk_read_bytes' metrics; Result:
{disk_read_bytes}''')
        else:
            Logging.log.info("Getting Pod 'disk_read_bytes' metrics")
        if disk_read_bytes.get('success'):
            disk_read_bytes_graph.create_graph(disk_read_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
        else:
            disk_read_bytes_graph.graph = disk_read_bytes.get('fail_reason')
            update_disk_read_bytes_graph = False
        update_network_received_bytes_graph = True
        network_received_bytes_graph = AsciiGraph()
        network_received_bytes = self.nodeNetworkReceiveBytes(node_name)
        if GlobalAttrs.debug:
            Logging.log.debug(f'''Getting Pod 'network_received_bytes' metrics; Result:
{network_received_bytes}''')
        else:
            Logging.log.info("Getting Pod 'network_received_bytes' metrics")
        if network_received_bytes.get('success'):
            network_received_bytes_graph.create_graph(network_received_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
        else:
            network_received_bytes_graph.graph = network_received_bytes.get('fail_reason')
            update_network_received_bytes_graph = False
        update_network_transmit_bytes_graph = True
        network_transmit_bytes_graph = AsciiGraph()
        network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name)
        if GlobalAttrs.debug:
            Logging.log.debug(f'''Getting Pod 'network_transmit_bytes' metrics; Result:
{network_transmit_bytes}''')
        else:
            Logging.log.info("Getting Pod 'network_transmit_bytes' metrics")
        if network_transmit_bytes.get('success'):
            network_transmit_bytes_graph.create_graph(network_transmit_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
        else:
            network_transmit_bytes_graph.graph = network_transmit_bytes.get('fail_reason')
            update_network_transmit_bytes_graph = False
        update_disk_written_bytes_graph = True
        disk_written_bytes_graph = AsciiGraph()
        disk_written_bytes = self.nodeDiskWrittenBytes(node_name)
        if disk_written_bytes.get('success'):
            disk_written_bytes_graph.create_graph(disk_written_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
        else:
            disk_written_bytes_graph.graph = disk_written_bytes.get('fail_reason')
            update_disk_written_bytes_graph = False
        layout['body2_b_b'].update(Panel(Markdown('Loading ...'), title='[b]Network IO', padding=(1, 1)))
        layout['body2_b_a'].update(Panel(Markdown('Loading ...'), title='[b]Disk IO', padding=(1, 1)))
        group_network_io = Group(Markdown('Bytes Received', justify='center'), Text.from_ansi((network_received_bytes_graph.graph + f'''
{network_received_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Transmitted', justify='center'), Text.from_ansi((network_transmit_bytes_graph.graph + f'''
{network_transmit_bytes_graph.colors_description_str}''')))
        group_disk_io = Group(Markdown('Bytes Read', justify='center'), Text.from_ansi((disk_read_bytes_graph.graph + f'''
{disk_read_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Written', justify='center'), Text.from_ansi((disk_written_bytes_graph.graph + f'''
{disk_written_bytes_graph.colors_description_str}''')))
        Logging.log.info('Starting the Layout.')
        # NOTE(review): refresh_per_second is given the update *interval*
        # (seconds) — for an interval > 1s this over-refreshes; confirm
        # whether 1 / interval was intended.
        with Live(layout, auto_refresh=True, screen=True, refresh_per_second=GlobalAttrs.live_update_interval):
            while True:
                pod_memory_usage = node_metrics.PodMemTopUsage(node=node_name)
                layout['body2_a'].update(Panel(pod_memory_usage, title='[b]Top Pods in Memory Usage', padding=(1, 1)))
                Logging.log.info("Updating the Layout with 'Top Pods in Memory Usage'")
                Logging.log.debug(f'''Result:
{pod_memory_usage}''')
                if update_network_received_bytes_graph:
                    network_received_bytes = self.nodeNetworkReceiveBytes(node_name)
                    Logging.log.info("Updating Node 'network_received_bytes' metrics")
                    Logging.log.debug(network_received_bytes)
                    for (device, value) in network_received_bytes.get('result').items():
                        network_received_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
                if update_network_transmit_bytes_graph:
                    # NOTE(review): unlike the other branches, the *previous*
                    # sample is logged here before refetching — likely a
                    # statement-order slip; confirm before reordering.
                    Logging.log.info("Updating Node 'network_transmit_bytes' metrics")
                    Logging.log.debug(network_transmit_bytes)
                    network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name)
                    for (device, value) in network_transmit_bytes.get('result').items():
                        network_transmit_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
                if update_disk_read_bytes_graph:
                    disk_read_bytes = self.nodeDiskReadBytes(node_name)
                    Logging.log.info("Updating Node 'disk_read_bytes' metrics")
                    Logging.log.debug(disk_read_bytes)
                    for (device, value) in disk_read_bytes.get('result').items():
                        disk_read_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
                if update_disk_written_bytes_graph:
                    disk_written_bytes = self.nodeDiskWrittenBytes(node_name)
                    Logging.log.info("Updating Node 'disk_written_bytes' metrics")
                    Logging.log.debug(disk_written_bytes)
                    for (device, value) in disk_written_bytes.get('result').items():
                        disk_written_bytes_graph.update_lst(device, helper_.bytes_to_kb(value))
                # Rebuild the IO panel groups only when their graphs update.
                if (update_network_received_bytes_graph or update_network_transmit_bytes_graph):
                    group_network_io = Group(Markdown('Bytes Received', justify='center'), Text.from_ansi((network_received_bytes_graph.graph + f'''
{network_received_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Transmitted', justify='center'), Text.from_ansi((network_transmit_bytes_graph.graph + f'''
{network_transmit_bytes_graph.colors_description_str}''')))
                if (update_disk_read_bytes_graph or update_disk_written_bytes_graph):
                    group_disk_io = Group(Markdown('Bytes Read', justify='center'), Text.from_ansi((disk_read_bytes_graph.graph + f'''
{disk_read_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Written', justify='center'), Text.from_ansi((disk_written_bytes_graph.graph + f'''
{disk_written_bytes_graph.colors_description_str}''')))
                layout['body2_b_b'].update(Panel(group_network_io, title='[b]Network IO', padding=(1, 1)))
                layout['body2_b_a'].update(Panel(group_disk_io, title='[b]Disk IO', padding=(1, 1)))
                Logging.log.info(f"waiting for the update interval '{GlobalAttrs.live_update_interval}' before updating the Layout ")
                time.sleep(GlobalAttrs.live_update_interval)
                Logging.log.info(f'Updating the layout')
    except Exception as e:
        rich.print(('\n[yellow]ERROR -- ' + str(e)))
        rich.print('\n[underline bold]Exception:')
        traceback.print_exc()
        exit(1)
    except KeyboardInterrupt:
        # Clear the line and exit cleanly on Ctrl-C.
        print(' ', end='\r')
        rich.print('Ok')
        exit(0)
def node_monitor_dashboard_pvc(self, node_name):
rich.print('[blink]Loading ...', end='\r')
def make_layout() -> Layout:
layout = Layout(name='root')
layout.split(Layout(name='header', size=3), Layout(name='main', ratio=1))
layout['main'].split_row(Layout(name='body', ratio=3, minimum_size=100))
layout['body'].split_column(Layout(name='body1', size=23), Layout(name='body2'))
return layout
class Header():
def __rich__(self) -> Panel:
grid = Table.grid(expand=True)
grid.add_column(justify='center', ratio=1)
grid.add_column(justify='right')
grid.add_row(f'[b]Node: [/b] {node_name} ', datetime.now().ctime().replace(':', '[blink]:[/]'))
return Panel(grid, style='green')
class Node_Resources_Progress(PrometheusNodeMetrics):
def __init__(self):
super().__init__()
self.progress_start()
def progress_start(self):
self.progress_threads_status = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
self.task_thread_refresh = self.progress_threads_status.add_task(description=f'[white]Metrics Refresh', status=f'unknown')
self.task_prometheus_server_connection = self.progress_threads_status.add_task(description=f'[white]Prometheus', status=f'unknown')
self.progress_mem_total = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
self.task_mem_total = self.progress_mem_total.add_task(description=f'[white]Mem Total ', status='Loading')
self.progress_mem = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
self.task_mem_used = self.progress_mem.add_task(completed=0, description=f'[white]Mem used', total=100, status='Loading')
self.task_mem_free = self.progress_mem.add_task(completed=0, description=f'[white]Mem free', total=100, status='Loading')
self.task_mem_cached = self.progress_mem.add_task(completed=0, description=f'[white]Mem cached ', total=100, status='Loading')
self.task_mem_buffer = self.progress_mem.add_task(completed=0, description=f'[white]Mem buffer ', total=100, status='Loading')
self.progress_swap = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
self.task_swap_total = self.progress_swap.add_task(completed=0, description=f'[white]Swap Total ', total=100, status='Loading')
self.task_swap_free = self.progress_swap.add_task(completed=0, description=f'[white]Swap free ', total=100, status='Loading')
self.task_swap_cached = self.progress_swap.add_task(completed=0, description=f'[white]Swap cached ', total=100, status='Loading')
self.progress_cpu_used_avg = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
self.task_cpu_used_avg = self.progress_cpu_used_avg.add_task(description='CPU used AVG[10m]', completed=0, total=100, status='Loading')
self.progress_cpu = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
self.task_cpu_load1avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 1m ', status='Loading')
self.task_cpu_load5avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 5m ', status='Loading')
self.task_cpu_load15avg = self.progress_cpu.add_task(description=f'[white]CPU load avg 15m ', status='Loading')
self.progress_fs_total = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TextColumn('{task.fields[status]}'))
self.task_fs_size_total = self.progress_fs_total.add_task(description=f'[white]FS Total ', status='Loading')
self.progress_fs = Progress(TextColumn('[progress.description]{task.description}'), BarColumn(bar_width=20), TaskProgressColumn(), TextColumn('{task.fields[status]}'))
self.task_fs_used = self.progress_fs.add_task(completed=0, description=f'[white]FS used ', total=100, status='Loading')
self.task_fs_available = self.progress_fs.add_task(completed=0, description=f'[white]FS available ', total=100, status='Loading')
self.group_memory = Group(self.progress_mem_total, self.progress_mem, Rule(style='#AAAAAA'), self.progress_swap)
self.group_cpu = Group(self.progress_cpu_used_avg, self.progress_cpu)
self.group_fs = Group(self.progress_fs_total, self.progress_fs)
def update(self):
time.sleep(3)
while True:
Logging.log.info('Getting node metrics to update the dashboard')
node_metrics_json = self.nodeMetrics(node=node_name)
if GlobalAttrs.debug:
Logging.log.info('Node metrics Json:')
Logging.log.debug(node_metrics_json)
node_mem_metrics_json = node_metrics_json.get('memory')
node_cpu_metrics_json = node_metrics_json.get('cpu')
node_fs_metrics_json = node_metrics_json.get('fs')
self.progress_mem_total.update(self.task_mem_total, description=f'[white]Mem Total ', status=f" {helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemTotalBytes').get('result'))}")
self.progress_mem.update(self.task_mem_used, completed=(node_mem_metrics_json.get('MemTotalBytes').get('result') - node_mem_metrics_json.get('MemFreeBytes').get('result')), description=f'[white]Mem used', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb((node_mem_metrics_json.get('MemTotalBytes').get('result') - ((node_mem_metrics_json.get('MemFreeBytes').get('result') + node_mem_metrics_json.get('MemBuffersBytes').get('result')) + node_mem_metrics_json.get('MemCachedBytes').get('result'))))}")
self.progress_mem.update(self.task_mem_free, completed=node_mem_metrics_json.get('MemFreeBytes').get('result'), description=f'[white]Mem free', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemFreeBytes').get('result'))}")
self.progress_mem.update(self.task_mem_cached, completed=node_mem_metrics_json.get('MemCachedBytes').get('result'), description=f'[white]Mem cached ', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemCachedBytes').get('result'))}")
self.progress_mem.update(self.task_mem_buffer, completed=node_mem_metrics_json.get('MemBuffersBytes').get('result'), description=f'[white]Mem buffer ', total=node_mem_metrics_json.get('MemTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemBuffersBytes').get('result'))}")
self.progress_swap.update(self.task_swap_total, completed=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), description=f'[white]Swap Total ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapTotalBytes').get('result'))}")
self.progress_swap.update(self.task_swap_free, completed=node_mem_metrics_json.get('MemSwapFreeBytes').get('result'), description=f'[white]Swap free ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapFreeBytes').get('result'))}")
self.progress_swap.update(self.task_swap_cached, completed=node_mem_metrics_json.get('MemSwapCachedBytes').get('result'), description=f'[white]Swap cached ', total=node_mem_metrics_json.get('MemSwapTotalBytes').get('result'), status=f"{helper_.bytes_to_kb_mb_gb(node_mem_metrics_json.get('MemSwapCachedBytes').get('result'))}")
self.progress_cpu_used_avg.update(self.task_cpu_used_avg, completed=(node_cpu_metrics_json.get('cpuUsageAVG').get('result') / 2), description=f'[white]CPU used AVG[10m] ', total=100, status='')
self.progress_cpu.update(self.task_cpu_load1avg, description=f'[white]CPU load avg 1m ', status=node_cpu_metrics_json.get('cpuLoadAvg1m').get('result'))
self.progress_cpu.update(self.task_cpu_load5avg, description=f'[white]CPU load avg 5m ', status=node_cpu_metrics_json.get('cpuLoadAvg5m').get('result'))
self.progress_cpu.update(self.task_cpu_load15avg, description=f'[white]CPU load avg 15m ', status=node_cpu_metrics_json.get('cpuLoadAvg15m').get('result'))
self.progress_fs_total.update(self.task_fs_size_total, description=f'[white]FS Total ', status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsSize').get('result')))
self.progress_fs.update(self.task_fs_used, completed=node_fs_metrics_json.get('nodeFsUsed').get('result'), description=f'[white]FS used ', total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsUsed').get('result')))
self.progress_fs.update(self.task_fs_available, completed=node_fs_metrics_json.get('nodeFsAvailable').get('result'), description=f'[white]FS available ', total=node_fs_metrics_json.get('nodeFsSize').get('result'), status=helper_.bytes_to_kb_mb_gb(node_fs_metrics_json.get('nodeFsAvailable').get('result')))
Logging.log.debug(f"Waiting for interval '{GlobalAttrs.live_update_interval}' before the next update")
time.sleep(GlobalAttrs.live_update_interval)
def check_thread_node_resources(self, restart=True):
while True:
def thread_status():
status = ''
if self.thread_node_resources.is_alive():
status = f'alive [green]'
else:
status = 'dead [red]'
if restart:
self.start_threads()
return status
self.progress_threads_status.update(task_id=self.task_thread_refresh, status=thread_status())
time.sleep(5)
class ValidatePrometheuesConnection(PrometheusNodeMetrics):
    """Background worker that re-checks the Prometheus connection every 5s.

    The latest probe outcome is published in ``self.result`` (the dict
    returned by ``verify_prometheus_connection``).  ``result`` stays ``{}``
    until the first probe completes; readers treat that as a 'waiting' state.

    NOTE: the class name typo ("Prometheues") is kept — it is referenced
    elsewhere (e.g. ``self.ValidatePrometheuesConnection()``).
    """

    def __init__(self):
        super().__init__()
        # Latest connectivity result; empty until the first probe finishes.
        self.result = {}

    def run(self):
        """Loop forever, refreshing ``self.result`` every 5 seconds."""
        while True:
            # Sleep first: the initial 5s gap is why readers must handle
            # an empty result ('waiting') on startup.
            time.sleep(5)
            self.result = self.verify_prometheus_connection()
            if GlobalAttrs.debug:
                print('DEBUG -- Function: ValidatePrometheuesConnection')
            Logging.log.info('Function: ValidatePrometheuesConnection')
            # BUG FIX: message said "internal '5s'"; the wait is an interval.
            Logging.log.info("Function: ValidatePrometheuesConnection, waiting for interval '5s' ")
def check_thread_prometheus_server_connection(self):
    """Every 5s, reflect the Prometheus connectivity probe in the status panel.

    Renders 'waiting' until the probe thread publishes its first result,
    then 'connected' or the failure reason, plus the HTTP status code.
    Runs forever — intended to be executed in a daemon thread.
    """
    def connection_status():
        outcome = self.vlaidate_prometheus_server.result
        connected = outcome.get('connected')
        if connected is None:
            # Probe has not produced a result yet.
            return 'waiting [green]'
        if connected:
            return 'connected [green]'
        return f"{outcome.get('reason')} [red]"

    while True:
        self.progress_threads_status.update(task_id=self.task_prometheus_server_connection, status=f"{connection_status()} ({self.vlaidate_prometheus_server.result.get('status_code')})")
        time.sleep(5)
def start_threads(self):
    """Launch the metrics-update worker and the Prometheus-connectivity worker.

    Both threads are daemonized so they terminate with the main process.
    """
    collector = threading.Thread(target=self.update, daemon=True)
    self.thread_node_resources = collector
    collector.start()
    Logging.log.debug('Started Thread: thread_node_resources')

    self.vlaidate_prometheus_server = self.ValidatePrometheuesConnection()
    checker = threading.Thread(target=self.vlaidate_prometheus_server.run, daemon=True)
    self.thread_prometheus_server_connection = checker
    checker.start()
    Logging.log.debug('Started Thread: thread_prometheus_server_connection')
def watch_threads(self):
    """Spawn daemon watchdog threads for the workers started by start_threads()."""
    watchers = (
        ('thread_check_thread_node_resources', self.check_thread_node_resources),
        ('thread_check_thread_prometheus_server_connection', self.check_thread_prometheus_server_connection),
    )
    for attr_name, target in watchers:
        watchdog = threading.Thread(target=target, daemon=True)
        setattr(self, attr_name, watchdog)
        watchdog.start()
try:
    # Build the dashboard: progress groups come from the nested
    # Node_Resources_Progress helper, arranged into Rich panels.
    node_resources_progress = Node_Resources_Progress()
    progress_table = Table.grid(expand=True)
    progress_table.add_row(Panel(node_resources_progress.group_cpu, title='[b]CPU', padding=(1, 2)))
    progress_table.add_row(Panel(node_resources_progress.group_memory, title='[b]Memory', padding=(1, 2)))
    progress_table.add_row(Panel(node_resources_progress.group_fs, title='[b]FS "/"', padding=(1, 2)))
    progress_table.add_row(Panel(node_resources_progress.progress_threads_status, title='[b]Threads Status', padding=(1, 2), subtitle=''))
    layout = make_layout()
    layout['header'].update(Header())
    # Start the metric-collector threads, then the watchdogs that restart them.
    node_resources_progress.start_threads()
    node_resources_progress.watch_threads()
    # Disk read-bytes graph: seeded once here; the update flag is cleared on
    # failure so refresh logic can skip a graph whose query failed.
    update_disk_read_bytes_graph = True
    disk_read_bytes_graph = AsciiGraph()
    disk_read_bytes = self.nodeDiskReadBytes(node_name)
    if GlobalAttrs.debug:
        Logging.log.debug(f'''Getting Pod 'disk_read_bytes' metrics; Result:
{disk_read_bytes}''')
    else:
        Logging.log.info("Getting Pod 'disk_read_bytes' metrics")
    if disk_read_bytes.get('success'):
        disk_read_bytes_graph.create_graph(disk_read_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
    else:
        # On failure, show the reason text where the graph would render.
        disk_read_bytes_graph.graph = disk_read_bytes.get('fail_reason')
        update_disk_read_bytes_graph = False
    # Network received-bytes graph (same seed/fallback pattern as above).
    update_network_received_bytes_graph = True
    network_received_bytes_graph = AsciiGraph()
    network_received_bytes = self.nodeNetworkReceiveBytes(node_name)
    if GlobalAttrs.debug:
        Logging.log.debug(f'''Getting Pod 'network_received_bytes' metrics; Result:
{network_received_bytes}''')
    else:
        Logging.log.info("Getting Pod 'network_received_bytes' metrics")
    if network_received_bytes.get('success'):
        network_received_bytes_graph.create_graph(network_received_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
    else:
        network_received_bytes_graph.graph = network_received_bytes.get('fail_reason')
        update_network_received_bytes_graph = False
    # Network transmit-bytes graph.
    update_network_transmit_bytes_graph = True
    network_transmit_bytes_graph = AsciiGraph()
    network_transmit_bytes = self.nodeNetworkTransmitBytes(node_name)
    if GlobalAttrs.debug:
        Logging.log.debug(f'''Getting Pod 'network_transmit_bytes' metrics; Result:
{network_transmit_bytes}''')
    else:
        Logging.log.info("Getting Pod 'network_transmit_bytes' metrics")
    if network_transmit_bytes.get('success'):
        network_transmit_bytes_graph.create_graph(network_transmit_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
    else:
        network_transmit_bytes_graph.graph = network_transmit_bytes.get('fail_reason')
        update_network_transmit_bytes_graph = False
    # Disk written-bytes graph.
    # NOTE(review): unlike the three graphs above, this query has no
    # debug/info logging around it — looks like an oversight; confirm.
    update_disk_written_bytes_graph = True
    disk_written_bytes_graph = AsciiGraph()
    disk_written_bytes = self.nodeDiskWrittenBytes(node_name)
    if disk_written_bytes.get('success'):
        disk_written_bytes_graph.create_graph(disk_written_bytes.get('result').keys(), height=5, width=GlobalAttrs.graphs_width, format='{:8.0f} kb/s')
    else:
        disk_written_bytes_graph.graph = disk_written_bytes.get('fail_reason')
        update_disk_written_bytes_graph = False
    # Compose the I/O panels: each pairs an ASCII graph with its color legend.
    group_network_io = Group(Markdown('Bytes Received', justify='center'), Text.from_ansi((network_received_bytes_graph.graph + f'''
{network_received_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Transmitted', justify='center'), Text.from_ansi((network_transmit_bytes_graph.graph + f'''
{network_transmit_bytes_graph.colors_description_str}''')))
    group_disk_io = Group(Markdown('Bytes Read', justify='center'), Text.from_ansi((disk_read_bytes_graph.graph + f'''
{disk_read_bytes_graph.colors_description_str}''')), Rule(style='#AAAAAA'), Markdown('Bytes Written', justify='center'), Text.from_ansi((disk_written_bytes_graph.graph + f'''
{disk_written_bytes_graph.colors_description_str}''')))
    Logging.log.info('Starting the Layout.')
    # Render loop.  NOTE(review): the visible loop body only sleeps and logs;
    # the on-screen values presumably refresh via the collector threads
    # mutating the shared progress objects that Live re-renders — confirm
    # against the full method (this chunk may be truncated).
    with Live(layout, auto_refresh=True, screen=True, refresh_per_second=GlobalAttrs.live_update_interval):
        while True:
            Logging.log.info(f"waiting for the update interval '{GlobalAttrs.live_update_interval}' before updating the Layout ")
            time.sleep(GlobalAttrs.live_update_interval)
            Logging.log.info(f'Updating the layout')
except Exception as e:
    # Unexpected failure: surface the error plus a full traceback, exit non-zero.
    rich.print(('\n[yellow]ERROR -- ' + str(e)))
    rich.print('\n[underline bold]Exception:')
    traceback.print_exc()
    exit(1)
except KeyboardInterrupt:
    # Ctrl-C (not caught by `except Exception` — KeyboardInterrupt derives
    # from BaseException): erase the ^C artifact and exit cleanly.
    print(' ', end='\r')
    rich.print('Ok')
    exit(0)
def node_monitor_dashboard_memory(self, node_name):
    """Memory-focused node dashboard — placeholder only.

    Prints a notice and terminates the process with exit status 0;
    *node_name* is accepted but unused until this is implemented.
    """
    print('not implemented yet.')
    exit(0)
def _handle_error_while_loading_component_generic_error(configuration: ComponentConfiguration, e: Exception) -> None:
    """Wrap an arbitrary component-loading exception into AEAPackageLoadingError.

    Always raises; the original exception *e* is chained as the cause so the
    full context is preserved for debugging.
    """
    details = parse_exception(e)
    message = 'Package loading error: An error occurred while loading {} {}: {}'.format(str(configuration.component_type), configuration.public_id, details)
    raise AEAPackageLoadingError(message) from e
def extractTaidatranslationsWordpressCom(item):
    """Parser for 'taidatranslations.wordpress.com' feed items.

    Returns a release message for recognised series tags, None for items
    with no chapter/volume info or preview posts, and False when no tag
    matches (per this extractor family's convention).
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip posts without chapter/volume info, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('Nightmare Game', 'Nightmare Game', 'translated'),
        ('Escape the Chamber', 'Escape the Chamber', 'translated'),
        ('Sha Qing', 'Sha Qing', 'translated'),
        ('IWY', 'I Am Incomplete Without You', 'translated'),
        ('KoD', 'Kaleidoscope of Death', 'translated'),
    ]
    # First tag that appears on the item wins.
    match = next(((name, tl_type) for tagname, name, tl_type in tagmap if tagname in item['tags']), None)
    if match is None:
        return False
    name, tl_type = match
    return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
@pytest.mark.parametrize('key_encoding', (compute_extension_key, compute_leaf_key))
def test_TraversedPartialPath_keeps_node_value(key_encoding):
    """TraversedPartialPath must preserve the node's value when it re-encodes
    the remaining (untraversed) key into the simulated node.

    Parametrized over both key encodings: a leaf keeps its value and suffix,
    an extension keeps the remaining key as its single sub-segment.
    """
    node_key = (0, 15, 9)
    untraversed_tail = node_key[:1]
    remaining_key = node_key[1:]
    node_value = b'unicorns'
    node = annotate_node([key_encoding(node_key), node_value])
    tpp = TraversedPartialPath(node_key, node, untraversed_tail)
    simulated_node = tpp.simulated_node
    assert simulated_node.raw[1] == node_value
    if key_encoding is compute_leaf_key:
        assert simulated_node.sub_segments == ()
        assert simulated_node.suffix == remaining_key
        assert simulated_node.raw[0] == compute_leaf_key(remaining_key)
        assert simulated_node.value == node_value
    elif key_encoding is compute_extension_key:
        assert simulated_node.sub_segments == (remaining_key,)
        assert simulated_node.suffix == ()
        assert simulated_node.raw[0] == compute_extension_key(remaining_key)
    else:
        # BUG FIX: the message was a plain string, so '{key_encoding}' was
        # never interpolated; it must be an f-string.
        raise Exception(f'Unsupported way to encode keys: {key_encoding}')
class FileField(Field):
    """Serializer field for uploaded files.

    Validates that incoming data is a real uploaded file (exposes ``name``
    and ``size``), optionally rejecting empty files and over-long filenames.
    On output, renders the file URL (absolute when a request is available in
    context) or the bare file name, depending on ``use_url``.
    """
    default_error_messages = {'required': _('No file was submitted.'), 'invalid': _('The submitted data was not a file. Check the encoding type on the form.'), 'no_name': _('No filename could be determined.'), 'empty': _('The submitted file is empty.'), 'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).')}

    def __init__(self, **kwargs):
        # Pop this field's own options before delegating the rest to Field.
        self.max_length = kwargs.pop('max_length', None)
        self.allow_empty_file = kwargs.pop('allow_empty_file', False)
        try:
            self.use_url = kwargs.pop('use_url')
        except KeyError:
            # Attribute left unset on purpose: to_representation() falls
            # back to the api_settings default via getattr().
            pass
        super().__init__(**kwargs)

    def to_internal_value(self, data):
        """Validate *data* as an uploaded file and return it unchanged."""
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            # Not a file-like upload object.
            self.fail('invalid')
        if not file_name:
            self.fail('no_name')
        if not (file_size or self.allow_empty_file):
            self.fail('empty')
        if self.max_length and len(file_name) > self.max_length:
            self.fail('max_length', max_length=self.max_length, length=len(file_name))
        return data

    def to_representation(self, value):
        """Render the stored file as a URL or a plain name (None if unset)."""
        if not value:
            return None
        if not getattr(self, 'use_url', api_settings.UPLOADED_FILES_USE_URL):
            return value.name
        try:
            url = value.url
        except AttributeError:
            # Storage backend exposes no URL for this file.
            return None
        request = self.context.get('request', None)
        return request.build_absolute_uri(url) if request is not None else url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.