code stringlengths 281 23.7M |
|---|
class FeatureToggling:
    """Global registry of experimental feature flags.

    All state lives at class level; every method operates on the shared
    ``_conf`` mapping, so they are static methods (the originals were called
    via the class, never via an instance).  ``reset()`` restores defaults.
    """

    # Immutable template of default flag states; never mutated directly.
    _conf_original = {
        'new-storage': _Feature(
            default_enabled=False,
            msg='The new storage solution is experimental! Thank you for testing our new features.',
        ),
        'scheduler': _Feature(default_enabled=False, msg='Use Scheduler instead of JobQueue'),
    }
    # Working copy that command-line arguments may toggle.
    _conf = deepcopy(_conf_original)

    @staticmethod
    def is_enabled(feature_name: str) -> bool:
        """Return True if the named feature is currently enabled."""
        return FeatureToggling._conf[feature_name].is_enabled

    @staticmethod
    def add_feature_toggling_args(parser: ArgumentParser) -> None:
        """Register one toggle flag per known feature on *parser*."""
        for feature_name in FeatureToggling._conf:
            parser.add_argument(
                f'--{FeatureToggling._get_arg_name(feature_name)}',
                action='store_true',
                help=f'Toggle {feature_name} (Warning: This is experimental)',
                default=False,
            )

    @staticmethod
    def update_from_args(args: Namespace) -> None:
        """Flip the state of every feature whose toggle flag is set in *args*.

        Emits the feature's warning message whenever the feature ends up
        enabled after processing.
        """
        args_dict = vars(args)
        for feature_name in FeatureToggling._conf:
            arg_name = FeatureToggling._get_arg_name(feature_name)
            # argparse stores '--enable-foo' under 'enable_foo'.
            feature_name_escaped = arg_name.replace('-', '_')
            if args_dict.get(feature_name_escaped):
                current_state = FeatureToggling._conf[feature_name].is_enabled
                FeatureToggling._conf[feature_name].is_enabled = not current_state
            if (FeatureToggling._conf[feature_name].is_enabled
                    and FeatureToggling._conf[feature_name].msg is not None):
                logger = logging.getLogger()
                logger.warning(FeatureToggling._conf[feature_name].msg)

    @staticmethod
    def _get_arg_name(feature_name: str) -> str:
        """Return the CLI flag name: 'enable-X' for features that default off,
        'disable-X' for features that default on."""
        default_state = FeatureToggling._conf[feature_name].is_enabled
        arg_default_state = 'disable' if default_state else 'enable'
        return f'{arg_default_state}-{feature_name}'

    @staticmethod
    def reset() -> None:
        """Restore all feature states to their defaults."""
        FeatureToggling._conf = deepcopy(FeatureToggling._conf_original)
class TestDeprecated(unittest.TestCase, UnittestTools):
    """Checks that deprecated callables warn and still behave normally."""

    def test_deprecated_function(self):
        # The deprecation warning fires, and the result is still computed.
        with self.assertDeprecated():
            total = my_deprecated_addition(42, 1729)
        self.assertEqual(total, 1771)

    def test_deprecated_exception_raising_function(self):
        # The warning is emitted even when the wrapped call then raises.
        with self.assertRaises(ZeroDivisionError):
            with self.assertDeprecated():
                my_bad_function()

    def test_deprecated_method(self):
        instance = ClassWithDeprecatedBits()
        with self.assertDeprecated():
            value = instance.bits()
        self.assertEqual(value, 42)

    def test_deprecated_method_with_fancy_signature(self):
        # Positional, variadic and keyword arguments all pass through intact.
        instance = ClassWithDeprecatedBits()
        with self.assertDeprecated():
            value = instance.bytes(3, 27, 65, name='Boris', age=(- 3.2))
        self.assertEqual(value, (3, (27, 65), {'name': 'Boris', 'age': (- 3.2)}))
class OptionSeriesTilemapSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Note-duration mapping options, exposed as read/write properties.

    Fix: each name was defined twice as a plain method (getter then setter),
    so the setter silently shadowed the getter.  The pairs were evidently
    ``@property``/``@<name>.setter`` pairs whose decorators were lost; they
    are restored here.
    """

    @property
    def mapFunction(self):
        """Custom mapping function for the value (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Name of the data property to map to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range restriction for the mapping (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def gen_fan_brute():
    """Write one brute-force .sub file per fan command, covering all 16 addresses."""
    idle_gap = '0' * 39  # inter-frame silence, in bit periods
    for name, code in fan_comm.items():
        frames = []
        for addr in range(16):
            addr_bits = f'{addr:04b}'
            # PWM-encode each logical bit: '1' -> '011', '0' -> '001'.
            cmd_bits = ''.join('011' if bit == '1' else '001' for bit in f'01{addr_bits}0{code}')
            end_bits = ''.join('011' if bit == '1' else '001' for bit in f'01{addr_bits}0{fan_end}')
            framed_cmd = f'{cmd_bits}{idle_gap}'
            # Four command repeats, a terminator frame plus gap, all sent twice.
            burst = ((framed_cmd * 4) + end_bits + idle_gap) * 2
            frames.append(burst)
            if _verbose:
                print(name, addr_bits, code)
                print(f'fan_{name} cmd_pwm', framed_cmd)
        if _verbose:
            print(name, 'xmit us:', len(''.join(frames)) * fan_bit_len)
        with open(f'fan_brute-{name}.sub', 'w') as f:
            print(gen_sub(fan_freq, fan_bit_len, fan_bit_len, 1, 0, ''.join(frames), comment_text=f'FAN-11T Remote Control {name}'), file=f)
def _prediction_column(prediction: Optional[Union[(str, int, Sequence[int], Sequence[str])]], target_type: Optional[ColumnType], task: Optional[str], data: _InputData, mapping: Optional[ColumnMapping]=None) -> Optional[PredictionColumns]:
    """Resolve the user-supplied prediction spec into PredictionColumns.

    A single column name is interpreted according to *task* and the column's
    detected type (label column vs. probability/score column); a list of names
    is only accepted as a complete set of numeric probability columns.
    Returns None when the prediction column(s) are absent from the data.
    Raises ValueError for partially-present or wrongly-typed columns.
    """
    if (prediction is None):
        return None
    if isinstance(prediction, str):
        prediction_present = _get_column_presence(prediction, data)
        if (prediction_present == ColumnPresenceState.Missing):
            return None
        if (prediction_present == ColumnPresenceState.Partially):
            # Present in one of reference/current datasets but not the other.
            raise ValueError(f'Prediction column ({prediction}) is partially present in data')
        prediction_type = _get_column_type(prediction, data, mapping)
        if (task == TaskType.CLASSIFICATION_TASK):
            # Categorical -> predicted labels; numerical -> a single
            # positive-class probability column.
            if (prediction_type == ColumnType.Categorical):
                return PredictionColumns(predicted_values=ColumnDefinition(prediction, prediction_type))
            if (prediction_type == ColumnType.Numerical):
                return PredictionColumns(prediction_probas=[ColumnDefinition(prediction, prediction_type)])
            raise ValueError(f'Unexpected type for prediction column ({prediction}) (it is {prediction_type})')
        if (task == TaskType.REGRESSION_TASK):
            if (prediction_type == ColumnType.Categorical):
                raise ValueError('Prediction type is categorical but task is regression')
            if (prediction_type == ColumnType.Numerical):
                return PredictionColumns(predicted_values=ColumnDefinition(prediction, prediction_type))
        # Recommender handling: ranks are treated as values, scores as probas.
        # NOTE(review): unlike the SCORE branch below, this RANK branch is not
        # guarded by a task check -- presumably intentional, but confirm.
        if ((mapping is not None) and (mapping.recommendations_type == RecomType.RANK)):
            return PredictionColumns(predicted_values=ColumnDefinition(prediction, prediction_type))
        if ((task == TaskType.RECOMMENDER_SYSTEMS) and (mapping is not None) and (mapping.recommendations_type == RecomType.SCORE)):
            return PredictionColumns(prediction_probas=[ColumnDefinition(prediction, prediction_type)])
        if (task is None):
            # No explicit task: a numeric prediction against a categorical
            # target is inferred to be a probability column.
            if ((prediction_type == ColumnType.Numerical) and (target_type == ColumnType.Categorical)):
                return PredictionColumns(prediction_probas=[ColumnDefinition(prediction, prediction_type)])
            return PredictionColumns(predicted_values=ColumnDefinition(prediction, prediction_type))
    if isinstance(prediction, list):
        presence = [_get_column_presence(column, data) for column in prediction]
        if all([(item == ColumnPresenceState.Missing) for item in presence]):
            return None
        if all([(item == ColumnPresenceState.Present) for item in presence]):
            # A list is only valid as a full set of numeric probability columns.
            prediction_defs = [ColumnDefinition(column, _get_column_type(column, data)) for column in prediction]
            if any([(item.column_type != ColumnType.Numerical) for item in prediction_defs]):
                raise ValueError(f'Some prediction columns have incorrect types {prediction_defs}')
            return PredictionColumns(prediction_probas=prediction_defs)
    raise ValueError('Unexpected type for prediction field in column_mapping')
# NOTE(review): the bare string on the next line is almost certainly a mock
# decorator that lost its '@' prefix during extraction -- presumably
# @patch('ciftify.bidsapp.fmriprep_ciftify.run'), which would inject the
# `mock_run` argument below.  Confirm against the original test module.
('ciftify.bidsapp.fmriprep_ciftify.run')
def test_ux11_one_participant_anat_ciftify_only_for_ds005(mock_run):
    """With --anat_only, only anatomical ciftify steps should be invoked."""
    uargs = [ds005_bids, ds005_derivs, 'participant', '--participant_label=14', '--anat_only', '--surf-reg', 'FS']
    ret = simple_main_run(uargs)
    call_list = parse_call_list_into_strings(mock_run.call_args_list)
    # fmriprep is never invoked with --anat_only; recon_all runs exactly once,
    # and the fMRI subject step is skipped entirely.
    assert (count_calls_to('fmriprep', call_list, call_contains='--anat_only') == 0)
    assert (count_calls_to('ciftify_recon_all', call_list) == 1)
    assert (count_calls_to('ciftify_subject_fmri', call_list) == 0)
# NOTE(review): the bare string on the next line looks like a decorator that
# lost its name/'@' prefix during extraction (presumably a template
# registration decorator taking 'field_values.j2') -- confirm against source.
('field_values.j2')
def page_field_values(nested, template_name='field_values_template.j2'):
    """Collect the event-categorization field entries from the nested tree.

    Returns a dict with a 'fields' list ready for template rendering.
    Raises KeyError if any of the expected fields is absent from *nested*.
    """
    # The four ECS event-categorization fields, in display order.
    category_fields = ['event.kind', 'event.category', 'event.type', 'event.outcome']
    nested_fields = []
    for cat_field in category_fields:
        nested_fields.append(nested['event']['fields'][cat_field])
    return dict(fields=nested_fields)
def _retry_job(job: str):
    """Re-queue a failed Ecommerce Integration Log job (System Manager only)."""
    frappe.only_for('System Manager')
    doc = frappe.get_doc('Ecommerce Integration Log', job)
    # Only retry our own integration methods, and only jobs that actually failed.
    is_integration_method = doc.method.startswith('ecommerce_integrations.')
    if not is_integration_method or doc.status != 'Error':
        return
    doc.db_set('status', 'Queued', update_modified=False)
    doc.db_set('traceback', '', update_modified=False)
    frappe.enqueue(
        method=doc.method,
        queue='short',
        timeout=300,
        is_async=True,
        payload=json.loads(doc.request_data),
        request_id=doc.name,
        enqueue_after_commit=True,
    )
def test_conversion_of_ai_standard_to_red_shift_material_sss_properties(create_pymel, setup_scene):
    """aiStandard SSS and emission attributes must carry over to the RedShift material."""
    pm = create_pymel
    ai_standard, _sg = pm.createSurfaceShader('aiStandard')
    sss_color = (1, 0.5, 0)
    sss_amount = 0.532
    sss_radius = 1.434
    emit_color = (0.57, 0.34, 0.54)
    emit_weight = 0.5
    # Populate the source shader.
    ai_standard.KsssColor.set(sss_color)
    ai_standard.Ksss.set(sss_amount)
    ai_standard.sssRadius.set([sss_radius, sss_radius, sss_radius])
    ai_standard.emissionColor.set(emit_color)
    ai_standard.emission.set(emit_weight)
    rs_material = ai2rs.ConversionManager().convert(ai_standard)
    tolerance = 0.001
    # Color channels are compared component-wise.
    for channel in range(3):
        assert rs_material.ms_color0.get()[channel] == pytest.approx(sss_color[channel], abs=tolerance)
        assert rs_material.emission_color.get()[channel] == pytest.approx(emit_color[channel], abs=tolerance)
    assert rs_material.ms_amount.get() == pytest.approx(sss_amount, abs=tolerance)
    assert rs_material.ms_radius0.get() == pytest.approx(sss_radius, abs=tolerance)
    assert rs_material.emission_weight.get() == pytest.approx(emit_weight, abs=tolerance)
class TextFilter():
    """Matches the text payload of a Telegram update against configured patterns.

    At least one of *equals*, *contains*, *starts_with*, *ends_with* must be
    supplied.  ``check()`` tries them in that order: the first configured mode
    that matches returns True; a failing mode returns False only when no later
    mode is configured.
    """

    def __init__(self, equals: Optional[str]=None, contains: Optional[Union[(list, tuple)]]=None, starts_with: Optional[Union[(str, list, tuple)]]=None, ends_with: Optional[Union[(str, list, tuple)]]=None, ignore_case: bool=False):
        """Normalize and store the patterns; raise ValueError if none given."""
        to_check = sum((pattern is not None) for pattern in (equals, contains, starts_with, ends_with))
        if to_check == 0:
            raise ValueError('None of the check modes was specified')
        self.equals = equals
        self.contains = self._check_iterable(contains, filter_name='contains')
        self.starts_with = self._check_iterable(starts_with, filter_name='starts_with')
        self.ends_with = self._check_iterable(ends_with, filter_name='ends_with')
        self.ignore_case = ignore_case

    def _check_iterable(self, iterable, filter_name: str):
        """Normalize a pattern argument: str -> [str]; list/tuple -> list of
        its str items; falsy values pass through unchanged."""
        if not iterable:
            return iterable
        if isinstance(iterable, str):
            return [iterable]
        if isinstance(iterable, (list, tuple)):
            # Non-string entries are silently dropped (unchanged behavior).
            return [i for i in iterable if isinstance(i, str)]
        raise ValueError(f'Incorrect value of {filter_name!r}')

    @staticmethod
    def _extract_text(obj):
        """Pull the matchable text out of a supported update object, else None."""
        if isinstance(obj, types.Poll):
            return obj.question
        if isinstance(obj, types.Message):
            return obj.text or obj.caption
        if isinstance(obj, types.CallbackQuery):
            return obj.data
        if isinstance(obj, types.InlineQuery):
            return obj.query
        return None

    def check(self, obj: 'Union[types.Message, types.CallbackQuery, types.InlineQuery, types.Poll]'):
        """Return True if *obj*'s text satisfies the configured patterns."""
        text = self._extract_text(obj)
        if text is None:
            # Fix: unsupported objects and updates without text (e.g. a Message
            # with neither text nor caption) previously crashed further down.
            return False
        equals, contains = self.equals, self.contains
        starts_with, ends_with = self.starts_with, self.ends_with
        if self.ignore_case:
            text = text.lower()
            # Fix: lower-case every configured pattern, using local copies.
            # The old elif-chain lowered only the FIRST configured pattern and
            # permanently mutated the filter's own state while doing so.
            if equals:
                equals = equals.lower()
            if contains:
                contains = tuple(map(str.lower, contains))
            if starts_with:
                starts_with = tuple(map(str.lower, starts_with))
            if ends_with:
                ends_with = tuple(map(str.lower, ends_with))
        if equals:
            if equals == text:
                return True
            if not any((contains, starts_with, ends_with)):
                return False
        if contains:
            if any((i in text) for i in contains):
                return True
            if not any((starts_with, ends_with)):
                return False
        if starts_with:
            if any(text.startswith(i) for i in starts_with):
                return True
            if not ends_with:
                return False
        if ends_with:
            return any(text.endswith(i) for i in ends_with)
        return False
class TestBaseNode():
    """Unit tests for BasicNode's string forms and copy semantics."""

    def test_representation(self):
        cases = [(BasicNode(1), '1'), (BasicNode('test'), 'test'), (BasicNode(0.7), '0.7')]
        for node, text in cases:
            assert str(node) == text
            assert repr(node) == f'Node({text})'

    def test_copy(self):
        original = BasicNode(1)
        duplicate = original.copy()
        # A copy compares equal but is a distinct object.
        assert original == duplicate
        assert id(original) != id(duplicate)
        other = BasicNode(object())
        assert original != other
        assert other == other.copy()
class OptionSeriesSankeySonificationContexttracksMappingHighpassResonance(Options):
    """High-pass resonance mapping options, exposed as read/write properties.

    Fix: each name was defined twice as a plain method (getter then setter),
    so the setter silently shadowed the getter.  The pairs were evidently
    ``@property``/``@<name>.setter`` pairs whose decorators were lost; they
    are restored here.
    """

    @property
    def mapFunction(self):
        """Custom mapping function for the value (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Name of the data property to map to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range restriction for the mapping (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesWaterfallSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Note-duration mapping options, exposed as read/write properties.

    Fix: each name was defined twice as a plain method (getter then setter),
    so the setter silently shadowed the getter.  The pairs were evidently
    ``@property``/``@<name>.setter`` pairs whose decorators were lost; they
    are restored here.
    """

    @property
    def mapFunction(self):
        """Custom mapping function for the value (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Name of the data property to map to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped output range (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range restriction for the mapping (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def reports(request, event_slug):
    """Render the reports dashboard for an event.

    Aggregates attendance confirmations, collaborator/installer counts,
    installations, talks and speaker counts into the template context.
    """
    event = get_object_or_404(Event, event_slug=event_slug)
    event_dates = EventDate.objects.filter(event=event)
    (confirmed_attendees_count, not_confirmed_attendees_count, speakers_count) = (0, 0, 0)
    installers = Installer.objects.filter(event_user__event=event)
    installations = Installation.objects.filter(attendee__event=event)
    talks = Activity.objects.filter(event=event).filter(is_dummy=False)
    collaborators = Collaborator.objects.filter(event_user__event=event)
    collaborators_event_users = [collaborator.event_user for collaborator in list(collaborators)]
    installers_event_users = [installer.event_user for installer in list(installers)]
    attendees = Attendee.objects.filter(event=event)
    # "Confirmed" = has at least one attendance-date record for this event.
    attendees_attendance = AttendeeAttendanceDate.objects.filter(attendee__event=event).order_by('attendee').distinct()
    confirmed_attendees_count = attendees_attendance.count()
    not_confirmed_attendees_count = (attendees.count() - confirmed_attendees_count)
    confirmed_collaborators_count = EventUserAttendanceDate.objects.filter(event_user__event=event, event_user__in=collaborators_event_users).order_by('event_user').distinct().count()
    not_confirmed_collaborators = (collaborators.count() - confirmed_collaborators_count)
    confirmed_installers_count = EventUserAttendanceDate.objects.filter(event_user__event=event, event_user__in=installers_event_users).order_by('event_user').distinct().count()
    not_confirmed_installers_count = (installers.count() - confirmed_installers_count)
    speakers = []
    for talk in talks:
        # speakers_names is a comma-separated string per talk; dedupe across talks.
        speakers.append(talk.speakers_names.split(','))
    speakers_count = len(set(itertools.chain.from_iterable(speakers)))
    attendance_by_date = {}
    for event_date in event_dates:
        attendance_for_date = AttendeeAttendanceDate.objects.filter(attendee__event=event, date__date=event_date.date).order_by('attendee').distinct()
        # Bucket attendance counts by hour.  NOTE(review): the hard-coded -3
        # hour offset presumably converts UTC to the event's local timezone
        # (UTC-3) -- confirm before relying on this.
        attendance_by_date[event_date.date.strftime('%Y-%m-%d')] = count_by(attendance_for_date, (lambda attendance: (attendance.date.hour - 3)))
    template_dict = {'event_dates': [event_date.date.strftime('%Y-%m-%d') for event_date in event_dates], 'confirmed_attendees_count': confirmed_attendees_count, 'not_confirmed_attendees_count': not_confirmed_attendees_count, 'confirmed_collaborators_count': confirmed_collaborators_count, 'not_confirmed_collaborators_count': not_confirmed_collaborators, 'confirmed_installers_count': confirmed_installers_count, 'not_confirmed_installers_count': not_confirmed_installers_count, 'speakers_count': speakers_count, 'organizers_count': Organizer.objects.filter(event_user__event=event).count(), 'activities_count': talks.count(), 'installations_count': Installation.objects.filter(attendee__event=event).count(), 'installers_for_level': count_by(installers, (lambda inst: inst.level)), 'installers_count': installers.count(), 'installation_for_software': count_by(installations, (lambda inst: inst.software.name)), 'registered_in_time': count_by(attendees, (lambda attendee: attendee.registration_date.date())), 'attendance_by_date': attendance_by_date}
    return render(request, 'reports/dashboard.html', update_event_info(event_slug, render_dict=template_dict))
class PackedArrayEncoder(BaseArrayEncoder):
    """Array encoder for packed encoding: elements are concatenated directly,
    with no offset/length head.  A non-None ``array_size`` enforces a fixed
    element count during validation."""

    # None = dynamically sized array; an int fixes the expected element count.
    array_size = None

    def validate_value(self, value):
        """Validate elements via the base class, then enforce the fixed length."""
        super().validate_value(value)
        if ((self.array_size is not None) and (len(value) != self.array_size)):
            self.invalidate_value(value, exc=ValueOutOfBounds, msg=f'value has {len(value)} items when {self.array_size} were expected')

    def encode(self, value):
        """Packed arrays are just the element encodings, concatenated."""
        encoded_elements = self.encode_elements(value)
        return encoded_elements

    # NOTE(review): the next line appears to be a decorator that lost its '@'
    # prefix (and possibly leading characters) during extraction -- presumably
    # @parse_type_str(with_arrlist=True), with from_type_str normally being a
    # classmethod in this API.  Confirm against the original source.
    _type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        """Build an encoder from a parsed ABI type.

        The last arrlist entry carries the array spec: one element means a
        fixed size (e.g. ``[3]``), empty means dynamically sized (``[]``).
        """
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())
        array_spec = abi_type.arrlist[(- 1)]
        if (len(array_spec) == 1):
            return cls(array_size=array_spec[0], item_encoder=item_encoder)
        else:
            return cls(item_encoder=item_encoder)
class DirectEvent(Event):
    """Event delivered straight to a single user via a dummy task id."""

    def __init__(self, user_id, dc_id=None, **kwargs):
        # An explicit dc_id binds the event to that datacenter; otherwise we
        # fall back to the default DC with an unbound task group.
        bound = bool(dc_id)
        tg = TG_DC_BOUND if bound else TG_DC_UNBOUND
        if not bound:
            dc_id = cq.conf.ERIGONES_DEFAULT_DC
        kwargs['direct'] = True
        task_id = task_id_from_string(user_id, dummy=True, dc_id=dc_id, tt=TT_DUMMY, tg=tg)
        super(DirectEvent, self).__init__(task_id, **kwargs)
# NOTE(review): the bare '()' on the next line appears to be a decorator that
# lost its name and '@' prefix during extraction -- confirm which decorator
# this was against the original source.
()
def _get_help_text(obj: Union[(Command, Group)], formatter: RichHelpFormatter) -> Iterable[Union[(Markdown, Text)]]:
    """Yield rich renderables for a command's help text.

    Emits the deprecation marker (if any), then the first paragraph styled as
    the lead line, then the remaining paragraphs.  A form feed ('\\x0c')
    truncates the help; a leading '\\x08' marks paragraphs whose newlines must
    be preserved when markdown rendering is off.
    """
    if TYPE_CHECKING:
        assert isinstance(obj.help, str)
    config = formatter.config
    if obj.deprecated:
        (yield Text(config.deprecated_string, style=config.style_deprecated))
    help_text = inspect.cleandoc(obj.help)
    # Everything after a form feed is hidden from the rendered help.
    help_text = help_text.partition('\x0c')[0]
    first_line = help_text.split('\n\n')[0]
    if ((not config.use_markdown) and (not first_line.startswith('\x08'))):
        first_line = first_line.replace('\n', ' ')
    (yield _make_rich_rext(first_line.strip(), config.style_helptext_first_line, formatter))
    remaining_paragraphs = help_text.split('\n\n')[1:]
    if (len(remaining_paragraphs) > 0):
        if (not config.use_markdown):
            # Flatten each paragraph to one line unless it opts out via '\x08'.
            remaining_paragraphs = [(x.replace('\n', ' ').strip() if (not x.startswith('\x08')) else '{}\n'.format(x.strip('\x08\n'))) for x in remaining_paragraphs]
            remaining_lines = '\n'.join(remaining_paragraphs)
        else:
            remaining_lines = '\n\n'.join(remaining_paragraphs)
        (yield _make_rich_rext(remaining_lines, config.style_helptext, formatter))
class TestResetLights():
    """Behavioral tests for restoring light state after media playback stops."""

    def test_can_reset_lights(self, given_that, media_player, assert_that, update_passed_args):
        with update_passed_args():
            given_that.passed_arg('reset_lights_after').is_set_to(True)
        # Playing captures/overrides the lights...
        media_player('media_player.tv_test').update_state('playing', {'entity_picture': rgb_images[0]})
        given_that.mock_functions_are_cleared()
        # ...so going idle must restore them to their base state.
        media_player('media_player.tv_test').update_state('idle')
        assert_that('light.test_light_2').was.turned_off()
        assert_that('light.test_light_1').was.turned_on(**test_light_1_base_state)

    def test_wont_reset_lights_if_setting_is_false(self, given_that, media_player, hass_mocks):
        # With reset_lights_after unset, going idle must issue no turn_on calls.
        media_player('media_player.tv_test').update_state('playing', {'entity_picture': rgb_images[0]})
        given_that.mock_functions_are_cleared()
        media_player('media_player.tv_test').update_state('idle')
        assert (len(hass_mocks.hass_functions['turn_on'].call_args_list) == 0)

    def test_can_change_lights_after_reset(self, assert_that, media_player, given_that, update_passed_args):
        with update_passed_args():
            given_that.passed_arg('reset_lights_after').is_set_to(True)
        # Play -> idle (reset) -> play again: lights must follow the artwork colors.
        media_player('media_player.tv_test').update_state('playing', {'entity_picture': rgb_images[0]})
        media_player('media_player.tv_test').update_state('idle')
        given_that.mock_functions_are_cleared()
        media_player('media_player.tv_test').update_state('playing', {'entity_picture': rgb_images[0]})
        assert_that('light.test_light_1').was.turned_on(brightness=255, rgb_color=[59, 180, 180])
        assert_that('light.test_light_2').was.turned_on(brightness=255, rgb_color=[46, 56, 110])

    def test_can_reset_color_modes(self, assert_that, media_player, given_that, update_passed_args, hass_mocks):
        with update_passed_args():
            given_that.passed_arg('reset_lights_after').is_set_to(True)
        # Lights start in color_temp / xy modes; the reset must restore those
        # exact mode-specific attributes, not generic RGB.
        given_that.state_of('light.test_light_1').is_set_to('on', {'color_mode': 'color_temp', 'color_temp': 500, 'brightness': 150})
        given_that.state_of('light.test_light_2').is_set_to('on', {'color_mode': 'xy', 'xy_color': [0.166, 0.269], 'brightness': 100})
        media_player('media_player.tv_test').update_state('playing', {'entity_picture': rgb_images[0]})
        given_that.mock_functions_are_cleared()
        media_player('media_player.tv_test').update_state('idle')
        assert_that('light.test_light_1').was.turned_on(color_temp=500, brightness=150)
        assert_that('light.test_light_2').was.turned_on(xy_color=[0.166, 0.269], brightness=100)
class OptionSeriesArcdiagramSonificationContexttracksMappingLowpass(Options):
    """Low-pass filter mapping options for sonification context tracks.

    Fix: restored ``@property`` on both accessors -- sibling option classes in
    this file show getter/setter pairs whose decorators were stripped during
    extraction, and these sub-option accessors follow the same pattern.
    """

    @property
    def frequency(self) -> 'OptionSeriesArcdiagramSonificationContexttracksMappingLowpassFrequency':
        """Sub-options controlling the filter cutoff frequency."""
        return self._config_sub_data('frequency', OptionSeriesArcdiagramSonificationContexttracksMappingLowpassFrequency)

    @property
    def resonance(self) -> 'OptionSeriesArcdiagramSonificationContexttracksMappingLowpassResonance':
        """Sub-options controlling the filter resonance."""
        return self._config_sub_data('resonance', OptionSeriesArcdiagramSonificationContexttracksMappingLowpassResonance)
class LiteScopeIO(Module, AutoCSR):
    """GPIO in/out module exposing its pins through CSRs."""

    def __init__(self, data_width):
        # data_width: number of input and output pins.
        self.data_width = data_width
        self.input = Signal(data_width)
        self.output = Signal(data_width)
        self.submodules.gpio = GPIOInOut(self.input, self.output)

    def get_csrs(self):
        """Expose the underlying GPIO submodule's CSRs."""
        return self.gpio.get_csrs()
class BaseNode(object):
    """Base class for renderable AST nodes.

    Subclasses declare their fields via ``__slots__`` and may provide:
      * ``template``   -- a string.Template used by :meth:`render`;
      * ``delims``     -- per-slot delimiter strings for joining sequence slots;
      * ``precedence`` -- the precedence value passed down to child renders.
    """
    __slots__ = ()
    template = None
    delims = {}
    precedence = None

    def iter_slots(self):
        """Yield (name, value) for each declared slot (missing values -> None)."""
        for key in self.__slots__:
            yield (key, getattr(self, key, None))

    def children(self):
        """Return every BaseNode reachable through this node's slot values."""
        children = []

        def recurse(descendant):
            if isinstance(descendant, BaseNode):
                children.append(descendant)
            elif isinstance(descendant, (str, bytes)):
                # Fix: strings are iterable and their items are themselves
                # strings, so recursing into them never terminated -- the slot
                # *names* alone were enough to trigger a RecursionError.
                return
            elif hasattr(descendant, '__iter__'):
                for c in descendant:
                    recurse(c)
        recurse((c for c in self.iter_slots()))
        return children

    def optimize(self, recursive=False):
        """Return the optimized form of this node via the Optimizer walker."""
        return Optimizer(recursive=recursive).walk(self)

    def __eq__(self, other):
        # Equal when same concrete type with equal slot names and values.
        return (type(self) == type(other)) and (list(self.iter_slots()) == list(other.iter_slots()))

    def __ne__(self, other):
        return not (self == other)

    def render(self, precedence=None, **kwargs):
        """Render this node through its template, rendering children recursively.

        Raises NotImplementedError when the subclass defines no template.
        """
        if not self.template:
            raise NotImplementedError()
        dicted = {}
        for (name, value) in self.iter_slots():
            if isinstance(value, (list, tuple)):
                # Sequence slots are joined with their declared delimiter.
                delim = self.delims[name]
                value = delim.join(
                    (v.render(self.precedence, **kwargs) if isinstance(v, BaseNode) else v)
                    for v in value
                )
            elif isinstance(value, BaseNode):
                value = value.render(self.precedence, **kwargs)
            dicted[name] = value
        return self.template.substitute(dicted)

    def __repr__(self):
        return '{}({})'.format(type(self).__name__, ', '.join('{}={}'.format(name, repr(slot)) for (name, slot) in self.iter_slots()))

    def __iter__(self):
        return Walker().iter_node(self)

    def __unicode__(self):
        return self.render()

    def __str__(self):
        # Py2 remnant: if render() ever returns a non-str, encode it to UTF-8.
        unicoded = self.__unicode__()
        if not isinstance(unicoded, str):
            unicoded = unicoded.encode('utf-8')
        return unicoded
class RunningRMSTest(TestCaseBase):
    """Checks RunningRMS against closed-form RMS statistics over the batch axis."""

    def setUp(self) -> None:
        self.outer_size = 10        # total samples accumulated per test
        self.inner_size = (4, 5)    # per-sample tensor shape
        self.running_rms = RunningRMS(self.inner_size)
        self.rtol = 1e-06
        self.atol = 1e-06

    def test_single_update(self) -> None:
        # Feed samples one at a time.
        input = torch.rand(self.outer_size, *self.inner_size)
        self.running_rms.reset()
        for x in torch.unbind(input):
            self.running_rms.update(x)
        self._verify_running_rms(input)

    def test_batch_update(self) -> None:
        # Feed samples in uneven batches (1 + 2 + 3 + 4 == outer_size).
        input = torch.rand(self.outer_size, *self.inner_size)
        split_size = [1, 2, 3, 4]
        self.running_rms.reset()
        for x in torch.split(input, split_size):
            self.running_rms.update(x)
        self._verify_running_rms(input)

    def _verify_running_rms(self, input: torch.Tensor) -> None:
        """Compare accumulated statistics against direct computation over dim 0."""
        self.assert_tensor_equal(self.running_rms.count(), torch.tensor([self.outer_size]))
        self.assert_tensor_close(self.running_rms.mean_square(), input.square().mean(dim=0), rtol=self.rtol, atol=self.atol)
        self.assert_tensor_close(self.running_rms.rms(), input.square().mean(dim=0).sqrt(), rtol=self.rtol, atol=self.atol)
        self.assert_tensor_close(self.running_rms.rrms(), input.square().mean(dim=0).rsqrt(), rtol=self.rtol, atol=self.atol)
def is_turn_over(shot: System, constraints: ShotConstraints, legal: bool) -> bool:
    """Decide whether the shooter's turn ends after this shot.

    The turn passes on any illegal shot or when nothing was potted; a legal
    shot that pots at least one ball keeps the shooter at the table.
    """
    if not legal:
        return True
    pocketed = get_pocketed_ball_ids_during_shot(shot)
    if len(pocketed) == 0:
        return True
    # Sanity checks: a shot flagged legal can pot neither the cue nor an off-ball.
    assert ('white' not in pocketed), 'Legal shot has cue in pocket?'
    assert (not is_off_ball_pocketed(shot, constraints)), 'Legal shot w/ off-ball pocketed?'
    if BallGroup.get(constraints.hittable) is BallGroup.COLORS:
        # While on the colors, at most one ball may legally drop per shot.
        assert (len(pocketed) == 1), 'Legal shot has multi colors sank?'
    return False
class MEditor(HasTraits):
    """Mixin implementing common editor behavior for a tasks editor area pane."""

    name = Str()        # editor title shown to the user
    tooltip = Str()
    control = Any()     # toolkit-specific widget; None until created
    obj = Any()         # the object being edited
    dirty = Bool(False)
    editor_area = Instance('pyface.tasks.i_editor_area_pane.IEditorAreaPane')
    is_active = Property(Bool, observe='editor_area.active_editor')
    has_focus = Bool(False)
    closing = VetoableEvent()   # fired before closing; listeners may veto
    closed = Event()            # fired after the editor has been removed

    def close(self):
        """Close the editor, unless a `closing` listener vetoes it."""
        if (self.control is not None):
            self.closing = event = Vetoable()
            if (not event.veto):
                self.editor_area.remove_editor(self)
                self.closed = True

    # NOTE(review): '_property' below is almost certainly a decorator that lost
    # characters during extraction -- presumably @cached_property (the Traits
    # getter convention for the `is_active` Property trait declared above).
    # Confirm against the original source.
    _property
    def _get_is_active(self):
        # Traits property getter: active iff this editor is the area's active one.
        if (self.editor_area is not None):
            return (self.editor_area.active_editor == self)
        return False
class EnabledProductResponseLinks(ModelNormal):
    """Generated OpenAPI model for the `links` object of an enabled-product response.

    NOTE(review): the bare names '_property' and '_js_args_to_python_args'
    below are almost certainly decorators that lost characters during
    extraction -- presumably @cached_property and
    @convert_js_args_to_python_args in the generated client this derives from
    (and _from_openapi_data is normally also a @classmethod).  Confirm
    against the original generated source.
    """

    allowed_values = {}   # no enum-constrained attributes
    validations = {}      # no value-range validations

    _property
    def additional_properties_type():
        # Any JSON-compatible type is accepted for undeclared properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Declared attribute name -> tuple of accepted types.
        return {'_self': (str,), 'service': (str,)}

    _property
    def discriminator():
        return None

    # Python attribute name -> wire (JSON) key.
    attribute_map = {'_self': 'self', 'service': 'service'}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate the model from deserialized server data."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when configured and no additional
            # properties are allowed.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that must always be settable.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct the model from keyword arguments (wire names or attributes)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class Bugzilla(object):
def __init__(self) -> None:
self._bz = None
def _connect(self) -> None:
user = config.get('bodhi_email')
password = config.get('bodhi_password')
url = config.get('bz_server')
log.info(('Using BZ URL %s' % url))
if config['bugzilla_api_key']:
self._bz = bugzilla.Bugzilla(url=url, api_key=config.get('bugzilla_api_key'), cookiefile=None, tokenfile=None)
elif (user and password):
self._bz = bugzilla.Bugzilla(url=url, user=user, password=password, cookiefile=None, tokenfile=None)
else:
self._bz = bugzilla.Bugzilla(url=url, cookiefile=None, tokenfile=None)
def bz(self) -> bugzilla.Bugzilla:
if (self._bz is None):
self._connect()
return self._bz
def getbug(self, bug_id: int) -> 'bugzilla.bug.Bug':
return self.bz.getbug(bug_id)
def comment(self, bug_id: int, comment: str) -> None:
try:
if (len(comment) > 65535):
raise InvalidComment(f'Comment is too long: {comment}')
bug = self.bz.getbug(bug_id)
attempts = 0
while (attempts < 5):
try:
bug.addcomment(comment)
break
except xmlrpc_client.Fault as e:
attempts += 1
log.error(f'''
A fault has occurred
Fault code: {e.faultCode}
Fault string: {e.faultString}''')
except InvalidComment:
log.error(('Comment too long for bug #%d: %s' % (bug_id, comment)))
except xmlrpc_client.Fault as err:
if (err.faultCode == 102):
log.info('Cannot retrieve private bug #%d.', bug_id)
else:
log.exception('Got fault from Bugzilla on #%d: fault code: %d, fault string: %s', bug_id, err.faultCode, err.faultString)
except Exception:
log.exception(('Unable to add comment to bug #%d' % bug_id))
def on_qa(self, bug_id: int, comment: str) -> None:
try:
bug = self.bz.getbug(bug_id)
if (bug.product not in config.get('bz_products')):
log.info('Skipping set on_qa on {0!r} bug #{1}'.format(bug.product, bug_id))
return
if (bug.bug_status not in ('ON_QA', 'VERIFIED', 'CLOSED')):
log.debug(('Setting Bug #%d to ON_QA' % bug_id))
bug.setstatus('ON_QA', comment=comment)
else:
bug.addcomment(comment)
except xmlrpc_client.Fault as err:
if (err.faultCode == 102):
log.info('Cannot retrieve private bug #%d.', bug_id)
else:
log.exception('Got fault from Bugzilla on #%d: fault code: %d, fault string: %s', bug_id, err.faultCode, err.faultString)
except Exception:
log.exception(('Unable to alter bug #%d' % bug_id))
def close(self, bug_id: int, versions: typing.Mapping[(str, str)], comment: str) -> None:
args = {'comment': comment}
try:
bug = self.bz.getbug(bug_id)
if (bug.product not in config.get('bz_products')):
log.info('Skipping set closed on {0!r} bug #{1}'.format(bug.product, bug_id))
return
if (bug.component in versions):
version = versions[bug.component]
fixedin = [v.strip() for v in bug.fixed_in.split()]
fixedin = [v for v in fixedin if v]
fixedin_str = ' '.join(fixedin)
if ((version not in fixedin) and ((len(fixedin_str) + len(version)) < 255)):
args['fixedin'] = ' '.join([fixedin_str, version]).strip()
bug.close('ERRATA', **args)
except xmlrpc_client.Fault as err:
if (err.faultCode == 102):
log.info('Cannot retrieve private bug #%d.', bug_id)
else:
log.exception('Got fault from Bugzilla on #%d: fault code: %d, fault string: %s', bug_id, err.faultCode, err.faultString)
def update_details(self, bug: typing.Union[('bugzilla.bug.Bug', None)], bug_entity: 'models.Bug') -> None:
    """Copy title / parent / security details from Bugzilla onto our row.

    Args:
        bug: A pre-fetched Bugzilla bug, or None to fetch it by
            ``bug_entity.bug_id``.
        bug_entity: The local model instance, updated in place.
    """
    if (not bug):
        try:
            bug = self.bz.getbug(bug_entity.bug_id)
        except xmlrpc_client.Fault as err:
            if (err.faultCode == 102):
                # Not authorized: private bug. Use a placeholder title.
                log.info('Cannot retrieve private bug #%d.', bug_entity.bug_id)
                bug_entity.title = 'Private bug'
            else:
                log.exception('Got fault from Bugzilla on #%d: fault code: %d, fault string: %s', bug_entity.bug_id, err.faultCode, err.faultString)
                bug_entity.title = 'Invalid bug number'
            return
        except Exception:
            log.exception('Unknown exception from Bugzilla')
            return
    # Bugs filed against the 'Security Response' product are treated as
    # parent (tracker) bugs.
    if (bug.product == 'Security Response'):
        bug_entity.parent = True
    bug_entity.title = bug.short_desc
    # keywords may come back as a whitespace-separated string or a list.
    if isinstance(bug.keywords, str):
        keywords = bug.keywords.split()
    else:
        keywords = bug.keywords
    if ('security' in [keyword.lower() for keyword in keywords]):
        bug_entity.security = True
def modified(self, bug_id: typing.Union[(int, str)], comment: str) -> None:
    """Set the given bug to MODIFIED and attach a comment, best-effort.

    Bugs whose product is not in the configured ``bz_products`` are skipped.
    If the bug is already MODIFIED, VERIFIED, or CLOSED, only the comment
    is added. All Bugzilla errors are logged and swallowed.

    Args:
        bug_id: Id of the bug to update (int or str).
        comment: Comment text to attach to the bug.
    """
    try:
        bug = self.bz.getbug(bug_id)
        if (bug.product not in config.get('bz_products')):
            log.info('Skipping set modified on {0!r} bug #{1}'.format(bug.product, bug_id))
            return
        if (bug.bug_status not in ('MODIFIED', 'VERIFIED', 'CLOSED')):
            log.info(('Setting bug #%s status to MODIFIED' % bug_id))
            bug.setstatus('MODIFIED', comment=comment)
        else:
            bug.addcomment(comment)
    except xmlrpc_client.Fault as err:
        # NOTE(review): '%d' would fail to format if bug_id is a str here;
        # logging reports the error without raising — confirm callers
        # always pass an int on this path.
        if (err.faultCode == 102):
            log.info('Cannot retrieve private bug #%d.', bug_id)
        else:
            log.exception('Got fault from Bugzilla on #%d: fault code: %d, fault string: %s', bug_id, err.faultCode, err.faultString)
    except Exception:
        log.exception(('Unable to alter bug #%s' % bug_id))
class DiceRollEngineTest(EvenniaTest):
    """Unit tests for ``rules.DiceRollEngine``.

    NOTE(review): the bare ``('world.rules.randint')`` expressions before
    several tests look like ``@patch(...)`` decorators whose ``@patch``
    prefix was lost in a transformation — each such test takes a
    ``mock_randint`` argument, so confirm against upstream.
    """
    def setUp(self):
        super().setUp()
        self.roll_engine = rules.DiceRollEngine()
    ('world.rules.randint')
    def test_roll(self, mock_randint):
        """NdM rolls should sum N dice, each rolled via randint(1, M)."""
        mock_randint.return_value = 8
        self.assertEqual(self.roll_engine.roll('1d6'), 8)
        mock_randint.assert_called_with(1, 6)
        self.assertEqual(self.roll_engine.roll('2d8'), (2 * 8))
        mock_randint.assert_called_with(1, 8)
        self.assertEqual(self.roll_engine.roll('4d12'), (4 * 8))
        mock_randint.assert_called_with(1, 12)
        self.assertEqual(self.roll_engine.roll('8d100'), (8 * 8))
        mock_randint.assert_called_with(1, 100)
    def test_roll_limits(self):
        """Malformed or out-of-range roll strings must raise TypeError."""
        with self.assertRaises(TypeError):
            self.roll_engine.roll('100d6', max_number=10)
        with self.assertRaises(TypeError):
            self.roll_engine.roll('100')
        with self.assertRaises(TypeError):
            self.roll_engine.roll('dummy')
        with self.assertRaises(TypeError):
            self.roll_engine.roll('Ad4')
        with self.assertRaises(TypeError):
            self.roll_engine.roll('1d10000')
    ('world.rules.randint')
    def test_roll_with_advantage_disadvantage(self, mock_randint):
        """Advantage/disadvantage roll twice; neither (or both) rolls once."""
        mock_randint.return_value = 9
        self.assertEqual(self.roll_engine.roll_with_advantage_or_disadvantage(), 9)
        mock_randint.assert_called_once()
        mock_randint.reset_mock()
        # Advantage and disadvantage cancel out -> a single roll.
        self.assertEqual(self.roll_engine.roll_with_advantage_or_disadvantage(disadvantage=True, advantage=True), 9)
        mock_randint.assert_called_once()
        mock_randint.reset_mock()
        self.assertEqual(self.roll_engine.roll_with_advantage_or_disadvantage(advantage=True), 9)
        mock_randint.assert_has_calls([call(1, 20), call(1, 20)])
        mock_randint.reset_mock()
        self.assertEqual(self.roll_engine.roll_with_advantage_or_disadvantage(disadvantage=True), 9)
        mock_randint.assert_has_calls([call(1, 20), call(1, 20)])
        mock_randint.reset_mock()
    ('world.rules.randint')
    def test_saving_throw(self, mock_randint):
        """Saving throws report (success, quality, extra); 1 and 20 are
        critical failure/success regardless of modifiers."""
        mock_randint.return_value = 8
        character = MagicMock()
        character.strength = 2
        character.dexterity = 1
        self.assertEqual(self.roll_engine.saving_throw(character, bonus_type=enums.Ability.STR), (False, None, Something))
        self.assertEqual(self.roll_engine.saving_throw(character, bonus_type=enums.Ability.DEX, modifier=1), (False, None, Something))
        self.assertEqual(self.roll_engine.saving_throw(character, advantage=True, bonus_type=enums.Ability.DEX, modifier=6), (False, None, Something))
        self.assertEqual(self.roll_engine.saving_throw(character, disadvantage=True, bonus_type=enums.Ability.DEX, modifier=7), (True, None, Something))
        mock_randint.return_value = 1
        self.assertEqual(self.roll_engine.saving_throw(character, disadvantage=True, bonus_type=enums.Ability.STR, modifier=2), (False, enums.Ability.CRITICAL_FAILURE, Something))
        mock_randint.return_value = 20
        self.assertEqual(self.roll_engine.saving_throw(character, disadvantage=True, bonus_type=enums.Ability.STR, modifier=2), (True, enums.Ability.CRITICAL_SUCCESS, Something))
    ('world.rules.randint')
    def test_opposed_saving_throw(self, mock_randint):
        """Opposed throws pit the attacker's bonus+modifier against the
        defender's defense stat."""
        mock_randint.return_value = 10
        (attacker, defender) = (MagicMock(), MagicMock())
        attacker.strength = 1
        defender.armor = 2
        self.assertEqual(self.roll_engine.opposed_saving_throw(attacker, defender, attack_type=enums.Ability.STR, defense_type=enums.Ability.ARMOR), (False, None, Something))
        self.assertEqual(self.roll_engine.opposed_saving_throw(attacker, defender, attack_type=enums.Ability.STR, defense_type=enums.Ability.ARMOR, modifier=2), (True, None, Something))
    ('world.rules.randint')
    def test_roll_random_table(self, mock_randint):
        """Table lookups map the roll onto a row; out-of-range rolls clamp
        to the last/first entry."""
        mock_randint.return_value = 10
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['physique']), 'scrawny')
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['vice']), 'irascible')
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['alignment']), 'neutrality')
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['helmets and shields']), 'no helmet or shield')
        mock_randint.return_value = 25
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['helmets and shields']), 'helmet and shield')
        mock_randint.return_value = (- 10)
        self.assertEqual(self.roll_engine.roll_random_table('1d20', random_tables.chargen_tables['helmets and shields']), 'no helmet or shield')
    ('world.rules.randint')
    def test_morale_check(self, mock_randint):
        """Morale holds when the 2d6-style roll stays under the morale stat."""
        defender = MagicMock()
        defender.morale = 12
        mock_randint.return_value = 7
        self.assertEqual(self.roll_engine.morale_check(defender), False)
        mock_randint.return_value = 3
        self.assertEqual(self.roll_engine.morale_check(defender), True)
    ('world.rules.randint')
    def test_heal_from_rest(self, mock_randint):
        """Resting heals 1d8 + constitution (5 + 1 = 6 here)."""
        character = MagicMock()
        character.heal = MagicMock()
        character.hp_max = 8
        character.hp = 1
        character.constitution = 1
        mock_randint.return_value = 5
        self.roll_engine.heal_from_rest(character)
        mock_randint.assert_called_with(1, 8)
        character.heal.assert_called_with(6)
    ('world.rules.randint')
    def test_roll_death(self, mock_randint):
        """A roll of 1 kills outright; other rolls damage an ability score."""
        character = MagicMock()
        character.strength = 13
        character.hp = 0
        character.hp_max = 8
        mock_randint.return_value = 1
        self.roll_engine.roll_death(character)
        character.at_death.assert_called()
        mock_randint.return_value = 3
        self.roll_engine.roll_death(character)
        self.assertEqual(character.strength, 10)
def lazy_import():
    """Import this module's model dependencies and publish them into the
    module namespace.

    Deferred until call time to avoid circular imports during package
    initialization.
    """
    from fastly.model.backend import Backend
    from fastly.model.backend_response_all_of import BackendResponseAllOf
    from fastly.model.service_id_and_version import ServiceIdAndVersion
    from fastly.model.timestamps import Timestamps
    # Expose each imported class at module scope under its own name.
    for model_cls in (Backend, BackendResponseAllOf, ServiceIdAndVersion, Timestamps):
        globals()[model_cls.__name__] = model_cls
class Bits(object):
    """Helpers for finding the next larger / next smaller integer that has
    the same number of set bits (same popcount) as the input.
    """

    def get_next_largest(self, num):
        """Return the smallest integer greater than ``num`` with the same
        number of 1 bits.

        Args:
            num: A positive integer.

        Raises:
            TypeError: If ``num`` is None.
            ValueError: If ``num`` is zero or negative.
        """
        if num is None:
            raise TypeError('num cannot be None')
        if num <= 0:
            raise ValueError('num cannot be 0 or negative')
        # Count trailing zeroes, then the block of ones directly above them.
        num_ones = 0
        num_zeroes = 0
        num_copy = num
        while num_copy != 0 and (num_copy & 1) == 0:
            num_zeroes += 1
            num_copy >>= 1
        while num_copy != 0 and (num_copy & 1) == 1:
            num_ones += 1
            num_copy >>= 1
        # Flip the rightmost non-trailing zero (just above the ones block)...
        index = num_zeroes + num_ones
        num |= 1 << index
        # ...clear everything below it...
        num &= ~((1 << index) - 1)
        # ...and restore num_ones - 1 ones at the bottom (smallest placement).
        num |= (1 << (num_ones - 1)) - 1
        return num

    def get_next_smallest(self, num):
        """Return the largest integer smaller than ``num`` with the same
        number of 1 bits.

        Args:
            num: A positive integer.

        Raises:
            TypeError: If ``num`` is None.
            ValueError: If ``num`` is zero or negative, or if no smaller
                integer with the same popcount exists (``num`` is of the
                form 0b11...1).
        """
        if num is None:
            raise TypeError('num cannot be None')
        if num <= 0:
            raise ValueError('num cannot be 0 or negative')
        # Count trailing ones, then the block of zeroes directly above them.
        num_ones = 0
        num_zeroes = 0
        num_copy = num
        while num_copy != 0 and (num_copy & 1) == 1:
            num_ones += 1
            num_copy >>= 1
        while num_copy != 0 and (num_copy & 1) == 0:
            num_zeroes += 1
            num_copy >>= 1
        if num_zeroes == 0:
            # num is 0b11...1: it is already the smallest value with that
            # many set bits. (Previously this fell through and returned a
            # LARGER number, e.g. 7 -> 15.)
            raise ValueError('no smaller number with the same number of 1 bits exists')
        # Clear bit `index` (the 1 just above the zero block) and all bits
        # below it in one mask.
        index = num_zeroes + num_ones
        num &= ~((1 << (index + 1)) - 1)
        # BUG FIX: the (num_ones + 1) ones must sit as high as possible —
        # immediately below bit `index`, i.e. shifted left by
        # num_zeroes - 1 — not at the bottom. The old code produced e.g.
        # 12 -> 9 instead of the correct 12 -> 10.
        num |= ((1 << (num_ones + 1)) - 1) << (num_zeroes - 1)
        return num
def _align(dtype):
    """Return a C-style aligned version of ``dtype``.

    - Sub-array dtypes: the base dtype is aligned, the shape is preserved.
    - Plain scalar dtypes: returned unchanged.
    - Struct dtypes: each field is aligned recursively (via ``align``),
      field offsets are padded to each field's alignment requirement, and
      the total itemsize is padded to the struct's overall alignment.
    """
    dtype = normalize_type(dtype)
    # Sub-array dtype: align the element type, keep the shape.
    if (len(dtype.shape) > 0):
        return numpy.dtype((align(dtype.base), dtype.shape))
    # Plain scalar dtype: no fields, nothing to adjust.
    if (dtype.names is None):
        return dtype
    # Recursively align every field's dtype, in declaration order.
    adjusted_fields = [align(dtype.fields[name][0]) for name in dtype.names]
    # Alignment requirement of each (already aligned) field.
    alignments = [_find_alignments(field_dtype)[0] for field_dtype in adjusted_fields]
    offsets = [0]
    # Each field starts at the previous field's end, rounded up to the
    # current field's alignment (min_blocks presumably = ceil division —
    # TODO confirm against its definition).
    for (name, prev_field_dtype, alignment) in zip(dtype.names[1:], adjusted_fields[:(- 1)], alignments[1:]):
        prev_end = (offsets[(- 1)] + prev_field_dtype.itemsize)
        offsets.append((min_blocks(prev_end, alignment) * alignment))
    struct_alignment = _struct_alignment(alignments)
    # Pad the total size up to the struct's overall alignment.
    min_itemsize = (offsets[(- 1)] + adjusted_fields[(- 1)].itemsize)
    itemsize = (min_blocks(min_itemsize, struct_alignment) * struct_alignment)
    return numpy.dtype(dict(names=dtype.names, formats=adjusted_fields, offsets=offsets, itemsize=itemsize, aligned=True))
def test_builder_with_single_alias_kwarg(owned_package):
    """Building with contract_type(..., alias=...) should key the contract
    type data by the alias while recording the original name under
    ``contractType`` and the alias in the compiler's ``contractTypes``."""
    (_, _, compiler_output) = owned_package
    manifest = build(BASE_MANIFEST, contract_type('Owned', compiler_output, alias='OwnedAlias'), validate())
    contract_type_data = normalize_contract_type(compiler_output['Owned.sol']['Owned'], 'Owned.sol')
    # Compiler info is hoisted to the top-level 'compilers' list, keyed to
    # the alias rather than the original contract name.
    compilers_data = contract_type_data.pop('compiler')
    compilers_data['contractTypes'] = ['OwnedAlias']
    expected_with_contract_type = assoc(BASE_MANIFEST, 'contractTypes', {'OwnedAlias': assoc(contract_type_data, 'contractType', 'Owned')})
    expected = assoc(expected_with_contract_type, 'compilers', [compilers_data])
    assert (manifest == expected)
# NOTE(review): the three leading lines look like pytest markers whose
# '@pytest.mark' prefix was lost in a transformation — confirm upstream.
.external
.skipif((has_openai_key is False), reason='OpenAI API key not available')
.parametrize('n_detections', [0, 1, 2])
def test_spancat_scoring(fewshot_cfg_string: str, n_detections: int):
    """Span-categorizer precision should track how many of the two gold
    PER spans are kept in the reference docs (0, 1, or 2)."""
    config = Config().from_str(fewshot_cfg_string)
    nlp = assemble_from_config(config)
    examples = []
    for text in ['Alice works with Bob.', 'Bob lives with Alice.']:
        predicted = nlp.make_doc(text)
        reference = nlp.make_doc(text)
        # Both names are PER spans; keep only the first n_detections gold spans.
        ent1 = Span(reference, 0, 1, label='PER')
        ent2 = Span(reference, 3, 4, label='PER')
        reference.spans['sc'] = [ent1, ent2][:n_detections]
        examples.append(Example(predicted, reference))
    scores = nlp.evaluate(examples)
    assert (scores['spans_sc_p'] == (n_detections / 2))
    assert (scores['spans_sc_r'] == (1 if (n_detections != 0) else 0))
    assert (scores['spans_sc_f'] == (pytest.approx(0.) if (n_detections == 1) else (n_detections / 2)))
class AbstractPerturbation(ABC, Tidy3dBaseModel):
    """Abstract base for parameter perturbation models.

    NOTE(review): the bare ``_property`` lines appear to be decorator
    artifacts (likely ``@property`` / ``@abstractmethod``) and the helper
    methods below lack ``self`` (likely stripped ``@staticmethod``) —
    confirm against upstream before relying on this class.
    """
    _property
    def perturbation_range(self) -> Union[(Tuple[(float, float)], Tuple[(Complex, Complex)])]:
        """Range of values (min, max) this perturbation can produce."""
    _property
    def is_complex(self) -> bool:
        """Whether the perturbation produces complex values."""
    def _linear_range(interval: Tuple[(float, float)], ref: float, coeff: Union[(float, Complex)]):
        """Compute the range of ``coeff * (x - ref)`` for x in ``interval``."""
        # Zero coefficient -> degenerate [0, 0] range.
        if (coeff in (0, 0j)):
            return np.array([0, 0])
        # np.sort keeps the pair ordered (min, max) even for negative coeff.
        return tuple(np.sort((coeff * (np.array(interval) - ref))))
    def _get_val(field: Union[(ArrayLike[float], ArrayLike[Complex], SpatialDataArray)], val: FieldVal) -> Union[(ArrayLike[float], ArrayLike[Complex], SpatialDataArray)]:
        """Project a (possibly complex) field onto the requested scalar view."""
        if (val == 'real'):
            return np.real(field)
        if (val == 'imag'):
            return np.imag(field)
        if (val == 'abs'):
            return np.abs(field)
        if (val == 'abs^2'):
            return (np.abs(field) ** 2)
        if (val == 'phase'):
            # NOTE(review): phase is conventionally atan2(imag, real); the
            # argument order here is (real, imag), which looks swapped —
            # confirm against upstream before changing.
            return np.arctan2(np.real(field), np.imag(field))
        raise ValueError("Unknown 'val' key. Argument 'val' can take values 'real', 'imag', 'abs', 'abs^2', or 'phase'.")
    def _array_type(value: Union[(ArrayLike[float], ArrayLike[Complex], SpatialDataArray)]) -> str:
        """Classify ``value`` as 'spatial', 'scalar', or 'array'."""
        if isinstance(value, SpatialDataArray):
            return 'spatial'
        if (np.ndim(value) == 0):
            return 'scalar'
        return 'array'
def test():
    """Exercise checker: verify the WEBSITE entity annotations produced on
    the three example docs (doc1/doc2/doc3 are provided by the exercise
    environment, as is the __msg__ reporter)."""
    assert (len(doc1.ents) == 2), 'Expected two entities in the first example'
    assert ((doc1.ents[0].label_ == 'WEBSITE') and (doc1.ents[0].text == 'Reddit')), 'Check entity one in the first example'
    assert ((doc1.ents[1].label_ == 'WEBSITE') and (doc1.ents[1].text == 'Patreon')), 'Check entity two in the first example'
    assert (len(doc2.ents) == 1), 'Expected one entity in the second example'
    assert ((doc2.ents[0].label_ == 'WEBSITE') and (doc2.ents[0].text == 'YouTube')), 'Check the entity in the second example'
    assert (len(doc3.ents) == 1), 'Expected one entity in the third example'
    assert ((doc3.ents[0].label_ == 'WEBSITE') and (doc3.ents[0].text == 'Reddit')), 'Check the entity in the third example'
    __msg__.good('Nice work!')
class BytesDataclass():
    """Mixin for dataclasses that carry raw ``bytes`` (or JSON-dict)
    fields: re-encodes those fields after init and can serialize the
    whole instance to JSON.
    """

    def __post_init__(self) -> None:
        """Re-encode every bytes / Dict[str, Any] typed field in place."""
        encodable_types = (bytes, Dict[(str, Any)])
        for fld in fields(self):
            if fld.type in encodable_types:
                setattr(self, fld.name, encode_bytes(getattr(self, fld.name)))

    def as_json(self) -> str:
        """Serialize to a JSON string, hex-encoding any remaining
        non-JSON-native values."""
        return json.dumps(asdict(self), default=(lambda o: o.hex()))
# NOTE(review): the leading line looks like a '@pytest.mark.django_db'
# marker whose '@pytest.mark' prefix was lost — confirm upstream.
.django_db(transaction=True)
def test_download_transactions_excessive_limit(client, monkeypatch, download_test_data, elasticsearch_transaction_index):
    """Requesting more rows than MAX_DOWNLOAD_LIMIT must be rejected with 422."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    # Point download generation at the test database.
    download_generation.retrieve_db_string = Mock(return_value=get_database_dsn_string(settings.DOWNLOAD_DB_ALIAS))
    resp = client.post('/api/v2/download/transactions/', content_type='application/json', data=json.dumps({'limit': (settings.MAX_DOWNLOAD_LIMIT + 1), 'filters': {'award_type_codes': ['A']}, 'columns': []}))
    assert (resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY)
(variables=_variables_numerical_docstring, missing_values=_missing_values_docstring, drop_original=_drop_original_docstring, feature_names_in_=_feature_names_in_docstring, n_features_in_=_n_features_in_docstring, fit=_fit_not_learn_docstring, fit_transform=_fit_transform_docstring)
class LagFeatures(BaseForecastTransformer):
    """Add lagged copies of numerical time-series variables.

    Lags are taken either by a number of rows (``periods``) or by a time
    offset (``freq``); when ``freq`` is given it takes precedence.

    NOTE(review): the bare call expression above the class looks like a
    docstring-substitution decorator whose name was lost — confirm.
    """
    def __init__(self, variables: Union[(None, int, str, List[Union[(str, int)]])]=None, periods: Union[(int, List[int])]=1, freq: Union[(str, List[str], None)]=None, sort_index: bool=True, missing_values: str='raise', drop_original: bool=False) -> None:
        # periods must be a positive int or a list of positive ints.
        if (not ((isinstance(periods, int) and (periods > 0)) or (isinstance(periods, list) and all(((isinstance(num, int) and (num > 0)) for num in periods))))):
            raise ValueError(f'periods must be an integer or a list of positive integers. Got {periods} instead.')
        if (isinstance(periods, list) and (len(periods) != len(set(periods)))):
            raise ValueError(f'There are duplicated periods in the list: {periods}')
        if (isinstance(freq, list) and (len(freq) != len(set(freq)))):
            raise ValueError(f'There are duplicated freq values in the list: {freq}')
        if (not isinstance(sort_index, bool)):
            raise ValueError(f'sort_index takes values True and False.Got {sort_index} instead.')
        super().__init__(variables, missing_values, drop_original)
        self.periods = periods
        self.freq = freq
        self.sort_index = sort_index
    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Append lag features to ``X`` and return the augmented frame.

        freq takes precedence over periods; lists produce one set of lag
        columns per entry.
        """
        X = self._check_transform_input_and_state(X)
        if (self.freq is not None):
            if isinstance(self.freq, list):
                df_ls = []
                for fr in self.freq:
                    tmp = X[self.variables_].shift(freq=fr, axis=0)
                    df_ls.append(tmp)
                tmp = pd.concat(df_ls, axis=1)
            else:
                tmp = X[self.variables_].shift(freq=self.freq, axis=0)
        elif isinstance(self.periods, list):
            df_ls = []
            for pr in self.periods:
                tmp = X[self.variables_].shift(periods=pr, axis=0)
                df_ls.append(tmp)
            tmp = pd.concat(df_ls, axis=1)
        else:
            tmp = X[self.variables_].shift(periods=self.periods, axis=0)
        # Rename lagged columns and merge them back onto the original index.
        tmp.columns = self._get_new_features_name()
        X = X.merge(tmp, left_index=True, right_index=True, how='left')
        if self.drop_original:
            X = X.drop(self.variables_, axis=1)
        return X
    def _get_new_features_name(self) -> List:
        """Build '<var>_lag_<period-or-freq>' names matching transform()'s
        column order (all variables per lag, lags in given order)."""
        if isinstance(self.freq, list):
            feature_names = [f'{feature}_lag_{fr}' for fr in self.freq for feature in self.variables_]
        elif (self.freq is not None):
            feature_names = [f'{feature}_lag_{self.freq}' for feature in self.variables_]
        elif isinstance(self.periods, list):
            feature_names = [f'{feature}_lag_{pr}' for pr in self.periods for feature in self.variables_]
        else:
            feature_names = [f'{feature}_lag_{self.periods}' for feature in self.variables_]
        return feature_names
# NOTE(review): the leading line looks like a '@pytest.mark.django_db'
# marker whose '@pytest.mark' prefix was lost — confirm upstream.
.django_db(transaction=True)
def test_empty_array_filter_fail(client, _award_download_data):
    """An empty sub_award_types array violates the min-items rule -> 422
    with an explanatory message."""
    filters = {'agency': 'all', 'prime_award_types': [*list(award_type_mapping.keys())], 'sub_award_types': [], 'date_type': 'action_date', 'date_range': {'start_date': '2016-10-01', 'end_date': '2017-09-30'}, 'recipient_scope': 'foreign'}
    resp = client.post('/api/v2/bulk_download/awards', content_type='application/json', data=json.dumps({'filters': filters}))
    assert (resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY)
    assert ("Field 'filters|sub_award_types' value '[]' is below min '1' items" in resp.json()['detail']), 'Incorrect error message'
class InsnMap():
    """Maps an instruction's start program counter to its end PC and name.

    Optionally pre-populated from ``pcs``: an iterable of dicts with
    'start_pc', 'byte_length', and 'insn_name' keys.
    """

    def __init__(self, *args, **kwargs):
        # start_pc -> end_pc (start + byte length)
        self.pc_dict = dict()
        # start_pc -> instruction name
        self.insn_dict = dict()
        if ('pcs' in kwargs):
            for item in kwargs['pcs']:
                self.add_pc(item['start_pc'], item['byte_length'], item['insn_name'])

    def add_pc(self, start_pc, byte_length, insn_name):
        """Register an instruction spanning [start_pc, start_pc + byte_length)."""
        self.pc_dict[start_pc] = (start_pc + byte_length)
        self.insn_dict[start_pc] = insn_name

    def get_pc(self, pc):
        """Return the end PC for an instruction starting at ``pc``; if no
        instruction starts there, return ``pc`` unchanged."""
        # dict.get avoids the double lookup of `x in d` + `d[x]`.
        return self.pc_dict.get(pc, pc)

    def get_insn(self, pc):
        """Return the name of the instruction starting at ``pc``, or None."""
        return self.insn_dict.get(pc)
def gen_function_call(func_attrs, indent=' ', bias_ptr_arg=None):
    """Render the function-call statement for this gemm-style op.

    Pointer names come from the op's input/output tensors; dimension
    pointers come from the accessors' original shapes. A fused elementwise
    input ``d`` and a bias pointer are mutually exclusive.
    """

    def _tensor_name(tensor):
        # The codegen name assigned to a tensor.
        return tensor._attrs['name']

    def _dim_ptrs(accessor):
        # One "&<dim_name>" expression per original-shape dimension.
        return [f"&{dim._attrs['name']}" for dim in accessor.original_shapes]

    inputs = func_attrs['inputs']
    a_dims_ptr = _dim_ptrs(func_attrs['input_accessors'][0])
    b_dims_ptr = _dim_ptrs(func_attrs['input_accessors'][1])
    c_dims_ptr = _dim_ptrs(func_attrs['output_accessors'][0])
    has_d = func_attrs.get('has_d', False)
    d_ptr = _tensor_name(inputs[2]) if ('has_d' in func_attrs) else None
    has_bias = (bias_ptr_arg is not None)
    # A fused d-input and a bias cannot both be present.
    assert (not (has_d and has_bias))
    local_dim_defs = common.gen_local_dim_defs(func_attrs, indent=indent)
    return FUNC_CALL_TEMPLATE.render(local_dim_defs=local_dim_defs, func_name=func_attrs['name'], a_ptr=_tensor_name(inputs[0]), b_ptr=_tensor_name(inputs[1]), has_bias=has_bias, bias_ptr=bias_ptr_arg, c_ptr=_tensor_name(func_attrs['outputs'][0]), d_ptr=d_ptr, has_d=has_d, a_dims_ptr=a_dims_ptr, b_dims_ptr=b_dims_ptr, c_dims_ptr=c_dims_ptr, indent=indent)
def test_endpoints_by_mac():
    """endpoints_by_mac should return [] for unknown MACs and the list of
    matching endpoints for a known MAC."""
    s = get_sdn_connect(logger)
    endpoints = s.endpoints_by_mac('00:00:00:00:00:01')
    assert (endpoints == [])
    # Register one endpoint, then look it up by its MAC.
    endpoint = endpoint_factory('foo')
    endpoint.endpoint_data = {'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
    s.endpoints[endpoint.name] = endpoint
    endpoint2 = s.endpoints_by_mac('00:00:00:00:00:00')
    assert ([endpoint] == endpoint2)
# NOTE(review): the leading line looks like a '@pytest.mark.parametrize'
# marker whose '@pytest.mark' prefix was lost — confirm upstream.
.parametrize('n_prompt_examples', [(- 1), 0, 1, 2])
def test_ner_init(noop_config, n_prompt_examples: int):
    """Initializing the LLM NER task from examples should infer the label
    set and keep n_prompt_examples examples (-1 keeps all)."""
    config = Config().from_str(noop_config)
    # Remove explicit labels so they must be inferred during initialize().
    del config['components']['llm']['task']['labels']
    nlp = assemble_from_config(config)
    examples = []
    for text in ['Alice works with Bob in London.', 'Bob lives with Alice in Manchester.']:
        predicted = nlp.make_doc(text)
        reference = predicted.copy()
        reference.ents = [Span(reference, 0, 1, label='PER'), Span(reference, 3, 4, label='PER'), Span(reference, 5, 6, label='LOC')]
        examples.append(Example(predicted, reference))
    (_, llm) = nlp.pipeline[0]
    task: NERTask = llm._task
    # Before initialization: no labels and no stored prompt examples.
    assert (set(task._label_dict.values()) == set())
    assert (not task._prompt_examples)
    nlp.config['initialize']['components']['llm'] = {'n_prompt_examples': n_prompt_examples}
    nlp.initialize((lambda : examples))
    assert (set(task._label_dict.values()) == {'PER', 'LOC'})
    if (n_prompt_examples >= 0):
        assert (len(task._prompt_examples) == n_prompt_examples)
    else:
        # -1 means: keep every provided example as a prompt example.
        assert (len(task._prompt_examples) == len(examples))
    if (n_prompt_examples > 0):
        for eg in task._prompt_examples:
            assert (set(eg.entities.keys()) == {'PER', 'LOC'})
class MessageDecorator(lmql.decorators.LMQLDecorator):
    """LMQL variable decorator that forwards message lifecycle events
    (begin / stream / complete) to a chat-aware output writer, when one
    is installed on the runtime.
    """

    @staticmethod
    def _chat_writer(context):
        # Deduplicates the guard that was repeated in all three hooks:
        # return the runtime's output writer only if it understands chat
        # message events, else None.
        writer = context.runtime.output_writer
        if (writer is not None) and isinstance(writer, ChatMessageOutputWriter):
            return writer
        return None

    def pre(self, variable, context):
        """Announce that a message variable is about to be produced."""
        writer = self._chat_writer(context)
        if writer is not None:
            writer.begin_message(variable)
        return variable

    def stream(self, variable_value, context):
        """Forward each partial value of the message as it streams in."""
        writer = self._chat_writer(context)
        if writer is not None:
            writer.stream_message(variable_value)

    def post(self, variable_value, prompt_value, context):
        """Signal message completion, then defer to the base decorator."""
        writer = self._chat_writer(context)
        if writer is not None:
            writer.complete_message(variable_value)
        return super().post(variable_value, prompt_value, context)
def test_check_assertion_is_raised_when_using_missing_uuid() -> None:
    """extract_from_documents(use_uid=True) must raise ValueError when the
    documents were not preprocessed with uids."""
    chain = create_extraction_chain(ToyChatModel(response='<json>{ "obj": { "text_node": "hello" } }</json>'), SIMPLE_OBJECT_SCHEMA, encoder_or_encoder_class='json')
    documents = [Document(page_content='hello')]
    with pytest.raises(ValueError):
        asyncio.run(extract_from_documents(chain, documents, use_uid=True, max_concurrency=100))
def test_matte_color():
    """matte_color accepts Color instances and color-name strings, and
    rejects non-color values with TypeError."""
    with Image(filename='rose:') as img:
        with Color('navy') as color:
            img.matte_color = color
            assert (img.matte_color == color)
        with raises(TypeError):
            img.matte_color = False
        # String assignment is coerced to a Color.
        img.matte_color = 'orange'
        assert (img.matte_color == Color('orange'))
def _hypercorn_factory(host, port):
    """Start a hypercorn server for the ASGI test app; return its Popen.

    On Windows the server runs from an inline script that clears the
    console ctrl handler (so the test process can deliver ctrl events to
    the new process group — TODO confirm intent); elsewhere it runs via
    ``python -m hypercorn`` directly.
    """
    if _WIN32:
        # f-string template: only {host}/{port} are substituted; the rest
        # is the literal child-process script.
        script = f'''
from hypercorn.run import Config, run
import ctypes
ctypes.windll.kernel32.SetConsoleCtrlHandler(None, 0)
config = Config()
config.application_path = '_asgi_test_app:application'
config.bind = ['{host}:{port}']
config.accesslog = '-'
config.debug = True
run(config)
'''
        return subprocess.Popen((sys.executable, '-c', script), cwd=_MODULE_DIR, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
    return subprocess.Popen((sys.executable, '-m', 'hypercorn', '--bind', f'{host}:{port}', '--access-logfile', '-', '--debug', '_asgi_test_app:application'), cwd=_MODULE_DIR)
def run_migrations_offline() -> None:
    """Run alembic migrations in 'offline' mode.

    Configures the context with just a database URL (no live DBAPI
    connection) so migration SQL is emitted to the script output.
    """
    target_metadata = db.metadata
    url = config.get_main_option('sqlalchemy.url')
    assert (target_metadata is not None)
    assert (url is not None)
    # literal_binds inlines parameters into the emitted SQL statements.
    context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'})
    with context.begin_transaction():
        context.run_migrations()
def SetNoteOn(note, velocity):
    """Send a MIDI note-on message on the configured output port.

    In monophonic mode the previously sounding note is released first.
    When a fixed note duration is configured, a timer schedules the
    matching note-off automatically.

    Args:
        note: MIDI note number to play.
        velocity: MIDI velocity for the note-on message.
    """
    global previous_note
    # Monophonic: release the previous note before starting a new one.
    # (Idiom fix: compare against None with `is not`, not `!=`.)
    if monophonic and (previous_note is not None):
        SetNoteOff(previous_note, 0)
    if midichannel is None:
        msg = mido.Message('note_on', note=note, velocity=velocity)
    else:
        msg = mido.Message('note_on', note=note, velocity=velocity, channel=midichannel)
    previous_note = note
    outputport.send(msg)
    # Schedule an automatic note-off when a fixed duration is configured.
    if duration_note is not None:
        t = threading.Timer(duration_note, SetNoteOff, args=[note, 0])
        t.start()
class OptionSeriesHistogramSonificationTracksMappingLowpassResonance(Options):
    """Generated option wrapper for the lowpass-resonance mapping of a
    histogram series' sonification track.

    NOTE(review): each getter/setter pair shares a name; the original
    presumably used ``@property`` / ``@<name>.setter`` decorators that
    were stripped — as written, the later def shadows the earlier one.
    Confirm against the generator's output.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the bare string below looks like a backend registry
# decorator whose '@registry.reg' prefix was lost — confirm upstream.
('rocm.groupnorm.gen_function')
def groupnorm_gen_function(func_attrs: Dict[(str, Any)], use_swish: bool=False) -> str:
    """Generate the ROCm groupnorm (optionally swish-fused) function body.

    Args:
        func_attrs: Op attributes; the first input's shape is inspected.
        use_swish: Whether to emit the swish-fused variant.

    Returns:
        The rendered source string from ``gen_function``.
    """
    shapes = func_attrs['inputs'][0]._attrs['shape']
    # The reduction dimensions (indices 1-3) must be compile-time constants.
    for dim_idx in (1, 2, 3):
        assert isinstance(shapes[dim_idx], IntImm), f'groupnorm requires reduction dim dim_idx={dim_idx!r} to be static'
    return gen_function(func_attrs, SHAPE_EVAL_TEMPLATE, EXEC_TEMPLATE, EXTRA_HEADERS, EXTRA_CODE_TEMPLATE, get_func_signature, use_swish)
def generate_distribution(distribution_name, domain):
    """Return a normalized probability vector of length ``domain``.

    Args:
        distribution_name: One of 'uniform', 'gauss', or 'exp'.
        domain: Number of discrete outcomes (positive int).

    Returns:
        A 1-D numpy array of probabilities summing to 1.

    Raises:
        ValueError: If ``distribution_name`` is not supported.
            (Previously a bare ``Exception``; ValueError is a subclass of
            Exception, so existing handlers still catch it.)
    """
    if distribution_name == 'uniform':
        return np.full(shape=domain, fill_value=(1.0 / domain))
    if distribution_name == 'gauss':
        # Center the bell at domain/2 with ~99.7% of mass inside the domain
        # (sigma = domain/6), then renormalize over the discrete support.
        u = domain / 2
        sigma = domain / 6
        x = np.arange(1, domain + 1)
        fx = np.exp(-((x - u) ** 2) / (2 * (sigma ** 2))) / (np.sqrt(2 * np.pi) * sigma)
        return fx / fx.sum()
    if distribution_name == 'exp':
        # Vectorized exponential decay over x = 0.1, 0.2, ... (was a
        # per-element Python list comprehension).
        lmda = 2
        x = np.arange(1, domain + 1) / 10
        prob_list = lmda * np.exp(-lmda * x)
        return prob_list / prob_list.sum()
    raise ValueError('the distribution is not contained')
class Trainer():
    """Training driver: wraps the network in DataParallel, runs the epoch
    loop with L2 + phase losses, and handles LR decay and checkpointing.
    """
    def __init__(self, config, net, dataset):
        """Set up dataloader, multi-GPU wrapper, optimizer, and losses.

        Args:
            config: Dict of hyperparameters (batch_size, num_gpus,
                learning_rate, newbob decay settings, loss_weights,
                mask_beginning, epochs, save_frequency, artifacts_dir).
            net: The network to train.
            dataset: Training dataset.
        """
        self.config = config
        self.dataset = dataset
        self.dataloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True, num_workers=1)
        gpus = [i for i in range(config['num_gpus'])]
        self.net = th.nn.DataParallel(net, gpus)
        # Only optimize trainable parameters.
        weights = filter((lambda x: x.requires_grad), net.parameters())
        self.optimizer = NewbobAdam(weights, net, artifacts_dir=config['artifacts_dir'], initial_learning_rate=config['learning_rate'], decay=config['newbob_decay'], max_decay=config['newbob_max_decay'])
        self.l2_loss = L2Loss(mask_beginning=config['mask_beginning'])
        self.phase_loss = PhaseLoss(sample_rate=48000, mask_beginning=config['mask_beginning'])
        self.total_iters = 0
        self.net.train()
    def save(self, suffix=''):
        """Save the underlying (unwrapped) network to the artifacts dir."""
        self.net.module.save(self.config['artifacts_dir'], suffix)
    def train(self):
        """Run the full epoch loop; prints per-epoch loss stats, updates the
        LR schedule, and checkpoints periodically and at the end."""
        for epoch in range(self.config['epochs']):
            t_start = time.time()
            loss_stats = {}
            data_pbar = tqdm.tqdm(self.dataloader)
            for data in data_pbar:
                loss_new = self.train_iteration(data)
                # Accumulate each loss component over the epoch.
                for (k, v) in loss_new.items():
                    loss_stats[k] = ((loss_stats[k] + v) if (k in loss_stats) else v)
                data_pbar.set_description(f"loss: {loss_new['accumulated_loss'].item():.7f}")
            # Average the accumulated losses over the number of batches.
            for k in loss_stats:
                loss_stats[k] /= len(self.dataloader)
            # Newbob-style LR decay driven by the epoch's mean loss.
            self.optimizer.update_lr(loss_stats['accumulated_loss'])
            t_end = time.time()
            loss_str = ' '.join([f'{k}:{v:.4}' for (k, v) in loss_stats.items()])
            time_str = f"({time.strftime('%H:%M:%S', time.gmtime((t_end - t_start)))})"
            print((((f'epoch {(epoch + 1)} ' + loss_str) + ' ') + time_str))
            if ((self.config['save_frequency'] > 0) and (((epoch + 1) % self.config['save_frequency']) == 0)):
                self.save(suffix=('epoch-' + str((epoch + 1))))
                print('Saved model')
        self.save()
    def train_iteration(self, data):
        """One optimization step on a (mono, binaural, quats) batch.

        Returns a dict of the individual loss tensors plus the weighted
        'accumulated_loss' that was backpropagated.
        """
        self.optimizer.zero_grad()
        (mono, binaural, quats) = data
        (mono, binaural, quats) = (mono.cuda(), binaural.cuda(), quats.cuda())
        prediction = self.net.forward(mono, quats)
        l2 = self.l2_loss(prediction['output'], binaural)
        phase = self.phase_loss(prediction['output'], binaural)
        # Supervise intermediate outputs against the same target, stacked
        # along the channel dim so one loss call covers all of them.
        intermediate_binaural = th.cat(([binaural] * len(prediction['intermediate'])), dim=1)
        intermediate_prediction = th.cat(prediction['intermediate'], dim=1)
        intermediate_l2 = self.l2_loss(intermediate_prediction, intermediate_binaural)
        intermediate_phase = self.phase_loss(intermediate_prediction, intermediate_binaural)
        loss = (((l2 + intermediate_l2) * self.config['loss_weights']['l2']) + ((phase + intermediate_phase) * self.config['loss_weights']['phase']))
        loss.backward()
        self.optimizer.step()
        self.total_iters += 1
        return {'l2': l2, 'phase': phase, 'intermediate_l2': intermediate_l2, 'intermediate_phase': intermediate_phase, 'accumulated_loss': loss}
class MissingWellKnown(BgpExc):
    """UPDATE message error: a mandatory well-known path attribute was
    absent.

    The NOTIFICATION data is a single byte carrying the type code of the
    missing attribute.
    """
    CODE = BGP_ERROR_UPDATE_MESSAGE_ERROR
    SUB_CODE = BGP_ERROR_SUB_MISSING_WELL_KNOWN_ATTRIBUTE

    def __init__(self, pattr_type_code):
        super().__init__()
        self.pattr_type_code = pattr_type_code
        # Encode the missing attribute's type code as the one-byte payload.
        self.data = struct.pack('B', pattr_type_code)
# NOTE(review): the leading line looks like a '@pytest.mark.parametrize'
# marker whose '@pytest.mark' prefix was lost — confirm upstream.
.parametrize('response,answer', [('Answer: accept\nReason: The text is a recipe.', 'accept'), ('Answer: Accept\nReason: The text is a recipe.', 'accept'), ('Answer: reject\nReason: The text is not a recipe.', 'reject'), ('Answer: Reject\nReason: The text is not a recipe.', 'reject'), ('answer: reject\nreason: The text is not a recipe.', 'reject'), ("answer: Reject\nreason: The text is not a recipe.\nI don't know what it's about.", 'reject')])
def test_parse_response_binary(response, answer):
    """The textcat response parser should extract the answer regardless of
    'Answer:' capitalization and trailing chatter."""
    labels = ['recipe']
    parser = make_textcat_response_parser(labels=labels)
    example = parser(response)
    assert (example.get('answer') == answer)
class Test_geneve(unittest.TestCase):
    """Round-trip tests for the Geneve packet parser."""
    def test_parser(self):
        """Every packet in the sample pcaps must parse to a geneve protocol
        instance and re-serialize to exactly the original bytes."""
        files = ['geneve_unknown']
        for f in files:
            for (_, buf) in pcaplib.Reader(open(((GENEVE_DATA_DIR + f) + '.pcap'), 'rb')):
                pkt = packet.Packet(buf)
                geneve_pkt = pkt.get_protocol(geneve.geneve)
                ok_(isinstance(geneve_pkt, geneve.geneve), ('Failed to parse Geneve message: %s' % pkt))
                # Round-trip: serialization must reproduce the input bytes.
                pkt.serialize()
                eq_(buf, pkt.data, ("b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))))
# NOTE(review): the bare '()' looks like a fixture decorator (probably
# '@pytest.fixture()') whose name was lost — confirm upstream.
()
def empty_dataset(client, is_integration_test):
    """Yield an empty dataset: uploaded against the live backend for
    integration runs, or a static placeholder tuple for unit tests."""
    if is_integration_test:
        empty_dataset = generic_upload_dataset_if_not_exists(client=client, name='empty_v1', upload_folder=None, foundry_schema=None)
        (yield empty_dataset)
    else:
        (yield ('empty-rid', 'empty-path', None, 'empty-branch', False))
# NOTE(review): '(IUndoManager)' and "('active_stack')" below look like
# stripped decorators (interface provider and trait observer) — confirm.
(IUndoManager)
class UndoManager(HasTraits):
    """Coordinates undo/redo by delegating to the currently active command
    stack and re-exporting its state as traits."""
    # The command stack whose undo/redo actions are currently routed.
    active_stack = Instance('pyface.undo.api.ICommandStack')
    # Whether the active stack has no unsaved changes (True when no stack).
    active_stack_clean = Property(Bool)
    # Display name of the next redo action ('' when none).
    redo_name = Property(Str)
    sequence_nr = Int()
    # Fired whenever the active stack reports an update.
    stack_updated = Event()
    # Display name of the next undo action ('' when none).
    undo_name = Property(Str)
    def redo(self):
        """Redo the last undone command on the active stack, if any."""
        if (self.active_stack is not None):
            self.active_stack.redo()
    def undo(self):
        """Undo the last command on the active stack, if any."""
        if (self.active_stack is not None):
            self.active_stack.undo()
    ('active_stack')
    def _update_stack_updated(self, event):
        # Re-fire the stack's update notification as our own event.
        new = event.new
        self.stack_updated = new
    def _get_active_stack_clean(self):
        # No active stack counts as clean.
        if (self.active_stack is None):
            active_stack_clean = True
        else:
            active_stack_clean = self.active_stack.clean
        return active_stack_clean
    def _get_redo_name(self):
        if (self.active_stack is None):
            redo_name = ''
        else:
            redo_name = self.active_stack.redo_name
        return redo_name
    def _get_undo_name(self):
        if (self.active_stack is None):
            undo_name = ''
        else:
            undo_name = self.active_stack.undo_name
        return undo_name
def test_cube_cropping(tmpdir, loadsfile1):
    """do_cropping should shrink each cube axis by the given (front, back)
    amounts and leave the data mean near zero."""
    logger.info('Import SEGY format via SEGYIO')
    incube = loadsfile1
    assert (incube.dimensions == (408, 280, 70))
    # Crop (front, back) per axis: 408-2-13=393, 280-10-22=248, 70-30-0=40.
    incube.do_cropping((2, 13), (10, 22), (30, 0))
    assert (incube.dimensions == (393, 248, 40))
    assert (incube.values.mean() == pytest.approx(0.))
def extractHibernatingtranslationsWordpressCom(item):
    """Parse a release item from hibernatingtranslations.wordpress.com.

    Returns None for previews or items without a chapter/volume, a built
    release message for known tags, and False when no tag matches.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    # Skip preview posts and items with no parsable chapter or volume.
    if (not (chp or vol)) or ('preview' in title.lower()):
        return None
    # tag-in-item -> (release name, translation type); insertion order
    # preserves the original precedence.
    releases = {'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for (tagname, (name, tl_type)) in releases.items():
        if (tagname in item['tags']):
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def get_plotter_from_command_line(command_line: typing.List[str]) -> typing.Type[Plotter]:
    """Find the first registered plotter that recognizes ``command_line``.

    Raises:
        UnableToIdentifyCommandLineError: When no registered plotter claims
            the command line.
    """
    candidates = (p for p in all_plotters() if p.identify_process(command_line=command_line))
    found = next(candidates, None)
    if found is None:
        raise UnableToIdentifyCommandLineError('Failed to identify the plotter definition for parsing the command line')
    return found
def test_no_cse_for_calls_2():
    """CSE must not deduplicate expressions that contain calls: only the
    call-free subexpression (expr1) is hoisted into a new variable, while
    the foo(...) call itself stays duplicated."""
    expr3 = BinaryOperation(OperationType.plus, [Call(function_symbol('foo'), [expr1.copy()]), Constant(1)])
    cfg = ControlFlowGraph()
    cfg.add_node((node := BasicBlock(0, instructions=[Assignment(Variable('a'), expr3.copy()), Assignment(Variable('b'), expr3.copy()), Return([BinaryOperation(OperationType.plus, [Variable('a'), Variable('b')])])])))
    _run_cse(cfg, _generate_options(threshold=2))
    # One new assignment for the hoisted subexpression + the original three.
    assert (len(node.instructions) == 4)
    replacement = Variable('c0', ssa_label=0)
    expr4 = BinaryOperation(OperationType.plus, [Call(function_symbol('foo'), [replacement.copy()]), Constant(1)])
    assert (node.instructions == [Assignment(replacement.copy(), expr1.copy()), Assignment(Variable('a'), expr4), Assignment(Variable('b'), expr4), Return([BinaryOperation(OperationType.plus, [Variable('a'), Variable('b')])])])
class OptionSeriesSunburstOnpointPosition(Options):
    """Generated option wrapper for a sunburst series' on-point position
    (x/y with pixel offsets).

    NOTE(review): each getter/setter pair shares a name; the original
    presumably used ``@property`` / ``@<name>.setter`` decorators that
    were stripped — as written, the later def shadows the earlier one.
    Confirm against the generator's output.
    """
    def offsetX(self):
        return self._config_get(None)
    def offsetX(self, num: float):
        self._config(num, js_type=False)
    def offsetY(self):
        return self._config_get(None)
    def offsetY(self, num: float):
        self._config(num, js_type=False)
    def x(self):
        return self._config_get(None)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get(None)
    def y(self, num: float):
        self._config(num, js_type=False)
class AxisTest(unittest.TestCase):
    """Round-trip tests for the fvar Axis record: binary compile/decompile
    and XML serialization in both directions."""

    def test_compile(self):
        axis = Axis()
        (axis.axisTag, axis.axisNameID) = ('opsz', 345)
        (axis.minValue, axis.defaultValue, axis.maxValue) = ((- 0.5), 1.3, 1.5)
        self.assertEqual(FVAR_AXIS_DATA, axis.compile())

    def test_decompile(self):
        axis = Axis()
        axis.decompile(FVAR_AXIS_DATA)
        self.assertEqual('opsz', axis.axisTag)
        self.assertEqual(345, axis.axisNameID)
        self.assertEqual((- 0.5), axis.minValue)
        # 1.3 does not round-trip exactly — presumably fixed-point storage; the
        # decompiled value is only approximately equal.
        self.assertAlmostEqual(1.3000031, axis.defaultValue)
        self.assertEqual(1.5, axis.maxValue)

    def test_toXML(self):
        font = MakeFont()
        axis = Axis()
        axis.decompile(FVAR_AXIS_DATA)
        # Name ID 256 supplies the human-readable axis name, emitted as the
        # XML comment below.
        AddName(font, 'Optical Size').nameID = 256
        axis.axisNameID = 256
        axis.flags = 2748  # == 0xABC, serialized in hex in the XML
        writer = XMLWriter(BytesIO())
        axis.toXML(writer, font)
        self.assertEqual(['', '<!-- Optical Size -->', '<Axis>', '<AxisTag>opsz</AxisTag>', '<Flags>0xABC</Flags>', '<MinValue>-0.5</MinValue>', '<DefaultValue>1.3</DefaultValue>', '<MaxValue>1.5</MaxValue>', '<AxisNameID>256</AxisNameID>', '</Axis>'], xml_lines(writer))

    def test_fromXML(self):
        axis = Axis()
        for (name, attrs, content) in parseXML('<Axis> <AxisTag>wght</AxisTag> <Flags>0x123ABC</Flags> <MinValue>100</MinValue> <DefaultValue>400</DefaultValue> <MaxValue>900</MaxValue> <AxisNameID>256</AxisNameID></Axis>'):
            axis.fromXML(name, attrs, content, ttFont=None)
        self.assertEqual('wght', axis.axisTag)
        self.assertEqual(1194684, axis.flags)  # == 0x123ABC
        self.assertEqual(100, axis.minValue)
        self.assertEqual(400, axis.defaultValue)
        self.assertEqual(900, axis.maxValue)
        self.assertEqual(256, axis.axisNameID)
class InternalMarketResource(Base):
    """One internal-market resource entry belonging to a country snapshot."""
    __tablename__ = 'internal_market_resource'
    internal_market_resource_id = Column(Integer, primary_key=True)
    # Owning CountryData snapshot.
    country_data_id = Column(ForeignKey(CountryData.country_data_id), index=True)
    # SharedDescription row providing the resource's name.
    resource_name_id = Column(ForeignKey(SharedDescription.description_id), index=True)
    fluctuation = Column(Float)
    country_data = relationship('CountryData', back_populates='internal_market_resources')
    resource_name = relationship('SharedDescription')
class OpenSSHAuthStrategy(AuthStrategy):
    """Auth strategy approximating OpenSSH's key-source ordering.

    ``get_sources`` yields, in order: ssh_config certificates, CLI/Fabric
    config certificates, agent keys (those matching a config identity first,
    in agent order), remaining agent keys, CLI keys, remaining config keys,
    and finally an interactive password prompt.
    """

    def __init__(self, ssh_config, fabric_config, username):
        """Store both config objects and open a connection to the SSH agent."""
        super().__init__(ssh_config=ssh_config)
        self.username = username
        self.config = fabric_config
        # Opened eagerly; released in close() (always called by authenticate()).
        self.agent = Agent()

    def get_pubkeys(self):
        """Yield key/certificate auth sources from config, CLI and the agent."""
        config_certs, config_keys, cli_certs, cli_keys = [], [], [], []
        # Identities supplied via the Fabric/CLI configuration.
        for path in self.config.authentication.identities:
            try:
                key = PKey.from_path(path)
            except FileNotFoundError:
                continue
            source = OnDiskPrivateKey(username=self.username, source='python-config', path=path, pkey=key)
            # Keys carrying a certificate blob are prioritized over plain keys.
            (cli_certs if key.public_blob else cli_keys).append(source)
        # Identities from ssh_config IdentityFile directives.
        for path in self.ssh_config.get('identityfile', []):
            try:
                key = PKey.from_path(path)
            except FileNotFoundError:
                continue
            source = OnDiskPrivateKey(username=self.username, source='ssh-config', path=path, pkey=key)
            (config_certs if key.public_blob else config_keys).append(source)
        if not any((config_certs, config_keys, cli_certs, cli_keys)):
            # Nothing configured anywhere: fall back to the implicit
            # ~/.ssh/id_* files (no leading dot on win32).
            user_ssh = Path.home() / f"{'' if win32 else '.'}ssh"
            for type_ in ('rsa', 'ecdsa', 'ed25519', 'dsa'):
                path = user_ssh / f'id_{type_}'
                try:
                    key = PKey.from_path(path)
                except FileNotFoundError:
                    continue
                source = OnDiskPrivateKey(username=self.username, source='implicit-home', path=path, pkey=key)
                dest = config_certs if key.public_blob else config_keys
                dest.append(source)
        agent_keys = self.agent.get_keys()
        for source in config_certs:
            yield source
        for source in cli_certs:
            yield source
        deferred_agent_keys = []
        for key in agent_keys:
            # Agent keys matching a configured identity are yielded right away
            # (and the on-disk duplicate is dropped); the rest are deferred.
            config_index = None
            for i, config_key in enumerate(config_keys):
                if config_key.pkey == key:
                    config_index = i
                    break
            # Bug fix: the original tested `if config_index:`, which treats a
            # match at index 0 as "no match" and wrongly defers that agent key
            # while keeping its on-disk duplicate.
            if config_index is not None:
                yield InMemoryPrivateKey(username=self.username, pkey=key)
                del config_keys[config_index]
            else:
                deferred_agent_keys.append(key)
        for key in deferred_agent_keys:
            yield InMemoryPrivateKey(username=self.username, pkey=key)
        for source in cli_keys:
            yield source
        for source in config_keys:
            yield source

    def get_sources(self):
        """Yield every pubkey source, then a password prompt as last resort."""
        yield from self.get_pubkeys()
        user = self.username
        prompter = partial(getpass, f"{user}'s password: ")
        yield Password(username=self.username, password_getter=prompter)

    def authenticate(self, *args, **kwargs):
        """Delegate to the parent strategy, always releasing the agent."""
        try:
            return super().authenticate(*args, **kwargs)
        finally:
            self.close()

    def close(self):
        """Close the SSH agent connection."""
        self.agent.close()
def map_private_computation_role_to_mpc_party(private_computation_role: PrivateComputationRole) -> MPCParty:
    """Translate a PrivateComputationRole into the MPCParty that plays it.

    Raises:
        ValueError: for roles with no defined MPC party.
    """
    role_to_party = {
        PrivateComputationRole.PUBLISHER: MPCParty.SERVER,
        PrivateComputationRole.PARTNER: MPCParty.CLIENT,
    }
    party = role_to_party.get(private_computation_role)
    if party is None:
        raise ValueError(f'No mpc party defined for {private_computation_role}')
    return party
def test_persistent_expiring_value_caching(tmp_path):
    """PersistentExpiringValue serves the cached value within max_age and
    recomputes once the cache file is older than max_age."""
    with freeze_time() as frozen_datetime:
        cache_file = (tmp_path / 'test.dat')
        assert (not cache_file.exists())
        ev = cache.PersistentExpiringValue(random.random, cache_file, max_age=300)
        initial_value = ev.value()
        # First read materializes the on-disk cache file.
        assert cache_file.exists()
        frozen_datetime.tick(delta=timedelta(seconds=60))
        assert (ev.value() == initial_value), 'value was different, should have been cached'
        # 60 + 241 = 301s since creation, just past max_age=300.
        frozen_datetime.tick(delta=timedelta(seconds=241))
        second_value = ev.value()
        assert (second_value != initial_value), 'value was the same, should have expired'
        # Reset the file mtime to "now" so the second value counts as fresh.
        mtime = time.mktime(frozen_datetime().timetuple())
        os.utime(str(cache_file), (mtime, mtime))
        frozen_datetime.tick(delta=timedelta(seconds=60))
        assert (ev.value() == second_value), 'value was different, should have been cached'
class MarkdownTextareaWidget(Textarea):
    """Textarea widget that loads the EasyMDE markdown editor assets."""

    class Media():
        css = {'all': ('machina/build/css/vendor/easymde.min.css',)}
        js = ('machina/build/js/vendor/easymde.min.js', 'machina/build/js/machina.editor.min.js')

    def render(self, name, value, attrs=None, **kwargs):
        """Render the textarea with the machina-mde-markdown hook class appended."""
        attrs = ({} if (attrs is None) else attrs)
        # NOTE(review): reads the 'classes' key but writes 'class' — this may
        # drop a caller-supplied attrs['class']; confirm whether
        # attrs.get('class', '') was intended.
        classes = attrs.get('classes', '')
        attrs['class'] = (classes + ' machina-mde-markdown')
        return super().render(name, value, attrs, **kwargs)
class Parent():
    """Node type with child-aggregation methods.

    NOTE(review): this block is not standard Python — the set-literal
    annotations (e.g. ``children: {value}``), the bare ``_sum.step``
    expression (NameError as written), and the duplicated ``partial_sum``
    definitions look like artifacts of a DSL whose decorators were stripped
    from this copy; confirm against the original source before editing.
    """
    tail: Optional[Parent]
    child: Child

    def __init__(self, *children):
        self.children = children

    def max_value(self, children: {value}):
        # Largest `value` attribute among the given children.
        return max([c.value for c in children])

    def partial_sum(self: {max_value}) -> {children: {partial_sum}}:
        return self.max_value
    _sum.step

    def partial_sum(self, children: {attenuated_sum}) -> {children: {partial_sum}}:
        # Shadows the definition above (same name, later in the class body).
        return children.attenuated_sum

    def attenuated_sums(self, children: {attenuated_sum}) -> {children: {partial_sum}}:
        return [c.attenuated_sum for c in children]
def get_list_of_invoices_for_center(center, start_date, end_date):
    """Fetch the sales report for *center* between the given dates and group
    consecutive rows by invoice number.

    Returns a list of invoices, each a list of report rows sharing one
    ``invoice_no``. Rows are assumed to arrive grouped by invoice.
    """
    full_url = (
        f'{api_url}sales/salesreport?center_id={center}'
        f'&start_date={start_date}&end_date={end_date}&item_type=7&status=1'
    )
    sales_report = make_api_call(full_url)
    list_of_invoice_for_center = []
    invoice = []
    if sales_report:
        for report in sales_report['center_sales_report']:
            # A new invoice number closes out the invoice collected so far.
            # (The original had a redundant if/else here whose branches both
            # appended the row — every row is simply appended.)
            if invoice and invoice[0]['invoice_no'] != report['invoice_no']:
                list_of_invoice_for_center.append(invoice)
                invoice = []
            invoice.append(report)
    # Flush the trailing invoice, if any rows were collected.
    if invoice:
        list_of_invoice_for_center.append(invoice)
    return list_of_invoice_for_center
class RefCountryCode(models.Model):
    """Reference table mapping country codes to names, with validity dates."""
    country_code = models.TextField(primary_key=True)
    country_name = models.TextField(blank=True, null=True)
    # Validity window for the code.
    valid_begin_date = models.DateTimeField(blank=True, null=True)
    valid_end_date = models.DateTimeField(blank=True, null=True)
    valid_code_indicator = models.TextField(blank=True, null=True)
    # Audit timestamps, maintained automatically by Django.
    create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    update_date = models.DateTimeField(auto_now=True, null=True)

    class Meta():
        managed = True
        db_table = 'ref_country_code'

    def __str__(self):
        return ('%s: %s' % (self.country_code, self.country_name))
class enhanced_hash_capability(bsn_tlv):
    """BSN enhanced-hash-capability TLV (type 143) with a 64-bit flag value.

    Generated-style serializer class. NOTE(review): ``unpack`` takes
    ``reader`` as its first argument — in this code-generation style it is
    normally a ``@staticmethod``; the decorator appears stripped here.
    """
    type = 143

    def __init__(self, value=None):
        if (value != None):
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        # Serialize type/length/value; the length field is patched in last.
        packed = []
        packed.append(struct.pack('!H', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!Q', self.value))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        obj = enhanced_hash_capability()
        _type = reader.read('!H')[0]
        assert (_type == 143)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict further reads to this TLV's advertised length.
        reader = orig_reader.slice(_length, 4)
        obj.value = reader.read('!Q')[0]
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.value != other.value):
            return False
        return True

    def pretty_print(self, q):
        q.text('enhanced_hash_capability {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('value = ')
                # Bit-flag names for the hash capability mask.
                value_name_map = {1: 'OFP_BSN_ENHANCED_HASH_L2', 2: 'OFP_BSN_ENHANCED_HASH_L3', 4: 'OFP_BSN_ENHANCED_HASH_L2GRE', 8: 'OFP_BSN_ENHANCED_HASH_MPLS', 16: 'OFP_BSN_ENHANCED_HASH_GTP', 32: 'OFP_BSN_ENHANCED_HASH_SYMMETRIC'}
                q.text(util.pretty_flags(self.value, value_name_map.values()))
            q.breakable()
        q.text('}')
def test_adding_a_extra_init_container():
    """extraInitContainers must be rendered into both the daemonset and the
    deployment pod templates."""
    config = "\ndeployment:\n enabled: true\nextraInitContainers: |\n - name: do-something\n image: busybox\n command: ['do', 'something']\n"
    expected = {'name': 'do-something', 'image': 'busybox', 'command': ['do', 'something']}
    rendered = helm_template(config)
    for workload in ('daemonset', 'deployment'):
        init_containers = rendered[workload][name]['spec']['template']['spec']['initContainers']
        assert expected in init_containers
def test_data_drift_test_feature_value_drift_json_render() -> None:
    """TestColumnDrift: run on tiny fixtures and pin the exact JSON payload
    (chi-square p_value stattest, threshold 0.05, no drift detected)."""
    test_current_dataset = pd.DataFrame({'feature_1': [0, 0, 0, 1], 'target': [0, 0, 0, 1], 'prediction': [0, 0, 0, 1]})
    test_reference_dataset = pd.DataFrame({'feature_1': [1, 1, 2, 0], 'target': [0, 0, 0, 1], 'prediction': [0, 0, 0, 1]})
    suite = TestSuite(tests=[TestColumnDrift(column_name='feature_1')])
    suite.run(current_data=test_current_dataset, reference_data=test_reference_dataset)
    # Surface any internal suite error directly instead of failing below.
    suite._inner_suite.raise_for_error()
    assert suite
    result_from_json = json.loads(suite.json())
    assert (result_from_json['summary']['all_passed'] is True)
    test_info = result_from_json['tests'][0]
    assert (test_info == {'description': 'The drift score for the feature **feature_1** is 0.064. The drift detection method is chi-square p_value. The drift detection threshold is 0.05.', 'group': 'data_drift', 'name': 'Drift per Column', 'parameters': {'detected': False, 'score': 0.064, 'stattest': 'chi-square p_value', 'threshold': 0.05, 'column_name': 'feature_1'}, 'status': 'SUCCESS'})
# Register OFPQueueDesc for OFPMP_QUEUE_DESC multipart bodies and tag the
# message type as OFPT_MULTIPART_REQUEST. NOTE(review): in this generator
# style these are normally class decorators (@_set_stats_type /
# @_set_msg_type) on the request class below — confirm against the original.
_set_stats_type(ofproto.OFPMP_QUEUE_DESC, OFPQueueDesc)
_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPQueueDescStatsRequest(OFPMultipartRequest):
    """OFPMP_QUEUE_DESC multipart request: ask the switch for queue
    descriptions, optionally narrowed to one port and/or queue id."""

    def __init__(self, datapath, flags=0, port_no=ofproto.OFPP_ANY, queue_id=ofproto.OFPQ_ALL, type_=None):
        # Defaults request every queue on every port.
        super(OFPQueueDescStatsRequest, self).__init__(datapath, flags)
        self.port_no = port_no
        self.queue_id = queue_id

    def _serialize_stats_body(self):
        # Body is just (port_no, queue_id) packed after the multipart header.
        msg_pack_into(ofproto.OFP_QUEUE_DESC_REQUEST_PACK_STR, self.buf, ofproto.OFP_MULTIPART_REQUEST_SIZE, self.port_no, self.queue_id)
class CreateSnapshotDefineForm(SnapshotDefineForm):
    """Snapshot-definition form whose fields show the 'daily' schedule
    defaults as placeholders."""

    def __init__(self, request, vm, *args, **kwargs):
        super(CreateSnapshotDefineForm, self).__init__(request, vm, *args, **kwargs)
        # Function-level import — NOTE(review): presumably to avoid a circular
        # import at module load; confirm.
        from api.vm.snapshot.serializers import define_schedule_defaults
        schret = define_schedule_defaults('daily')
        # Placeholders only; the user still submits the actual values.
        self.fields['name'].widget.attrs['placeholder'] = 'daily'
        self.fields['schedule'].widget.attrs['placeholder'] = schret.get('schedule', '')
        self.fields['retention'].widget.attrs['placeholder'] = schret.get('retention', '')
class FaucetConntrackClearTest(FaucetUntaggedTest):
    """Faucet ACL test exercising conntrack `ct` actions, including `ct clear`
    (see the embedded YAML: ACL 1 sends TCP through ct zone 1; ACL 2 clears
    ct state for packets with ct_state bit 0x20 set)."""
    # Restrict to software switches.
    SOFTWARE_ONLY = True
    CONFIG_GLOBAL = '\nvlans:\n 100:\n description: "untagged"\n acl_in: 2\nacls:\n 1:\n - rule:\n eth_type: 0x0800\n ip_proto: 6\n ct_state: 0/0x20\n actions:\n ct:\n table: 0\n zone: 1\n - rule:\n eth_type: 0x0800\n ip_proto: 6\n ct_state: 0x21/0x21\n actions:\n ct:\n flags: 1\n table: 1\n zone: 1\n - rule:\n actions:\n allow: 1\n 2:\n - rule:\n eth_type: 0x0800\n ip_proto: 6\n ct_state: 0x20/0x20\n actions:\n ct:\n clear: true\n allow: 1\n - rule:\n actions:\n allow: 1\n'
    CONFIG = '\n interfaces:\n %(port_1)d:\n native_vlan: 100\n acl_in: 1\n %(port_2)d:\n native_vlan: 100\n %(port_3)d:\n native_vlan: 100\n %(port_4)d:\n native_vlan: 100\n'
def filter_out_unlicensed(date: datetime):
    """Drop push-event commits for unlicensed repositories and persist the
    survivors, per company, for the given *date*."""
    log.debug(f'Filter out unlicensed push events commits for date {date:%Y-%m-%d}')
    log.debug(f'Read licensed repos for date {date:%Y-%m-%d}')
    licensed_repos_df = Repositories(date=date).read()
    for (company, df) in DataLake().staging.get_daily_raw_push_events_commits(date):
        log.debug(f'Filter out unlicensed push events commits for date {date:%Y-%m-%d} for {company}')
        # Join commits to the licensed-repo table on repo name and carry the
        # repo name/language/license columns over — NOTE(review): semantics
        # inferred from the argument names of filter_and_adjunct_push_event_commit.
        filtered_df = filter_and_adjunct_push_event_commit(df, licensed_repos_df, [DataLake().staging.schemas.repositories.license], [DataLake().staging.schemas.repositories.name, DataLake().staging.schemas.repositories.language, DataLake().staging.schemas.repositories.license], DataLake().staging.schemas.push_commits.required, right_index=DataLake().staging.schemas.repositories.name, left_index=DataLake().staging.schemas.push_commits.repo_name)
        if (not filtered_df.empty):
            DataLake().staging.save_push_events_commits(push_event_commits=filtered_df, company_name=company, date=date)
def build_span_finder_suggester(candidates_key: str) -> Suggester:
    """Build a suggester that reads candidate spans from
    ``doc.spans[candidates_key]`` for each document."""

    def span_finder_suggester(docs: Iterable[Doc], *, ops: Optional[Ops] = None) -> Ragged:
        """Return a Ragged of (start, end) pairs, one row group per doc."""
        if ops is None:
            ops = get_current_ops()
        spans = []
        lengths = []
        for doc in docs:
            candidates = doc.spans[candidates_key]
            n_spans = 0
            for span in (candidates or []):
                spans.append([span.start, span.end])
                n_spans += 1
            lengths.append(n_spans)
        lengths_array = cast(Ints1d, ops.asarray(lengths, dtype='i'))
        # With no spans at all, emit an empty (0, 0) array of the right dtype.
        if spans:
            return Ragged(ops.asarray(spans, dtype='i'), lengths_array)
        return Ragged(ops.xp.zeros((0, 0), dtype='i'), lengths_array)

    return span_finder_suggester
class TradeLoad(namedtuple('TradeLoad', ('items', 'gainCr', 'costCr', 'units'))):
    """An immutable cargo load.

    Truthy iff it carries any units; ordered lexicographically by gain,
    then units, then cost.
    """

    def __bool__(self):
        # A load with no units is falsy.
        return self.units > 0

    def __lt__(self, rhs):
        # Compare by (gain, units, cost) in that priority order.
        lhs_key = (self.gainCr, self.units, self.costCr)
        rhs_key = (rhs.gainCr, rhs.units, rhs.costCr)
        return lhs_key < rhs_key

    def gpt(self):
        """Gain per unit (ton); zero for an empty load."""
        return self.gainCr / self.units if self.units else 0
def test_raises_error_if_negative_values_after_adding_C(df_vartypes):
    """LogCpTransformer must reject data still non-positive after adding C,
    both in fit() and in transform()."""
    user_var = 'Age'
    df_neg = df_vartypes.copy()
    # Make one value negative enough that C=1 cannot lift it above zero.
    df_neg.loc[(2, user_var)] = (- 7)
    with pytest.raises(ValueError) as errmsg:
        transformer = LogCpTransformer(base='e', variables=user_var, C=1)
        transformer.fit(df_neg)
    exceptionmsg = errmsg.value.args[0]
    assert (exceptionmsg == ('Some variables contain zero or negative values after addingconstant C, ' + "can't apply log"))
    # Fit on clean data, then transform the negative frame.
    with pytest.raises(ValueError):
        transformer = LogCpTransformer(base='e', variables=user_var, C=1)
        transformer.fit(df_vartypes)
        transformer.transform(df_neg)
    # NOTE(review): this re-asserts the message captured from the FIRST block —
    # the transform() error message is never captured; confirm whether a second
    # `as errmsg` context was intended here.
    assert (exceptionmsg == ('Some variables contain zero or negative values after addingconstant C, ' + "can't apply log"))
class CloudAssetRepositoryClient(_base_repository.BaseRepositoryClient):
    """Repository client for the Cloud Asset API (v1) with lazily created
    sub-repositories."""

    def __init__(self, quota_max_calls=None, quota_period=60.0, use_rate_limiter=True, cache_discovery=False, cache=None):
        # No quota configured means there is nothing to rate-limit.
        if (not quota_max_calls):
            use_rate_limiter = False
        self._top_level = None
        self._operations = None
        super(CloudAssetRepositoryClient, self).__init__(API_NAME, versions=['v1'], quota_max_calls=quota_max_calls, quota_period=quota_period, use_rate_limiter=use_rate_limiter, cache_discovery=cache_discovery, cache=cache)

    def top_level(self):
        # Lazily initialized. NOTE(review): likely a stripped @property.
        if (not self._top_level):
            self._top_level = self._init_repository(_CloudAssetV1Repository)
        return self._top_level

    def operations(self):
        # Lazily initialized. NOTE(review): likely a stripped @property.
        if (not self._operations):
            self._operations = self._init_repository(_CloudAssetOperationsRepository)
        return self._operations
class Joco(BikeShareSystem):
    """Joco bike-share system fed by a JSON endpoint, optionally filtered to a
    bounding box."""

    def __init__(self, tag, meta, bbox, feed_url):
        super(Joco, self).__init__(tag, meta)
        self.bbox = bbox
        self.feed_url = feed_url

    def update(self, scraper=None):
        """Refresh ``self.stations`` from the feed."""
        if scraper is None:
            scraper = PyBikesScraper()
        payload = json.loads(scraper.request(self.feed_url))
        stations = [JocoStation(entry) for entry in payload]
        # Keep only stations inside the configured bounding box, if any.
        if self.bbox:
            stations = list(filter_bounds(stations, None, self.bbox))
        self.stations = stations
def test_fr_morphologizer_spaces(NLP):
    """Only genuine whitespace tokens should receive the SPACE part of speech
    from the French pipeline."""
    doc = NLP('Some\nspaces are\tnecessary.')
    assert (doc[0].pos != SPACE)
    assert (doc[0].pos_ != 'SPACE')
    # The '\n' becomes its own token and must be SPACE (pos, pos_ and tag_).
    assert (doc[1].pos == SPACE)
    assert (doc[1].pos_ == 'SPACE')
    assert (doc[1].tag_ == 'SPACE')
    assert (doc[2].pos != SPACE)
    assert (doc[3].pos != SPACE)
    # The '\t' token is whitespace as well.
    assert (doc[4].pos == SPACE)
def holding_period_map(dbal):
    """Render an HTML triangle of annualized returns by start year and
    holding period, computed from the daily balance series *dbal*."""
    year = em.aggregate_returns(dbal.pct_change(), 'yearly')
    year_start = 0
    # Header row: holding periods 1..N years.
    table = "<table class='table table-hover table-condensed table-striped'>"
    table += '<tr><th>Years</th>'
    for i in range(len(year)):
        table += '<th>{}</th>'.format((i + 1))
    table += '</tr>'
    # One row per starting year; each cell annualizes the window of
    # `years_held` yearly returns beginning at that year. Near the end of the
    # series the slice is shorter than years_held, so the cell is skipped.
    for (the_year, _) in year.items():
        table += f'<tr><th>{the_year}</th>'
        for years_held in range(1, (len(year) + 1)):
            if (years_held <= len(year.iloc[year_start:(year_start + years_held)])):
                ret = em.annual_return(year.iloc[year_start:(year_start + years_held)], 'yearly')
                table += '<td>{:.0f}</td>'.format((ret * 100))
        table += '</tr>'
        year_start += 1
    display(HTML(table))
def _expect(post_state: Dict[(str, Any)], networks: Any, transaction: TransactionDict, filler: Dict[(str, Any)]) -> Dict[(str, Any)]:
    """Add an `expect` entry (result state, transaction indexes, networks) to
    a state-test filler and return the merged filler."""
    test_name = get_test_name(filler)
    test = filler[test_name]
    test_update: Dict[(str, Dict[(Any, Any)])] = {test_name: {}}
    pre_state = test.get('pre', {})
    post_state = normalize_state((post_state or {}))
    # Every address mentioned in the post-state gets zeroed defaults, then the
    # pre-state and expected post-state are layered on top.
    defaults = {address: {'balance': 0, 'nonce': 0, 'code': b'', 'storage': {}} for address in post_state}
    result = deep_merge(defaults, pre_state, normalize_state(post_state))
    new_expect = {'result': result}
    if (transaction is not None):
        transaction = normalize_transaction(merge(get_default_transaction(networks), transaction))
        if ('transaction' not in test):
            # First transaction: wrap the multi-valued fields into lists and
            # point the expect's indexes at slot 0 of each present field.
            transaction_group = apply_formatters_to_dict({'data': wrap_in_list, 'gasLimit': wrap_in_list, 'value': wrap_in_list}, transaction)
            indexes = {index_key: 0 for (transaction_key, index_key) in [('gasLimit', 'gas'), ('value', 'value'), ('data', 'data')] if (transaction_key in transaction_group)}
        else:
            # Subsequent transactions are appended to the existing group.
            (transaction_group, indexes) = add_transaction_to_group(test['transaction'], transaction)
        new_expect = assoc(new_expect, 'indexes', indexes)
        test_update = assoc_in(test_update, [test_name, 'transaction'], transaction_group)
    if (networks is not None):
        networks = normalize_networks(networks)
        new_expect = assoc(new_expect, 'networks', networks)
    # Append to (not replace) any expects already recorded on the test.
    existing_expects = test.get('expect', [])
    expect = (existing_expects + [new_expect])
    test_update = assoc_in(test_update, [test_name, 'expect'], expect)
    return deep_merge(filler, test_update)
class OptionLines(Options):
    """Option mapping for line entries: value, CSS class, text and position.

    NOTE(review): each option appears as a getter/setter pair with the same
    name; the `@property` / `@<name>.setter` decorators appear stripped from
    this copy, so each second `def` shadows the first — confirm against the
    generated original before relying on attribute-style access.
    """

    def value(self):
        return self._config_get()

    def value(self, num):
        self._config(num)

    def css_class(self):
        return self._config_get()

    # Bug fix: removed a stray bare expression `_class.setter` that sat here.
    # `_class` is not defined in this scope, so evaluating it raised NameError
    # while the class body executed; it looks like a remnant of a stripped
    # `@css_class.setter` decorator.
    def css_class(self, css_id):
        self._config(css_id)

    def text(self):
        return self._config_get()

    def text(self, value):
        self._config(value)

    def position(self):
        return self._config_get(None)

    def position(self, val):
        self._config(val)

    def positions(self) -> EnumTextPosition:
        # Enum accessor bound to the 'position' config key.
        return EnumTextPosition(self, 'position')
class StableSet(abc.MutableSet):
    """A mutable set that preserves insertion order.

    Backed by a dict, whose keys keep insertion order. Supports positional
    access via ``__getitem__`` (O(n)). The comparison operators require the
    other operand to be a StableSet and raise RuntimeError otherwise.
    """

    def __init__(self, s: Optional[Iterable[Any]] = None):
        # Annotation fixed: the parameter accepts None, so it is Optional.
        if s is None:
            s = []
        self._d = {item: None for item in s}

    def add(self, value) -> None:
        self._d[value] = None

    def update(self, other) -> None:
        """Add every element of *other*, preserving first-seen order."""
        for item in other:
            self._d[item] = None

    def discard(self, value) -> None:
        # No error when absent, matching set.discard.
        self._d.pop(value, None)

    def remove(self, value) -> None:
        # Raises KeyError when absent, matching set.remove.
        self._d.pop(value)

    def copy(self) -> 'StableSet':
        """Return a shallow copy with the same element order."""
        return StableSet(list(self._d))

    def clear(self) -> None:
        self._d = {}

    def __sub__(self, other):
        """Return a new StableSet without the elements of *other* (any iterable)."""
        res = self.copy()
        for item in other:
            res.discard(item)
        return res

    def __str__(self) -> str:
        return str(list(self._d))

    def __repr__(self) -> str:
        return str(list(self._d))

    def __len__(self) -> int:
        return len(self._d)

    def __contains__(self, value: Any) -> bool:
        # Annotation fixed: membership tests return a bool, not an int.
        return value in self._d

    def __iter__(self):
        # Iterate over a snapshot so mutation during iteration is safe.
        return iter(list(self._d))

    def _type_check(self, other) -> None:
        if not isinstance(other, StableSet):
            raise RuntimeError(f'A StableSet can only be operated with another StableSet! Current type: {type(other)}.')

    def __eq__(self, other):
        self._type_check(other)
        return set(other._d) == set(self._d)

    def __le__(self, other):
        self._type_check(other)
        return set(self._d) <= set(other._d)

    def __lt__(self, other):
        self._type_check(other)
        return set(self._d) < set(other._d)

    def __ge__(self, other):
        self._type_check(other)
        return set(self._d) >= set(other._d)

    def __gt__(self, other):
        self._type_check(other)
        return set(self._d) > set(other._d)

    def __getitem__(self, idx):
        """Positional access by insertion order (O(n))."""
        return list(self._d)[idx]
def test_long_extra_data(w3):
    """Fetching a block whose extraData exceeds 32 bytes must raise
    ExtraDataLengthError."""
    # Fixture middleware forces eth_getBlockByNumber to return 33 bytes of 0xff.
    return_block_with_long_extra_data = construct_fixture_middleware({'eth_getBlockByNumber': {'extraData': ('0x' + ('ff' * 33))}})
    w3.middleware_onion.inject(return_block_with_long_extra_data, layer=0)
    with pytest.raises(ExtraDataLengthError):
        w3.eth.get_block('latest')
class hello_failed_error_msg(error_msg):
    """OpenFlow v1 OFPET_HELLO_FAILED error message (err_type 0).

    Generated-style serializer class. NOTE(review): ``unpack`` takes
    ``reader`` as its first argument — in this code-generation style it is
    normally a ``@staticmethod``; the decorator appears stripped here.
    """
    version = 1
    type = 1
    err_type = 0

    def __init__(self, xid=None, code=None, data=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (code != None):
            self.code = code
        else:
            self.code = 0
        if (data != None):
            self.data = data
        else:
            self.data = ''
        return

    def pack(self):
        # Serialize header + payload; the length field (slot 2) is patched last.
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.err_type))
        packed.append(struct.pack('!H', self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        obj = hello_failed_error_msg()
        _version = reader.read('!B')[0]
        assert (_version == 1)
        _type = reader.read('!B')[0]
        assert (_type == 1)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict further reads to this message's advertised length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _err_type = reader.read('!H')[0]
        assert (_err_type == 0)
        obj.code = reader.read('!H')[0]
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.code != other.code):
            return False
        if (self.data != other.data):
            return False
        return True

    def pretty_print(self, q):
        q.text('hello_failed_error_msg {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('code = ')
                # Symbolic names for the known HELLO_FAILED codes.
                value_name_map = {0: 'OFPHFC_INCOMPATIBLE', 1: 'OFPHFC_EPERM'}
                if (self.code in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.code], self.code)))
                else:
                    q.text(('%#x' % self.code))
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
            q.breakable()
        q.text('}')
def record_base_submission_ids():
    """Snapshot each submission_attributes row's submission_id into a new
    _base_submission_id column (created if absent). Only null rows are
    filled, so reruns are idempotent."""
    run_sqls(split_sql('\n -- LOG: Record base submission ids\n alter table submission_attributes add column if not exists _base_submission_id int;\n\n update submission_attributes\n set _base_submission_id = submission_id\n where _base_submission_id is null;\n '))
class _TimePeriods(_Filter):
    """Elasticsearch query builder for the `time_period` request filter.

    NOTE(review): both builders take `cls` as their first parameter —
    `@classmethod` decorators appear to have been stripped from this copy.
    """
    underscore_name = 'time_period'

    def generate_elasticsearch_query(cls, filter_values: List[dict], query_type: _QueryType, **options) -> ES_Q:
        """Build the ES query; defers to the default builder unless a
        `time_period_obj` is supplied in options."""
        if (('time_period_obj' not in options) or (options.get('time_period_obj') is None)):
            return cls._default_elasticsearch_query(filter_values, query_type, **options)
        time_period_query = []
        for filter_value in filter_values:
            time_period_obj = options['time_period_obj']
            time_period_obj.filter_value = filter_value
            gte_range = time_period_obj.gte_date_range()
            lte_range = time_period_obj.lte_date_range()
            all_ranges = []
            # Each range becomes its own should-clause; minimum_should_match of
            # '100%' below requires every one of them to hit.
            for range in gte_range:
                all_ranges.append(ES_Q('bool', should=[ES_Q('range', **range)]))
            for range in lte_range:
                all_ranges.append(ES_Q('bool', should=[ES_Q('range', **range)]))
            time_period_query.append(ES_Q('bool', should=all_ranges, minimum_should_match='100%'))
        # Any one of the requested time periods may match.
        return ES_Q('bool', should=time_period_query, minimum_should_match=1)

    def _default_elasticsearch_query(cls, filter_values: List[dict], query_type: _QueryType, **options):
        """Fallback builder: one gte/lte range pair per requested period."""
        time_period_query = []
        for filter_value in filter_values:
            # Fall back to the API-wide min/max dates when bounds are omitted.
            start_date = (filter_value.get('start_date') or settings.API_SEARCH_MIN_DATE)
            end_date = (filter_value.get('end_date') or settings.API_MAX_DATE)
            gte_range = {filter_value.get('gte_date_type', 'action_date'): {'gte': start_date}}
            # Award queries default the upper bound to date_signed instead.
            lte_range = {filter_value.get('lte_date_type', ('date_signed' if (query_type == _QueryType.AWARDS) else 'action_date')): {'lte': end_date}}
            time_period_query.append(ES_Q('bool', should=[ES_Q('range', **gte_range), ES_Q('range', **lte_range)], minimum_should_match=2))
        return ES_Q('bool', should=time_period_query, minimum_should_match=1)
def vs_filewhere(installation_path, platform, file):
    """Locate *file* on PATH inside a Visual Studio developer environment.

    Runs ``vcvarsall.bat`` for *platform* and then ``where file`` in the same
    shell; returns the directory portion of the first match, or '' when
    anything fails (VS missing, file not found, etc.).
    """
    try:
        vcvarsall = os.path.join(installation_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat')
        # Bug fix: check_output returns bytes on Python 3, so the original
        # str operations raised, were swallowed by the except, and the
        # function always returned ''. Decode before splitting.
        out = subprocess.check_output(('cmd /c "%s" %s & where %s' % (vcvarsall, platform, file)))
        env = out.decode(errors='replace')
        # splitlines() handles both \r\n and \n line endings.
        paths = [line[:(- len(file))] for line in env.splitlines() if line.endswith(file)]
        return paths[0]
    except Exception:
        # Deliberate best-effort: any failure yields the empty string.
        return ''
def initialize_dbt_flags(profiles_dir: str, project_dir: str, profile_target: Optional[str], vars: Optional[dict], threads: Optional[int]):
    """Populate dbt's global flags from the given settings and return them."""
    args = FlagsArgs(use_colors=None, project_dir=project_dir, profiles_dir=profiles_dir, profile=None, target=profile_target, vars=vars, threads=threads)
    flags.set_from_args(args, None)
    # Local import so the events module is touched only after the flags are
    # set — NOTE(review): presumed reason; confirm.
    import dbt.events.functions as events_functions
    events_functions.set_invocation_id()
    return flags.get_flags()
class SetCommandGenerator(CommandGenerator):
    """SNMP v2c SET command generator."""

    def processResponseVarBinds(self, snmpEngine, sendRequestHandle, errorIndication, PDU, cbCtx):
        # Unwrap the user callback and relay status/index/var-binds; every PDU
        # access is guarded so a missing PDU degrades to zeros/empty tuple.
        (cbFun, cbCtx) = cbCtx
        cbFun(snmpEngine, sendRequestHandle, errorIndication, ((PDU and v2c.apiPDU.getErrorStatus(PDU)) or 0), ((PDU and v2c.apiPDU.getErrorIndex(PDU, muteErrors=True)) or 0), ((PDU and v2c.apiPDU.getVarBinds(PDU)) or ()), cbCtx)

    def sendVarBinds(self, snmpEngine, targetName, contextEngineId, contextName, varBinds, cbFun, cbCtx=None):
        """Build a SetRequest PDU carrying *varBinds* and dispatch it."""
        reqPDU = v2c.SetRequestPDU()
        v2c.apiPDU.setDefaults(reqPDU)
        v2c.apiPDU.setVarBinds(reqPDU, varBinds)
        return self.sendPdu(snmpEngine, targetName, contextEngineId, contextName, reqPDU, self.processResponseVarBinds, (cbFun, cbCtx))
def get_stats(tree_id, pname):
    """Return count/min/max/mean/variance of numeric property *pname* over
    the nodes of the given tree.

    Aborts the request with HTTP 400 when no node carries the property or a
    value cannot be parsed as a float.
    """
    vmin, vmax = inf, -inf
    count = 0
    running_mean = 0
    running_mean_sq = 0
    try:
        for node in load_tree(tree_id):
            if pname not in node.props:
                continue
            value = float(node.props[pname])
            vmin = min(vmin, value)
            vmax = max(vmax, value)
            # Incremental running means of the values and of their squares;
            # variance below is E[x^2] - E[x]^2.
            running_mean = ((count * running_mean) + value) / (count + 1)
            running_mean_sq = ((count * running_mean_sq) + (value * value)) / (count + 1)
            count += 1
        assert count > 0, 'no node has the given property'
        return {'n': count, 'min': vmin, 'max': vmax, 'mean': running_mean,
                'var': (running_mean_sq - (running_mean * running_mean))}
    except (ValueError, AssertionError) as e:
        abort(400, f'when reading property {pname}: {e}')
def verify_statistics_map(fledge_url, skip_verify_north_interface):
    """Check the Fledge statistics endpoint recorded south ingest activity
    and, unless skipped, north egress activity."""
    get_url = '/fledge/statistics'
    jdoc = utils.get_request(fledge_url, get_url)
    actual_stats_map = utils.serialize_stats_map(jdoc)
    # At least one reading must have been ingested by the south service.
    assert (1 <= actual_stats_map['{}-Ingest'.format(SOUTH_SERVICE_NAME)])
    assert (1 <= actual_stats_map['READINGS'])
    if (not skip_verify_north_interface):
        # And at least one reading must have been sent out by the north service.
        assert (1 <= actual_stats_map['Readings Sent'])
        assert (1 <= actual_stats_map[NORTH_SERVICE_NAME])
def make_all_fourier_transforms(input_grid, q, fov, shift):
    """Build every Fourier-transform implementation variant over the same
    input/output grids, for cross-checking them against each other."""
    fft1 = FastFourierTransform(input_grid, q=q, fov=fov, shift=shift, emulate_fftshifts=True)
    fft2 = FastFourierTransform(input_grid, q=q, fov=fov, shift=shift, emulate_fftshifts=False)
    # Matrix FT in all four precompute/allocate combinations, sharing fft1's
    # output grid.
    mft1 = MatrixFourierTransform(input_grid, fft1.output_grid, precompute_matrices=True, allocate_intermediate=True)
    mft2 = MatrixFourierTransform(input_grid, fft1.output_grid, precompute_matrices=True, allocate_intermediate=False)
    mft3 = MatrixFourierTransform(input_grid, fft1.output_grid, precompute_matrices=False, allocate_intermediate=True)
    mft4 = MatrixFourierTransform(input_grid, fft1.output_grid, precompute_matrices=False, allocate_intermediate=False)
    # Naive (reference) FT with and without precomputed matrices.
    nft1 = NaiveFourierTransform(input_grid, fft1.output_grid, precompute_matrices=True)
    nft2 = NaiveFourierTransform(input_grid, fft1.output_grid, precompute_matrices=False)
    zfft = ZoomFastFourierTransform(input_grid, fft1.output_grid)
    return [fft1, fft2, mft1, mft2, mft3, mft4, nft1, nft2, zfft]
class TestDockPane(unittest.TestCase):
    """Dock-pane behaviour tests run against a live MyApplication event loop."""

    # NOTE(review): the bare tuple below looks like a stripped
    # @unittest.skipUnless(sys.platform == 'darwin', 'only applicable to macOS')
    # decorator for the following test — confirm against the original source.
    ((sys.platform == 'darwin'), 'only applicable to macOS')
    def test_dock_windows_visible_on_macos(self):
        """Every dock pane must carry WA_MacAlwaysShowToolWindow on macOS."""
        tool_attributes = []

        def check_panes_and_exit(app_event):
            # Runs once the application is initialized; collect the attribute
            # from every dock pane, then quit the event loop.
            app = app_event.application
            for window in app.windows:
                for dock_pane in window.dock_panes:
                    attr = dock_pane.control.testAttribute(QtCore.Qt.WidgetAttribute.WA_MacAlwaysShowToolWindow)
                    tool_attributes.append(attr)
            app.exit()
        app = MyApplication()
        app.on_trait_change(check_panes_and_exit, 'application_initialized')
        app.run()
        self.assertTrue(tool_attributes)
        for attr in tool_attributes:
            self.assertTrue(attr)

    def test_dock_windows_undock(self):
        """Floating a dock pane must leave its dock_area unchanged ('left')."""
        tool_attributes = []

        def check_panes_and_exit(app_event):
            app = app_event.application
            # Undock the first pane, then record every pane's dock_area.
            app.windows[0].dock_panes[0].control.setFloating(True)
            for window in app.windows:
                for dock_pane in window.dock_panes:
                    attr = dock_pane.dock_area
                    tool_attributes.append(attr)
            app.exit()
        app = MyApplication()
        app.on_trait_change(check_panes_and_exit, 'application_initialized')
        app.run()
        self.assertTrue(tool_attributes)
        for attr in tool_attributes:
            self.assertEqual(attr, 'left')
class OptionSeriesPolygonMarkerStatesSelect(Options):
    """Option mapping for the polygon-series marker 'select' state
    (enabled, fill/line colors, line width, radius).

    NOTE(review): every option appears as a getter/setter pair with identical
    names; the `@property` / `@<name>.setter` decorators appear to have been
    stripped from this copy, so each second `def` shadows the first — confirm
    against the original generated source.
    """

    def enabled(self):
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def fillColor(self):
        return self._config_get('#cccccc')

    def fillColor(self, text: str):
        self._config(text, js_type=False)

    def lineColor(self):
        return self._config_get('#000000')

    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        return self._config_get(2)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def radius(self):
        return self._config_get(None)

    def radius(self, num: float):
        self._config(num, js_type=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.