code stringlengths 281 23.7M |
|---|
class CoverManager(GObject.GObject):
    """Dialog that scans a track collection for albums and batch-fetches
    missing album art, reporting progress through GObject signals.

    Work happens on daemon threads (``prefetch`` and ``fetch``); the
    ``do_*`` signal handlers update the GTK widgets.

    NOTE(review): several methods below are preceded by a bare ``_add()``
    call; this looks like a decorator (e.g. ``@idle_add()``) whose ``@``
    and prefix were lost in extraction -- confirm against upstream.
    """
    # Signals for the two phases: prefetch (scan collection, build model)
    # and fetch (download covers), plus per-cover notification.
    __gsignals__ = {'prefetch-started': (GObject.SignalFlags.RUN_LAST, None, ()), 'prefetch-progress': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)), 'prefetch-completed': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)), 'fetch-started': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)), 'fetch-completed': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)), 'fetch-progress': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_INT,)), 'cover-fetched': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_PYOBJECT, GdkPixbuf.Pixbuf))}

    def __init__(self, parent, collection):
        """Build the window and start the prefetch scan of *collection*."""
        GObject.GObject.__init__(self)
        self.outstanding = []       # albums still missing a cover
        self.album_tracks = {}      # (artist, album) -> list of tracks
        self.outstanding_text = _('{outstanding} covers left to fetch')
        self.completed_text = _('All covers fetched')
        self.cover_size = (90, 90)  # thumbnail size in the icon view
        self.default_cover_pixbuf = pixbuf_from_data(COVER_MANAGER.get_default_cover(), self.cover_size)
        builder = guiutil.get_builder(xdg.get_data_path('ui', 'covermanager.ui'))
        builder.connect_signals(self)
        self.window = builder.get_object('window')
        self.window.set_transient_for(parent)
        self.message = dialogs.MessageBar(parent=builder.get_object('content_area'), buttons=Gtk.ButtonsType.CLOSE)
        self.previews_box = builder.get_object('previews_box')
        self.model = builder.get_object('covers_model')
        self.model_path_cache = {}  # album -> Gtk.TreePath for O(1) updates
        self.menu = CoverMenu(self)
        self.menu.attach_to_widget(self.previews_box, (lambda menu, widget: True))
        self.progress_bar = builder.get_object('progressbar')
        self.progress_bar.set_text(_('Collecting albums and covers...'))
        # Pulse until the prefetch scan reports real progress.
        self.progress_bar.pulse_timeout = GLib.timeout_add(100, self.on_progress_pulse_timeout)
        self.close_button = builder.get_object('close_button')
        self.stop_button = builder.get_object('stop_button')
        self.stop_button.set_sensitive(False)
        self.fetch_button = builder.get_object('fetch_button')
        self.window.show_all()
        # stopper signals the worker threads to abort.
        self.stopper = threading.Event()
        thread = threading.Thread(target=self.prefetch, name='CoverPrefetch', args=(collection,))
        thread.daemon = True
        thread.start()

    def prefetch(self, collection):
        """Worker: group tracks by (artist, album) and populate the model.

        Albums whose cover is not already in the local database are
        collected into ``self.outstanding``.
        """
        albums = set()
        for track in collection:
            if self.stopper.is_set():
                return
            try:
                artist = track.get_tag_raw('artist')[0]
                album = track.get_tag_raw('album')[0]
            except TypeError:
                # get_tag_raw returned None (tag missing) -> not indexable.
                continue
            if ((not album) or (not artist)):
                continue
            album = (artist, album)
            try:
                self.album_tracks[album].append(track)
            except KeyError:
                self.album_tracks[album] = [track]
            albums.add(album)
        albums = sorted(albums)
        outstanding = []
        # Hoist lookups out of the loop.
        get_cover = COVER_MANAGER.get_cover
        default_cover_pixbuf = self.default_cover_pixbuf
        cover_size = self.cover_size
        self.emit('prefetch-started')
        for (i, album) in enumerate(albums):
            if self.stopper.is_set():
                return
            # set_only=True: local lookup only, no network fetch here.
            cover_data = get_cover(self.album_tracks[album][0], set_only=True)
            cover_pixbuf = (pixbuf_from_data(cover_data) if cover_data else None)
            try:
                thumbnail_pixbuf = cover_pixbuf.scale_simple(*cover_size, interp_type=GdkPixbuf.InterpType.BILINEAR)
            except AttributeError:
                # cover_pixbuf is None: use placeholder and mark outstanding.
                thumbnail_pixbuf = default_cover_pixbuf
                outstanding.append(album)
            label = '{0} - {1}'.format(*album)
            iter = self.model.append((album, thumbnail_pixbuf, label))
            self.model_path_cache[album] = self.model.get_path(iter)
            self.emit('prefetch-progress', (i + 1))
        self.outstanding = outstanding
        self.emit('prefetch-completed', len(self.outstanding))

    def fetch(self):
        """Worker: download covers for all outstanding albums."""
        self.emit('fetch-started', len(self.outstanding))
        get_cover = COVER_MANAGER.get_cover
        save = COVER_MANAGER.save
        # Iterate a copy since successful fetches mutate self.outstanding.
        for (i, album) in enumerate(self.outstanding[:]):
            if self.stopper.is_set():
                break
            cover_data = get_cover(self.album_tracks[album][0], save_cover=True)
            cover_pixbuf = (pixbuf_from_data(cover_data) if cover_data else None)
            self.emit('fetch-progress', (i + 1))
            if (not cover_pixbuf):
                continue
            self.outstanding.remove(album)
            self.emit('cover-fetched', album, cover_pixbuf)
            # Periodically persist so an abort doesn't lose everything.
            if ((i % 50) == 0):
                logger.debug('Saving cover database')
                save()
        logger.debug('Saving cover database')
        save()
        self.emit('fetch-completed', len(self.outstanding))

    def show_cover(self):
        """Open the currently selected album's cover in a CoverWindow."""
        paths = self.previews_box.get_selected_items()
        if paths:
            path = paths[0]
            album = self.model[path][0]
            track = self.album_tracks[album][0]
            cover_data = COVER_MANAGER.get_cover(track, set_only=True)
            cover_pixbuf = (pixbuf_from_data(cover_data) if cover_data else None)
            if cover_pixbuf:
                # Default the save directory to the track's folder, if local.
                savedir = Gio.File.new_for_uri(track.get_loc_for_io()).get_parent()
                if savedir:
                    savedir = savedir.get_path()
                cover_window = CoverWindow(self.window, cover_pixbuf, album[1], savedir)
                cover_window.show_all()

    def fetch_cover(self):
        """Open a CoverChooser for the currently selected album."""
        paths = self.previews_box.get_selected_items()
        if paths:
            path = paths[0]
            album = self.model[path][0]
            track = self.album_tracks[album][0]
            cover_chooser = CoverChooser(self.window, track)
            # Remember which row to update when the user picks a cover.
            cover_chooser.path = path
            cover_chooser.connect('cover-chosen', self.on_cover_chosen)

    def remove_cover(self):
        """Delete the selected album's cover and restore the placeholder."""
        paths = self.previews_box.get_selected_items()
        if paths:
            path = paths[0]
            album = self.model[path][0]
            track = self.album_tracks[album][0]
            COVER_MANAGER.remove_cover(track)
            self.model[path][1] = self.default_cover_pixbuf

    # NOTE(review): bare `_add()` -- likely a stripped idle-add decorator.
    _add()
    def do_prefetch_started(self):
        """Signal handler: reset UI for a new prefetch scan."""
        self.previews_box.set_model(None)
        self.model.clear()
        self.previews_box.set_sensitive(False)
        self.fetch_button.set_sensitive(False)
        self.progress_bar.set_fraction(0)
        # Stop the indeterminate pulse; real progress starts now.
        GLib.source_remove(self.progress_bar.pulse_timeout)

    _add()
    def do_prefetch_completed(self, outstanding):
        """Signal handler: re-enable UI and show the outstanding count."""
        self.previews_box.set_sensitive(True)
        self.previews_box.set_model(self.model)
        self.fetch_button.set_sensitive(True)
        self.progress_bar.set_fraction(0)
        self.progress_bar.set_text(self.outstanding_text.format(outstanding=outstanding))

    _add()
    def do_prefetch_progress(self, progress):
        """Signal handler: advance the progress bar during the scan."""
        fraction = (progress / float(len(self.album_tracks)))
        self.progress_bar.set_fraction(fraction)

    _add()
    def do_fetch_started(self, outstanding):
        """Signal handler: lock the UI while covers are downloading."""
        self.previews_box.set_sensitive(False)
        self.stop_button.set_sensitive(True)
        self.fetch_button.set_sensitive(False)
        self.progress_bar.set_fraction(0)
        # Remember the total so fetch-progress can compute a fraction.
        self.progress_bar.outstanding_total = float(outstanding)

    _add()
    def do_fetch_completed(self, outstanding):
        """Signal handler: unlock the UI after the fetch run."""
        self.previews_box.set_sensitive(True)
        self.stop_button.set_sensitive(False)
        if (outstanding > 0):
            self.fetch_button.set_sensitive(True)
        self.progress_bar.set_fraction(0)

    _add()
    def do_fetch_progress(self, progress):
        """Signal handler: update text and fraction during the fetch."""
        outstanding = len(self.outstanding)
        if (outstanding > 0):
            progress_text = self.outstanding_text.format(outstanding=outstanding)
        else:
            progress_text = self.completed_text
        self.progress_bar.set_text(progress_text)
        fraction = (progress / self.progress_bar.outstanding_total)
        self.progress_bar.set_fraction(fraction)

    _add()
    def do_cover_fetched(self, album, pixbuf):
        """Signal handler: swap in the downloaded cover's thumbnail."""
        path = self.model_path_cache[album]
        self.model[path][1] = pixbuf.scale_simple(*self.cover_size, interp_type=GdkPixbuf.InterpType.BILINEAR)

    def on_cover_chosen(self, cover_chooser, track, cover_data):
        """Handle a manually chosen cover from the CoverChooser."""
        path = cover_chooser.path
        if path:
            album = self.model[path][0]
            pixbuf = pixbuf_from_data(cover_data)
            self.emit('cover-fetched', album, pixbuf)
            try:
                self.outstanding.remove(album)
            except ValueError:
                # Album already had a cover; nothing to update.
                pass
            else:
                outstanding = len(self.outstanding)
                if (outstanding > 0):
                    progress_text = self.outstanding_text.format(outstanding=outstanding)
                else:
                    progress_text = self.completed_text
                self.progress_bar.set_text(progress_text)

    def on_previews_box_item_activated(self, iconview, path):
        """Double-click / Enter on an item shows its cover."""
        self.show_cover()

    def on_previews_box_button_press_event(self, widget, e):
        """Select the clicked item and open the context menu if requested."""
        path = self.previews_box.get_path_at_pos(int(e.x), int(e.y))
        if path:
            self.previews_box.select_path(path)
            if e.triggers_context_menu():
                self.menu.popup(None, None, None, None, 3, e.time)

    def on_previews_box_popup_menu(self, menu):
        """Keyboard menu key: open the context menu for the selection."""
        paths = self.previews_box.get_selected_items()
        if paths:
            self.menu.popup(None, None, None, None, 0, Gtk.get_current_event_time())

    def on_previews_box_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        """Show the 'artist - album' label as tooltip for the hovered item."""
        (x, y) = self.previews_box.convert_widget_to_bin_window_coords(x, y)
        path = self.previews_box.get_path_at_pos(x, y)
        if path:
            tooltip.set_text(self.model[path][2])
            self.previews_box.set_tooltip_item(tooltip, path)
            return True
        return False

    def on_progress_pulse_timeout(self):
        """GLib timeout: keep pulsing until removed."""
        self.progress_bar.pulse()
        return True

    def on_close_button_clicked(self, button):
        """Stop workers, tear down the window and drop cached state."""
        self.stopper.set()
        self.window.destroy()
        self.model.clear()
        del self.outstanding
        del self.album_tracks
        del self.model_path_cache

    def on_stop_button_clicked(self, button):
        """Ask the running fetch thread to stop."""
        self.stopper.set()

    def on_fetch_button_clicked(self, button):
        """Start a fetch run on a background daemon thread."""
        self.stopper.clear()
        thread = threading.Thread(target=self.fetch, name='CoverFetch')
        thread.daemon = True
        thread.start()

    def on_window_delete_event(self, window, e):
        """Route window-close through the close button's cleanup path."""
        self.close_button.clicked()
        return True
def test_attention_sequential_masked_2():
    """Self-attention with a key-padding mask: check parameter count and shapes."""
    inputs = build_multi_input_dict(dims=[(2, 20, 10), (2, 7, 5), (2, 7, 15), (2, 7)])
    # The fourth input is converted to a boolean key-padding mask.
    inputs['in_key_3'] = (inputs['in_key_3'] != 0)
    attention = MultiHeadAttentionBlock(
        in_keys=['in_key_0', 'in_key_1', 'in_key_2', 'in_key_3'],
        out_keys='self_attention',
        in_shapes=[(7, 10), (7, 5), (7, 15), (7,)],
        num_heads=10,
        dropout=0.0,
        bias=False,
        add_input_to_output=True,
        add_bias_kv=False,
        add_zero_attn=False,
        kdim=5,
        vdim=15,
        use_key_padding_mask=True,
    )
    str(attention)  # smoke-test the string representation
    outputs = attention(inputs)
    assert attention.get_num_of_parameters() == 401
    assert len(outputs.keys()) == len(attention.out_keys) == 1
    assert outputs[attention.out_keys[0]].shape == (2, 20, 10)
def test_mine_all_uncle(chain, tx, tx2, funded_address):
    """Mine a fork, then include its header as an uncle in the next canonical block."""
    if not hasattr(chain, 'mine_all'):
        # Chains exposing the mining API must implement mine_all.
        if isinstance(chain, MiningChainAPI):
            raise AssertionError()
        return
    fork_point = chain.get_canonical_head()
    canonical_results = chain.mine_all([tx])
    # Mine an empty sibling block off the pre-fork tip; it becomes the uncle.
    uncle_results = chain.mine_all([], parent_header=fork_point)
    uncle_header = uncle_results[0].imported_block.header
    tip_results = chain.mine_all(
        [tx2],
        parent_header=canonical_results[0].imported_block.header,
        uncles=[uncle_header],
    )
    tip_block = tip_results[0].imported_block
    assert tip_block.transactions == (tx2,)
    assert chain.get_block_by_hash(tip_block.hash) == tip_block
    assert tip_block.uncles == (uncle_header,)
class MessageWindow(Gtk.Window):
    """Centered window showing an icon plus selectable text, with a Close button.

    Closing (button or Escape) quits the GTK main loop.
    """

    def __init__(self, text, title='Epoptes', markup=True, icon_name='dialog-information'):
        super().__init__(title=title, icon_name=icon_name)
        self.set_position(Gtk.WindowPosition.CENTER)
        layout = Gtk.Grid(column_spacing=10, row_spacing=10, margin=10)
        self.add(layout)
        layout.add(Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.DIALOG))
        message_label = Gtk.Label(
            label=text, selectable=True, hexpand=True, vexpand=True,
            halign=Gtk.Align.START, valign=Gtk.Align.START)
        if markup:
            message_label.set_markup(text)
        layout.add(message_label)
        close_button = Gtk.Button.new_from_stock(Gtk.STOCK_CLOSE)
        close_button.set_hexpand(False)
        close_button.set_halign(Gtk.Align.END)
        close_button.connect('clicked', Gtk.main_quit)
        layout.attach(close_button, 1, 1, 2, 1)
        self.set_focus_child(close_button)
        # Also quit when Escape is pressed.
        accel_group = Gtk.AccelGroup()
        key, modifier = Gtk.accelerator_parse('Escape')
        accel_group.connect(key, modifier, Gtk.AccelFlags.VISIBLE, Gtk.main_quit)
        self.add_accel_group(accel_group)
def update_agent_config(ctx: Context) -> None:
    """Refresh the agent configuration and persist it.

    Bumps the supported AEA version range, and if a CLI author is configured
    and differs from the agent's author, rewrites the author and resets the
    version to the default before dumping the config to disk.
    """
    update_aea_version_range(ctx.agent_config)
    cli_author = ctx.config.get('cli_author')
    if cli_author and ctx.agent_config.author != cli_author:
        click.echo(f'Updating author from {ctx.agent_config.author} to {cli_author}')
        # Private attribute write mirrors the original behavior.
        ctx.agent_config._author = cli_author
        ctx.agent_config.version = DEFAULT_VERSION
    ctx.dump_agent_config()
def get_data(session: Session, type: str='production') -> tuple[(list[Any], dict[(str, str)])]:
    """Download CSV rows from the data endpoint.

    For 'production', fetch indexes 1-10 (skipping 7, the exchange feed),
    returning data rows plus a plant-name -> production-type mapping.
    For 'exchange', fetch index 7 and return it with the static exchange map.
    Raises ParserException for any other type.
    """
    if type == 'production':
        rows = []
        plant_types = {}
        for index in range(1, 11):
            # Index 7 is the exchange feed, handled by the other branch.
            if index == 7:
                continue
            response: Response = session.get(
                DATA_URL, params={'request': 'CSV_N_', 'p8_indx': index}, verify=False)
            for row in reader(response.text.splitlines()):
                # Skip header rows.
                if row[0] == 'Fecha' or row[1] == 'Planta':
                    continue
                plant_types[row[1]] = INDEX_TO_TYPE_MAP[index]
                rows.append(row)
        return (rows, plant_types)
    if type == 'exchange':
        response: Response = session.get(
            DATA_URL, params={'request': 'CSV_N_', 'p8_indx': 7}, verify=False)
        return (list(reader(response.text.splitlines())), EXCHANGE_MAP)
    raise ParserException('HN.py', f'Invalid data type: {type}')
def test_aes_keyschedule_not_too_slowed_down():
    """Key schedule on uint32 keys must stay within 1.5x the uint8 baseline."""
    n_keys = 100000
    keys_u8 = np.zeros((n_keys, 16), dtype='uint8')
    keys_u32 = np.zeros((n_keys, 16), dtype='uint32')

    start = time.process_time()
    scared.aes.key_schedule(keys_u8)
    elapsed_u8 = time.process_time() - start

    start = time.process_time()
    scared.aes.key_schedule(keys_u32)
    elapsed_u32 = time.process_time() - start

    assert elapsed_u32 < 1.5 * elapsed_u8
class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that maintains a rolling, summarized thought log.

    Accumulates intermediate agent steps across calls; once the window
    exceeds ``max_context_length`` it folds older steps into a running
    summary (via ``my_summarize_agent``) and keeps only the last
    ``keep_n_last_thoughts`` verbatim.

    NOTE(review): mutable class-level defaults (``intermediate_steps``)
    are safe only if the pydantic base copies them per instance -- confirm.
    """
    template: str
    tools: List[Tool]
    agent_toolnames: List[str]
    max_context_length: int = 5      # steps before summarization kicks in
    keep_n_last_thoughts: int = 2    # verbatim steps kept after summarizing
    current_context_length: int = 0
    model_steps_processed: int = 0
    all_steps_processed: int = 0
    my_summarize_agent: Any = None   # optional summarizer; disables summaries if None
    last_summary: str = ''
    project: (Any | None) = None
    intermediate_steps: list[(AgentAction, str)] = []
    hook: (Callable[([CustomPromptTemplate], None)] | None) = None

    # NOTE(review): probably had a stripped ``@property`` decorator upstream.
    def _prompt_type(self) -> str:
        return 'taskmaster'

    def thought_log(self, thoughts: list[(AgentAction, str)]) -> str:
        """Render (action, result) pairs into the scratchpad text."""
        result = ''
        for (i, (action, aresult)) in enumerate(thoughts):
            if self.my_summarize_agent:
                # Trim older results harder; keep the last one longer.
                aresult = trim_extra(aresult, (1300 if (i != (len(thoughts) - 1)) else 1750))
            if (action.tool == 'WarnAgent'):
                result += (action.log + f'''
System note: {aresult}
''')
            elif (action.tool == 'AgentFeedback'):
                result += ((action.log + aresult) + '\n')
            else:
                result += (action.log + f'''
AResult: {aresult}
''')
        return result

    def format(self, **kwargs) -> str:
        """Assemble the full prompt, summarizing old steps as needed."""
        if ('intermediate_steps' in kwargs):
            # Append only the steps we have not seen yet.
            model_steps = kwargs.pop('intermediate_steps')
            self.intermediate_steps += model_steps[self.model_steps_processed:]
            self.model_steps_processed = len(model_steps)
        intermediate_steps = self.intermediate_steps
        self.current_context_length += (len(intermediate_steps) - self.all_steps_processed)
        self.all_steps_processed = len(intermediate_steps)
        if ((self.current_context_length >= self.max_context_length) and self.my_summarize_agent):
            # Fold everything except the newest steps into the summary.
            self.last_summary = self.my_summarize_agent.run(summary=self.last_summary, thought_process=self.thought_log(intermediate_steps[(- self.current_context_length):(- self.keep_n_last_thoughts)]))
            self.current_context_length = self.keep_n_last_thoughts
        if self.my_summarize_agent:
            kwargs['agent_scratchpad'] = ('Here is a summary of what has happened:\n' + trim_extra(self.last_summary, 2700, 1900))
            kwargs['agent_scratchpad'] += '\nEND OF SUMMARY\n'
        else:
            kwargs['agent_scratchpad'] = ''
        kwargs['agent_scratchpad'] += 'Here go your thoughts and actions:\n'
        kwargs['agent_scratchpad'] += self.thought_log(intermediate_steps[(- self.current_context_length):])
        # Only expose the tools this agent is allowed to use.
        kwargs['tools'] = '\n'.join([f'{tool.name}: {tool.description}' for tool in self.tools if (tool.name in self.agent_toolnames)])
        kwargs['tool_names'] = self.agent_toolnames
        if self.project:
            for (key, value) in self.project.prompt_fields().items():
                kwargs[key] = value
        result = remove_surrogates(remove_project_summaries(self.template.format(**kwargs).replace('{tools}', kwargs['tools'])))
        result = trim_extra(result, 25000)
        if self.hook:
            self.hook(self)
        # Append the rendered prompt to the project's prompt log, if any.
        if (self.project and os.path.exists(self.project.path)):
            with open(os.path.join(self.project.path, '.prompts.log'), 'a') as f:
                f.write((result + '\n\n\n\n\n'))
        return result
class SparkMetricImplementation(Generic[TMetric], MetricImplementation):
    """Base class for metric implementations executed on a Spark engine.

    NOTE(review): ``supported_engines`` takes ``cls`` but carried no
    decorator in the extracted source; ``@classmethod`` below restores the
    decorator that was almost certainly stripped -- confirm upstream.
    """

    def __init__(self, engine: SparkEngine, metric: TMetric):
        self.engine = engine  # Spark engine used to run the calculation
        self.metric = metric  # metric definition this implementation computes

    def calculate(self, context, data: SparkInputData):
        """Compute the metric over *data*; subclasses must override."""
        raise NotImplementedError

    @classmethod
    def supported_engines(cls):
        """Return the engine types this implementation can run on."""
        return (SparkEngine,)
class OptionSeriesStreamgraphSonificationTracksMapping(Options):
    """Accessors for the streamgraph sonification track-mapping sub-options.

    Each method lazily builds the corresponding typed sub-configuration.
    NOTE(review): the two ``text`` definitions (getter then setter) and the
    accessor methods look like ``@property`` / ``@x.setter`` pairs whose
    decorators were lost in extraction -- as written, the second ``text``
    shadows the first. Confirm against upstream.
    """

    def frequency(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingFrequency':
        return self._config_sub_data('frequency', OptionSeriesStreamgraphSonificationTracksMappingFrequency)

    def gapBetweenNotes(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionSeriesStreamgraphSonificationTracksMappingGapbetweennotes)

    def highpass(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingHighpass':
        return self._config_sub_data('highpass', OptionSeriesStreamgraphSonificationTracksMappingHighpass)

    def lowpass(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingLowpass':
        return self._config_sub_data('lowpass', OptionSeriesStreamgraphSonificationTracksMappingLowpass)

    def noteDuration(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionSeriesStreamgraphSonificationTracksMappingNoteduration)

    def pan(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingPan':
        return self._config_sub_data('pan', OptionSeriesStreamgraphSonificationTracksMappingPan)

    def pitch(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesStreamgraphSonificationTracksMappingPitch)

    def playDelay(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesStreamgraphSonificationTracksMappingPlaydelay)

    def rate(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingRate':
        return self._config_sub_data('rate', OptionSeriesStreamgraphSonificationTracksMappingRate)

    # Getter: returns the configured text value (no default).
    def text(self):
        return self._config_get(None)

    # Setter: stores *text* as a plain (non-JS) value.
    def text(self, text: str):
        self._config(text, js_type=False)

    def time(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingTime':
        return self._config_sub_data('time', OptionSeriesStreamgraphSonificationTracksMappingTime)

    def tremolo(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingTremolo':
        return self._config_sub_data('tremolo', OptionSeriesStreamgraphSonificationTracksMappingTremolo)

    def volume(self) -> 'OptionSeriesStreamgraphSonificationTracksMappingVolume':
        return self._config_sub_data('volume', OptionSeriesStreamgraphSonificationTracksMappingVolume)
class RelationshipMemberMutualAuthentication(ModelNormal):
    """OpenAPI-generated model for a mutual-authentication relationship member.

    NOTE(review): the bare ``_property`` and ``_js_args_to_python_args``
    lines below look like decorators (``@cached_property`` /
    ``@convert_js_args_to_python_args``, plus likely ``@classmethod`` on
    ``_from_openapi_data``) whose ``@`` and prefixes were lost in
    extraction -- confirm against the generator's output.
    """
    allowed_values = {}
    validations = {}

    _property
    def additional_properties_type():
        # Lazy import avoids a circular-import at module load time.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False

    _property
    def openapi_types():
        lazy_import()
        # attribute name -> allowed types tuple
        return {'type': (TypeMutualAuthentication,), 'id': (str,)}

    _property
    def discriminator():
        return None

    # JSON key mapping; here attribute names match wire names.
    attribute_map = {'type': 'type', 'id': 'id'}
    read_only_vars = {'id'}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Construct an instance from server data (read-only attrs allowed)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys the schema does not know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct an instance from user input (read-only attrs rejected)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, user construction rejects read-only vars.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def test_has_grim_support_raises_on_grim_call(monkeypatch, caplog):
    """has_grim_support() returns False and logs when running grim raises OSError."""
    if not utils.shutil.which('grim'):
        # Pretend grim is installed so the subprocess path is exercised.
        monkeypatch.setattr(utils.shutil, 'which', (lambda _: '/usr/bin/grim'))

    def raise_os_error(*_, **__):
        raise OSError

    monkeypatch.setattr(utils.subprocess, 'run', raise_os_error)
    assert not utils.has_grim_support()
    assert 'error' in caplog.text.lower()
    assert 'grim support' in caplog.text
def prompt_for_stop_actions(item_id, data):
    """After playback stops, optionally prompt to delete the item and/or
    play the next episode, based on percentage-complete thresholds from
    the addon settings.

    :param item_id: server id of the item that just stopped
    :param data: playback info dict; keys include 'currentPossition' (sic,
        misspelled wire key -- do not "fix" without changing the sender),
        'duration', 'next_episode', 'item_type', 'can_delete'.
    """
    log.debug('prompt_for_stop_actions Called : {0}', data)
    settings = xbmcaddon.Addon()
    current_position = data.get('currentPossition', 0)
    duration = data.get('duration', 0)
    next_episode = data.get('next_episode')
    item_type = data.get('item_type')
    can_delete = data.get('can_delete', False)
    # A threshold of 100 means the corresponding prompt is disabled.
    prompt_next_percentage = int(settings.getSetting('promptPlayNextEpisodePercentage'))
    play_prompt = (settings.getSetting('promptPlayNextEpisodePercentage_prompt') == 'true')
    prompt_delete_episode_percentage = int(settings.getSetting('promptDeleteEpisodePercentage'))
    prompt_delete_movie_percentage = int(settings.getSetting('promptDeleteMoviePercentage'))
    if ((prompt_next_percentage == 100) and (prompt_delete_episode_percentage == 100) and (prompt_delete_movie_percentage == 100)):
        # All prompts disabled -- nothing to do.
        return
    prompt_to_delete = False
    if (duration == 0):
        # Cannot compute a completion percentage without a duration.
        log.debug('No duration so returing')
        return
    percenatge_complete = int(((current_position / duration) * 100))
    log.debug('Episode Percentage Complete: {0}', percenatge_complete)
    if (can_delete and (prompt_delete_episode_percentage < 100) and (item_type == 'Episode') and (percenatge_complete > prompt_delete_episode_percentage)):
        prompt_to_delete = True
    if (can_delete and (prompt_delete_movie_percentage < 100) and (item_type == 'Movie') and (percenatge_complete > prompt_delete_movie_percentage)):
        prompt_to_delete = True
    if prompt_to_delete:
        log.debug('Prompting for delete')
        delete(item_id)
    # Offer / auto-start the next episode when far enough through this one.
    if ((next_episode is not None) and (prompt_next_percentage < 100) and (item_type == 'Episode') and (percenatge_complete > prompt_next_percentage)):
        if play_prompt:
            # Show the "play next" dialog from the addon's skin files.
            plugin_path = settings.getAddonInfo('path')
            plugin_path_real = xbmcvfs.translatePath(os.path.join(plugin_path))
            play_next_dialog = PlayNextDialog('PlayNextDialog.xml', plugin_path_real, 'default', '720p')
            play_next_dialog.set_episode_info(next_episode)
            play_next_dialog.doModal()
            if (not play_next_dialog.get_play_called()):
                xbmc.executebuiltin('Container.Refresh')
        else:
            # No prompt configured: start the next episode immediately.
            next_item_id = next_episode.get('Id')
            log.debug('Playing Next Episode: {0}', next_item_id)
            play_info = {}
            play_info['item_id'] = next_item_id
            play_info['auto_resume'] = '-1'
            play_info['force_transcode'] = False
            send_event_notification('embycon_play_action', play_info)
def run_migrations_online():
    """Run Alembic migrations in 'online' mode, against a live connection."""
    section = config.get_section(config.config_ini_section)
    engine = engine_from_config(section, prefix='sqlalchemy.', poolclass=pool.NullPool)
    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,
            compare_type=True,
        )
        with context.begin_transaction():
            context.run_migrations()
def get_calendar_html() -> HTML:
    """Build the activity-calendar HTML row: one block per day of the
    current year, colored by how many notes were created that day.

    Relies on Anki note ids being millisecond creation timestamps.
    """
    html = "<div id='cal-row' class='w-100' onmouseleave='calMouseLeave()'>%s</div> "
    day_of_year = datetime.datetime.now().timetuple().tm_yday
    date_year_begin = datetime.datetime(year=datetime.datetime.utcnow().year, month=1, day=1, hour=0, minute=0)
    # Note ids are creation times in ms, so an id range selects a date range.
    nid_now = int((time.time() * 1000))
    nid_minus_day_of_year = int((date_year_begin.timestamp() * 1000))
    res = mw.col.db.all(('select distinct notes.id from notes where id > %s and id < %s order by id asc' % (nid_minus_day_of_year, nid_now)))
    counts = []
    c = 1                       # day-of-year currently being accumulated
    notes_in_current_day = 0
    for (i, r) in enumerate(res):
        c_day_of_year = time.localtime((r[0] / 1000)).tm_yday
        if (c_day_of_year == c):
            notes_in_current_day += 1
            if (i == (len(res) - 1)):
                # Last row: flush the running count.
                counts.append(notes_in_current_day)
        else:
            # Day changed: flush previous day, pad zero-days in between,
            # and start counting the new day (this row counts as 1).
            counts.append(notes_in_current_day)
            notes_in_current_day = 1
            counts += [0 for _ in range(0, ((c_day_of_year - c) - 1))]
            c = c_day_of_year
    # Pad trailing days (after the last note) up to today.
    while (len(counts) < day_of_year):
        counts.append(0)
    html_content = ''
    added = 0
    for (i, notes_in_current_day) in enumerate(counts):
        # Bucket the count into one of three intensity classes.
        if (notes_in_current_day > 20):
            color = 'cal-three'
        elif (notes_in_current_day > 10):
            color = 'cal-two'
        elif (notes_in_current_day > 0):
            color = 'cal-one'
        else:
            color = ''
        html_content = (f"{html_content}<div class='cal-block %s %s' data-index='%s'></div>" % (('cal-today' if (i == (len(counts) - 1)) else ''), color, added))
        added += 1
    html = (html % html_content)
    return html
def read_attachments(list, path, notespath) -> str:
    """Build a markdown 'Attachments' section for the given attachment
    entries, copying image files into the notes directory.

    :param list: attachment entries (dicts with 'mimetype'/'filePath');
        NOTE(review): parameter shadows the ``list`` builtin upstream.
    :param path: source directory of the attachment files
    :param notespath: destination notes directory

    If the initial copy fails, tries sibling files with the same stem but
    an alternative extension for the same mime type (e.g. .jpg vs .jpeg).
    """
    attachments_list = '*Attachments:*\n'
    for entry in list:
        if ('image' in entry['mimetype']):
            image = entry['filePath']
            if (copy_file(image, path, notespath) is False):
                # Copy failed: guess alternative extensions for this mime type.
                image_type = mimetypes.guess_type(f'{path}{image}')
                types = mimetypes.guess_all_extensions(image_type[0])
                for type in types:
                    if (type in image):
                        image_name = image.replace(type, '')
                        for t in types:
                            if (len(glob.glob(f'{path}{image_name}{t}')) > 0):
                                image = f'{image_name}{t}'
                                print(f'Found "{image}"')
                # Retry the copy with the (possibly corrected) filename.
                copy_file(image, path, notespath)
            respath = os.path.join('resources', '')
            # NOTE(review): this f-string appends only a newline and ignores
            # ``respath`` -- the link body was likely lost in extraction;
            # confirm against upstream.
            attachments_list += f'''
'''
    return attachments_list
# NOTE(review): the bare expressions below look like stripped click
# decorators (``@_group.command(...)``, ``@click.argument('host-id')``,
# ``@click.option(...)``, ``@click.pass_context``) whose ``@`` and prefixes
# were lost in extraction; some are not valid statements as written.
_group.command('collect-events')
('host-id')
('--query', '-q', help='KQL query to scope search')
('--index', '-i', multiple=True, help='Index(es) to search against (default: all indexes)')
('--rta-name', '-r', help='Name of RTA in order to save events directly to unit tests data directory')
('--rule-id', help='Updates rule mapping in rule-mapping.yml file (requires --rta-name)')
('--view-events', is_flag=True, help='Print events after saving')
_context
def collect_events(ctx, host_id, query, index, rta_name, rule_id, view_events):
    """Collect RTA detonation events for *host_id* from Elasticsearch and
    save them (optionally mapping them to a rule and printing them)."""
    client: Elasticsearch = ctx.obj['es']
    # Scope the search to the target host, optionally narrowed by a KQL query.
    dsl = (kql.to_dsl(query) if query else MATCH_ALL)
    dsl['bool'].setdefault('filter', []).append({'bool': {'should': [{'match_phrase': {'host.id': host_id}}]}})
    try:
        collector = CollectRtaEvents(client)
        start = time.time()
        click.pause('Press any key once detonation is complete ...')
        # Search from just before the user started the detonation (+5s slack).
        start_time = f'now-{(round((time.time() - start)) + 5)}s'
        events = collector.run(dsl, (index or '*'), start_time)
        events.save(rta_name=rta_name, host_id=host_id)
        if (rta_name and rule_id):
            events.evaluate_against_rule_and_update_mapping(rule_id, rta_name)
        if (view_events and events.events):
            events.echo_events(pager=True)
        return events
    except AssertionError as e:
        # The collector asserts that events were found.
        error_msg = 'No events collected! Verify events are streaming and that the agent-hostname is correct'
        client_error(error_msg, e, ctx=ctx)
class Downsample(Runner):
    """Benchmark runner that invokes the Elasticsearch downsample API on a
    source index, writing the result to a target index."""

    async def __call__(self, es, params):
        # Split transport-level params (timeouts, headers) from API params.
        (params, request_params, transport_params, request_headers) = self._transport_request_params(params)
        es = es.options(**transport_params)
        fixed_interval = mandatory(params, 'fixed-interval', self)
        # NOTE(review): `mandatory` presumably raises when the key is absent;
        # this extra check also rejects an explicit None value.
        if (fixed_interval is None):
            raise exceptions.DataError("Parameter source for operation 'downsample' did not provide the mandatory parameter 'fixed-interval'. Add it to your parameter source and try again.")
        source_index = mandatory(params, 'source-index', self)
        if (source_index is None):
            raise exceptions.DataError("Parameter source for operation 'downsample' did not provide the mandatory parameter 'source-index'. Add it to your parameter source and try again.")
        target_index = mandatory(params, 'target-index', self)
        if (target_index is None):
            raise exceptions.DataError("Parameter source for operation 'downsample' did not provide the mandatory parameter 'target-index'. Add it to your parameter source and try again.")
        path = f'/{source_index}/_downsample/{target_index}'
        (await es.perform_request(method='POST', path=path, body={'fixed_interval': fixed_interval}, params=request_params, headers=request_headers))
        # One downsample call counts as a single operation.
        return {'weight': 1, 'unit': 'ops', 'success': True}

    def __repr__(self, *args, **kwargs):
        return 'downsample'
class Migration(migrations.Migration):
    """Allow subscription 'bills' M2M fields to be empty (null/blank)."""

    dependencies = [('core', '0003_subscriptions')]

    operations = [
        migrations.AlterField(
            model_name='communitysubscription',
            name='bills',
            field=models.ManyToManyField(to='core.SubscriptionBill', null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='roomsubscription',
            name='bills',
            field=models.ManyToManyField(to='core.SubscriptionBill', null=True, blank=True),
            preserve_default=True,
        ),
    ]
class TextFilter(filters.Filter):
    """Filter that optionally Unicode-normalizes text and/or converts it to
    a different encoding before spell checking.

    Fix: the validation error messages previously used
    ``self.__class__.__name`` which, due to Python private name mangling
    inside a class body, resolved to ``self.__class__._TextFilter__name``
    and raised AttributeError instead of the intended ValueError text.
    """

    def __init__(self, options, default_encoding='utf-8'):
        """Initialize with user options and a fallback encoding."""
        super().__init__(options, default_encoding)

    def get_default_config(self):
        """Return the default configuration values."""
        return {'normalize': '', 'convert_encoding': '', 'errors': 'strict'}

    def validate_options(self, k, v):
        """Validate option *k* with value *v*; raise ValueError on bad values."""
        super().validate_options(k, v)
        if ((k == 'errors') and (v.lower() not in ('strict', 'replace', 'ignore', 'backslashreplace'))):
            raise ValueError(f"{self.__class__.__name__}: '{v}' is not a valid value for '{k}'")
        if ((k == 'normalize') and (v.upper() not in ('NFC', 'NFKC', 'NFD', 'NFKD'))):
            raise ValueError(f"{self.__class__.__name__}: '{v}' is not a valid value for '{k}'")

    def setup(self):
        """Normalize the configured option values for later use."""
        self.normalize = self.config['normalize'].upper()
        self.convert_encoding = self.config['convert_encoding'].lower()
        self.errors = self.config['errors'].lower()
        if self.convert_encoding:
            # NOTE(review): this canonicalizes `default_encoding`, not the
            # configured `convert_encoding` -- looks suspicious; confirm
            # against upstream before changing.
            self.convert_encoding = codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(self.default_encoding, self.default_encoding).lower()).name
            # UTF-16/32 without explicit endianness: default to little-endian.
            if (self.convert_encoding.startswith(('utf-32', 'utf-16')) and (not self.convert_encoding.endswith(('le', 'be')))):
                self.convert_encoding += '-le'
            if (self.convert_encoding == 'utf-8-sig'):
                self.convert_encoding = 'utf-8'

    def convert(self, text, encoding):
        """Apply normalization and encoding conversion; return (text, encoding)."""
        if (self.normalize in ('NFC', 'NFKC', 'NFD', 'NFKD')):
            text = unicodedata.normalize(self.normalize, text)
        if self.convert_encoding:
            # Round-trip through the target encoding to surface conversion errors.
            text = text.encode(self.convert_encoding, self.errors).decode(self.convert_encoding)
            encoding = self.convert_encoding
        return (text, encoding)

    def filter(self, source_file, encoding):
        """Filter a file on disk."""
        with codecs.open(source_file, 'r', encoding=encoding) as f:
            text = f.read()
        (text, encoding) = self.convert(text, encoding)
        return [filters.SourceText(text, source_file, encoding, 'text')]

    def sfilter(self, source):
        """Filter an in-memory SourceText."""
        (text, encoding) = self.convert(source.text, source.encoding)
        return [filters.SourceText(text, source.context, encoding, 'text')]
def extractWwwSakuranovelsCom(item):
    """Parser for feed items from 'www.sakuranovels.com'.

    Returns None for previews/non-chapter posts, a release message for
    recognized series tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, canonical series name, translation type)
    series_map = [("I'm Back in the Other World?", "I'm Back in the Other World?", 'translated'), ('Reincarnated Into Werewolf', 'Reincarnated Into Werewolf', 'translated'), ('Flash Marriage', 'Flash Marriage', 'translated'), ('Unexpected Marriage', 'Unexpected Marriage', 'translated'), ('Magicraft Meister', 'Magicraft Meister', 'translated'), ('Spice of Life', 'Spice of Life', 'translated'), ('Awakening', 'Awakening', 'translated'), ('Law of the Devil', 'Law of the Devil', 'translated'), ('Help Gooogle Sensei', 'Help Gooogle Sensei', 'translated'), ('Botsuraku', 'Botsuraku Youtei Nanode, Kajishokunin wo Mezasu', 'translated')]
    for tagname, name, tl_type in series_map:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
(scope='function')
def set_notification_service_type_to_none(db):
    """Yield-style fixture: temporarily clear the notification service type.

    Persists the cleared value via ApplicationConfig, yields to the test,
    then restores and re-persists the original value on teardown.
    """
    original_value = CONFIG.notifications.notification_service_type
    CONFIG.notifications.notification_service_type = None
    ApplicationConfig.update_config_set(db, CONFIG)
    # test body runs here; everything below is teardown
    (yield)
    CONFIG.notifications.notification_service_type = original_value
    ApplicationConfig.update_config_set(db, CONFIG)
class ConsensusAPI(ABC):
    """Abstract interface for a block-seal consensus engine."""
    def __init__(self, context: ConsensusContextAPI) -> None:
        """Initialize the engine with a shared consensus context."""
        ...
    def validate_seal(self, header: BlockHeaderAPI) -> None:
        """Validate the seal on *header*; expected to raise when invalid."""
        ...
    def validate_seal_extension(self, header: BlockHeaderAPI, parents: Iterable[BlockHeaderAPI]) -> None:
        """Validate the seal with access to the header's parents; expected to raise when invalid."""
        ...
    def get_fee_recipient(cls, header: BlockHeaderAPI) -> Address:
        """Return the address that receives the fees for *header*.

        NOTE(review): first parameter is named ``cls`` — presumably a
        ``@classmethod`` whose decorator is not visible here; confirm upstream.
        """
        ...
class CRUDCasbin(CRUDBase[(CasbinRule, CreatePolicy, UpdatePolicy)]):
    """CRUD helpers for casbin rule rows."""
    async def get_all_policy(self, ptype: str, sub: str) -> Select:
        """Build a select over casbin rules, optionally filtered by policy
        type and/or a substring match on the subject column (v0)."""
        stmt = select(self.model).order_by(self.model.id)
        conditions = []
        if ptype:
            conditions.append(self.model.ptype == ptype)
        if sub:
            conditions.append(self.model.v0.like(f'%{sub}%'))
        if conditions:
            stmt = stmt.where(and_(*conditions))
        return stmt
    async def delete_policies_by_sub(self, db: AsyncSession, sub: DeleteAllPolicies) -> int:
        """Delete every policy whose subject matches the role (and, when
        given, the uuid); returns the number of deleted rows."""
        conditions = []
        if sub.uuid:
            conditions.append(self.model.v0 == sub.uuid)
        # the role condition always applies; uuid is an optional extra match
        conditions.append(self.model.v0 == sub.role)
        result = await db.execute(delete(self.model).where(or_(*conditions)))
        return result.rowcount
    async def delete_groups_by_uuid(self, db: AsyncSession, sub: DeleteAllUserRoles) -> int:
        """Delete every grouping rule whose subject equals the user uuid;
        returns the number of deleted rows."""
        result = await db.execute(delete(self.model).where(self.model.v0 == sub.uuid))
        return result.rowcount
def clean_up_decoded_string_like_hf(out_string: str) -> str:
    """Apply HuggingFace-tokenizer-style detokenization cleanup.

    Removes the space a word-level tokenizer inserts before punctuation and
    common English contractions. Replacements are applied in a fixed order
    (matching the HF reference behavior).
    """
    replacements = (
        (' .', '.'),
        (' ?', '?'),
        (' !', '!'),
        (' ,', ','),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
    )
    for needle, repl in replacements:
        out_string = out_string.replace(needle, repl)
    return out_string
def extractNovustreasuresBlogspotCom(item):
    """Map a novustreasures.blogspot.com feed item to a release message.

    Returns None for previews / items without a volume or chapter, a release
    message when a known series tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # (tag, canonical series name, translation type)
    tagmap = [('Xuan Wang Above Di Daughter Runs Away', 'Xuan Wang Above Di Daughter Runs Away', 'translated'), ('The Aloof Prince', 'The Aloof Prince Pampers his Wild First Rate Consort!', 'translated')]
    for tag, series, kind in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=kind)
    return False
class TransactionProcessBehaviour(TickerBehaviour):
    """Ticker behaviour that periodically forwards signed TAC transactions to the controller."""
    def setup(self) -> None:
        """No setup required."""
    def act(self) -> None:
        """On each tick, process pending transactions while the game phase is GAME."""
        game = cast(Game, self.context.game)
        if (game.phase.value == Phase.GAME.value):
            self._process_transactions()
    def teardown(self) -> None:
        """No teardown required."""
    def _process_transactions(self) -> None:
        """Drain the shared 'transactions' queue and send each entry to the TAC controller."""
        game = cast(Game, self.context.game)
        tac_dialogue = game.tac_dialogue
        # shared_state['transactions'] maps tx_id -> {'terms', 'sender_signature', 'counterparty_signature'}
        transactions = cast(Dict[(str, Dict[(str, Any)])], self.context.shared_state.get('transactions', OrderedDict()))
        # snapshot the keys so we can pop entries while iterating
        tx_ids = list(transactions.keys())
        for tx_id in tx_ids:
            last_msg = tac_dialogue.last_message
            if (last_msg is None):
                raise ValueError('No last message available.')
            # pop so each transaction is processed exactly once
            tx_content = transactions.pop(tx_id, None)
            if (tx_content is None):
                raise ValueError('Tx for id={} not found.'.format(tx_id))
            terms = tx_content['terms']
            sender_signature = tx_content['sender_signature']
            counterparty_signature = tx_content['counterparty_signature']
            # reply on the TAC dialogue with the full transaction payload
            msg = tac_dialogue.reply(performative=TacMessage.Performative.TRANSACTION, target_message=last_msg, transaction_id=tx_id, ledger_id=terms.ledger_id, sender_address=terms.sender_address, counterparty_address=terms.counterparty_address, amount_by_currency_id=terms.amount_by_currency_id, fee_by_currency_id=terms.fee_by_currency_id, quantities_by_good_id=terms.quantities_by_good_id, sender_signature=sender_signature, counterparty_signature=counterparty_signature, nonce=terms.nonce)
            self.context.logger.info('sending transaction {} to controller, message={}.'.format(tx_id, msg))
            self.context.outbox.put_message(message=msg)
def run_projection(mesh, expr, p):
    """L2-project *expr* onto a degree-*p* KMV space on *mesh* and return the
    norm of the difference between the projection and the interpolant.

    NOTE(review): KMV elements are designed for mass lumping; with the
    matching KMV quadrature the mass matrix is (presumably) diagonal, so
    'preonly' + 'jacobi' solves the system in one diagonal scale — confirm.
    """
    V = FunctionSpace(mesh, 'KMV', p)
    T = V.finat_element.cell
    (u, v) = (TrialFunction(V), TestFunction(V))
    # quadrature rule matched to the KMV element of the same degree
    qr = finat.quadrature.make_quadrature(T, p, 'KMV')
    r = Function(V)
    f = interpolate(expr(*SpatialCoordinate(mesh)), V)
    solve(((inner(u, v) * dx(scheme=qr)) == (inner(f, v) * dx(scheme=qr))), r, solver_parameters={'ksp_type': 'preonly', 'pc_type': 'jacobi'})
    return norm((r - f))
.parametrize('elasticapm_client', [{'client_class': AzureFunctionsTestClient}], indirect=['elasticapm_client'])
def test_cloud_info(elasticapm_client):
    """Azure Functions cloud metadata is derived from App Service env vars."""
    with mock.patch.dict(os.environ, {'REGION_NAME': 'eu-liechtenstein', 'WEBSITE_OWNER_NAME': '2491fc8e-f7c1-4020-b9c6-fd16+my-resource-group-ARegionShortNamewebspace', 'WEBSITE_SITE_NAME': 'foo', 'WEBSITE_RESOURCE_GROUP': 'bar'}):
        cloud_info = elasticapm_client.get_cloud_info()
        assert (cloud_info['provider'] == 'azure')
        assert (cloud_info['region'] == 'eu-liechtenstein')
        assert (cloud_info['service']['name'] == 'functions')
        # account id is the portion of WEBSITE_OWNER_NAME before the '+'
        assert (cloud_info['account']['id'] == '2491fc8e-f7c1-4020-b9c6-fd16')
        assert (cloud_info['instance']['name'] == 'foo')
        assert (cloud_info['project']['name'] == 'bar')
def test_distribution_mapper():
    """Each action head is mapped to the expected probability distribution type."""
    act_space = spaces.Dict(spaces={'selection': spaces.Discrete(10), 'order': spaces.MultiBinary(15), 'scale_input': spaces.Box(shape=(5,), low=0, high=100, dtype=np.float64), 'order_by_weight': spaces.Box(shape=(5,), low=0, high=100, dtype=np.float64)})
    config = [{'action_space': spaces.Box, 'distribution': 'maze.distributions.squashed_gaussian.SquashedGaussianProbabilityDistribution'}, {'action_head': 'order_by_weight', 'distribution': 'maze.distributions.beta.BetaProbabilityDistribution'}]
    distribution_mapper = DistributionMapper(action_space=act_space, distribution_mapper_config=config)
    # smoke-check the repr implementation
    repr(distribution_mapper)
    # expected distribution class per action head
    expected = {'selection': CategoricalProbabilityDistribution, 'order': BernoulliProbabilityDistribution, 'scale_input': SquashedGaussianProbabilityDistribution, 'order_by_weight': BetaProbabilityDistribution}
    logits_dict = dict()
    for head in act_space.spaces.keys():
        required_shape = distribution_mapper.required_logits_shape(head)
        logits = torch.from_numpy(np.random.randn(*required_shape))
        logits_dict[head] = logits
        dist = distribution_mapper.action_head_distribution(action_head=head, logits=logits, temperature=1.0)
        assert isinstance(dist, expected[head])
    combined = distribution_mapper.logits_dict_to_distribution(logits_dict=logits_dict, temperature=1.0)
    assert isinstance(combined, DictProbabilityDistribution)
def delete_snapshots(snapshot_id, server_id):
    """Delete the snapshot (image) with the given id via the API and print the outcome."""
    endpoint = f'{base_url}/images/{snapshot_id}'
    response = requests.delete(url=endpoint, headers=headers)
    if response.ok:
        print(f'Snapshot #{snapshot_id} (Server #{server_id}) was successfully deleted')
    else:
        print(f'Snapshot #{snapshot_id} (Server #{server_id}) could not be deleted: {response.reason}')
        print(response.text)
class TherapyPlanTemplate(Document):
    """Therapy plan template kept in sync with a linked sales Item and its Item Price."""
    def after_insert(self):
        """Create a fresh Item for this template, or price an already-linked one."""
        if not self.link_existing_item:
            self.create_item_from_template()
        elif self.linked_item and self.total_amount:
            make_item_price(self.linked_item, self.total_amount)
    def validate(self):
        """Refresh the aggregate totals before saving."""
        self.set_totals()
    def on_update(self):
        """Propagate template changes to the linked Item / Item Price."""
        previous = self.get_doc_before_save()
        if not previous:
            return
        descriptive_changed = (previous.item_name != self.item_name) or (previous.item_group != self.item_group) or (previous.description != self.description)
        if descriptive_changed:
            self.update_item()
        if previous.therapy_types != self.therapy_types:
            self.update_item_price()
    def set_totals(self):
        """Sum sessions and amounts over all therapy type rows."""
        self.total_sessions = sum(cint(row.no_of_sessions) for row in self.therapy_types)
        self.total_amount = sum(flt(row.amount) for row in self.therapy_types)
    def create_item_from_template(self):
        """Insert a non-stock service Item mirroring this template and link it."""
        uom = frappe.db.exists('UOM', 'Nos') or frappe.db.get_single_value('Stock Settings', 'stock_uom')
        item_fields = {
            'doctype': 'Item',
            'item_code': self.item_code,
            'item_name': self.item_name,
            'item_group': self.item_group,
            'description': self.description,
            'is_sales_item': 1,
            'is_service_item': 1,
            'is_purchase_item': 0,
            'is_stock_item': 0,
            'show_in_website': 0,
            'is_pro_applicable': 0,
            'stock_uom': uom,
        }
        item = frappe.get_doc(item_fields).insert(ignore_permissions=True, ignore_mandatory=True)
        make_item_price(item.name, self.total_amount)
        self.db_set('linked_item', item.name)
    def update_item(self):
        """Copy the descriptive fields onto the linked Item."""
        item = frappe.get_doc('Item', {'item_code': self.linked_item})
        item.item_name = self.item_name
        item.item_group = self.item_group
        item.description = self.description
        item.ignore_mandatory = True
        item.save(ignore_permissions=True)
    def update_item_price(self):
        """Refresh the linked Item Price's name and rate from this template."""
        price_doc = frappe.get_doc('Item Price', {'item_code': self.linked_item})
        price_doc.item_name = self.item_name
        price_doc.price_list_rate = self.total_amount
        price_doc.ignore_mandatory = True
        price_doc.save(ignore_permissions=True)
class P3IntMomentsDualSet(DualSet):
    """Dual basis for cubics on the unit interval: two vertex point
    evaluations plus integral moments against the constant 1 and x."""
    def __init__(self, cell, order):
        # only implemented for the UFC reference interval at order 3
        assert (cell == UFCInterval()) and (order == 3)
        # dofs 0,1 live on the vertices; dofs 2,3 on the interior edge
        entity_ids = {0: {0: [0], 1: [1]}, 1: {0: [2, 3]}}
        vertex_nodes = [PointEvaluation(cell, vertex) for vertex in cell.vertices]
        quad = make_quadrature(cell, 3)
        const_vals = np.asarray([1.0] * len(quad.pts))
        coord_vals = np.asarray([pt[0] for pt in quad.pts])
        moment_nodes = [IntegralMoment(cell, quad, const_vals), IntegralMoment(cell, quad, coord_vals)]
        super().__init__(vertex_nodes + moment_nodes, cell, entity_ids)
class Subscription(models.Model):
    """A recurring monthly subscription for a user at a location.

    Billing periods run from the start date's day-of-month to the day
    before that same day in the following month; one SubscriptionBill is
    generated per period.
    """
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User, related_name='+')
    location = models.ForeignKey(Location)
    user = models.ForeignKey(User)
    price = models.DecimalField(decimal_places=2, max_digits=9)
    description = models.CharField(max_length=256, blank=True, null=True)
    start_date = models.DateField()
    end_date = models.DateField(blank=True, null=True)
    objects = SubscriptionManager()
    def get_period(self, target_date=None):
        """Return the (period_start, period_end) dates of the billing period
        containing *target_date* (default: today), or (None, None) when the
        date falls outside the subscription's lifetime."""
        if (not target_date):
            target_date = timezone.now().date()
        if ((target_date < self.start_date) or (self.end_date and (target_date > self.end_date))):
            return (None, None)
        if (target_date.day == self.start_date.day):
            period_start = target_date
        else:
            month = target_date.month
            year = target_date.year
            if (target_date.day < self.start_date.day):
                # the period containing target_date began in the previous month
                # NOTE(review): this tests target_date.day == 1 but then rolls
                # back to December of the previous year — that looks like it
                # should be target_date.month == 1; confirm against history.
                if (target_date.day == 1):
                    month = 12
                    year = (target_date.year - 1)
                else:
                    month = (target_date.month - 1)
            period_start = date(year, month, self.start_date.day)
        logger.debug('')
        logger.debug(('in get_period(). period_start=%s' % period_start))
        logger.debug('')
        period_end = (period_start + relativedelta(months=1))
        # if the day-of-month survived the month addition, the period ends the
        # day before the next period starts
        if (period_end.day == period_start.day):
            period_end = (period_end - timedelta(days=1))
        return (period_start, period_end)
    def get_next_period_start(self, target_date=None):
        """Return the start date of the period after the one containing
        *target_date*, or None when the subscription has ended by then."""
        if (not target_date):
            target_date = timezone.now().date()
        if (self.start_date > target_date):
            return self.start_date
        (this_period_start, this_period_end) = self.get_period(target_date=target_date)
        if (this_period_end is None):
            return None
        next_period_start = (this_period_end + timedelta(days=1))
        if (self.end_date and (next_period_start > self.end_date)):
            return None
        return next_period_start
    def is_period_boundary(self, target_date=None):
        """True when *target_date* (default: the day before end_date) is the
        last day of a billing period."""
        if (not target_date):
            if (not self.end_date):
                return False
            target_date = (self.end_date - timedelta(days=1))
        period = self.get_period(target_date=target_date)
        return (period and (period[1] == target_date))
    def total_periods(self, target_date=None):
        """Number of whole billing periods elapsed up to *target_date*
        (clamped to end_date); 0 before the subscription starts."""
        if (not target_date):
            target_date = timezone.now().date()
        if (self.start_date > target_date):
            return 0
        if (self.end_date and (self.end_date < target_date)):
            target_date = self.end_date
        rd = relativedelta((target_date + timedelta(days=1)), self.start_date)
        return (rd.months + (12 * rd.years))
    def bills_between(self, start, end):
        """Collect the existing bills for every period start in [start, end)."""
        d = start
        bills = []
        while (d < end):
            b = self.get_bill_for_date(d)
            if b:
                bills.append(b)
            d = self.get_next_period_start(d)
            if (not d):
                break
        return bills
    def get_bill_for_date(self, date):
        """Return the single bill whose period covers *date*, or None.

        Raises when more than one bill covers the date (data corruption).
        """
        result = SubscriptionBill.objects.filter(subscription=self, period_start__lte=date, period_end__gte=date)
        logger.debug(('subscription %d: get_bill_for_date %s' % (self.id, date)))
        logger.debug('bill object(s):')
        logger.debug(result)
        if result.count():
            if (result.count() > 1):
                logger.debug("Warning! Multiple bills found for one date. This shouldn't happen")
                raise Exception('Error: multiple bills for one date:')
            return result[0]
        else:
            return None
    def days_between(self, start, end):
        """Number of days this subscription overlaps the window [start, end].

        NOTE(review): when end_date is unset this method assigns
        self.end_date = end as a side effect (without saving) — verify that
        callers expect the instance to be mutated.
        """
        days = 0
        if (not self.end_date):
            self.end_date = end
        if ((self.start_date >= start) and (self.end_date <= end)):
            # subscription entirely inside the window
            days = (self.end_date - self.start_date).days
        elif ((self.start_date <= start) and (self.end_date >= end)):
            # window entirely inside the subscription
            days = (end - start).days
        elif (self.start_date < start):
            # overlaps the beginning of the window
            days = (self.end_date - start).days
        elif (self.end_date > end):
            # overlaps the end of the window
            days = (end - self.start_date).days
        return days
    def is_active(self, target_date=None):
        """True when *target_date* (default: today) falls within the
        subscription's [start_date, end_date] range (open-ended if no end)."""
        if (not target_date):
            target_date = timezone.now().date()
        return ((self.start_date <= target_date) and ((self.end_date is None) or (self.end_date >= target_date)))
    def generate_bill(self, delete_old_items=True, target_date=None):
        """Create or refresh the bill for the period containing *target_date*.

        Prorates the base charge when the subscription ends mid-period,
        preserves custom line items, and re-applies house-paid location fees.
        Returns the list of line items, or None outside the subscription.
        """
        if (not target_date):
            target_date = timezone.now().date()
        (period_start, period_end) = self.get_period(target_date)
        if (not period_start):
            return None
        logger.debug(' ')
        logger.debug(('in generate_bill for target_date = %s and get_period = (%s, %s)' % (target_date, period_start, period_end)))
        prorated = False
        if (self.end_date and (self.end_date < period_end)):
            # subscription ends mid-period: shorten the period and prorate
            prorated = True
            original_period_end = period_end
            period_end = self.end_date
        try:
            bill = SubscriptionBill.objects.get(period_start=period_start, subscription=self)
            logger.debug(('Found existing bill #%d for period start %s' % (bill.id, period_start.strftime('%B %d %Y'))))
            if (prorated and (bill.period_end != period_end)):
                bill.period_end = period_end
                bill.save()
            if (not delete_old_items):
                return list(bill.line_items)
        # NOTE(review): broad catch — presumably meant for DoesNotExist, but
        # any failure above triggers creation of a new bill; confirm intent.
        except Exception as e:
            logger.debug('Generating new bill item')
            # NOTE(review): created without subscription= — linked below via
            # self.bills.add(bill); confirm that satisfies the relation.
            bill = SubscriptionBill.objects.create(period_start=period_start, period_end=period_end)
        logger.debug(('working with bill %d (%s)' % (bill.id, bill.period_start.strftime('%B %d %Y'))))
        # custom items survive regeneration; auto-generated ones are replaced
        custom_items = list(bill.line_items.filter(custom=True))
        if delete_old_items:
            if (bill.total_paid() > 0):
                logger.debug('Warning: modifying a bill with payments on it.')
            for item in bill.line_items.all():
                item.delete()
        line_items = []
        desc = ('%s (%s to %s)' % (self.description, period_start, period_end))
        if prorated:
            # charge only the fraction of the original period actually covered
            period_days = Decimal((period_end - period_start).days)
            original_period_days = (original_period_end - period_start).days
            price = ((period_days / original_period_days) * self.price)
        else:
            price = self.price
        line_item = BillLineItem(bill=bill, description=desc, amount=price, paid_by_house=False)
        line_items.append(line_item)
        effective_bill_charge = price
        # re-attach custom items; they also count toward the fee base
        for item in custom_items:
            line_items.append(item)
            effective_bill_charge += item.amount
            logger.debug(item.amount)
        logger.debug(('effective room charge after discounts: %d' % effective_bill_charge))
        # house-paid location fees are computed on the effective charge
        for location_fee in LocationFee.objects.filter(location=self.location, fee__paid_by_house=True):
            desc = ('%s (%s%c)' % (location_fee.fee.description, (location_fee.fee.percentage * 100), '%'))
            amount = (float(effective_bill_charge) * location_fee.fee.percentage)
            logger.debug(('Fee %s for %d' % (desc, amount)))
            fee_line_item = BillLineItem(bill=bill, description=desc, amount=amount, paid_by_house=True, fee=location_fee.fee)
            line_items.append(fee_line_item)
        bill.save()
        for item in line_items:
            item.save()
        self.bills.add(bill)
        self.save()
        return line_items
    def generate_all_bills(self, target_date=None):
        """Generate bills for every period from *target_date* (default:
        start_date) up to today or the subscription's end, whichever first."""
        today = timezone.now().date()
        if (not target_date):
            target_date = self.start_date
        if (self.end_date and (self.end_date < today)):
            end_date = self.end_date
        else:
            end_date = today
        period_start = target_date
        while (period_start and (period_start < today) and (period_start < end_date)):
            self.generate_bill(target_date=period_start)
            period_start = self.get_next_period_start(period_start)
    def last_paid(self, include_partial=False):
        """Return the end of the most recent (fully or, optionally,
        partially) paid period; falls back to the oldest bill's period start
        when nothing is paid, or None when there are no bills."""
        bills = self.bills.order_by('period_start').reverse()
        if (not bills):
            return None
        for b in bills:
            try:
                (paid_until_start, paid_until_end) = self.get_period(target_date=b.period_end)
            # NOTE(review): bare except — silently skips bills whose period
            # lookup fails, then paid_until_end may be stale; confirm.
            except:
                logger.debug("didn't like date")
                logger.debug(b.period_end)
            if (b.is_paid() or (include_partial and (b.total_paid() > 0))):
                return paid_until_end
        return b.period_start
    def delete_unpaid_bills(self):
        """Remove every bill with no payments recorded against it."""
        for bill in self.bills.all():
            if (bill.total_paid() == 0):
                bill.delete()
    def has_unpaid_bills(self):
        """True when at least one bill is not fully paid."""
        for bill in self.bills.all():
            if (not bill.is_paid()):
                return True
        return False
    def update_for_end_date(self, new_end_date):
        """Set a new end date, then rebuild billing from scratch by dropping
        unpaid bills and regenerating the full schedule."""
        self.end_date = new_end_date
        self.save()
        today = timezone.localtime(timezone.now()).date()
        # NOTE(review): this period lookup result is never used.
        (period_start, period_end) = self.get_period(today)
        self.delete_unpaid_bills()
        self.generate_all_bills()
    def expected_num_bills(self):
        """Number of bills that should exist between start_date and
        today/end_date (assumes end_date is set)."""
        today = timezone.localtime(timezone.now()).date()
        period_start = self.start_date
        num_expected = 0
        while (period_start and (period_start < today) and (period_start < self.end_date)):
            num_expected += 1
            period_start = self.get_next_period_start(period_start)
        return num_expected
def config_twitter(config):
    """Interactively collect Twitter API credentials and OAuth tokens,
    store them under config['twitter'], and return the config.

    Returns the config unchanged when the user declines setup or declines
    replacing an existing Twitter section.
    """
    if input('Would you like the bot to post to Twitter? (Y/n) ').lower().startswith('n'):
        return config
    if 'twitter' in config.keys():
        answer = input('Twitter configuration already exists. Replace? (Y/n) ')
        if answer.lower() in ['n', 'no']:
            return config
    input('Create a new Twitter app at post matching stories. For this step, you can be logged in as yourself or with the\nposting account, if they\'re different. Fill out Name, Description, and Website with\nvalues meaningful to you. These are not used in trackthenews config but may be\npublicly visible. Then click the "Keys and Access Tokens" tab.\n\nPress [Enter] to continue...')
    api_key = input('Enter the provided API key: ').strip()
    api_secret = input('Enter the provided API secret: ').strip()
    input('Now ensure you are logged in with the account that will do the posting.\n\nPress [Enter] to continue...')
    # out-of-band PIN flow: the user authorizes in a browser and pastes the PIN
    oauth_handler = tweepy.OAuth1UserHandler(api_key, api_secret, callback='oob')
    auth_url = oauth_handler.get_authorization_url()
    pin = input('Enter the pin found at {} '.format(auth_url)).strip()
    (oauth_token, oauth_secret) = oauth_handler.get_access_token(pin)
    config['twitter'] = {'api_key': api_key, 'api_secret': api_secret, 'oauth_token': oauth_token, 'oauth_secret': oauth_secret}
    return config
def keyframe_selection(shots: List[Shot], min_dist: float=4) -> List[int]:
    """Pick keyframe indices so consecutive keyframes are at least *min_dist*
    apart in accumulated camera travel distance.

    The first shot is always selected; the accumulator resets whenever a
    shot is chosen.
    """
    camera_centers = np.stack([shot.pose.get_origin() for shot in shots], 0)
    step_lengths = np.linalg.norm(np.diff(camera_centers, axis=0), axis=1)
    keyframes = [0]
    travelled = 0
    for idx, step in enumerate(step_lengths, start=1):
        travelled += step
        if travelled >= min_dist:
            keyframes.append(idx)
            travelled = 0
    return keyframes
class OptionPlotoptionsSeriesTooltipDatetimelabelformats(Options):
    """Tooltip date-format strings per datetime resolution (Highcharts
    ``dateTimeLabelFormats``).

    NOTE(review): each format appears as a getter/setter pair with the same
    name — presumably ``@property`` / ``@<name>.setter`` decorators that are
    not visible in this view; as written the second def would shadow the
    first, so confirm upstream.
    """
    def day(self):
        """Format for day resolution (default '%A, %e %b %Y')."""
        return self._config_get('%A, %e %b %Y')
    def day(self, text: str):
        """Set the day format string."""
        self._config(text, js_type=False)
    def hour(self):
        """Format for hour resolution (default '%A, %e %b, %H:%M')."""
        return self._config_get('%A, %e %b, %H:%M')
    def hour(self, text: str):
        """Set the hour format string."""
        self._config(text, js_type=False)
    def millisecond(self):
        """Format for millisecond resolution (default '%A, %e %b, %H:%M:%S.%L')."""
        return self._config_get('%A, %e %b, %H:%M:%S.%L')
    def millisecond(self, text: str):
        """Set the millisecond format string."""
        self._config(text, js_type=False)
    def minute(self):
        """Format for minute resolution (default '%A, %e %b, %H:%M')."""
        return self._config_get('%A, %e %b, %H:%M')
    def minute(self, text: str):
        """Set the minute format string."""
        self._config(text, js_type=False)
    def month(self):
        """Format for month resolution (default '%B %Y')."""
        return self._config_get('%B %Y')
    def month(self, text: str):
        """Set the month format string."""
        self._config(text, js_type=False)
    def second(self):
        """Format for second resolution (default '%A, %e %b, %H:%M:%S')."""
        return self._config_get('%A, %e %b, %H:%M:%S')
    def second(self, text: str):
        """Set the second format string."""
        self._config(text, js_type=False)
    def week(self):
        """Format for week resolution (default 'Week from %A, %e %b %Y')."""
        return self._config_get('Week from %A, %e %b %Y')
    def week(self, text: str):
        """Set the week format string."""
        self._config(text, js_type=False)
    def year(self):
        """Format for year resolution (default '%Y')."""
        return self._config_get('%Y')
    def year(self, text: str):
        """Set the year format string."""
        self._config(text, js_type=False)
class AW1RegistrationHandler(Handler):
    """Handler for Agent World 1 registration messages.

    Validates registrations and either kicks off an on-chain staking check
    (via the contract API) or finalizes the registration directly.
    """
    # protocol this handler is responsible for
    SUPPORTED_PROTOCOL = RegisterMessage.protocol_id
    def setup(self) -> None:
        """No setup required."""
    def handle(self, message: Message) -> None:
        """Dispatch an incoming RegisterMessage to the appropriate sub-handler."""
        register_msg = cast(RegisterMessage, message)
        register_dialogues = cast(RegisterDialogues, self.context.register_dialogues)
        register_dialogue = cast(Optional[RegisterDialogue], register_dialogues.update(register_msg))
        if (register_dialogue is None):
            self._handle_unidentified_dialogue(register_msg)
            return
        if (register_msg.performative is RegisterMessage.Performative.REGISTER):
            self._handle_register(register_msg, register_dialogue)
        else:
            self._handle_invalid(register_msg, register_dialogue)
    def teardown(self) -> None:
        """No teardown required."""
    def _handle_unidentified_dialogue(self, register_msg: RegisterMessage) -> None:
        """Log a message that does not belong to any known dialogue."""
        self.context.logger.info(f'received invalid register_msg message={register_msg}, unidentified dialogue.')
    def _handle_register(self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue) -> None:
        """Process a REGISTER request: validate, then verify staking or finalize."""
        self.context.logger.info(f'received register_msg register message={register_msg} in dialogue={register_dialogue}.')
        strategy = cast(Strategy, self.context.strategy)
        (is_valid, error_code, error_msg) = strategy.valid_registration(register_msg.info, register_msg.sender)
        if is_valid:
            # hold the slot while the staking check / transaction is in flight
            strategy.lock_registration_temporarily(register_msg.sender, register_msg.info)
            self.context.logger.info(f'valid registration={register_msg.info}. Verifying if tokens staked.')
            terms = strategy.get_terms(register_msg.sender)
            if (not strategy.developer_handle_only):
                # query the staking contract state before confirming
                contract_api_dialogues = cast(ContractApiDialogues, self.context.contract_api_dialogues)
                kwargs = strategy.get_kwargs(register_msg.info)
                (contract_api_msg, contract_api_dialogue) = contract_api_dialogues.create(counterparty=LEDGER_API_ADDRESS, performative=ContractApiMessage.Performative.GET_STATE, ledger_id=strategy.contract_ledger_id, contract_id=strategy.contract_id, contract_address=strategy.contract_address, callable=strategy.contract_callable, kwargs=ContractApiMessage.Kwargs(kwargs))
                contract_api_dialogue = cast(ContractApiDialogue, contract_api_dialogue)
                contract_api_dialogue.terms = terms
                # remember which registration this contract query belongs to
                contract_api_dialogue.associated_register_dialogue = register_dialogue
                self.context.outbox.put_message(contract_api_msg)
            else:
                # developer-handle-only mode: skip staking check, queue the tx
                strategy.finalize_registration(register_msg.sender)
                register_dialogue.terms = terms
                tx_behaviour = cast(TransactionBehaviour, self.context.behaviours.transaction)
                tx_behaviour.waiting.append(register_dialogue)
        else:
            self.context.logger.info(f'invalid registration={register_msg.info}. Rejecting.')
            reply = register_dialogue.reply(performative=RegisterMessage.Performative.ERROR, error_code=error_code, error_msg=error_msg, info={})
            self.context.outbox.put_message(reply)
    def _handle_invalid(self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue) -> None:
        """Log a message whose performative cannot be handled in this dialogue."""
        self.context.logger.warning(f'cannot handle register_msg message of performative={register_msg.performative} in dialogue={register_dialogue}.')
class InterComFrontEndBinding(InterComRedisInterface):
    """Frontend side of the Redis-based frontend/backend intercom.

    Fire-and-forget methods enqueue tasks; ``get_*`` methods enqueue a
    request and block (with timeout) for the backend's response.
    """
    def add_analysis_task(self, fw):
        """Queue a firmware object for analysis."""
        self._add_to_redis_queue('analysis_task', fw, fw.uid)
    def add_re_analyze_task(self, fw, unpack=True):
        """Queue a re-analysis; with unpack=False only the analyses are updated."""
        if unpack:
            self._add_to_redis_queue('re_analyze_task', fw, fw.uid)
        else:
            self._add_to_redis_queue('update_task', fw, fw.uid)
    def add_single_file_task(self, fw):
        """Queue analysis of a single file object."""
        self._add_to_redis_queue('single_file_task', fw, fw.uid)
    def add_compare_task(self, compare_id, force=False):
        """Queue a comparison; force=True redoes an existing comparison."""
        self._add_to_redis_queue('compare_task', (compare_id, force), compare_id)
    def delete_file(self, uid_list: set[str]):
        """Queue deletion of the files with the given UIDs."""
        self._add_to_redis_queue('file_delete_task', uid_list)
    def get_available_analysis_plugins(self):
        """Return the plug-in dict published by the backend; raise if absent."""
        plugin_dict = self.redis.get('analysis_plugins', delete=False)
        if (plugin_dict is None):
            raise RuntimeError('No available plug-ins found. FACT backend might be down!')
        return plugin_dict
    def get_binary_and_filename(self, uid: str) -> tuple[((bytes | None), (str | None))]:
        """Request the raw binary and file name for *uid* from the backend."""
        return self._request_response_listener(uid, 'raw_download_task', 'raw_download_task_resp')
    def get_file_diff(self, uid_pair: tuple[(str, str)]) -> (str | None):
        """Request a diff between the two files identified by *uid_pair*."""
        return self._request_response_listener(uid_pair, 'file_diff_task', 'file_diff_task_resp')
    def peek_in_binary(self, uid: str, offset: int, length: int) -> bytes:
        """Request *length* bytes starting at *offset* from the binary of *uid*."""
        return self._request_response_listener((uid, offset, length), 'binary_peek_task', 'binary_peek_task_resp')
    def get_repacked_binary_and_file_name(self, uid: str):
        """Request a re-packed (tar) version of the firmware for download."""
        return self._request_response_listener(uid, 'tar_repack_task', 'tar_repack_task_resp')
    def add_binary_search_request(self, yara_rule_binary: bytes, firmware_uid: (str | None)=None):
        """Queue a YARA binary search (optionally scoped to one firmware);
        returns the request id used to poll for the result."""
        request_id = generate_task_id(yara_rule_binary)
        self._add_to_redis_queue('binary_search_task', (yara_rule_binary, firmware_uid), request_id)
        return request_id
    def get_binary_search_result(self, request_id):
        """Poll (up to ~10 s) for a binary search result; (None, None) on timeout."""
        result = self._response_listener('binary_search_task_resp', request_id, timeout=(time() + 10))
        return (result if (result is not None) else (None, None))
    def get_backend_logs(self):
        """Request the backend's recent log lines."""
        return self._request_response_listener(None, 'logs_task', 'logs_task_resp')
    def _request_response_listener(self, input_data, request_connection, response_connection):
        """Enqueue a request and block for the matching response."""
        request_id = generate_task_id(input_data)
        self._add_to_redis_queue(request_connection, input_data, request_id)
        logging.debug(f'Request sent: {request_connection} -> {request_id}')
        return self._response_listener(response_connection, request_id)
    def _response_listener(self, response_connection, request_id, timeout=None):
        """Poll Redis for the response to *request_id* until *timeout*
        (default: now + configured communication timeout); None on timeout."""
        output_data = None
        if (timeout is None):
            timeout = (time() + int(config.frontend.communication_timeout))
        while (timeout > time()):
            output_data = self.redis.get(request_id)
            if output_data:
                logging.debug(f'Response received: {response_connection} -> {request_id}')
                break
            logging.debug(f'No response yet: {response_connection} -> {request_id}')
            sleep(0.1)
        return output_data
    def _add_to_redis_queue(self, key: str, data: Any, task_id: (str | None)=None):
        """Push (data, task_id) onto the Redis queue *key*."""
        self.redis.queue_put(key, (data, task_id))
class Alert():
    """Background watcher that polls a callable and alerts when its value changes.

    Spawns a daemon thread that calls *fn* every *delay* seconds; when the
    returned value differs from the previous one it prints *msg* (formatted
    with the old and new value) and/or invokes *callback*.
    """
    def __init__(self, fn: Callable, args: Tuple=None, kwargs: Dict=None, delay: float=2, msg: str=None, callback: Callable=None, repeat: bool=False) -> None:
        """Validate the arguments, take an initial reading, and start the thread.

        repeat=False fires at most once; True fires forever; a positive int
        fires that many additional times.
        """
        if (args is None):
            args = ()
        if (kwargs is None):
            kwargs = {}
        if (not callable(fn)):
            raise TypeError('You can only set an alert on a callable object')
        if (isinstance(repeat, int) and (repeat < 0)):
            raise ValueError('repeat must be True, False or a positive integer')
        self._kill = False
        # baseline reading; changes are detected relative to this value
        start_value = fn(*args, **kwargs)
        self._thread = Thread(target=self._loop, daemon=True, args=(fn, args, kwargs, start_value, delay, msg, callback, repeat))
        self._thread.start()
        self.start_time = time.time()
        # keep a module-level reference so the alert isn't garbage collected
        _instances.add(self)
    def _loop(self, fn: Callable, args: Tuple, kwargs: Dict, start_value: int, delay: float, msg: str, callback: Callable, repeat: Union[(int, bool, None)]=False) -> None:
        """Thread body: poll, compare, fire, and honor the repeat/stop rules."""
        try:
            # wake frequently so stop() is responsive even with long delays
            sleep = min(delay, 0.05)
            # repeat becomes None when the alert has fired its last time
            while (repeat is not None):
                next_ = (time.time() + delay)
                while ((next_ > time.time()) and (not self._kill)):
                    time.sleep(sleep)
                if self._kill:
                    break
                value = fn(*args, **kwargs)
                if (value == start_value):
                    continue
                if msg:
                    # msg may reference {0} (old value) and {1} (new value)
                    fmt_msg = msg.format(start_value, value)
                    print(f"{color('bright red')}ALERT{color}: {fmt_msg}")
                if callback:
                    callback(start_value, value)
                # subsequent changes are measured against the new value
                start_value = value
                if (not repeat):
                    repeat = None
                elif (isinstance(repeat, int) and (not isinstance(repeat, bool))):
                    repeat -= 1
        finally:
            _instances.discard(self)
    def is_alive(self) -> bool:
        """True while the watcher thread is still running."""
        return self._thread.is_alive()
    def wait(self, timeout: int=None) -> None:
        """Block until the watcher thread finishes (or *timeout* elapses)."""
        self._thread.join(timeout)
    def stop(self, wait: bool=True) -> None:
        """Signal the watcher to exit; optionally block until it does."""
        self._kill = True
        if wait:
            self.wait()
def _user_group_changed(task_id, group_name, dc_name):
    """Synchronize a monitoring (Zabbix) user group after a group/DC change.

    Behavior depends on which names are provided:
    - both dc_name and group_name: sync or delete that group in that DC;
    - only dc_name: re-sync the DC-as-group user group;
    - only group_name: sync/delete the group across every affected DC;
    - neither: programming error.
    """
    if (dc_name and group_name):
        # single group in a single DC
        dc = Dc.objects.get_by_name(dc_name)
        mon = get_monitoring(dc)
        if (not mon.enabled):
            logger.info('Monitoring is disabled in DC %s', dc)
            return
        try:
            group = Role.objects.get(dc=dc, name=group_name)
        except Role.DoesNotExist:
            # group no longer exists in this DC -> remove it from monitoring
            logger.info('Going to delete group %s from dc %s.', group_name, dc.name)
            res = mon.user_group_delete(name=group_name)
        else:
            logger.info('Going to update group %s in dc %s.', group.name, dc.name)
            res = mon.user_group_sync(group=group)
        _log_mon_usergroup_action(res, mon, task_id, group_name, dc_name)
    elif dc_name:
        # DC-level change -> re-sync the DC's own user group
        try:
            dc = Dc.objects.get_by_name(dc_name)
        except Dc.DoesNotExist:
            logger.warning('DC deletion hook is not implemented -> manual cleanup in Zabbix is required')
        else:
            mon = get_monitoring(dc)
            if (not mon.enabled):
                logger.info('Monitoring is disabled in DC %s', dc)
                return
            res = mon.user_group_sync(dc_as_group=True)
            _log_mon_usergroup_action(res, mon, task_id, group_name, dc.name)
    elif group_name:
        # group-level change -> fan out over all DCs
        try:
            group = Role.objects.get(name=group_name)
        except Role.DoesNotExist:
            # group deleted entirely -> remove it from every DC's monitoring
            for dc in Dc.objects.all():
                logger.info('Going to delete group %s from dc %s.', group_name, dc.name)
                mon = get_monitoring(dc)
                if (not mon.enabled):
                    logger.info('Monitoring is disabled in DC %s', dc)
                    continue
                try:
                    res = mon.user_group_delete(name=group_name)
                except MonitoringError as exc:
                    # retry this DC in its own task so one failure doesn't block the rest
                    logger.exception(exc)
                    logger.error('Creating a separate task for dc %s and group %s because it crashed.', dc.name, group_name)
                    mon_user_group_changed.call(task_id, group_name=group_name, dc_name=dc.name)
                else:
                    _log_mon_usergroup_action(res, mon, task_id, group_name, dc.name)
        else:
            # group still exists: sync in DCs that have it, delete elsewhere
            related_dcs = Dc.objects.filter(roles=group)
            unrelated_dcs = Dc.objects.exclude(id__in=related_dcs)
            for dc in related_dcs:
                logger.info('Going to update group %s in dc %s.', group.name, dc.name)
                mon = get_monitoring(dc)
                if (not mon.enabled):
                    logger.info('Monitoring is disabled in DC %s', dc)
                    continue
                try:
                    res = mon.user_group_sync(group=group)
                except MonitoringError as exc:
                    # retry this DC in its own task so one failure doesn't block the rest
                    logger.exception(exc)
                    logger.error('Creating a separate task for dc %s and group %s because it crashed.', dc.name, group_name)
                    mon_user_group_changed.call(task_id, group_name=group_name, dc_name=dc.name)
                else:
                    _log_mon_usergroup_action(res, mon, task_id, group_name, dc.name)
            for dc in unrelated_dcs:
                logger.info('Going to delete group %s from dc %s.', group.name, dc.name)
                mon = get_monitoring(dc)
                if (not mon.enabled):
                    logger.info('Monitoring is disabled in DC %s', dc)
                    continue
                try:
                    res = mon.user_group_delete(name=group_name)
                except MonitoringError as exc:
                    # retry this DC in its own task so one failure doesn't block the rest
                    logger.exception(exc)
                    logger.error('Creating a separate task for dc %s and group %s because it crashed.', dc.name, group_name)
                    mon_user_group_changed.call(task_id, group_name=group_name, dc_name=dc.name)
                else:
                    _log_mon_usergroup_action(res, mon, task_id, group_name, dc.name)
    else:
        raise AssertionError('Either group name or dc name has to be defined.')
def splitrename(linkcurrent):
    """Split a 'link > newname' directive into (link, rename).

    When no '>' is present, or the part after it is empty, the rename is ''.
    Both parts are stripped of surrounding whitespace.
    """
    renamecurrent = ''
    if '>' in linkcurrent:
        pieces = [piece.strip() for piece in linkcurrent.split('>')]
        linkcurrent = pieces[0]
        if pieces[1]:
            renamecurrent = pieces[1]
    return (linkcurrent, renamecurrent)
class ArrayTest(unittest.TestCase):
    """VARIANT round-trips array.array values of float and integer typecodes."""
    def _check_roundtrip(self, typecode, values):
        # assigning an array to VARIANT.value should read back as the original tuple
        import array
        v = VARIANT()
        v.value = array.array(typecode, values)
        self.assertEqual(v.value, values)
    def test_double(self):
        for typecode in 'df':
            self._check_roundtrip(typecode, (1.0, 2.0, 3.0, 4.5))
    def test_int(self):
        for typecode in 'bhiBHIlL':
            self._check_roundtrip(typecode, (1, 1, 1, 1))
def test_inference_error_reporting():
    """Each malformed infer() call must raise TypeError with an exact message."""
    mh = bm.SingleSiteAncestralMetropolisHastings()
    # Pairs of (bad call, expected error text); calls are deferred in lambdas
    # so the TypeError is raised inside pytest.raises.
    cases = [
        (lambda: mh.infer(None, {}, 10),
         "Parameter 'queries' is required to be a list but is of type NoneType."),
        (lambda: mh.infer([], 123, 10),
         "Parameter 'observations' is required to be a dictionary but is of type int."),
        (lambda: mh.infer([f], {}, 10),
         'A query is required to be a random variable but is of type function.'),
        (lambda: mh.infer([f()], {f: torch.tensor(True)}, 10),
         'An observation is required to be a random variable but is of type function.'),
        (lambda: mh.infer([f()], {f(): 123.0}, 10),
         'An observed value is required to be a tensor but is of type float.'),
        (lambda: mh.infer([g(f())], {}, 10),
         'The arguments to a query must not be random variables.'),
        (lambda: mh.infer([f()], {g(f()): torch.tensor(123)}, 10),
         'The arguments to an observation must not be random variables.'),
        (lambda: mh.infer([f()], {h(): torch.tensor(123)}, 10),
         'An observation must observe a random_variable, not a functional.'),
        (lambda: mh.infer([h()], {}, 10),
         'The value returned by a queried function must be a tensor.'),
        (lambda: mh.infer([f()], {}, 10),
         'A random_variable is required to return a distribution.'),
        (lambda: mh.infer([flip()], {}, 10)[flip],
         'The key is required to be a random variable but is of type function.'),
    ]
    for bad_call, expected_message in cases:
        with pytest.raises(TypeError) as ex:
            bad_call()
        assert str(ex.value) == expected_message
class FlowSizer(wx.PySizer):
    """Sizer that lays children out in flowing rows (or columns).

    Items are placed along the primary orientation and wrap to the next
    row/column when the next item would overflow the available extent.
    Items that fall completely outside the area are hidden; when that
    happens the size required to show everything is cached in
    ``_needed_size`` and the parent is asked to freeze/re-layout/thaw.
    """

    def __init__(self, orient=wx.HORIZONTAL):
        super().__init__()
        self._orient = orient          # wx.HORIZONTAL or wx.VERTICAL flow direction
        self._frozen = False           # True while the parent window is Freeze()d
        self._needed_size = None       # cached size needed to show all items, if known

    def CalcMin(self):
        """Return the minimum size, reusing the size cached by RecalcSizes()."""
        if (self._needed_size is not None):
            return self._needed_size
        horizontal = (self._orient == wx.HORIZONTAL)
        dx = dy = 0
        # Only the cross-axis minimum is accumulated: the flow axis can
        # shrink arbitrarily because rows/columns wrap.
        for item in self.GetChildren():
            (idx, idy) = item.CalcMin()
            if horizontal:
                dy = max(dy, idy)
            else:
                dx = max(dx, idx)
        return wx.Size(dx, dy)

    def RecalcSizes(self):
        """Position every child, wrapping rows/columns as needed."""
        horizontal = (self._orient == wx.HORIZONTAL)
        (x, y) = self.GetPosition()
        (dx, dy) = self.GetSize().Get()
        (x0, y0) = (x, y)
        # (ex, ey) is the far corner of the available area.
        ex = (x + dx)
        ey = (y + dy)
        # mdx/mdy: tallest/widest item in the current row/column;
        # sdx/sdy: largest spacer extent; a wrap advances by both combined.
        mdx = mdy = sdx = sdy = 0
        visible = True
        cur_max = 0
        for item in self.GetChildren():
            (idx, idy) = item.CalcMin()
            expand = (item.GetFlag() & wx.EXPAND)
            if horizontal:
                # Wrap to a new row when this item would overflow the width.
                if ((x > x0) and ((x + idx) > ex)):
                    x = x0
                    y += (mdy + sdy)
                    mdy = sdy = 0
                    # Once a row starts past the bottom edge, everything
                    # from here on is hidden.
                    if (y >= ey):
                        visible = False
                cur_max = max(idy, cur_max)
                if expand:
                    idy = cur_max
                if item.IsSpacer():
                    sdy = max(sdy, idy)
                    # A spacer at the start of a row takes no horizontal room.
                    if (x == x0):
                        idx = 0
                item.SetDimension(wx.Point(x, y), wx.Size(idx, idy))
                item.Show(visible)
                x += idx
                mdy = max(mdy, idy)
            else:
                # Vertical flow: mirror of the horizontal case.
                if ((y > y0) and ((y + idy) > ey)):
                    y = y0
                    x += (mdx + sdx)
                    mdx = sdx = 0
                    if (x >= ex):
                        visible = False
                cur_max = max(idx, cur_max)
                if expand:
                    idx = cur_max
                if item.IsSpacer():
                    sdx = max(sdx, idx)
                    if (y == y0):
                        idy = 0
                item.SetDimension(wx.Point(x, y), wx.Size(idx, idy))
                item.Show(visible)
                y += idy
                mdx = max(mdx, idx)
        if ((not visible) and (self._needed_size is None)):
            # Some items did not fit: record the size that would have been
            # needed and schedule a freeze/re-layout/thaw of the parent.
            max_dx = max_dy = 0
            if horizontal:
                max_dy = max(dy, (((y + mdy) + sdy) - y0))
            else:
                max_dx = max(dx, (((x + mdx) + sdx) - x0))
            self._needed_size = wx.Size(max_dx, max_dy)
            if (not self._frozen):
                self._do_parent('_freeze')
            do_later(self._do_parent, '_thaw')
        else:
            self._needed_size = None

    def _freeze(self, window):
        """Freeze *window* (suppress repaints) and remember we did so."""
        window.Freeze()
        self._frozen = True

    def _thaw(self, window):
        """Re-layout and repaint *window*, thawing it if we froze it."""
        window.Layout()
        window.Refresh()
        if self._frozen:
            self._frozen = False
            window.Thaw()

    def _do_parent(self, method):
        """Call self.<method>(parent) for the parent of the first windowed child."""
        i = 0
        while True:
            try:
                item = self.GetItem(i)
                if (item is None):
                    break
                i += 1
            except:
                # GetItem raises once the index runs past the last child.
                return
            if item.IsWindow():
                getattr(self, method)(item.GetWindow().GetParent())
                return
class TestLLMClient(BaseEvenniaTestCase):
    """Tests for the LLM NPC's talk handling, with the LLM client mocked out."""

    def setUp(self):
        # A minimal NPC with no home location, persisted so it can be talked to.
        self.npc = create_object(LLMNPC, key='Test NPC')
        self.npc.db_home = None
        self.npc.save()

    def tearDown(self):
        self.npc.delete()
        super().tearDown()

    # NOTE(review): the next two lines appear to be test decorators whose
    # leading "@override" / "@patch" parts were lost (the method below takes a
    # `mock_deferLater` argument, which a patch decorator would supply) —
    # confirm against the original source.
    _settings(LLM_PROMPT_PREFIX='You are a test bot.')
    ('evennia.contrib.rpg.llm.llm_npc.task.deferLater')
    def test_npc_at_talked_to(self, mock_deferLater):
        # Replace the NPC's client so no real LLM call is made.
        mock_LLMClient = Mock()
        self.npc.ndb.llm_client = mock_LLMClient
        self.npc.at_talked_to('Hello', self.npc)
        # `Something` is presumably a wildcard argument matcher — confirm.
        mock_deferLater.assert_called_with(Something, self.npc.thinking_timeout, Something)
        mock_LLMClient.get_response.assert_called_with('You are a test bot.\nTest NPC: Hello')
# NOTE(review): the original line here was a bare ".smoke" — a syntax error,
# almost certainly a pytest marker whose "@pytest.mark" prefix was lost.
# Restored below; confirm the intended marker name against the original repo.
@pytest.mark.smoke
def test_parse_workflow_with_multiple_generators():
    """A DAG with several generators expands into the expected primary-graph
    nodes while keeping a single secondary (sensor) graph intact."""
    filename = 'test/data/good-dags/multi_generators.yaml'
    wf = Workflow.load(filename)
    # Generator expansion yields the base lister, three numbered copies and
    # the two flow-control ("-fc") nodes.
    assert (frozenset((node.name for node in wf.specs.graphs.primary.graph.nodes())) == frozenset(['file_lister', 'file_lister_1', 'file_lister_2', 'file_lister_3', 'file_lister-6e0a68-fc', 'file_lister_3-b3341b-fc']))
    assert (len(wf.specs.graphs.secondary) == 1)
    assert (frozenset((node.name for node in wf.specs.graphs.secondary[0].graph.nodes())) == frozenset(['SubDagSuccessFileSensor']))
    print(wf.build_dag(PrimaryDagBuilder, SubDagBuilder, GeneratorBuilder))
def extractLandofLightNovels(item):
    """Map a release-feed item to a release message by series title prefix.

    Returns None for previews or items with no volume/chapter/fragment info,
    a release message for a recognized series, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol or frag)) or ('preview' in item['title'].lower()):
        return None
    # (title prefix, canonical series name); checked in order.
    series_by_prefix = [
        ('C3', 'Cube x Cursed x Curious'),
        ('Chrome Shelled Regios', 'Chrome Shelled Regios'),
        ('Utsuro no Hako to Zero no Maria', 'Utsuro no Hako to Zero no Maria'),
        ('Seirei Tsukai no Blade Dance', 'Seirei Tsukai no Blade Dance'),
        ('Baka to test', 'Baka to Test'),
        ('Tokyo Ravens', 'Tokyo Ravens'),
        ('The Zashiki Warashi of Intellectual Village', 'The Zashiki Warashi of Intellectual Village'),
        ('Overlord', 'Overlord'),
        ('Mahouka Koukou no Rettousei', 'Mahouka Koukou no Rettousei'),
        ('To aru New Testament', 'To Aru Majutsu no Index: New Testament'),
    ]
    for prefix, series in series_by_prefix:
        if item['title'].startswith(prefix):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix)
    return False
class OptionPlotoptionsPackedbubbleSonificationTracksMapping(Options):
    """Generated option wrapper: each method exposes one sub-option of the
    Highcharts ``plotOptions.packedbubble.sonification.tracks.mapping``
    configuration via ``_config_sub_data``.

    NOTE(review): the two ``text`` definitions below are a getter/setter pair
    with the same name; without ``@property`` / ``@text.setter`` decorators the
    second definition shadows the first.  This looks like generated code whose
    decorators were stripped in this copy — confirm against the generator.
    """
    def frequency(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingFrequency':
        return self._config_sub_data('frequency', OptionPlotoptionsPackedbubbleSonificationTracksMappingFrequency)
    def gapBetweenNotes(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionPlotoptionsPackedbubbleSonificationTracksMappingGapbetweennotes)
    def highpass(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingHighpass':
        return self._config_sub_data('highpass', OptionPlotoptionsPackedbubbleSonificationTracksMappingHighpass)
    def lowpass(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingLowpass':
        return self._config_sub_data('lowpass', OptionPlotoptionsPackedbubbleSonificationTracksMappingLowpass)
    def noteDuration(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionPlotoptionsPackedbubbleSonificationTracksMappingNoteduration)
    def pan(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingPan':
        return self._config_sub_data('pan', OptionPlotoptionsPackedbubbleSonificationTracksMappingPan)
    def pitch(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingPitch':
        return self._config_sub_data('pitch', OptionPlotoptionsPackedbubbleSonificationTracksMappingPitch)
    def playDelay(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionPlotoptionsPackedbubbleSonificationTracksMappingPlaydelay)
    def rate(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingRate':
        return self._config_sub_data('rate', OptionPlotoptionsPackedbubbleSonificationTracksMappingRate)
    # Getter/setter pair for the plain `text` option (see class NOTE above).
    def text(self):
        return self._config_get(None)
    def text(self, text: str):
        self._config(text, js_type=False)
    def time(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingTime':
        return self._config_sub_data('time', OptionPlotoptionsPackedbubbleSonificationTracksMappingTime)
    def tremolo(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingTremolo':
        return self._config_sub_data('tremolo', OptionPlotoptionsPackedbubbleSonificationTracksMappingTremolo)
    def volume(self) -> 'OptionPlotoptionsPackedbubbleSonificationTracksMappingVolume':
        return self._config_sub_data('volume', OptionPlotoptionsPackedbubbleSonificationTracksMappingVolume)
class TestDefaultOutput():
    """Serialization should fall back to a field's ``default`` whenever the
    value is missing from the dict/object being serialized."""

    def setup_method(self):
        # Fresh serializer class per test: one static default, one callable
        # default, one field with no default at all.
        class ExampleSerializer(serializers.Serializer):
            has_default = serializers.CharField(default='x')
            has_default_callable = serializers.CharField(default=(lambda : 'y'))
            no_default = serializers.CharField()
        self.Serializer = ExampleSerializer

    def test_default_used_for_dict(self):
        """Missing dict keys fall back to static and callable defaults."""
        serializer = self.Serializer({'no_default': 'abc'})
        assert (serializer.data == {'has_default': 'x', 'has_default_callable': 'y', 'no_default': 'abc'})

    def test_default_used_for_object(self):
        """Missing object attributes fall back to defaults the same way."""
        instance = MockObject(no_default='abc')
        serializer = self.Serializer(instance)
        assert (serializer.data == {'has_default': 'x', 'has_default_callable': 'y', 'no_default': 'abc'})

    def test_default_not_used_when_in_dict(self):
        """Present dict values win over defaults."""
        serializer = self.Serializer({'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'})
        assert (serializer.data == {'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'})

    def test_default_not_used_when_in_object(self):
        """Present object attributes win over defaults."""
        instance = MockObject(has_default='def', has_default_callable='ghi', no_default='abc')
        serializer = self.Serializer(instance)
        assert (serializer.data == {'has_default': 'def', 'has_default_callable': 'ghi', 'no_default': 'abc'})

    def test_default_for_dotted_source(self):
        """A dotted ``source`` falls back to the default at every broken link."""
        class Serializer(serializers.Serializer):
            traversed = serializers.CharField(default='x', source='traversed.attr')
        assert (Serializer({}).data == {'traversed': 'x'})
        assert (Serializer({'traversed': {}}).data == {'traversed': 'x'})
        assert (Serializer({'traversed': None}).data == {'traversed': 'x'})
        assert (Serializer({'traversed': {'attr': 'abc'}}).data == {'traversed': 'abc'})

    def test_default_for_multiple_dotted_source(self):
        """Defaults apply no matter how deep the dotted source path breaks."""
        class Serializer(serializers.Serializer):
            c = serializers.CharField(default='x', source='a.b.c')
        assert (Serializer({}).data == {'c': 'x'})
        assert (Serializer({'a': {}}).data == {'c': 'x'})
        assert (Serializer({'a': None}).data == {'c': 'x'})
        assert (Serializer({'a': {'b': {}}}).data == {'c': 'x'})
        assert (Serializer({'a': {'b': None}}).data == {'c': 'x'})
        assert (Serializer({'a': {'b': {'c': 'abc'}}}).data == {'c': 'abc'})
        # Same behavior across model relations: default until the full
        # target.target.name chain resolves.
        class ModelSerializer(serializers.Serializer):
            target = serializers.CharField(default='x', source='target.target.name')
        a = NestedForeignKeySource(name='Root Object', target=None)
        assert (ModelSerializer(a).data == {'target': 'x'})
        b = NullableForeignKeySource(name='Intermediary Object', target=None)
        a.target = b
        assert (ModelSerializer(a).data == {'target': 'x'})
        c = ForeignKeyTarget(name='Target Object')
        b.target = c
        assert (ModelSerializer(a).data == {'target': 'Target Object'})

    def test_default_for_nested_serializer(self):
        """Nested serializers apply their fields' defaults per nested value."""
        class NestedSerializer(serializers.Serializer):
            a = serializers.CharField(default='1')
            c = serializers.CharField(default='2', source='b.c')
        class Serializer(serializers.Serializer):
            nested = NestedSerializer()
        assert (Serializer({'nested': None}).data == {'nested': None})
        assert (Serializer({'nested': {}}).data == {'nested': {'a': '1', 'c': '2'}})
        assert (Serializer({'nested': {'a': '3', 'b': {}}}).data == {'nested': {'a': '3', 'c': '2'}})
        assert (Serializer({'nested': {'a': '3', 'b': {'c': '4'}}}).data == {'nested': {'a': '3', 'c': '4'}})

    def test_default_for_allow_null(self):
        """allow_null fields serialize as None when the source chain is broken."""
        class Serializer(serializers.Serializer):
            foo = serializers.CharField()
            bar = serializers.CharField(source='foo.bar', allow_null=True)
            optional = serializers.CharField(required=False, allow_null=True)
        assert (Serializer({'foo': None}).data == {'foo': None, 'bar': None, 'optional': None})
def _get_valid_view_op_and_second_cat(view_ops: List[Operator]) -> Tuple[(Operator, Operator)]:
    """Return the first view op whose output feeds exactly one valid
    concatenate, together with that concatenate; (None, None) otherwise."""
    for candidate in view_ops:
        output_tensor = candidate._attrs['outputs'][0]
        downstream_cats = [
            op for op in output_tensor._attrs['dst_ops']
            if op._attrs['op'] == 'concatenate'
        ]
        # Require exactly one downstream concatenate, and it must be valid.
        if len(downstream_cats) == 1 and _is_valid_cat_op(downstream_cats[0]):
            return (candidate, downstream_cats[0])
    return (None, None)
class SliderDate(Slider):
    """jQuery-UI slider specialised for dates.

    min/max/value are supplied as dates; the client-side builder converts
    them to epoch seconds and the slider steps one day (86400 s) at a time.
    """
    name = 'Slider Date'

    def __init__(self, page: primitives.PageModel, number: Union[(float, list)], min_val: float, max_val: float, width: Union[(tuple, int)], height: Union[(tuple, int)], helper: str, options: dict, html_code: str, profile: Union[(dict, bool)], verbose: bool=False):
        super(SliderDate, self).__init__(page, number, min_val, max_val, width, height, helper, options, html_code, profile, verbose=verbose)
        # Step one day at a time, expressed in seconds.
        (self.options.min, self.options.max, self.options.step) = (min_val, max_val, 86400)
    # JavaScript template: converts the date bounds and current value to
    # epoch seconds before initialising the jQuery UI slider widget.
    _js__builder__ = ('\nconst minDt = new Date(options.min).getTime() / 1000; const maxDt = new Date(options.max).getTime() / 1000; \noptions.min = minDt; options.max = maxDt; options.value = new Date(data).getTime() / 1000;\n%(jqId)s.slider(options).css(options.css)' % {'jqId': JsQuery.decorate_var('jQuery(htmlObj)', convert_var=False)})

    def dom(self) -> JsHtmlJqueryUI.JsHtmlSliderDate:
        """Return the DOM wrapper for this slider, creating it lazily."""
        if (self._dom is None):
            self._dom = JsHtmlJqueryUI.JsHtmlSliderDate(self, page=self.page)
        return self._dom
def test_get_config_file():
    """get_config_file: None passes through, a good config parses, a bad one raises."""
    here = os.path.dirname(os.path.realpath(__file__))
    assert get_config_file(None) is None
    good = get_config_file(os.path.join(here, 'configs/good.config'))
    assert good is not None
    assert good.legacy_config is not None
    with pytest.raises(configparser.Error):
        get_config_file(os.path.join(here, 'configs/bad.config'))
def execute(command: Iterable, hide_log=False, mute=False, timeout: int=30, wait=True, kill=False, drop=False, stdin: Optional[Union[(bytes, str)]]=None, shell=False, **kwargs):
    """Run *command* in a subprocess with several convenience modes.

    Modes:
      - kill: poll up to *timeout* seconds, then kill the process; returns None.
      - drop or kill: discard all process output (sent to os.devnull).
      - wait (default): stream output line by line, echoing it unless
        hide_log/mute, and return ``(returncode, output)``.
      - wait=False: return the ``Popen`` object immediately.

    A bytes/str *stdin* is spooled to a temporary file and fed to the child.
    Passing a plain string command is deprecated in favour of a list.
    """
    command_string = command
    close = None
    if isinstance(command, (list, tuple)):
        command_string = subprocess.list2cmdline(command)
        if shell:
            command = command_string
    else:
        sys.stderr.write('Deprecation warning! Switch arguments to a list for common.execute()\n\n')
    if (not hide_log):
        # Echo a shell-prompt-style line showing what is about to run.
        print(('%s %s > %s' % (USER_NAME, HOSTNAME, command_string)))
    if isinstance(stdin, (bytes, str)):
        # Spool literal stdin into a temp file; `close` cleans it up later.
        (stdin, close) = temporary_file_helper(stdin)
    stdout = subprocess.PIPE
    stderr = subprocess.STDOUT
    if (drop or kill):
        devnull = open(os.devnull, 'w')
        stdout = devnull
        stderr = devnull
    start = time.time()
    p = subprocess.Popen(command, stdin=(stdin or subprocess.PIPE), stdout=stdout, stderr=stderr, shell=shell, **kwargs)
    if kill:
        # Poll in 0.5 s steps; if the process exits within the timeout, done.
        delta = 0.5
        for _ in range((int((timeout / delta)) + 1)):
            time.sleep(delta)
            if (p.poll() is not None):
                return
        log('Killing process', str(p.pid))
        try:
            p.kill()
            time.sleep(0.5)
        except OSError:
            # Process exited between poll() and kill(); nothing to do.
            pass
    elif wait:
        output = ''
        if (not stdin):
            # Nudge programs that block reading stdin by sending a newline.
            try:
                p.stdin.write(os.linesep.encode('ascii'))
            except IOError:
                pass
        # Stream output while the process runs so long jobs show progress.
        while (p.poll() is None):
            line = p.stdout.readline().decode('ascii', 'ignore')
            if line:
                output += line
                if (not (hide_log or mute)):
                    print(line.rstrip())
        output += p.stdout.read().decode('ascii', 'ignore')
        output = output.strip()
        end = time.time()
        run_time = (end - start)
        # Enforce a minimum wall-clock duration before returning.
        if (run_time < MIN_EXECUTION_TIME):
            time.sleep((MIN_EXECUTION_TIME - run_time))
        if (not (hide_log or mute)):
            if (p.returncode != 0):
                print(('exit code = %d' % p.returncode))
            print('')
        if close:
            close()
        return (p.returncode, output)
    else:
        if close:
            close()
        return p
class LTComponent(LTItem):
    """A layout item with a rectangular bounding box (x0, y0, x1, y1)."""

    def __init__(self, bbox):
        LTItem.__init__(self)
        self.set_bbox(bbox)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, bbox2str(self.bbox))

    # Components have no natural ordering: any rich comparison is a
    # programming error and raises immediately.
    def __lt__(self, _):
        raise ValueError

    def __le__(self, _):
        raise ValueError

    def __gt__(self, _):
        raise ValueError

    def __ge__(self, _):
        raise ValueError

    def set_bbox(self, bbox):
        """Store *bbox* and derive the cached coordinate/size attributes."""
        (x0, y0, x1, y1) = bbox
        self.x0 = x0
        self.y0 = y0
        self.x1 = x1
        self.y1 = y1
        self.width = x1 - x0
        self.height = y1 - y0
        self.bbox = bbox

    def is_empty(self):
        """True when the box has no positive area."""
        return self.width <= 0 or self.height <= 0

    def is_hoverlap(self, obj):
        """True if the horizontal extents of self and *obj* intersect."""
        assert isinstance(obj, LTComponent)
        return obj.x0 <= self.x1 and self.x0 <= obj.x1

    def hdistance(self, obj):
        """Horizontal gap between self and *obj*; 0 when they overlap."""
        assert isinstance(obj, LTComponent)
        if self.is_hoverlap(obj):
            return 0
        return min(abs(self.x0 - obj.x1), abs(self.x1 - obj.x0))

    def hoverlap(self, obj):
        """Size of the horizontal overlap; 0 when the extents are disjoint."""
        assert isinstance(obj, LTComponent)
        if not self.is_hoverlap(obj):
            return 0
        return min(abs(self.x0 - obj.x1), abs(self.x1 - obj.x0))

    def is_voverlap(self, obj):
        """True if the vertical extents of self and *obj* intersect."""
        assert isinstance(obj, LTComponent)
        return obj.y0 <= self.y1 and self.y0 <= obj.y1

    def vdistance(self, obj):
        """Vertical gap between self and *obj*; 0 when they overlap."""
        assert isinstance(obj, LTComponent)
        if self.is_voverlap(obj):
            return 0
        return min(abs(self.y0 - obj.y1), abs(self.y1 - obj.y0))

    def voverlap(self, obj):
        """Size of the vertical overlap; 0 when the extents are disjoint."""
        assert isinstance(obj, LTComponent)
        if not self.is_voverlap(obj):
            return 0
        return min(abs(self.y0 - obj.y1), abs(self.y1 - obj.y0))
def extractAsadatranslationsWpcomstagingCom(item):
    """Map a tagged release-feed item to a (series, translation-type) message.

    Returns None for previews or items with neither volume nor chapter,
    a release message for a recognized tag, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if ('preview' in item['title'].lower()) or not (chp or vol):
        return None
    # (feed tag, series name, translation type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class PartialCaseTests(unittest.TestCase):
    """A partial CASE expression can be applied onto a Pypher chain."""

    def test_can_add_case_to_pypher(self):
        blue_result, brown_result, default_result = 1, 2, 3
        case = Case(__.n.__eyes__)
        case.WHEN('blue', blue_result)
        case.WHEN('brown', brown_result)
        case.ELSE(default_result)
        pypher = Pypher()
        pypher.apply_partial(case)
        cypher = str(pypher)
        params = pypher.bound_params
        expected = 'CASE n.`eyes` WHEN {blue} THEN {one} WHEN {brown} THEN {two} ELSE {three} END'.format(
            blue='blue', one=blue_result, brown='brown', two=brown_result, three=default_result)
        self.assertEqual(cypher, expected)
class AnyValue(object):
    """Equality matcher that compares equal to any value of a given type name.

    Useful in test expectations where the exact value is unpredictable
    (timestamps, generated ids, ...).
    """

    def __init__(self, data_type, permit_none=False):
        self.data_type = data_type      # type name, e.g. 'str', 'int', 'datetime'
        self.permit_none = permit_none  # whether None also counts as a match

    def __eq__(self, other):
        if other is None:
            return self.permit_none
        if isinstance(other, AnyValue):
            return self.data_type == other.data_type
        kind = self.data_type
        if kind == 'str':
            return isinstance(other, six.text_type)
        if kind == 'bytes':
            return isinstance(other, six.binary_type)
        if kind == 'int':
            return isinstance(other, six.integer_types)
        if kind == 'datetime':
            # Accept real datetimes or ISO-looking strings.
            if type(other) == datetime.datetime:
                return True
            if isinstance(other, six.string_types) and DATETIME_RE.match(other):
                return True
        if kind == 'date':
            if type(other) == datetime.date:
                return True
            if isinstance(other, six.string_types) and DATE_RE.match(other):
                return True
        # Fall back to matching the runtime type's name.
        return kind == type(other).__name__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        suffix = ' or None' if self.permit_none else ''
        return 'any {} type value{}'.format(self.data_type, suffix)

    # The matcher carries no mutable state worth duplicating, so copying
    # hands back the same instance.
    def __copy__(self):
        return self

    def __deepcopy__(self, *_):
        return self
class Git():
    """Thin wrapper around the _git helper for one checkout of one branch."""

    def __init__(self, repo, cwd, branch='master'):
        self.repo = repo       # remote repository URL
        self.cwd = cwd         # local working directory
        self.branch = branch   # branch to track

    def clone_if_necessary(self):
        """Clone the repo unless a working checkout already exists."""
        try:
            self.status()
        except GitError:
            logger.info('cloning %s', self.repo)
            return _git('.', 'clone', '-b', self.branch, self.repo, self.cwd)

    def status(self):
        return _git(self.cwd, 'status', '-sb')

    def fetch(self):
        return _git(self.cwd, 'fetch', 'origin', self.branch)

    def pull(self):
        return _git(self.cwd, 'pull', '--rebase')

    def last_commit_date(self):
        return _git(self.cwd, 'log', '-1', '--format=%cd')

    def changed_files(self):
        """Number of files differing from origin/<branch>; 0 when none."""
        self.fetch()
        diff_output = _git(self.cwd, 'diff', '--stat', f'origin/{self.branch}')
        summary_lines = diff_output.splitlines()
        if not summary_lines:
            return 0
        # The final --stat summary line starts with the changed-file count.
        return int(summary_lines[-1].split()[0])
class StorageTestFile(StorageTestCase, TestCase):
    """Storage test suite run against the file-backed storage module."""
    # YAML configuration exercising the `file` storage module; {testpath} is
    # substituted with the test's scratch directory at setup time.
    CONFIG = "\n    configurationVersion: '1'\n    logFile: /dev/stderr\n    databaseEngine: sqlite://\n    defaultStorage: storage-1\n    \n    storages:\n      - name: storage-1\n        module: file\n        configuration:\n          path: {testpath}/data\n          consistencyCheckWrites: True\n          hmac:\n            password: geheim12345\n            kdfIterations: 1000\n            kdfSalt: BBiZ+lIVSefMCdE4eOPX211n/04KY1M4c2SM/9XHUcA=\n    \n    ios:\n      - name: file\n        module: file        \n    "
class AdPreview(AdPreviewMixin, AbstractObject):
    """Generated Graph API object for ad previews.

    Holds the preview body/transformation spec plus the enums accepted by the
    previews endpoint.  The enum values are copied verbatim from the API.
    """

    def __init__(self, api=None):
        super(AdPreview, self).__init__()
        self._isAdPreview = True
        self._api = api

    # Field names the API returns for this object.
    class Field(AbstractObject.Field):
        body = 'body'
        transformation_spec = 'transformation_spec'

    # Placements/formats a preview can be rendered in.
    class AdFormat():
        audience_network_instream_video = 'AUDIENCE_NETWORK_INSTREAM_VIDEO'
        audience_network_instream_video_mobile = 'AUDIENCE_NETWORK_INSTREAM_VIDEO_MOBILE'
        audience_network_outstream_video = 'AUDIENCE_NETWORK_OUTSTREAM_VIDEO'
        audience_network_rewarded_video = 'AUDIENCE_NETWORK_REWARDED_VIDEO'
        biz_disco_feed_mobile = 'BIZ_DISCO_FEED_MOBILE'
        desktop_feed_standard = 'DESKTOP_FEED_STANDARD'
        facebook_profile_feed_desktop = 'FACEBOOK_PROFILE_FEED_DESKTOP'
        facebook_profile_feed_mobile = 'FACEBOOK_PROFILE_FEED_MOBILE'
        facebook_reels_banner = 'FACEBOOK_REELS_BANNER'
        facebook_reels_banner_desktop = 'FACEBOOK_REELS_BANNER_DESKTOP'
        facebook_reels_mobile = 'FACEBOOK_REELS_MOBILE'
        facebook_reels_postloop = 'FACEBOOK_REELS_POSTLOOP'
        facebook_reels_sticker = 'FACEBOOK_REELS_STICKER'
        facebook_story_mobile = 'FACEBOOK_STORY_MOBILE'
        facebook_story_sticker_mobile = 'FACEBOOK_STORY_STICKER_MOBILE'
        instagram_explore_contextual = 'INSTAGRAM_EXPLORE_CONTEXTUAL'
        instagram_explore_grid_home = 'INSTAGRAM_EXPLORE_GRID_HOME'
        instagram_explore_immersive = 'INSTAGRAM_EXPLORE_IMMERSIVE'
        instagram_feed_web = 'INSTAGRAM_FEED_WEB'
        instagram_feed_web_m_site = 'INSTAGRAM_FEED_WEB_M_SITE'
        instagram_profile_feed = 'INSTAGRAM_PROFILE_FEED'
        instagram_profile_reels = 'INSTAGRAM_PROFILE_REELS'
        instagram_reels = 'INSTAGRAM_REELS'
        instagram_reels_overlay = 'INSTAGRAM_REELS_OVERLAY'
        instagram_search_chain = 'INSTAGRAM_SEARCH_CHAIN'
        instagram_search_grid = 'INSTAGRAM_SEARCH_GRID'
        instagram_standard = 'INSTAGRAM_STANDARD'
        instagram_story = 'INSTAGRAM_STORY'
        instagram_story_effect_tray = 'INSTAGRAM_STORY_EFFECT_TRAY'
        instagram_story_web = 'INSTAGRAM_STORY_WEB'
        instagram_story_web_m_site = 'INSTAGRAM_STORY_WEB_M_SITE'
        instant_article_recirculation_ad = 'INSTANT_ARTICLE_RECIRCULATION_AD'
        instant_article_standard = 'INSTANT_ARTICLE_STANDARD'
        instream_banner_desktop = 'INSTREAM_BANNER_DESKTOP'
        instream_banner_mobile = 'INSTREAM_BANNER_MOBILE'
        instream_video_desktop = 'INSTREAM_VIDEO_DESKTOP'
        instream_video_image = 'INSTREAM_VIDEO_IMAGE'
        instream_video_mobile = 'INSTREAM_VIDEO_MOBILE'
        job_browser_desktop = 'JOB_BROWSER_DESKTOP'
        job_browser_mobile = 'JOB_BROWSER_MOBILE'
        marketplace_mobile = 'MARKETPLACE_MOBILE'
        messenger_mobile_inbox_media = 'MESSENGER_MOBILE_INBOX_MEDIA'
        messenger_mobile_story_media = 'MESSENGER_MOBILE_STORY_MEDIA'
        mobile_banner = 'MOBILE_BANNER'
        mobile_feed_basic = 'MOBILE_FEED_BASIC'
        mobile_feed_standard = 'MOBILE_FEED_STANDARD'
        mobile_fullwidth = 'MOBILE_FULLWIDTH'
        mobile_interstitial = 'MOBILE_INTERSTITIAL'
        mobile_medium_rectangle = 'MOBILE_MEDIUM_RECTANGLE'
        mobile_native = 'MOBILE_NATIVE'
        right_column_standard = 'RIGHT_COLUMN_STANDARD'
        suggested_video_desktop = 'SUGGESTED_VIDEO_DESKTOP'
        suggested_video_mobile = 'SUGGESTED_VIDEO_MOBILE'
        watch_feed_home = 'WATCH_FEED_HOME'
        watch_feed_mobile = 'WATCH_FEED_MOBILE'
    class CreativeFeature():
        product_metadata_automation = 'product_metadata_automation'
        profile_card = 'profile_card'
        standard_enhancements_catalog = 'standard_enhancements_catalog'
    class RenderType():
        fallback = 'FALLBACK'
    # NOTE(review): this takes `cls` but carries no @classmethod decorator in
    # this copy — the upstream generated SDK marks it @classmethod; confirm.
    def get_endpoint(cls):
        return 'previews'
    _field_types = {'body': 'string', 'transformation_spec': 'Object'}
    # NOTE(review): same missing-@classmethod observation as get_endpoint.
    def _get_field_enum_info(cls):
        field_enum_info = {}
        field_enum_info['AdFormat'] = AdPreview.AdFormat.__dict__.values()
        field_enum_info['CreativeFeature'] = AdPreview.CreativeFeature.__dict__.values()
        field_enum_info['RenderType'] = AdPreview.RenderType.__dict__.values()
        return field_enum_info
def whoami(ctx):
    """Report the user tied to the current Okta API token.

    Prints the user's assigned roles and group memberships, logging and
    indexing each step and failure to Elasticsearch along the way.
    """
    msg = 'Attempting to get user information associated with current API token'
    LOGGER.info(msg)
    index_event(ctx.obj.es, module=__name__, event_type='INFO', event=msg)
    click.echo(f'[*] {msg}')
    user = ctx.obj.okta.get_current_user(ctx)
    if user:
        user.list_roles(ctx, mute=False)
    else:
        msg = "Unable to list current user's assigned roles. No user object found"
        LOGGER.error(msg)
        index_event(ctx.obj.es, module=__name__, event_type='ERROR', event=msg)
        click.secho(f'[!] {msg}', fg='red')
    if user:
        groups = user.get_groups(ctx)
        if groups:
            click.echo(f"[*] Group memberships for user ID {user.obj['id']}:")
            for okta_group in groups:
                group = OktaGroup(okta_group)
                group.print_info()
    else:
        # Only reached when no user was found; a user with zero groups
        # simply prints nothing above.
        msg = "Unable to list current user's group memberships. No user object found"
        LOGGER.error(msg)
        index_event(ctx.obj.es, module=__name__, event_type='ERROR', event=msg)
        click.secho(f'[!] {msg}', fg='red')
class ReceiptsNormalizer(BaseNormalizer[(ReceiptsV65, ReceiptsBundles)]):
    """Pair each receipt bundle in the payload with its trie root and nodes."""

    # Trie-root computation is CPU-heavy, so flag normalization as slow.
    is_normalization_slow = True

    def normalize_result(self, cmd: ReceiptsV65) -> ReceiptsBundles:
        return tuple(
            (receipts, make_trie_root_and_nodes(receipts))
            for receipts in cmd.payload
        )
def generate_mosquitto_entry(entry: bytes) -> dict:
    """Turn a mosquitto passwd line into a crack-attempt record keyed by user."""
    decoded = _to_str(entry)
    # Lines look like user:$<id>$<b64 salt>$<b64 hash>; split on ':' and '$'.
    (user, _, _, salt_b64, hash_b64, *_) = re.split('[:$]', decoded)
    # john's dynamic_82 input wants the hash and salt hex-encoded.
    john_input = '{}:$dynamic_82${}$HEX${}'.format(
        user, b64decode(hash_b64).hex(), b64decode(salt_b64).hex())
    record = {'type': 'mosquitto', 'entry': decoded, 'password-hash': hash_b64}
    record['cracked'] = crack_hash(john_input.encode(), record, '--format=dynamic_82')
    return {f'{user}:mosquitto': record}
def initializePlugin(mobject):
    """Maya plug-in entry point: register the custom node type.

    Called by Maya when the plug-in is loaded.
    """
    mplugin = OpenMayaMPx.MFnPlugin(mobject, 'Ozgur Yilmaz', '1.0.0')
    try:
        mplugin.registerNode(kPluginNodeTypeName, cpomPluginId, nodeCreator, nodeInitializer)
    except:
        # Report the failure on stderr, then re-raise so Maya sees the error.
        sys.stderr.write(('Failed to register node: %s' % kPluginNodeTypeName))
        raise
def questions_creator(y):
    """Print question *y*'s answer options, then the same options shuffled.

    Side effects (kept for compatibility with the rest of the script): the
    globals ``template_options``, ``options``, ``position_over``, ``j`` and
    ``answer`` are updated, and ``display_q`` is invoked for the current
    question index held in the global ``x``.
    """
    global options, position_over, j, template_options, answer
    # The correct answer is always slot 0 of template_options.
    template_options = [
        data['results'][y]['correct_answer'],
        data['results'][y]['incorrect_answers'][0],
        data['results'][y]['incorrect_answers'][1],
        data['results'][y]['incorrect_answers'][2],
    ]
    for option in template_options:
        print('\n' + option)
    print('\n\n\n')
    # Draw a uniform random permutation of the four option slots.  The
    # original code retried random positions via unbounded recursion, which
    # could recurse arbitrarily deep; random.sample gives the same uniform
    # shuffle without recursion.
    position_over = random.sample(range(4), 4)
    options = [template_options[position] for position in position_over]
    # Record where the correct answer (slot 0) landed after shuffling.
    answer = position_over.index(0)
    j = 4  # preserved: the original counter ended at 4 after placing all options
    for option in options:
        print('\n' + option)
    print(('\n\nANSER IS : ' + options[answer]) + '\n\n')
    display_q(globals()['x'])
class OptionPlotoptionsLinePointEvents(Options):
    """Generated option wrapper for Highcharts ``plotOptions.line.point.events``.

    Each event name is exposed as a getter/setter pair delegating to
    ``_config_get`` / ``_config``.

    NOTE(review): every pair below is a getter followed by a setter with the
    same name; without ``@property`` / ``@<name>.setter`` decorators the second
    definition shadows the first.  This looks like generated code whose
    decorators were stripped in this copy — confirm against the generator.
    """
    def click(self):
        return self._config_get(None)
    def click(self, value: Any):
        self._config(value, js_type=False)
    def drag(self):
        return self._config_get(None)
    def drag(self, value: Any):
        self._config(value, js_type=False)
    def dragStart(self):
        return self._config_get(None)
    def dragStart(self, value: Any):
        self._config(value, js_type=False)
    def drop(self):
        return self._config_get(None)
    def drop(self, value: Any):
        self._config(value, js_type=False)
    def mouseOut(self):
        return self._config_get(None)
    def mouseOut(self, value: Any):
        self._config(value, js_type=False)
    def mouseOver(self):
        return self._config_get(None)
    def mouseOver(self, value: Any):
        self._config(value, js_type=False)
    def remove(self):
        return self._config_get(None)
    def remove(self, value: Any):
        self._config(value, js_type=False)
    def select(self):
        return self._config_get(None)
    def select(self, value: Any):
        self._config(value, js_type=False)
    def unselect(self):
        return self._config_get(None)
    def unselect(self, value: Any):
        self._config(value, js_type=False)
    def update(self):
        return self._config_get(None)
    def update(self, value: Any):
        self._config(value, js_type=False)
class DoltTableNameTransformer(TypeTransformer[DoltTable]):
    """Flyte type transformer that (de)serialises DoltTable values.

    Serialising commits the table's DataFrame to the Dolt database and stores
    the table's config dict in the literal; deserialising reads the table back
    out of Dolt into a DataFrame.
    """

    def __init__(self):
        super().__init__(name='DoltTable', t=DoltTable)

    def get_literal_type(self, t: Type[DoltTable]) -> LiteralType:
        # The literal is an opaque struct carrying the DoltTable's dict form.
        return LiteralType(simple=_type_models.SimpleType.STRUCT, metadata={})

    def to_literal(self, ctx: FlyteContext, python_val: DoltTable, python_type: typing.Type[DoltTable], expected: LiteralType) -> Literal:
        """Persist the table's data to Dolt, then wrap its dict in a Literal."""
        if (not isinstance(python_val, DoltTable)):
            raise AssertionError(f'Value cannot be converted to a table: {python_val}')
        conf = python_val.config
        if ((python_val.data is not None) and (python_val.config.tablename is not None)):
            db = dolt.Dolt(conf.db_path)
            with tempfile.NamedTemporaryFile() as f:
                # Round-trip the DataFrame through CSV so dolt can import it.
                python_val.data.to_csv(f.name, index=False)
                message = f'Generated by Flyte execution id: {ctx.user_space_params.execution_id}'
                dolt_int.save(db=db, tablename=conf.tablename, filename=f.name, branch_conf=conf.branch_conf, meta_conf=conf.meta_conf, remote_conf=conf.remote_conf, save_args=conf.io_args, commit_message=message)
        s = Struct()
        s.update(python_val.to_dict())
        return Literal(Scalar(generic=s))

    def to_python_value(self, ctx: FlyteContext, lv: Literal, expected_python_type: typing.Type[DoltTable]) -> DoltTable:
        """Load the table described by the literal's config into a DataFrame."""
        if (not (lv and lv.scalar and lv.scalar.generic and ('config' in lv.scalar.generic))):
            raise ValueError('DoltTable requires DoltConfig to load python value')
        conf_dict = MessageToDict(lv.scalar.generic['config'])
        conf = DoltConfig(**conf_dict)
        db = dolt.Dolt(conf.db_path)
        with tempfile.NamedTemporaryFile() as f:
            dolt_int.load(db=db, tablename=conf.tablename, sql=conf.sql, filename=f.name, branch_conf=conf.branch_conf, meta_conf=conf.meta_conf, remote_conf=conf.remote_conf, load_args=conf.io_args)
            df = pandas.read_csv(f)
            # NOTE(review): this attaches the DataFrame to the incoming
            # Literal and returns it, although the annotation promises a
            # DoltTable — confirm whether a DoltTable should be built here.
            lv.data = df
            return lv
def test_everything_defined():
    """InsertMissingDefinitions groups the instructions into the expected nodes."""
    (instrs, task) = construct_graph_non_aliased(1)
    InsertMissingDefinitions().run(task)
    expected = [instrs[0:2], instrs[2:5], [instrs[5]], [instrs[6]], instrs[7:9]]
    assert [node.instructions for node in task.graph.nodes] == expected
class TestLoadStandardizedLanguage():
    """load_standardized_language should union each provider's language list."""

    def test_valid_input_with_multi_provider(self, mocker: MockerFixture):
        # Each provider contributes its own constraint list; the result is the
        # de-duplicated union of all of them.
        mocker.patch('edenai_apis.utils.languages.load_language_constraints', side_effect=[['en', 'fr'], ['fr', 'es']])
        feature = 'test_feature'
        subfeature = 'test_subfeature'
        expected = ['en', 'fr', 'es']
        result = load_standardized_language(feature, subfeature, ['provider1', 'provider2'])
        assert sorted(result) == sorted(expected), f'Expected `{expected}` but got `{result}`'
class BodhiClient():
    """Client for a Bodhi server's REST API.

    Wraps OIDC authentication, CSRF-token handling and convenience methods
    for the updates, overrides, composes, releases and koji-related
    endpoints.
    """

    def __init__(self, base_url: str=BASE_URL, client_id: str=CLIENT_ID, id_provider: str=IDP, staging: bool=False, oidc_storage_path: typing.Optional[str]=None):
        """Initialize the client.

        When ``staging`` is True, the base URL, identity provider and client
        id are all replaced by their staging counterparts.
        """
        if staging:
            base_url = STG_BASE_URL
            id_provider = STG_IDP
            client_id = STG_CLIENT_ID
        # Normalize to a trailing slash so f'{self.base_url}{url}' joins cleanly.
        if base_url[-1] != '/':
            base_url = base_url + '/'
        self.base_url = base_url
        self.csrf_token = ''
        self.oidc_storage_path = oidc_storage_path or os.path.join(os.path.expanduser('~'), '.config', 'bodhi', 'client.json')
        self._build_oidc_client(client_id, id_provider)

    def _build_oidc_client(self, client_id, id_provider):
        """Create the OIDC client, persisting tokens as JSON on disk."""
        self.oidc = OIDCClient(client_id, SCOPE, id_provider.rstrip('/'), storage=JSONStorage(self.oidc_storage_path))

    def send_request(self, url, verb='GET', **kwargs):
        """Send a request to the server and return the JSON body as a Munch.

        Pass ``auth=True`` to route through the authenticated OIDC session.

        Raises:
            BodhiClientException: on transport errors or non-2xx replies.
        """
        auth = kwargs.pop('auth', False)
        if auth:
            self.ensure_auth()
            request_func = self.oidc.request
        else:
            request_func = requests.request
        try:
            response = request_func(verb, f'{self.base_url}{url}', **kwargs)
        except (OIDCClientError, ConnectionError) as e:
            raise BodhiClientException(str(e))
        if not response.ok:
            raise BodhiClientException(response.text)
        return munchify(response.json())

    def ensure_auth(self):
        """Ensure a valid authenticated session, re-logging in on a 401."""
        self.oidc.ensure_auth(use_kerberos=True)
        if not self.oidc.has_cookie('auth_tkt', domain=urlparse(self.base_url).hostname):
            while True:
                resp = self.oidc.request('GET', f'{self.base_url}oidc/login-token')
                if resp.ok:
                    break
                if resp.status_code == 401:
                    # Stale credentials: drop them and retry a full login.
                    self.clear_auth()
                    self.oidc.login(use_kerberos=True)
                else:
                    resp.raise_for_status()

    def clear_auth(self):
        """Forget cached OIDC credentials and the CSRF token."""
        self.oidc.clear_auth()
        self.csrf_token = ''

    @property
    def username(self):
        """Name of the authenticated user (or None when not logged in).

        This must be a property, not a plain method: query() and
        candidates() read it as an attribute (``kwargs['user'] =
        self.username``, ``build['owner_name'] == self.username``), which
        would otherwise operate on a bound-method object.
        """
        return self.oidc.username

    def save(self, **kwargs) -> 'munch.Munch':
        """Create or edit an update; ``type_`` is aliased to ``type``."""
        kwargs['csrf_token'] = self.csrf()
        if 'type_' in kwargs:
            kwargs['type'] = kwargs['type_']
        return self.send_request('updates/', verb='POST', auth=True, data=kwargs)

    def request(self, update: str, request: str) -> 'munch.Munch':
        """Request a state change for an update (e.g. 'testing', 'stable').

        Raises:
            UpdateNotFound: when the server reports a 404 for the update.
        """
        try:
            return self.send_request(f'updates/{update}/request', verb='POST', auth=True, data={'update': update, 'request': request, 'csrf_token': self.csrf()})
        except RequestException as exc:
            if exc.response is not None and exc.response.status_code == 404:
                raise UpdateNotFound(update)
            else:
                raise

    def waive(self, update: str, comment: str, tests: typing.Optional[typing.Iterable[str]]=None) -> 'munch.Munch':
        """Waive test results for an update (all tests when ``tests`` is None).

        Raises:
            UpdateNotFound: when the server reports a 404 for the update.
        """
        data = {'update': update, 'tests': tests, 'comment': comment, 'csrf_token': self.csrf()}
        try:
            return self.send_request(f'updates/{update}/waive-test-results', verb='POST', auth=True, data=data)
        except RequestException as exc:
            if exc.response is not None and exc.response.status_code == 404:
                raise UpdateNotFound(update)
            else:
                raise

    def trigger_tests(self, update: str) -> 'munch.Munch':
        """Trigger a re-run of tests for an update.

        Raises:
            UpdateNotFound: when the server reports a 404 for the update.
        """
        try:
            return self.send_request(f'updates/{update}/trigger-tests', verb='POST', auth=True, data={'update': update, 'csrf_token': self.csrf()})
        except RequestException as exc:
            if exc.response is not None and exc.response.status_code == 404:
                raise UpdateNotFound(update)
            else:
                raise

    def query(self, **kwargs) -> 'munch.Munch':
        """Query updates, translating client-side aliases to API parameters."""
        if 'limit' in kwargs:
            kwargs['rows_per_page'] = kwargs['limit']
            del kwargs['limit']
        if kwargs.get('mine'):
            if self.username is None:
                raise BodhiClientException('Could not get user info.')
            kwargs['user'] = self.username
        if 'package' in kwargs:
            # 'package' may actually be an NVR, an update id or a package name.
            if re.search(UPDATE_TITLE_RE, kwargs['package']):
                kwargs['builds'] = kwargs['package']
            elif re.search(UPDATE_ID_RE, kwargs['package']):
                kwargs['updateid'] = kwargs['package']
            else:
                kwargs['packages'] = kwargs['package']
            del kwargs['package']
        if 'release' in kwargs:
            if isinstance(kwargs['release'], list):
                kwargs['releases'] = kwargs['release']
            else:
                kwargs['releases'] = [kwargs['release']]
            del kwargs['release']
        if 'type_' in kwargs:
            kwargs['type'] = kwargs['type_']
            del kwargs['type_']
        if 'bugs' in kwargs and kwargs['bugs'] == '':
            kwargs['bugs'] = None
        return self.send_request('updates/', verb='GET', params=kwargs)

    def get_test_status(self, update: str) -> 'munch.Munch':
        """Fetch CI/gating test results for an update."""
        return self.send_request(f'updates/{update}/get-test-results', verb='GET')

    def comment(self, update: str, comment: str, karma: int=0) -> 'munch.Munch':
        """Post a comment (optionally with karma) on an update."""
        return self.send_request('comments/', verb='POST', auth=True, data={'update': update, 'text': comment, 'karma': karma, 'csrf_token': self.csrf()})

    def save_override(self, nvr: str, notes: str, duration: typing.Optional[int]=None, expiration_date: typing.Optional[datetime.datetime]=None, edit: bool=False, expired: bool=False) -> 'munch.Munch':
        """Create, edit or expire a buildroot override.

        Exactly one of ``duration`` (days from now) or ``expiration_date``
        must be provided.

        Raises:
            TypeError: when neither or both expiry arguments are given.
        """
        if duration is None and expiration_date is None:
            raise TypeError('The duration or the expiration_date must be provided.')
        if duration is not None and expiration_date is not None:
            raise TypeError('The duration and the expiration_date cannot be provided at the same time.')
        if duration:
            expiration_date = datetime.datetime.utcnow() + datetime.timedelta(days=duration)
        data = {'nvr': nvr, 'expiration_date': expiration_date, 'notes': notes, 'csrf_token': self.csrf()}
        if edit:
            data['edited'] = nvr
        if expired:
            data['expired'] = expired
        return self.send_request('overrides/', verb='POST', auth=True, data=data)

    def get_compose(self, release: str, request: str) -> 'munch.Munch':
        """Fetch a single compose.

        Raises:
            ComposeNotFound: when the server reports a 404.
        """
        try:
            return self.send_request(f'composes/{release}/{request}', verb='GET')
        except RequestException as exc:
            if exc.response is not None and exc.response.status_code == 404:
                raise ComposeNotFound(release, request)
            else:
                raise

    def list_composes(self) -> 'munch.Munch':
        """List all currently running composes."""
        return self.send_request('composes/', verb='GET')

    def list_overrides(self, user: typing.Optional[str]=None, packages: typing.Optional[str]=None, expired: typing.Optional[bool]=None, releases: typing.Optional[str]=None, builds: typing.Optional[str]=None, rows_per_page: typing.Optional[int]=None, page: typing.Optional[int]=None) -> 'munch.Munch':
        """List buildroot overrides, filtered by any provided criteria."""
        params: typing.MutableMapping[(str, typing.Union[(int, str, None)])] = {}
        if user:
            params['user'] = user
        if packages:
            params['packages'] = packages
        # 'expired' is a real tri-state: False is a meaningful filter value.
        if expired is not None:
            params['expired'] = expired
        if releases:
            params['releases'] = releases
        if builds:
            params['builds'] = builds
        if rows_per_page:
            params['rows_per_page'] = rows_per_page
        if page:
            params['page'] = page
        return self.send_request('overrides/', verb='GET', params=params)

    def csrf(self) -> str:
        """Return a CSRF token, fetching and caching it on first use."""
        if not self.csrf_token:
            self.csrf_token = self.send_request('csrf', verb='GET', auth=True)['csrf_token']
        return self.csrf_token

    def parse_file(self, input_file: str) -> typing.List[typing.Dict[(str, typing.Any)]]:
        """Parse an INI-style update template file into a list of update dicts.

        Raises:
            ValueError: when the file is missing or not parseable as INI.
        """
        if not os.path.exists(input_file):
            raise ValueError(f'No such file or directory: {input_file}')
        defaults = dict(severity='unspecified', suggest='unspecified')
        config = configparser.ConfigParser(defaults=defaults)
        read = config.read(input_file)
        if len(read) != 1 or read[0] != input_file:
            raise ValueError(f'Invalid input file: {input_file}')
        updates = []
        for section in config.sections():
            # 'type' is duplicated as 'type_' for keyword-argument friendliness.
            update = {'builds': section, 'bugs': config.get(section, 'bugs', raw=True), 'close_bugs': config.getboolean(section, 'close_bugs'), 'display_name': config.get(section, 'display_name', raw=True, fallback=None), 'type': config.get(section, 'type', raw=True), 'type_': config.get(section, 'type', raw=True), 'request': config.get(section, 'request', raw=True), 'severity': config.get(section, 'severity', raw=True), 'notes': config.get(section, 'notes', raw=True), 'autokarma': config.get(section, 'autokarma', raw=True), 'stable_karma': config.get(section, 'stable_karma', raw=True), 'unstable_karma': config.get(section, 'unstable_karma', raw=True), 'suggest': config.get(section, 'suggest', raw=True)}
            updates.append(update)
        return updates

    def latest_builds(self, package: str) -> 'munch.Munch':
        """Fetch the latest koji builds for a package, per release tag."""
        return self.send_request('latest_builds', params={'package': package})

    def testable(self) -> typing.Iterator[dict]:
        """Yield updates in updates-testing whose builds are installed locally.

        Requires dnf and a readable /etc/fedora-release.

        Raises:
            RuntimeError: when dnf is not installed.
        """
        if dnf is None:
            raise RuntimeError('dnf is required by this method and is not installed.')
        base = dnf.Base()
        sack = base.fill_sack(load_system_repo=True)
        query = sack.query()
        installed = query.installed()
        with open('/etc/fedora-release', 'r') as f:
            # Third whitespace-separated token is the release number.
            fedora = f.readlines()[0].split()[2]
        tag = f'f{fedora}-updates-testing'
        builds = self.get_koji_session().listTagged(tag, latest=True)
        for build in builds:
            pkgs = installed.filter(name=build['name'], version=build['version'], release=build['release']).run()
            if len(pkgs):
                update_list = self.query(builds=build['nvr'])['updates']
                for update in update_list:
                    yield update

    @staticmethod
    def compose_str(compose: dict, minimal: bool=True) -> str:
        """Render a compose as a one-line summary or a detailed report.

        Static: it only formats the passed-in compose dict (no ``self``).
        """
        line_formatter = '{0:<16}: {1}'
        security = '*' if compose['security'] else ' '
        title = f"{security}{compose['release']['name']}-{compose['request']}"
        details = f"{len(compose['update_summary']):3d} updates ({compose['state']}) "
        minimal_repr = line_formatter.format(title, details)
        if minimal:
            return minimal_repr
        line_formatter = '{0:>12}: {1}\n'
        compose_lines = [f'''{'=':=^80}
''', f''' {minimal_repr}
''', f'''{'=':=^80}
''']
        compose_lines += [line_formatter.format('Content Type', compose['content_type']), line_formatter.format('Started', compose['date_created']), line_formatter.format('Updated', compose['state_date'])]
        if 'error_message' in compose and compose['error_message']:
            compose_lines.append(line_formatter.format('Error', compose['error_message']))
        compose_lines += ['\nUpdates:\n\n']
        line_formatter = f' {line_formatter}'
        for s in compose['update_summary']:
            compose_lines.append(line_formatter.format(s['alias'], s['title']))
        return ''.join(compose_lines)

    @staticmethod
    def override_str(override: dict, minimal: bool=True) -> str:
        """Render an override as a one-line summary or a detailed report.

        Static: it only formats the passed-in override dict (no ``self``).
        """
        if isinstance(override, str):
            return override
        if minimal:
            return f"{override['submitter']['name']}'s {override['build']['nvr']} override (expires {override['expiration_date']})"
        divider = '=' * 60
        nvr = '\n'.join(textwrap.wrap(override['build']['nvr'].replace(',', ', '), width=60, initial_indent=' ' * 5, subsequent_indent=' ' * 5))
        val = f'''{divider}
{nvr}
{divider}
'''
        val += f''' Submitter: {override['submitter']['name']}
'''
        val += f''' Expiration Date: {override['expiration_date']}
'''
        val += f''' Notes: {override['notes']}
'''
        val += f" Expired: {(override['expired_date'] is not None)}"
        return val

    def update_str(self, update: dict, minimal: bool=False) -> str:
        """Render an update as a one-line summary or a full detail report."""
        if isinstance(update, str):
            return update
        if minimal:
            val = ''
            security = '*' if (update['type'] == 'security') else ' '
            date = (update['date_pushed'] and update['date_pushed'].split()[0]) or update['date_submitted'].split()[0]
            days_in_status = _days_since(update['date_pushed']) if update['date_pushed'] else _days_since(update['date_submitted'])
            if update['builds']:
                title = update['builds'][0]['nvr']
            else:
                title = update['title'] or update['alias']
            content_type = update['content_type'] or 'unspecified'
            val += f"{security}{title:40} {content_type:9} {update['status']:8} {date:>10} ({days_in_status})"
            # Additional builds go on continuation lines.
            for build in update['builds'][1:]:
                val += f'''
{build['nvr']}'''
            return val
        wrap_width = 66
        wrap_line = functools.partial(textwrap.wrap, width=wrap_width)
        line_formatter = '{0:>12}: {1}\n'
        update_lines = [f'''{'=':=^80}
''']
        update_lines += [(line + '\n') for line in textwrap.wrap(update['title'], width=80, initial_indent=' ' * 5, subsequent_indent=' ' * 5)]
        update_lines.append(f'''{'=':=^80}
''')
        update_lines.append(line_formatter.format('Update ID', update['alias']))
        update_lines += [line_formatter.format('Content Type', update['content_type']), line_formatter.format('Release', update['release']['long_name']), line_formatter.format('Status', update['status']), line_formatter.format('Type', update['type']), line_formatter.format('Severity', update['severity']), line_formatter.format('Karma', update['karma']), line_formatter.format('Autokarma', f"{update['autokarma']} [{update['unstable_karma']}, {update['stable_karma']}]"), line_formatter.format('Autotime', update['autotime'])]
        # CI status is best-effort; a failed fetch must not break rendering.
        try:
            test_status = self.get_test_status(update['alias'])
        except RequestException as err:
            log.debug('ERROR while retrieving CI status: %s', err)
            test_status = None
        if test_status:
            info = None
            waivers = None
            if 'errors' in test_status:
                info = '\n'.join([el.description for el in test_status.errors])
            elif 'decision' in test_status:
                info = test_status.decision.summary
                waivers = test_status.decision.waivers
            elif 'decisions' in test_status:
                info = '\n'.join([d.summary for d in test_status.decisions])
                waivers = list(itertools.chain(*(d.waivers for d in test_status.decisions)))
            else:
                log.debug('No `errors` nor `decision` in the data returned')
            if info:
                update_lines.append(line_formatter.format('CI Status', info))
            if waivers:
                waivers_lines = []
                for waiver in waivers:
                    dt = datetime.datetime.strptime(waiver['timestamp'], '%Y-%m-%dT%H:%M:%S.%f')
                    waivers_lines.append(f"{waiver['username']} - {dt.strftime('%Y-%m-%d %H:%M:%S')}")
                    waivers_lines += wrap_line(waiver['comment'])
                    waivers_lines.append(f"build: {waiver['subject_identifier']}")
                    waivers_lines.append(f"testcase: {waiver['testcase']}")
                update_lines.append(line_formatter.format('Waivers', waivers_lines[0]))
                # Continuation lines drop the ':' separator.
                waiver_line_formatter = line_formatter.replace(': ', '  ')
                update_lines += [waiver_line_formatter.format(indent, line) for (indent, line) in zip(itertools.repeat(' ', (len(waivers_lines) - 1)), waivers_lines[1:])]
        if update['request'] is not None:
            update_lines.append(line_formatter.format('Request', update['request']))
        if len(update['bugs']):
            bugs = list(itertools.chain(*[wrap_line(f"{bug['bug_id']} - {bug['title']}") for bug in update['bugs']]))
            indent_lines = ['Bugs'] + ([' '] * (len(bugs) - 1))
            update_lines += [line_formatter.format(indent, line) for (indent, line) in zip(indent_lines, bugs)]
        if update['notes']:
            notes_lines = list(itertools.chain(*[wrap_line(line) for line in update['notes'].splitlines()]))
            indent_lines = ['Notes'] + ([' '] * (len(notes_lines) - 1))
            for (indent, line) in zip(indent_lines, notes_lines):
                update_lines.append(line_formatter.format(indent, line))
        update_lines += [line_formatter.format('Submitter', update['user']['name']), line_formatter.format('Submitted', update['date_submitted'])]
        if len(update['comments']):
            comments_lines = []
            for comment in update['comments']:
                comments_lines.append(f"{comment['user']['name']} - {comment['timestamp']} (karma {comment['karma']})")
                comments_lines += wrap_line(comment['text'])
            update_lines.append(line_formatter.format('Comments', comments_lines[0]))
            comment_line_formatter = line_formatter.replace(': ', '  ')
            update_lines += [comment_line_formatter.format(indent, line) for (indent, line) in zip(itertools.repeat(' ', (len(comments_lines) - 1)), comments_lines[1:])]
        update_lines.append(f'''
{self.base_url}updates/{update['alias']}
''')
        return ''.join(update_lines)

    def get_releases(self, **kwargs) -> 'munch.Munch':
        """List releases known to the server."""
        return self.send_request('releases/', verb='GET', params=kwargs)

    def get_koji_session(self) -> koji.ClientSession:
        """Build a koji session from ~/.koji/config or /etc/koji.conf."""
        config = configparser.ConfigParser()
        koji_conf = os.path.join(os.path.expanduser('~'), '.koji', 'config')
        if not os.path.exists(koji_conf):
            koji_conf = '/etc/koji.conf'
        with open(koji_conf) as fh:
            config.read_file(fh)
        session = koji.ClientSession(config.get('koji', 'server'))
        return session

    # Attribute-style access to a fresh koji session.
    koji_session = property(fget=get_koji_session)

    def candidates(self) -> typing.Iterable[dict]:
        """Return candidate-tagged koji builds owned by the current user."""
        builds = []
        data = self.get_releases()
        koji = self.get_koji_session()
        for release in data['releases']:
            try:
                for build in koji.listTagged(release['candidate_tag'], latest=True):
                    if build['owner_name'] == self.username:
                        builds.append(build)
            except Exception:
                # Best effort per release: keep going if one tag query fails.
                log.exception('Unable to query candidate builds for %s', release)
        return builds
def run_threaded(fal_dbt: FalDbt, parsed: argparse.Namespace, node_graph: NodeGraph) -> int:
    """Build an execution plan from the node graph and run it in parallel."""
    from fal.dbt.planner.plan import OriginGraph, FilteredGraph, PlannedGraph, ScriptConnectedGraph
    from fal.dbt.planner.schedule import schedule_graph
    from fal.dbt.planner.executor import parallel_executor

    plan = ExecutionPlan.create_plan_from_graph(parsed, node_graph, fal_dbt)
    # Successive refinements: origin -> filtered -> script-connected -> planned.
    graph = OriginGraph(node_graph.graph)
    graph = FilteredGraph.from_execution_plan(graph, execution_plan=plan)
    graph = ScriptConnectedGraph.from_filtered_graph(graph)
    planned = PlannedGraph.from_script_connected_graph(graph, enable_chunking=False)
    scheduler = schedule_graph(planned.graph, node_graph)
    return parallel_executor(parsed, fal_dbt, scheduler)
def create_logger(app):
    """Build the application logger.

    Attaches a DEBUG stream handler plus one rotating-file handler per
    configured log; each handler only emits depending on ``app.debug``.
    """
    base_logger_cls = logging.getLoggerClass()

    class _DebugLevelLogger(base_logger_cls):
        # Report DEBUG when no explicit level is set and the app is debugging.
        def getEffectiveLevel(self):
            if self.level == 0 and app.debug:
                return logging.DEBUG
            return base_logger_cls.getEffectiveLevel(self)

    class _DebugStreamHandler(StreamHandler):
        # Emits only while the app is in debug mode.
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    class _DebugFileHandler(RotatingFileHandler):
        # File handler active only in debug mode.
        def emit(self, record):
            if app.debug:
                RotatingFileHandler.emit(self, record)

    class _ProdFileHandler(RotatingFileHandler):
        # File handler active only outside debug mode.
        def emit(self, record):
            if not app.debug:
                RotatingFileHandler.emit(self, record)

    stream_handler = _DebugStreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(Formatter(_debug_log_format))
    logger = logging.getLogger(app.logger_name)
    # Start from a clean slate and swap in the debug-aware logger class.
    del logger.handlers[:]
    logger.__class__ = _DebugLevelLogger
    logger.addHandler(stream_handler)
    log_configs = app.config.logging
    if not log_configs:
        log_configs = _def_log_config
    for log_name, log_conf in log_configs.items():
        log_path = os.path.join(app.root_path, 'logs', log_name + '.log')
        # Missing per-log settings fall back to the production defaults.
        max_size = log_conf.max_size or _def_log_config.production.max_size
        file_no = log_conf.file_no or _def_log_config.production.file_no
        level = LOG_LEVELS.get(log_conf.level or 'warning', LOG_LEVELS.get('warning'))
        log_format = log_conf.format or _def_log_config.production.format
        if log_conf.on_app_debug:
            handler = _DebugFileHandler(log_path, maxBytes=max_size, backupCount=file_no)
        else:
            handler = _ProdFileHandler(log_path, maxBytes=max_size, backupCount=file_no)
        handler.setLevel(level)
        handler.setFormatter(Formatter(log_format))
        logger.addHandler(handler)
    return logger
class LayoutParserComputerVisionModel(ComputerVisionModel):
    """Computer-vision model backed by a lazily loaded layoutparser model."""

    def __init__(self, config: dict, model_path: str=DEFAULT_MODEL_PATH):
        super().__init__()
        self.score_threshold = float(config.get('score_threshold', DEFAULT_SCORE_THRESHOLD))
        self.avoid_overlapping = bool(config.get('avoid_overlapping', True))
        self.model_path = model_path
        # Model loading is deferred until first access.
        self._lazy_model = LazyLoaded[BaseLayoutModel](self._load_model)

    def _load_model(self) -> BaseLayoutModel:
        """Load the layout model from ``model_path``."""
        model = load_model(self.model_path)
        LOGGER.info('loaded layout model: %r', self.model_path)
        return model

    @property
    def layout_model(self) -> BaseLayoutModel:
        # Property (not a method): predict_single reads it as an attribute
        # (``self.layout_model.detect(...)``), which would otherwise fail.
        return self._lazy_model.get()

    def preload(self):
        """Force the lazy model to load eagerly."""
        self._lazy_model.get()

    def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
        """Run layout detection on a single image."""
        return LayoutParserComputerVisionModelResult(self.layout_model.detect(image), score_threshold=self.score_threshold, avoid_overlapping=self.avoid_overlapping)
def _parse_directive_options(content: str, directive_class: type[Directive], as_yaml: bool, line: (int | None), additional_options: (dict[(str, str)] | None)=None) -> _DirectiveOptions:
    """Split a directive's body into its option block and remaining content.

    Supports two option styles: '---' fenced YAML front-matter, and leading
    ':key: value' lines. Returns the remaining content, the parsed options,
    a list of (message, line) errors, and whether an options block existed.
    """
    yaml_block: (None | str) = None
    if content.startswith('---'):
        # Front-matter style: the closing fence is 3+ dashes at line start.
        line = (None if (line is None) else (line + 1))
        content = '\n'.join(content.splitlines()[1:])
        match = re.search('^-{3,}', content, re.MULTILINE)
        if match:
            yaml_block = content[:match.start()]
            content = content[(match.end() + 1):]
        else:
            # No closing fence: the whole body is treated as options.
            yaml_block = content
            content = ''
        yaml_block = dedent(yaml_block)
    elif content.lstrip().startswith(':'):
        # ':key: value' style: consume option lines until the first non-option line.
        content_lines = content.splitlines()
        yaml_lines = []
        while content_lines:
            if (not content_lines[0].lstrip().startswith(':')):
                break
            yaml_lines.append(content_lines.pop(0).lstrip()[1:])
        yaml_block = '\n'.join(yaml_lines)
        content = '\n'.join(content_lines)
    has_options_block = (yaml_block is not None)
    if as_yaml:
        # YAML mode: parse to a mapping, but do not validate option names/values.
        yaml_errors: list[tuple[(str, (int | None))]] = []
        try:
            yaml_options = (yaml.safe_load((yaml_block or '')) or {})
        except (yaml.parser.ParserError, yaml.scanner.ScannerError):
            yaml_options = {}
            yaml_errors.append(('Invalid options format (bad YAML)', line))
        if (not isinstance(yaml_options, dict)):
            yaml_options = {}
            yaml_errors.append(('Invalid options format (not a dict)', line))
        return _DirectiveOptions(content, yaml_options, yaml_errors, has_options_block)
    options: dict[(str, str)] = {}
    if (yaml_block is not None):
        try:
            options = dict(options_to_items(yaml_block))
        except TokenizeError as err:
            # Unparseable option block: report and return what we have.
            return _DirectiveOptions(content, options, [(f'Invalid options format: {err.problem}', line)], has_options_block)
    if issubclass(directive_class, TestDirective):
        # Test directives accept arbitrary options; skip validation entirely.
        return _DirectiveOptions(content, options, [], has_options_block)
    if additional_options:
        # Explicitly written options take precedence over injected defaults.
        options = {**additional_options, **options}
    options_spec: dict[(str, Callable)] = directive_class.option_spec
    unknown_options: list[str] = []
    new_options: dict[(str, Any)] = {}
    validation_errors: list[tuple[(str, (int | None))]] = []
    value: (str | None)
    for (name, value) in options.items():
        try:
            convertor = options_spec[name]
        except KeyError:
            unknown_options.append(name)
            continue
        if (not value):
            # Empty values are normalized to None before conversion.
            value = None
        if (convertor is flag):
            # Flag options ignore any provided value.
            value = None
        try:
            converted_value = convertor(value)
        except (ValueError, TypeError) as error:
            validation_errors.append((f'Invalid option value for {name!r}: {value}: {error}', line))
        else:
            new_options[name] = converted_value
    if unknown_options:
        validation_errors.append((f'Unknown option keys: {sorted(unknown_options)} (allowed: {sorted(options_spec)})', line))
    return _DirectiveOptions(content, new_options, validation_errors, has_options_block)
class TestEvalFormula(unittest.TestCase):
    """Unit tests for eval_formula() against a small in-memory worksheet."""

    def setUp(self):
        # Columns A-C hold the float strings 1.0 .. 9.0.
        worksheet = XLSWorkSheet('Test Worksheet')
        for column, values in (('A', ['1.0', '2.0', '3.0']), ('B', ['4.0', '5.0', '6.0']), ('C', ['7.0', '8.0', '9.0'])):
            worksheet.insert_column_data(column, values)
        self.ws = worksheet

    def test_simple_substitutions(self):
        for formula, expected in (('123', '123'), ('123.0', '123.0'), ('', ''), ('not a number', 'not a number'), ('=A1', 1.0)):
            self.assertEqual(eval_formula(formula, self.ws), expected)

    def test_operations(self):
        for formula, expected in (('=A1+A2', 3.0), ('=A1-A2', -1.0), ('=A1*A2', 2.0), ('=A1/A2', 0.5)):
            self.assertEqual(eval_formula(formula, self.ws), expected)

    def test_recursive_evaluation(self):
        # Column D references C and earlier D cells; evaluation must recurse.
        self.ws.insert_column_data('D', ['=C1', '=C1+C2', '=D1+D2'])
        for formula, expected in (('=D1', 7.0), ('=D2', 15.0), ('=D3', 22.0)):
            self.assertEqual(eval_formula(formula, self.ws), expected)

    def test_bad_values(self):
        # Arithmetic over non-numeric cells yields the BAD_REF marker.
        self.ws.insert_column_data('D', ['one', 'two', '=D1+D2'])
        self.assertEqual(eval_formula('=D1', self.ws), 'one')
        self.assertEqual(eval_formula('=D3', self.ws), BAD_REF)

    def test_simple_substitutions_with_integers(self):
        self.ws.insert_column_data('D', ['1', '2', '3'])
        for formula, expected in (('=D1', 1), ('=D2', 2), ('=D3', 3)):
            self.assertEqual(eval_formula(formula, self.ws), expected)

    def test_operations_with_integers(self):
        self.ws.insert_column_data('D', ['1', '2', '3'])
        for formula, expected in (('=D1+D2', 3), ('=D1-D2', -1), ('=D1*D2', 2), ('=D1/D2', 0.5)):
            self.assertEqual(eval_formula(formula, self.ws), expected)

    def test_operations_mixing_integers_and_floats(self):
        self.ws.insert_column_data('D', ['1', '2', '3'])
        for formula, expected in (('=A1+D2', 3.0), ('=A1-D2', -1.0), ('=A1*D2', 2.0), ('=A1/D2', 0.5)):
            self.assertEqual(eval_formula(formula, self.ws), expected)
class Screens():
    """Capture evenly spaced screenshots from a video URL with ffmpeg."""

    # NOTE(review): declared without `self` and without @staticmethod —
    # calling it through an instance would fail. Presumably invoked via the
    # class or registered directly as a handler; confirm with the caller.
    async def cap_screens(msg: Message):
        # The message text is expected to be a direct video URL — TODO confirm.
        link = msg.text
        # Probe the stream duration (seconds) with ffprobe.
        duration_process_cmd = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=duration', '-of', 'default=noprint_wrappers=1:nokey=1', link]
        process = (await asyncio.create_subprocess_exec(*duration_process_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
        (duration, _) = (await process.communicate())
        duration_int = int(float(duration.decode('utf-8').rstrip()))
        # Per-invocation scratch directory under the working dir.
        tmp_dir = os.path.join(pathlib.Path().absolute(), Common().working_dir, secrets.token_hex(2))
        if (not os.path.exists(tmp_dir)):
            os.mkdir(tmp_dir)
        # Four screenshots at 1/5, 2/5, 3/5 and 4/5 of the duration.
        for x in range(4):
            shot_duration = int((duration_int * ((x + 1) / 5)))
            tmp_file = os.path.join(tmp_dir, f'{secrets.token_hex(2)}.jpg')
            screen_cap_process_cmd = ['ffmpeg', '-ss', f'{shot_duration}', '-i', link, '-vframes', '1', '-q:v', '2', tmp_file]
            screen_process = (await asyncio.create_subprocess_exec(*screen_cap_process_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
            (_, __) = (await screen_process.communicate())
            # ffmpeg may fail silently; only reply when the frame was written.
            if os.path.isfile(tmp_file):
                (await msg.reply_photo(photo=tmp_file, caption=f'Screen capture at {shot_duration} second.'))
        # Best-effort cleanup of the scratch directory.
        try:
            shutil.rmtree(tmp_dir)
        except Exception as e:
            logging.error(e)
@frappe.whitelist()
def get_appointment_billing_item_and_rate(doc):
    """Resolve the billable service item and charge for an appointment or encounter.

    `doc` may be a JSON string or a document dict/object for a Patient
    Appointment or Patient Encounter. Returns a dict with keys
    'service_item' and 'practitioner_charge'; raises a configuration error
    when either cannot be determined.
    """
    if isinstance(doc, str):
        doc = json.loads(doc)
        doc = frappe.get_doc(doc)
    service_item = None
    practitioner_charge = None
    # Encounters carry the department under a different fieldname.
    department = (doc.medical_department if (doc.doctype == 'Patient Encounter') else doc.department)
    service_unit = (doc.service_unit if (doc.doctype == 'Patient Appointment') else None)
    is_inpatient = doc.inpatient_record
    # Preferred source: the practitioner's own billing settings.
    if doc.get('practitioner'):
        (service_item, practitioner_charge) = get_practitioner_billing_details(doc.practitioner, is_inpatient)
    # Fallback: billing settings attached to the appointment type.
    if ((not service_item) and doc.get('appointment_type')):
        (service_item, appointment_charge) = get_appointment_type_billing_details(doc.appointment_type, (department if department else service_unit), is_inpatient)
        if (not practitioner_charge):
            practitioner_charge = appointment_charge
    # Last resort: the global healthcare settings service item.
    if (not service_item):
        service_item = get_healthcare_service_item(is_inpatient)
    if (not service_item):
        throw_config_service_item(is_inpatient)
    if ((not practitioner_charge) and doc.get('practitioner')):
        throw_config_practitioner_charge(is_inpatient, doc.practitioner)
    if ((not practitioner_charge) and (not doc.get('practitioner'))):
        throw_config_appointment_type_charge(is_inpatient, doc.appointment_type)
    return {'service_item': service_item, 'practitioner_charge': practitioner_charge}
class AboutDialog(QDialog):
    """Fixed-size modal 'About' dialog showing version info and the GPL notice."""

    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.setWindowTitle('About')
        self.setModal(True)
        self.setFixedSize(QSize(600, 480))
        # Strip the context-help and close title-bar buttons.
        self.setWindowFlags(self.windowFlags() & (~ Qt.WindowContextHelpButtonHint))
        self.setWindowFlags(self.windowFlags() & (~ Qt.WindowCloseButtonHint))
        main_layout = QVBoxLayout()
        main_layout.addLayout(self.createTopLayout())
        main_layout.addLayout(self.createGplLayout())
        main_layout.addLayout(self.createButtonLayout())
        self.setLayout(main_layout)

    def createTopLayout(self):
        """Build the top row holding the title/version info."""
        top_layout = QHBoxLayout()
        top_layout.addLayout(self.createInfoLayout(), 1)
        return top_layout

    def createInfoLayout(self):
        """Build the title, subtitle and version labels.

        Takes ``self``: it is invoked as ``self.createInfoLayout()`` from
        createTopLayout (the original definition was missing the parameter).
        """
        info_layout = QVBoxLayout()
        ert = QLabel()
        ert.setAlignment(Qt.AlignHCenter)
        title_font = QFont()
        title_font.setPointSize(40)
        ert.setFont(title_font)
        ert.setText('ERT')
        info_layout.addWidget(ert)
        info_layout.addStretch(1)
        ert_title = QLabel()
        ert_title.setAlignment(Qt.AlignHCenter)
        ert_title.setText('Ensemble based Reservoir Tool')
        info_layout.addWidget(ert_title)
        version = QLabel()
        version.setAlignment(Qt.AlignHCenter)
        version.setText(f'Versions: resdata:{resdata.__version__} ert:{ert_gui.__version__}')
        info_layout.addWidget(version)
        info_layout.addStretch(5)
        return info_layout

    def createGplLayout(self):
        """Build the word-wrapped GPL license notice."""
        gpl = QLabel()
        gpl.setText('ERT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. <br> <br> ERT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. <br> <br> See the GNU General Public License at <a href=" for more details. ')
        gpl.setWordWrap(True)
        gpl_layout = QVBoxLayout()
        gpl_layout.addWidget(gpl)
        return gpl_layout

    def createButtonLayout(self):
        """Build the centered Close button row."""
        button_layout = QHBoxLayout()
        close_button = QPushButton('Close')
        close_button.setObjectName('close_button')
        close_button.clicked.connect(self.accept)
        button_layout.addStretch()
        button_layout.addWidget(close_button)
        button_layout.addStretch()
        return button_layout
class GCIDTest(unittest.TestCase):
    """Round-trip (decompile/compile) tests for the 'cidg' table."""

    @classmethod
    def setUpClass(cls):
        # unittest invokes setUpClass on the class object; without the
        # @classmethod decorator it never receives `cls` correctly.
        cls.maxDiff = None
        cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D'])

    def testDecompileToXML(self):
        """Binary table data decompiles to the expected XML."""
        table = newTable('cidg')
        table.decompile(CIDG_DATA, self.font)
        self.assertEqual(getXML(table.toXML, self.font), CIDG_XML)

    def testCompileFromXML(self):
        """XML compiles back to the original binary data."""
        table = newTable('cidg')
        for (name, attrs, content) in parseXML(CIDG_XML):
            table.fromXML(name, attrs, content, font=self.font)
        self.assertEqual(hexStr(table.compile(self.font)), hexStr(CIDG_DATA))
@pytest.mark.compilertest  # NOTE(review): decorator reconstructed from mangled '.compilertest' — verify the mark name
def test_delta_3(tmp_path):
    """Cached and uncached builds must agree after a shuffled sequence of deltas."""
    cached = Builder(logger, tmp_path, 'cache_test_1.yaml')
    uncached = Builder(logger, tmp_path, 'cache_test_1.yaml', enable_cache=False)
    cached.check('baseline', cached.build(), uncached.build(), strip_cache_keys=True)
    # Apply the same deltas to both builders, in a random order.
    delta_files = [f'cache_random_{i}.yaml' for i in [1, 2, 3, 4, 5]]
    random.shuffle(delta_files)
    for delta_file in delta_files:
        cached.apply_yaml(delta_file)
        uncached.apply_yaml(delta_file)
    cached_result = cached.build()
    uncached_result = uncached.build()
    cached.check('after deltas', cached_result, uncached_result, strip_cache_keys=True)
    # An independent builder starting from the expected end state must agree too.
    reference = Builder(logger, tmp_path, 'cache_result_3.yaml')
    reference_result = reference.build()
    reference.check('final', reference_result, cached_result)
def encrypt_access_request_results(data: Union[(str, bytes)], request_id: str) -> str:
    """Encrypt access-request results with the key cached for this privacy request.

    Returns the (decoded) input unchanged when no encryption key is cached;
    otherwise returns base64(nonce || ciphertext).
    """
    cache = get_cache()
    cache_key = get_encryption_cache_key(privacy_request_id=request_id, encryption_attr='key')
    plaintext = data.decode(CONFIG.security.encoding) if isinstance(data, bytes) else data
    encryption_key: (str | None) = cache.get(cache_key)
    if not encryption_key:
        # No key cached for this request: nothing to encrypt.
        return plaintext
    key_bytes: bytes = encryption_key.encode(encoding=CONFIG.security.encoding)
    # A fresh random nonce is prepended so decryption can recover it.
    nonce: bytes = secrets.token_bytes(CONFIG.security.aes_gcm_nonce_length)
    ciphertext = encrypt_to_bytes_verify_secrets_length(plaintext, key_bytes, nonce)
    return bytes_to_b64_str(nonce + ciphertext)
@pytest.mark.parametrize('alg,expected_hash', list(TINY_DATA_HASHES_HASHLIB.items()), ids=list(TINY_DATA_HASHES_HASHLIB.keys()))
def test_hash_matches_uppercase(alg, expected_hash):
    """hash_matches must accept uppercase hex digests and report the source on failure."""
    fname = os.path.join(DATA_DIR, 'tiny-data.txt')
    check_tiny_data(fname)
    known_hash = f'{alg}:{expected_hash.upper()}'
    assert hash_matches(fname, known_hash, strict=True)
    # A truncated hash must raise, mentioning the provided source name.
    with pytest.raises(ValueError) as error:
        hash_matches(fname, known_hash[:(- 5)], strict=True, source='Neverland')
    assert ('Neverland' in str(error.value))
class TestIterateAll():
    """Tests for iterate_all()."""

    def test_iterate_all_should_return_an_generator(self):
        """iterate_all is lazy: it must return a generator."""
        assert isinstance(iterate_all({'1': 1}), Generator)

    @pytest.mark.parametrize(('expected', 'args', 'assert_desc'), [([1], {'iterable': {'1': 1}}, "should return [1] for dict {'1': 1}"), ([2], {'iterable': {'2': 2}}, "should return [2] for dict {'2': 2}"), ([3], {'iterable': {'3': 3}}, "should return [3] for dict {'3': 3}"), ([1, 2], {'iterable': {'1': 1, '2': 2}}, "should return [1, 2] for dict {'1': 1, '2': 2}"), ([1, 2, 3], {'iterable': {'1': 1, '2': 2, '3': 3}}, "should return [1, 2, 3] for dict {'1': 1, '2': 2, '3': 3}"), ([1], {'iterable': {'1': {'1': 1}}}, 'should return [1], for dict {"1": {"1": 1}}}'), ([1, 2], {'iterable': {'1': {'1': 1}, '2': 2}}, 'should return [1, 2], for dict {"1": {"1": 1}, "2": 2}}'), ([1, 2], {'iterable': {'1': {'1': 1, '2': 2}}}, 'should return [1, 2], for dict {"1": {"1": 1, "2": 2}}}'), ([1], {'iterable': [1]}, 'should return [1], for list [1]'), ([2], {'iterable': [2]}, 'should return [2], for list [2]'), ([1, 2], {'iterable': [1, 2]}, 'should return [1, 2], for list [1, 2]'), ([1, 2, 3], {'iterable': [1, 2, 3]}, 'should return [1, 2, 3], for list [1, 2, 3]'), ([1, 2, 3], {'iterable': [1, [2, 3]]}, 'should return [1, 2, 3], for list [1, [2, 3]]'), ([1, 2, 3], {'iterable': {'1': 1, '2': [2, 3]}}, 'should return [1, 2, 3], for dict {"1": 1, "2": [2, 3]}'), ([1, 2, 3], {'iterable': [1, {'2': [2, 3]}]}, 'should return [1, 2, 3], for dict [1, {"2": [2, 3]}]}'), ([1, 2, 3], {'iterable': {'1': 1, '2': [{'2': 2}, {'3': 3}]}}, 'should return [1, 2, 3], for dict {"1": 1, "2": [{"2": 2}, {"3": 3}]}'), (['1'], {'iterable': {'1': 1}, 'returned': 'key'}, "should return ['1'] for dict {'1': 1}"), (['1', '2'], {'iterable': {'1': 1, '2': [2, 3]}, 'returned': 'key'}, 'should return [1, 2, 3], for dict {"1": 1, "2": [{"2": 2}, {"3": 3}]}')])
    def test_basicTest(self, expected, args, assert_desc):
        """Nested dicts/lists flatten to the expected keys or values."""
        output = [it for it in iterate_all(**args)]
        assert (expected == output), assert_desc

    def test_bad_value_for_returned(self):
        """An unknown `returned` mode must raise ValueError."""
        with pytest.raises(ValueError):
            ret = [it for it in iterate_all(iterable={'1': 1}, returned='Bad')]
            assert ([1] == ret)
def parse_int_list_list(list_of_lists: list[list[str]]) -> list[list[Any]]:
    """Convert string items to ints where possible and drop empty sublists.

    Items that do not parse as integer literals are kept unchanged.
    """
    def _coerce(token: str) -> Any:
        # Keep the original token when it is not a valid integer literal.
        try:
            return int(token)
        except ValueError:
            return token

    return [[_coerce(token) for token in row] for row in list_of_lists if row]
class ClusterEnvironmentInfo(InternalTelemetryDevice):
    """Collects static environment info (revision, distribution version and
    flavor) from the benchmarked cluster and attaches it to the metrics store
    as cluster-level meta-data."""

    serverless_status = serverless.Status.Public

    def __init__(self, client, metrics_store, revision_override):
        super().__init__()
        self.metrics_store = metrics_store
        self.client = client
        # When set, takes precedence over the build hash reported by the cluster.
        self.revision_override = revision_override

    def on_benchmark_start(self):
        try:
            client_info = self.client.info()
        except BaseException:
            # Best effort: an unreachable cluster simply yields no meta-data.
            self.logger.exception('Could not retrieve cluster version info')
            return
        version_block = client_info['version']
        distribution_flavor = version_block.get('build_flavor', 'oss')
        revision = self.revision_override or version_block.get('build_hash', distribution_flavor)
        distribution_version = version_block.get('number', distribution_flavor)
        for key, value in (('source_revision', revision),
                           ('distribution_version', distribution_version),
                           ('distribution_flavor', distribution_flavor)):
            self.metrics_store.add_meta_info(metrics.MetaInfoScope.cluster, None, key, value)
def test_get_alert_template_workflow_param():
    """Workflow-mode Slack integrations serialize the raw alert data as JSON;
    regular integrations delegate to the per-alert-type template builders."""
    alerts_data = AlertsDataMock()
    # Workflow mode: the template text is simply the alert data as sorted JSON.
    workflow_integration = get_slack_integration_mock(is_slack_workflow=True, slack_token='mock', slack_channel_name='mock')
    expected_json = json.dumps(alerts_data.dbt_test.data, sort_keys=True)
    assert workflow_integration._get_alert_template(alert=alerts_data.dbt_test).text == expected_json
    # Non-workflow mode: dispatches to the dbt-test specific template builder.
    standard_integration = get_slack_integration_mock(slack_token='mock', slack_channel_name='mock')
    standard_integration._get_dbt_test_template = mock.Mock(return_value='dbt_test')
    assert standard_integration._get_alert_template(alert=alerts_data.dbt_test) == 'dbt_test'
class KnativeIngressProcessor(ManagedKubernetesProcessor):
    """Translates Knative networking Ingress resources into Ambassador Mappings
    and keeps their status (load-balancer address, readiness conditions) in sync."""

    INGRESS_CLASS: ClassVar[str] = 'ambassador.ingress.networking.knative.dev'
    service_dep: ServiceDependency

    def __init__(self, manager: ResourceManager):
        super().__init__(manager)
        self.service_dep = self.deps.want(ServiceDependency)

    def kinds(self) -> FrozenSet[KubernetesGVK]:
        return frozenset([KubernetesGVK.for_knative_networking('Ingress')])

    def _has_required_annotations(self, obj: KubernetesObject) -> bool:
        """Return True when this Ingress is addressed to us: the right ingress
        class annotation and a matching Ambassador ID."""
        annotations = obj.annotations
        ingress_class = annotations.get('networking.knative.dev/ingress.class', self.INGRESS_CLASS)
        if (ingress_class.lower() != self.INGRESS_CLASS):
            self.logger.debug(f'Ignoring Knative {obj.kind} {obj.name}; set networking.knative.dev/ingress.class annotation to {self.INGRESS_CLASS} for ambassador to parse it.')
            return False
        if (obj.ambassador_id != Config.ambassador_id):
            self.logger.info(f'Knative {obj.kind} {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...')
            return False
        return True

    def _emit_mapping(self, obj: KubernetesObject, rule_count: int, rule: Dict[(str, Any)]) -> None:
        """Emit one Mapping per (host, traffic split) combination of a single
        Ingress rule."""
        hosts = rule.get('hosts', [])
        split_mapping_specs: List[Dict[(str, Any)]] = []
        # Fix: this line was garbled to `rule.get(' {}).get('paths', [])` (a
        # syntax error); Knative Ingress rules nest their paths under 'http'.
        paths = rule.get('http', {}).get('paths', [])
        for path in paths:
            global_headers = path.get('appendHeaders', {})
            splits = path.get('splits', [])
            for split in splits:
                service_name = split.get('serviceName')
                if (not service_name):
                    # A split without a backing service cannot be routed to.
                    continue
                service_namespace = split.get('serviceNamespace', obj.namespace)
                service_port = split.get('servicePort', 80)
                # Per-split headers win over per-path (global) headers.
                headers = split.get('appendHeaders', {})
                headers = {**global_headers, **headers}
                split_mapping_specs.append({'service': f'{service_name}.{service_namespace}:{service_port}', 'add_request_headers': headers, 'weight': split.get('percent', 100), 'prefix': path.get('path', '/'), 'timeout_ms': int((durationpy.from_str(path.get('timeout', '15s')).total_seconds() * 1000))})
        for (split_count, (host, split_mapping_spec)) in enumerate(itertools.product(hosts, split_mapping_specs)):
            mapping_identifier = f'{obj.name}-{rule_count}-{split_count}'
            spec = {'ambassador_id': obj.ambassador_id, 'host': host}
            spec.update(split_mapping_spec)
            mapping = NormalizedResource.from_data('Mapping', mapping_identifier, namespace=obj.namespace, generation=obj.generation, labels=obj.labels, spec=spec)
            self.logger.debug(f'Generated Mapping from Knative {obj.kind}: {mapping}')
            self.manager.emit(mapping)

    def _make_status(self, generation: int=1, lb_domain: Optional[str]=None) -> Dict[(str, Any)]:
        """Build a Knative Ingress status block: observed generation, ready
        conditions, and (when known) the internal load-balancer domain."""
        utcnow = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        status = {'observedGeneration': generation, 'conditions': [{'lastTransitionTime': utcnow, 'status': 'True', 'type': 'LoadBalancerReady'}, {'lastTransitionTime': utcnow, 'status': 'True', 'type': 'NetworkConfigured'}, {'lastTransitionTime': utcnow, 'status': 'True', 'type': 'Ready'}]}
        if lb_domain:
            load_balancer = {'ingress': [{'domainInternal': lb_domain}]}
            status['loadBalancer'] = load_balancer
            status['privateLoadBalancer'] = load_balancer
        return status

    def _update_status(self, obj: KubernetesObject) -> None:
        """Queue a status update for obj when its generation or the observed
        load-balancer domain changed since we last reconciled it."""
        has_new_generation = (obj.generation > obj.status.get('observedGeneration', 0))
        current_lb_domain = None
        if ((not self.service_dep.ambassador_service) or (not self.service_dep.ambassador_service.name)):
            self.logger.warning(f"Unable to set Knative {obj.kind} {obj.name}'s load balancer, could not find Ambassador service")
        else:
            current_lb_domain = f'{self.service_dep.ambassador_service.name}.{self.service_dep.ambassador_service.namespace}.svc.cluster.local'
        observed_ingress: Dict[(str, Any)] = next(iter(obj.status.get('privateLoadBalancer', {}).get('ingress', [])), {})
        observed_lb_domain = observed_ingress.get('domainInternal')
        has_new_lb_domain = (current_lb_domain != observed_lb_domain)
        if (has_new_generation or has_new_lb_domain):
            status = self._make_status(generation=obj.generation, lb_domain=current_lb_domain)
            if status:
                status_update = (obj.gvk.domain, obj.namespace, status)
                self.logger.info(f'Updating Knative {obj.kind} {obj.name} status to {status_update}')
                self.aconf.k8s_status_updates[f'{obj.name}.{obj.namespace}'] = status_update
        else:
            self.logger.debug(f'Not reconciling Knative {obj.kind} {obj.name}: observed and current generations are in sync')

    def _process(self, obj: KubernetesObject) -> None:
        """Entry point: emit Mappings for each rule, then reconcile status."""
        if (not self._has_required_annotations(obj)):
            return
        rules = obj.spec.get('rules', [])
        for (rule_count, rule) in enumerate(rules):
            self._emit_mapping(obj, rule_count, rule)
        self._update_status(obj)
class DiscoveryPeerBackend(BasePeerBackend):
    """Peer backend that sources candidate nodes from the discovery service
    over the event bus."""

    def __init__(self, event_bus: EndpointAPI) -> None:
        self.event_bus = event_bus

    async def get_peer_candidates(self,
                                  max_candidates: int,
                                  should_skip_fn: Callable[[NodeAPI], bool]) -> Tuple[NodeAPI, ...]:
        # Block until the discovery service is actually listening for requests.
        await self.event_bus.wait_until_any_endpoint_subscribed_to(PeerCandidatesRequest)
        request = PeerCandidatesRequest(max_candidates, should_skip_fn)
        response = await self.event_bus.request(request, TO_DISCOVERY_BROADCAST_CONFIG)
        return response.candidates
def _prepare_trie(trie: Trie[(K, V)], get_storage_root: Callable[([Address], Root)]=None) -> Mapping[(Bytes, Bytes)]:
    """Encode every entry of ``trie`` into the nibble-keyed byte mapping used
    for root computation.

    Keys are hashed with keccak256 when the trie is secured; Account values
    additionally need ``get_storage_root`` to resolve their storage root.
    """
    encoded_entries: MutableMapping[(Bytes, Bytes)] = {}
    for preimage, value in trie._data.items():
        if isinstance(value, Account):
            # Accounts embed a storage root that only the caller can resolve.
            assert get_storage_root is not None
            encoded_value = encode_node(value, get_storage_root(Address(preimage)))
        else:
            encoded_value = encode_node(value)
        # An empty encoding would be indistinguishable from an absent node.
        ensure(encoded_value != b'', AssertionError)
        key: Bytes = keccak256(preimage) if trie.secured else preimage
        encoded_entries[bytes_to_nibble_list(key)] = encoded_value
    return encoded_entries
class OptionSeriesVariablepieSonificationContexttracksMappingLowpass(Options):
    # Auto-generated accessor class for the `lowpass` filter mapping options of
    # a variable-pie series' sonification context tracks.

    def frequency(self) -> 'OptionSeriesVariablepieSonificationContexttracksMappingLowpassFrequency':
        """Return the sub-configuration object for the lowpass filter frequency."""
        return self._config_sub_data('frequency', OptionSeriesVariablepieSonificationContexttracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionSeriesVariablepieSonificationContexttracksMappingLowpassResonance':
        """Return the sub-configuration object for the lowpass filter resonance."""
        return self._config_sub_data('resonance', OptionSeriesVariablepieSonificationContexttracksMappingLowpassResonance)
def process_crawl(spider, setting=None):
    """Run ``crawl(spider, setting)`` in a separate process and kill it if it
    exceeds the maximum allowed runtime.

    Args:
        spider: the spider instance to run.
        setting: optional settings mapping passed through to ``crawl``.
    """
    # Avoid the shared-mutable-default-argument pitfall of `setting={}`.
    setting = {} if setting is None else setting
    p = Process(target=crawl, args=(spider, setting))
    p.start()
    max_run_time = 30  # minutes
    p.join(max_run_time * 60)
    if p.is_alive():
        # Fix: the original passed max_run_time outside .format(), leaving the
        # second placeholder unfilled (IndexError at runtime).
        logger.warning('kill the spider:{} which has run {} minutes,'.format(type(spider).__name__, max_run_time))
        p.terminate()
def mask_along_axis_(specgrams, num_masks, min_size, max_size, mask_val=None, axis=2):
    """Apply random region masks along ``axis`` of a batch of spectrograms.

    Args:
        specgrams: 4-D tensor — assumes (batch, channel, freq, time); TODO confirm.
        num_masks: masks per example; multiple masks are unioned.
        min_size, max_size: region-size bounds forwarded to ``region_mask``.
        mask_val: fill value for masked positions; when None, masked positions
            are replaced with a per-example mean over the masked axis.
        axis: which axis to mask (2 or 3).

    Returns:
        The masked spectrogram tensor (masking may be in-place when mask_val
        is given, out-of-place otherwise).
    """
    device = specgrams.device
    # Normalize so the masked axis is the last one; undo at the end.
    if axis != 3:
        specgrams = specgrams.transpose(axis, (- 1))
    (n, _, _, a) = specgrams.shape
    if num_masks == 1:
        masks = region_mask(n, min_size, max_size, a, device=device)
    else:
        # Draw num_masks masks per example and union them via amax.
        masks = region_mask((num_masks * n), min_size, max_size, a, device=device).view(num_masks, n, a).amax(dim=0)
    masks = masks.view(n, 1, 1, a)
    if mask_val is not None:
        # Fix: the original tested `if mask_val:`, which sent an explicit
        # mask_val of 0 (or 0.0) to the mean-fill branch instead of filling
        # with zeros.
        specgrams.masked_fill_(masks, mask_val)
    else:
        # Mean of the masked region per example, broadcast back over the mask.
        mask_vals = (specgrams.mul(masks).sum(((- 2), (- 1))) / (masks.sum(((- 2), (- 1))) * specgrams.shape[(- 2)]))
        specgrams = torch.where(masks, mask_vals[(..., None, None)], specgrams)
    if axis == 3:
        return specgrams
    else:
        return specgrams.transpose(axis, (- 1))
def lpe_filename(line_program, file_index):
    """Resolve a DWARF line-program file index to a filename string.

    Returns the bare filename when the file entry's directory index is 0,
    otherwise joins it with the matching include directory.
    """
    header = line_program.header
    entry = header['file_entry'][file_index - 1]
    dir_index = entry['dir_index']
    if dir_index == 0:
        # No directory component recorded for this file entry.
        return entry.name.decode()
    include_dir = header['include_directory'][dir_index - 1]
    return posixpath.join(include_dir, entry.name).decode()
class AskForCard(GenericAction):
    """Ask ``target`` to voluntarily give up one card of class ``card_cls``
    from the given card areas; subclasses decide what happens to it via
    ``process_card``."""

    card_usage = 'drop'

    def __init__(self, source: Character, target: Character, card_cls: Type[PhysicalCard], categories: Sequence[str]=('cards', 'showncards')):
        self.source = source
        self.target = target
        self.card_cls = card_cls
        self.categories = categories
        self.card: Optional[Card] = None

    def apply_action(self):
        if not self.card:
            # Let the target pick; exactly one card must be chosen.
            chosen = user_choose_cards(self, self.target, self.categories)
            if not chosen or len(chosen) != 1:
                self.card = None
                return False
            self.card = chosen[0]
        return self.process_card(self.card)

    def cond(self, cl):
        from thb.cards.base import VirtualCard
        if len(cl) != 1:
            return False
        candidate = cl[0]
        # Must match the requested card class, and either be a virtual card or
        # actually belong to the target.
        if not candidate.is_card(self.card_cls):
            return False
        return candidate.is_card(VirtualCard) or (candidate.resides_in.owner is self.target)

    def process_card(self, card):
        # Subclass responsibility.
        raise NotImplementedError
class TestGetSystemsUserManages():
    """API tests for GET /user/{id}/system-manager: auth, scoping, self-access
    and not-found behavior."""

    # Fix: the decorator line was garbled to `(scope='function')`; `url` is
    # consumed as a fixture by every test below, so restore @pytest.fixture.
    @pytest.fixture(scope='function')
    def url(self, viewer_user) -> str:
        return (V1_URL_PREFIX + f'/user/{viewer_user.id}/system-manager')

    def test_get_systems_managed_by_user_not_authenticated(self, api_client: TestClient, url: str) -> None:
        resp = api_client.get(url, headers={})
        assert (resp.status_code == HTTP_401_UNAUTHORIZED)

    def test_get_systems_managed_by_user_wrong_scope(self, api_client: TestClient, generate_auth_header, url):
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_READ])
        resp = api_client.get(url, headers=auth_header)
        assert (resp.status_code == HTTP_403_FORBIDDEN)

    def test_get_systems_managed_by_self(self, api_client: TestClient, url, viewer_user, system, db) -> None:
        # A user may always list the systems they themselves manage.
        viewer_user.set_as_system_manager(db, system)
        auth_header = generate_auth_header_for_user(viewer_user, [])
        resp = api_client.get(url, headers=auth_header)
        assert (resp.status_code == HTTP_200_OK)
        assert (len(resp.json()) == 1)
        assert (resp.json()[0]['fides_key'] == system.fides_key)

    def test_get_systems_managed_by_other_user(self, api_client: TestClient, url, viewer_user, system, db) -> None:
        # A different user without the read scope must not see someone else's systems.
        viewer_user.set_as_system_manager(db, system)
        another_user = FidesUser.create(db=db, data={'username': 'another_user', 'password': '&%3Qe2fGo7'})
        client = ClientDetail(hashed_secret='thisisatest', salt='thisisstillatest', scopes=SCOPE_REGISTRY, user_id=another_user.id)
        db.add(client)
        db.commit()
        auth_header = generate_auth_header_for_user(another_user, [])
        resp = api_client.get(url, headers=auth_header)
        assert (resp.status_code == HTTP_403_FORBIDDEN)
        client.delete(db=db)
        another_user.delete(db)

    def test_get_systems_managed_by_user_not_found(self, api_client: TestClient, generate_auth_header, url) -> None:
        auth_header = generate_auth_header(scopes=[SYSTEM_MANAGER_READ])
        resp = api_client.get((V1_URL_PREFIX + '/user/bad_user/system-manager'), headers=auth_header)
        assert (resp.status_code == HTTP_404_NOT_FOUND)
        assert (resp.json()['detail'] == 'No user found with id bad_user.')

    def test_get_systems_managed_by_user_none_exist(self, api_client: TestClient, generate_auth_header, url) -> None:
        auth_header = generate_auth_header(scopes=[SYSTEM_MANAGER_READ])
        resp = api_client.get(url, headers=auth_header)
        assert (resp.status_code == HTTP_200_OK)
        assert (resp.json() == [])

    def test_get_systems_managed_by_user(self, api_client: TestClient, generate_auth_header, url, viewer_user, system, db) -> None:
        viewer_user.set_as_system_manager(db, system)
        auth_header = generate_auth_header(scopes=[SYSTEM_MANAGER_READ])
        resp = api_client.get(url, headers=auth_header)
        assert (resp.status_code == HTTP_200_OK)
        assert (len(resp.json()) == 1)
        assert (resp.json()[0]['fides_key'] == system.fides_key)
def visualize_spans(doc: Union[(spacy.tokens.Doc, Dict[(str, str)])], *, spans_key: str='sc', attrs: List[str]=SPAN_ATTRS, show_table: bool=True, title: Optional[str]='Spans', manual: bool=False, displacy_options: Optional[Dict]=None):
    """Render spaCy span annotations (``doc.spans[spans_key]``) in a Streamlit app.

    Args:
        doc: a spaCy Doc, or a pre-rendered dict when ``manual`` is True.
        spans_key: key into ``doc.spans`` selecting the span group to show.
        attrs: span attributes rendered as table columns when ``show_table``.
        show_table: also render a dataframe of span attributes below the visual.
        title: optional Streamlit header text (skipped when falsy).
        manual: forwarded to ``displacy.render`` for manually prepared data.
        displacy_options: extra options for ``displacy.render``; its
            ``spans_key`` entry is always overwritten with ``spans_key``.

    Raises:
        ValueError: when running under spaCy older than 3.3.0.
    """
    if (SPACY_VERSION < Version('3.3.0')):
        raise ValueError(f"'visualize_spans' requires spacy>=3.3.0. You have spacy=={spacy.__version__}")
    if (not displacy_options):
        displacy_options = dict()
    # NOTE(review): this mutates a caller-supplied options dict in place.
    displacy_options['spans_key'] = spans_key
    if title:
        st.header(title)
    if manual:
        # Manual mode expects pre-rendered dict data and no attribute table;
        # warn (but still proceed) when the caller mixes these up.
        if show_table:
            st.warning("When the parameter 'manual' is set to True, the parameter 'show_table' must be set to False.")
        if (not isinstance(doc, dict)):
            st.warning("When the parameter 'manual' is set to True, the parameter 'doc' must be of type 'Dict', not 'spacy.tokens.Doc'.")
    html = displacy.render(doc, style='span', options=displacy_options, manual=manual)
    st.write(f'{get_html(html)}', unsafe_allow_html=True)
    if show_table:
        data = [[str(getattr(span, attr)) for attr in attrs] for span in doc.spans[spans_key]]
        if data:
            df = pd.DataFrame(data, columns=attrs)
            st.dataframe(df)
def generate_time_plot(methods, datasets, runtimes_per_method, colors):
    """Draw grouped, log-scale box plots of per-method runtimes, one box
    color per dataset, and show the figure."""
    num_methods = len(methods)
    num_datasets = len(datasets)
    x_ticks = np.linspace(0.0, 1.0, num_methods)
    width = 0.6 / num_methods / num_datasets
    spacing = 0.4 / num_methods / num_datasets
    fig, ax1 = plt.subplots()
    ax1.set_ylabel('Time [s]', color='b')
    ax1.tick_params('y', colors='b')
    ax1.set_yscale('log')
    fig.suptitle('Hand-Eye Calibration Method Timings', fontsize='24')
    legend_patches = []
    for dataset_idx, dataset in enumerate(datasets):
        runtimes = [runtimes_per_method[dataset][method] for method in methods]
        # Offset each dataset's boxes so the groups sit side by side per method.
        offset = (dataset_idx - (num_datasets / 2.0) + 0.5) * spacing * 2
        bp = ax1.boxplot(runtimes, 0, '', positions=(x_ticks + offset), widths=width)
        for element in ('boxes', 'whiskers', 'caps'):
            plt.setp(bp[element], color=colors[dataset_idx], linewidth=line_width)
        for element in ('fliers', 'medians'):
            plt.setp(bp[element], color=colors[dataset_idx], marker='+', linewidth=line_width)
        legend_patches.append(mpatches.Patch(color=colors[dataset_idx], label=dataset))
    plt.legend(handles=legend_patches, loc=2)
    plt.xticks(x_ticks, methods)
    plt.xlim(x_ticks[0] - (2.5 * spacing * num_datasets), x_ticks[-1] + (2.5 * spacing * num_datasets))
    plt.show()
# Fix: the decorator line was garbled to `.django_db` (a syntax error);
# restored to the standard pytest-django database-access marker.
@pytest.mark.django_db
def test_tas_program_activity_multiple_object_classes(client, tas_mulitple_oc_per_tas):
    """A TAS with multiple object classes per program activity should report
    each object class as a separate child under one program-activity result."""
    tas = '002-X-0000-000'
    submission_year = 2020
    query_params = f'?fiscal_year={submission_year}'
    resp = client.get(url.format(tas=tas, query_params=query_params))
    expected_result = {'fiscal_year': submission_year, 'treasury_account_symbol': tas, 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 1}, 'results': [{'name': 'NAME 4', 'gross_outlay_amount': 111000.0, 'obligated_amount': 11100.0, 'children': [{'gross_outlay_amount': 101000.0, 'name': 'Other', 'obligated_amount': 10100.0}, {'gross_outlay_amount': 10000.0, 'name': 'Other2', 'obligated_amount': 1000.0}]}]}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_result)
class PacketInBodyMiss(base_tests.SimpleDataPlane):
    """Verify that a table-miss packet_in carries the complete packet body
    when miss_send_len is set to its maximum (65535)."""

    def runTest(self):
        logging.info('Running PacketInBodyMiss Test')
        # Fix: dict.keys() returns a view on Python 3 which has no .sort();
        # build a sorted list instead (also correct on Python 2).
        of_ports = sorted(config['port_map'].keys())
        # Clear the flow table so our packet is guaranteed to miss.
        delete_all_flows(self.controller)
        logging.info('Sending set_config_request to set miss_send_len... ')
        req = ofp.message.set_config()
        req.miss_send_len = 65535
        self.controller.message_send(req)
        # Give the switch a moment to apply the config before sending traffic.
        sleep(1)
        pkt = simple_tcp_packet()
        match = parse.packet_to_flow_match(pkt)
        self.assertTrue((match is not None), 'Could not generate flow match from pkt')
        match.wildcards = ofp.OFPFW_ALL
        match.in_port = of_ports[0]
        self.dataplane.send(of_ports[0], str(pkt))
        # The miss should come back as a packet_in with the full payload.
        response = verify_packet_in(self, str(pkt), of_ports[0], ofp.OFPR_NO_MATCH)
        self.assertEqual(response.total_len, len(str(pkt)), 'PacketIn total_len field is incorrect')
        self.assertTrue((len(response.data) == len(str(pkt))), 'Complete Data packet was not sent')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.