code stringlengths 281 23.7M |
|---|
class HelperChrome():
 """Draws the auxiliary UI ("chrome") around the main picker list.

 In wide terminals (> 200 columns) a usage sidebar plus an optional
 file-description pane is drawn on the right; in narrow terminals a
 two-line usage bar is drawn along the bottom instead.
 """
 def __init__(self, printer: ColorPrinter, screen_control: 'Controller', flags: ScreenFlags):
  self.printer = printer
  self.screen_control = screen_control
  self.flags = flags
  self.mode = SELECT_MODE
  # Number of columns reserved for the sidebar in wide mode.
  self.width = 50
  # y coordinate of the last sidebar usage line; the description pane
  # starts just below it (see output_side / output_description_pane).
  self.sidebar_y = 0
  # True when the description pane region contains no stale text.
  self.description_clear = True
  if self.get_is_sidebar_mode():
   logger.add_event('init_wide_mode')
  else:
   logger.add_event('init_narrow_mode')
 def output(self, mode: str) -> None:
  """Render all chrome for *mode*, swallowing curses drawing errors."""
  self.mode = mode
  for func in [self.output_side, self.output_bottom, self.toggle_cursor]:
   try:
    func()
   except curses.error:
    # Drawing can fail on tiny terminals; chrome is best-effort.
    pass
 def output_description(self, line_obj: LineMatch) -> None:
  """Show file metadata for *line_obj* in the description pane."""
  self.output_description_pane(line_obj)
 def toggle_cursor(self) -> None:
  # Visible block cursor only while typing a command.
  if (self.mode == COMMAND_MODE):
   curses.curs_set(BLOCK_CURSOR)
  else:
   curses.curs_set(INVISIBLE_CURSOR)
 def reduce_max_y(self, max_y: int) -> int:
  """Return usable rows: the bottom usage bar costs 4 rows in narrow mode."""
  if self.get_is_sidebar_mode():
   return max_y
  return (max_y - 4)
 def reduce_max_x(self, max_x: int) -> int:
  """Return usable columns: the sidebar costs self.width columns in wide mode."""
  if (not self.get_is_sidebar_mode()):
   return max_x
  return (max_x - self.width)
 def get_min_x(self) -> int:
  # Command mode writes from the left edge; otherwise respect chrome bounds.
  if (self.mode == COMMAND_MODE):
   return 0
  return self.screen_control.get_chrome_boundaries()[0]
 def get_min_y(self) -> int:
  return self.screen_control.get_chrome_boundaries()[1]
 def get_is_sidebar_mode(self) -> bool:
  """Wide-terminal (sidebar) mode kicks in above 200 columns."""
  (_max_y, max_x) = self.screen_control.get_screen_dimensions()
  return (max_x > 200)
 def trim_line(self, line: str, width: int) -> str:
  """Truncate *line* to at most *width* characters."""
  return line[:width]
 def output_description_pane(self, line_obj: LineMatch) -> None:
  """Draw file metadata for *line_obj* below the sidebar usage text."""
  if (not self.get_is_sidebar_mode()):
   return
  (_max_y, max_x) = self.screen_control.get_screen_dimensions()
  border_x = (max_x - self.width)
  start_y = (self.sidebar_y + 1)
  start_x = (border_x + 2)
  header_line = (('Description for ' + line_obj.path) + ' :')
  line_prefix = '  * '
  desc_lines = [line_obj.get_time_last_accessed(), line_obj.get_time_last_modified(), line_obj.get_owner_user(), line_obj.get_owner_group(), line_obj.get_file_size(), line_obj.get_length_in_lines()]
  self.printer.addstr(start_y, start_x, header_line)
  y_pos = (start_y + 2)
  for desc_line in desc_lines:
   # Keep each description line inside the pane.
   desc_line = self.trim_line(desc_line, ((max_x - start_x) - len(line_prefix)))
   self.printer.addstr(y_pos, start_x, (line_prefix + desc_line))
   y_pos = (y_pos + 1)
  self.description_clear = False
 def clear_description_pane(self) -> None:
  """Erase the description pane (no-op when already clear)."""
  if self.description_clear:
   return
  (max_y, max_x) = self.screen_control.get_screen_dimensions()
  border_x = (max_x - self.width)
  start_y = (self.sidebar_y + 1)
  self.printer.clear_square(start_y, (max_y - 1), (border_x + 2), max_x)
  self.description_clear = True
 def output_side(self) -> None:
  """Draw the right-hand usage sidebar and its '|' border (wide mode only)."""
  if (not self.get_is_sidebar_mode()):
   return
  (max_y, max_x) = self.screen_control.get_screen_dimensions()
  border_x = (max_x - self.width)
  if (self.mode == COMMAND_MODE):
   # In command mode the border hugs the prompt instead of the sidebar.
   border_x = (len(SHORT_COMMAND_PROMPT) + 20)
  usage_lines = usage_strings.USAGE_PAGE.split('\n')
  if (self.mode == COMMAND_MODE):
   usage_lines = usage_strings.USAGE_COMMAND.split('\n')
  for (index, usage_line) in enumerate(usage_lines):
   self.printer.addstr((self.get_min_y() + index), (border_x + 2), usage_line)
   # After the loop this holds the y of the last usage line; the
   # description pane is drawn just below it.
   self.sidebar_y = (self.get_min_y() + index)
  for y_pos in range(self.get_min_y(), max_y):
   self.printer.addstr(y_pos, border_x, '|')
 def output_bottom(self) -> None:
  """Draw the bottom border and short usage line (narrow mode only)."""
  if self.get_is_sidebar_mode():
   return
  (max_y, max_x) = self.screen_control.get_screen_dimensions()
  border_y = (max_y - 2)
  # Pick the usage text matching the current interaction mode.
  usage_str = {SELECT_MODE: self.get_short_nav_usage_string(), X_MODE: self.get_short_nav_usage_string(), COMMAND_MODE: SHORT_COMMAND_USAGE}[self.mode]
  # NOTE(review): the trailing '- 0' is a no-op; presumably a leftover
  # from a previous margin adjustment.
  border_str = ('_' * ((max_x - self.get_min_x()) - 0))
  self.printer.addstr(border_y, self.get_min_x(), border_str)
  self.printer.addstr((border_y + 1), self.get_min_x(), usage_str)
 def get_short_nav_usage_string(self) -> str:
  """Return the comma-joined short navigation hints for the bottom bar."""
  nav_options = [SHORT_NAV_OPTION_SELECTION_STR, SHORT_NAV_OPTION_NAVIGATION_STR, SHORT_NAV_OPTION_OPEN_STR, SHORT_NAV_OPTION_QUICK_SELECT_STR, SHORT_NAV_OPTION_COMMAND_STR]
  if self.flags.get_all_input():
   # With --all-input there is nothing to open, so drop that hint.
   nav_options.remove(SHORT_NAV_OPTION_OPEN_STR)
  return ', '.join(nav_options)
def test_ot_span(tracer: Tracer):
    """Nested OT spans map onto one transaction with correct parent links."""
    with tracer.start_as_current_span('test'):
        with tracer.start_as_current_span('testspan', kind=SpanKind.CONSUMER):
            with tracer.start_as_current_span('testspan2'):
                pass
    events = tracer.client.events
    txn = events[constants.TRANSACTION][0]
    # Spans are reported innermost-first, so index 1 is the outer span.
    outer = events[constants.SPAN][1]
    inner = events[constants.SPAN][0]
    # Outer span hangs directly off the transaction.
    assert outer['transaction_id'] == txn['id']
    assert outer['parent_id'] == txn['id']
    assert outer['name'] == 'testspan'
    # Inner span shares the transaction but is parented on the outer span.
    assert inner['transaction_id'] == txn['id']
    assert inner['parent_id'] == outer['id']
    assert inner['name'] == 'testspan2'
class SyslogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that respects datagram size limits.

    For UDP targets the maximum datagram size is derived from the discovered
    path MTU to the target host; records that exceed it are either truncated
    or fragmented into multiple "(cont'd)" datagrams, depending on the
    ``overflow`` behavior. Unix-socket and TCP targets get a generous fixed
    limit with truncation.
    """

    # Overflow behaviors: split long records across datagrams, or cut them off.
    OVERFLOW_BEHAVIOR_FRAGMENT = 0
    OVERFLOW_BEHAVIOR_TRUNCATE = 1

    # Discovered-MTU cache keyed by target host, shared across instances.
    _MINIMUM_MTU_CACHE = {}

    def __init__(self, address=('localhost', logging.handlers.SYSLOG_UDP_PORT), facility=logging.handlers.SysLogHandler.LOG_USER, socket_type=None, overflow=OVERFLOW_BEHAVIOR_FRAGMENT):
        super(SyslogHandler, self).__init__(address, facility, socket_type)
        # Re-read the attributes the base class set, so static analysis (and
        # readers) can see that this class relies on them.
        self.address = getattr(self, 'address')
        self.socket = getattr(self, 'socket')
        self.socktype = getattr(self, 'socktype')
        self.unixsocket = getattr(self, 'unixsocket')
        if ((not self.unixsocket) and (self.socktype == socket.SOCK_DGRAM)):
            # UDP: a datagram larger than the path MTU may be dropped, so cap
            # messages at the discovered MTU minus the datagram header.
            if (address[0] not in self._MINIMUM_MTU_CACHE):
                self._MINIMUM_MTU_CACHE[address[0]] = _discover_minimum_mtu_to_target(address[0], 9999)
            self.maximum_length = (self._MINIMUM_MTU_CACHE[address[0]] - DATAGRAM_HEADER_LENGTH_IN_BYTES)
            self.overflow = overflow
        else:
            # Stream/Unix sockets have no MTU concern; use a 1 MiB cap and
            # always truncate.
            self.maximum_length = 1048576
            self.overflow = self.OVERFLOW_BEHAVIOR_TRUNCATE

    def emit(self, record):
        """Format *record* and send it, fragmenting or truncating as needed.

        Never raises: any failure is routed through ``handleError``.
        """
        try:
            formatted_message = self.format(record)
            encoded_message = formatted_message.encode('utf-8')
            prefix = suffix = b''
            ident = getattr(self, 'ident', None)
            if ident:
                prefix = (ident.encode('utf-8') if isinstance(ident, six.text_type) else ident)
            if getattr(self, 'append_nul', True):
                suffix = '\x00'.encode('utf-8')
            priority = '<{:d}>'.format(self.encodePriority(self.facility, self.mapPriority(record.levelname))).encode('utf-8')
            message_length = len(encoded_message)
            # Budget left for the message body after priority/ident/NUL.
            message_length_limit = (((self.maximum_length - len(prefix)) - len(suffix)) - len(priority))
            if (message_length < message_length_limit):
                parts = [(((priority + prefix) + encoded_message) + suffix)]
            elif (self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE):
                (truncated_message, _) = self._cleanly_slice_encoded_string(encoded_message, message_length_limit)
                parts = [(((priority + prefix) + truncated_message) + suffix)]
            else:
                # Fragment: keep everything before the record's own message
                # (logger name, level, ...) at the start of every fragment.
                try:
                    index = formatted_message.index(record.getMessage()[:40])
                    (start_of_message, to_chunk) = (formatted_message[:index], formatted_message[index:])
                except (TypeError, ValueError):
                    # Couldn't locate the raw message; fall back to a fixed split.
                    (start_of_message, to_chunk) = ('{} '.format(formatted_message[:30]), formatted_message[30:])
                start_of_message_bytes = start_of_message.encode('utf-8')
                to_chunk_bytes = to_chunk.encode('utf-8')
                # Reserve 12 bytes for the "... (cont'd)" trailer.
                chunk_length_limit = ((message_length_limit - len(start_of_message_bytes)) - 12)
                i = 1
                parts = []
                remaining_message = to_chunk_bytes
                while remaining_message:
                    message_id = b''
                    subtractor = 0
                    if (i > 1):
                        # Later fragments also carry a "(cont'd #N) ..." header.
                        message_id = '{}'.format(i).encode('utf-8')
                        subtractor = (14 + len(message_id))
                    (chunk, remaining_message) = self._cleanly_slice_encoded_string(remaining_message, (chunk_length_limit - subtractor))
                    if (i > 1):
                        chunk = (((b"(cont'd #" + message_id) + b') ...') + chunk)
                    i += 1
                    if remaining_message:
                        chunk = (chunk + b"... (cont'd)")
                    parts.append(((((priority + prefix) + start_of_message_bytes) + chunk) + suffix))
            self._send(parts)
        except Exception:
            self.handleError(record)

    def _send(self, parts):
        """Send each assembled datagram/segment over the configured socket."""
        for message in parts:
            if self.unixsocket:
                try:
                    self.socket.send(message)
                except OSError:
                    # Syslog daemon may have restarted; reconnect once and retry.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(message)
            elif (self.socktype == socket.SOCK_DGRAM):
                self.socket.sendto(message, self.address)
            else:
                self.socket.sendall(message)

    # BUG FIX: this helper takes no ``self``; without @staticmethod the
    # ``self._cleanly_slice_encoded_string(a, b)`` calls above would pass the
    # instance as ``encoded_string`` and raise TypeError (3 args for 2 params).
    @staticmethod
    def _cleanly_slice_encoded_string(encoded_string, length_limit):
        """Split UTF-8 *encoded_string* at *length_limit* without cutting a
        multi-byte code point; returns ``(sliced, remaining)`` bytes."""
        sliced = encoded_string[:length_limit]
        remaining = encoded_string[length_limit:]
        try:
            sliced.decode('utf-8')
        except UnicodeDecodeError as e:
            # Move the partial trailing code point into the remainder.
            (sliced, remaining) = (sliced[:e.start], (sliced[e.start:] + remaining))
        return (sliced, remaining)
class ChannelDetailView(ChannelMixin, ObjectDetailView):
    """Detail page for a chat channel, listing its recent log lines."""

    template_name = 'website/channel_detail.html'
    attributes = ['name']
    # Upper bound on log lines read from the channel's log file.
    max_num_lines = 10000

    def get_context_data(self, **kwargs):
        """Add parsed log entries ('object_list') and their hour keys ('object_filters')."""
        context = super().get_context_data(**kwargs)
        channel = self.object
        filename = channel.get_log_filename()
        bucket = []
        for log in (line.strip() for line in tail_log_file(filename, 0, self.max_num_lines)):
            if not log:
                continue
            try:
                # Log lines look like '<timestamp> [-] <message>'.
                timestamp, msg = log.split(' [-] ')
                # Group key is everything before the first ':' in the timestamp.
                time_key = timestamp.split(':')[0]
            except ValueError:
                # Skip malformed lines missing the ' [-] ' separator.
                continue
            bucket.append({'key': time_key, 'timestamp': timestamp, 'message': msg})
        context['object_list'] = bucket
        context['object_filters'] = sorted(set(entry['key'] for entry in bucket))
        return context

    def get_object(self, queryset=None):
        """Resolve the channel whose slugified db_key matches the URL slug.

        NOTE(review): HttpResponseBadRequest is an HttpResponse, not an
        exception — raising it will itself raise TypeError at runtime; this
        should likely be Http404 instead. Left as-is pending confirmation.
        """
        # Compare against None (Django convention): an explicitly-passed empty
        # queryset must not be silently replaced by get_queryset(), and
        # truthiness on a queryset would force an extra evaluation.
        if queryset is None:
            queryset = self.get_queryset()
        channel = slugify(self.kwargs.get('slug', ''))
        obj = next((x for x in queryset if slugify(x.db_key) == channel), None)
        if not obj:
            raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})
        return obj
class perm021fc_ccr_bias_permute(perm021fc_ccr_bias):
 """perm021fc_ccr_bias fused with a permute of the output.

 Only the '021' output layout ('Permute3DBMM_021', i.e. swapping the last
 two dims) is implemented; other layouts raise NotImplementedError.
 """
 def __init__(self, layout='021'):
  super().__init__()
  self._attrs['op'] = 'perm021fc_ccr_bias_permute'
  self._attrs['shape'] = [0]
  # Layout string consumed by the backend, e.g. 'Permute3DBMM_021'.
  self._attrs['layout'] = 'Permute3DBMM_{}'.format(layout)
 def __call__(self, a: Tensor, b: Tensor, bias: Tensor) -> Tensor:
  """Build the fused matmul+bias+permute; returns the permuted output tensor."""
  (a, b) = self._align_ab(a, b)
  self._attrs['inputs'] = [a, b, bias]
  self._attrs['input_accessors'] = [TensorAccessor(tensor) for tensor in self._attrs['inputs']]
  self._set_depth()
  self._sanity_check(a, b)
  output_shape = self._infer_shapes(a, b, bias)
  self._extract_epilogue_alignment(output_shape)
  output = Tensor(output_shape, src_ops={self}, dtype=a._attrs['dtype'])
  self._attrs['outputs'] = [output]
  self._attrs['output_accessors'] = [TensorAccessor(output)]
  if (self._attrs['layout'] == 'Permute3DBMM_021'):
   # The 021 permute swaps the last two dims of the bmm output.
   (b, m, n) = output_shape
   output_shape = [b, n, m]
   # NOTE(review): alignment is reset to 1 here, presumably because the
   # permuted layout invalidates the epilogue alignment computed above —
   # confirm against the backend's expectations.
   self._attrs['epilogue_alignment'] = 1
   return reshape()(output, output_shape)
  else:
   raise NotImplementedError('{} is not implemented!'.format(self._attrs['layout']))
 def _get_op_attributes(self):
  # Recover the constructor's 'layout' argument from the stored layout string.
  return {'layout': self._attrs['layout'].split('_')[(- 1)]}
class ModuleEnableNameValidator(object):
    """Validates the comma-separated ``module:stream`` list in ``module_toggle``.

    Raises ValidationError on the first malformed, duplicate, or
    badly-charactered entry; an empty entry short-circuits the whole check.
    """

    # Module/stream names: first char alphanumeric, '-', '_' or '!';
    # remainder anything except a space. Hoisted out of the loop — the
    # original re-compiled it per entry (and double-wrapped it in
    # re.compile(re.compile(...))).
    _NAME_PATTERN = re.compile('^([a-zA-Z0-9-_!][^\\ ]*)$')

    def __call__(self, form, field):
        pattern = self._NAME_PATTERN
        already_enabled = set()
        for module in form.module_toggle.data.split(','):
            if (module == ''):
                # Preserves original behavior: an empty entry ends validation.
                return True
            try:
                (module_name, stream) = module.strip().split(':')
            except ValueError:
                raise ValidationError(message="Module name '{0}' must consist of two parts separated with colon, e.g. module:stream".format(module))
            if (pattern.match(module_name) is None):
                raise ValidationError(message="Module name '{0}' must contain only letters, digits, dashes, underscores.".format(module_name))
            if (module_name in already_enabled):
                raise ValidationError("Module name '{0}' specified multiple times".format(module_name))
            already_enabled.add(module_name)
            if (pattern.match(stream) is None):
                raise ValidationError(message="Stream part of module name '{0}' must contain only letters, digits, dashes, underscores.".format(stream))
def extractThemanwithoutriceWordpressCom(item):
    """Parse a 'themanwithoutrice.wordpress.com' feed item into a release.

    Returns None for previews/untagged items, a release message for known
    tags, or False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def cmd_run(jobs: Jobs, reqid: RequestID, *, copy: bool=False, force: bool=False) -> None:
    """Run the job for *reqid* immediately, or queue it if its worker is live."""
    if copy:
        raise NotImplementedError
    if force:
        raise NotImplementedError
    if not reqid:
        raise NotImplementedError
    queue = jobs.queues[reqid.workerid]
    if queue.paused:
        # Queue is paused: run the job directly and wait for SSH to come up.
        job = _cmd_run(jobs, reqid)
        job.check_ssh(onunknown='wait:3')
    else:
        cmd_queue_push(jobs, reqid)
class perm021fc_ccr(bmm):
 """Batched matmul variant reading A with its last two dims permuted (021).

 Per _infer_shapes the output is [B, A.shape[2], B.shape[1]], i.e. the K
 dimension is A.shape[1] / B.shape[2].
 """
 def __init__(self):
  super().__init__()
  self._attrs['op'] = 'perm021fc_ccr'
  def cal_align_ab(m, n, k):
   # n is unused: alignment only depends on m, k and the input dtype.
   return common.default_align_ab(m, k, self._attrs['inputs'][0].dtype())
  self._attrs['f_ab_alignment'] = cal_align_ab
 def _infer_shapes(self, a: Tensor, b: Tensor):
  """Return [batch, M, N] from A [B, K, M] and B [B, N, K]; batch dims may broadcast from 1."""
  a_shapes = a._attrs['shape']
  b_shapes = b._attrs['shape']
  batch_size_a = a_shapes[0]
  batch_size_b = b_shapes[0]
  if ((batch_size_a != batch_size_b) and (batch_size_a != IntImm(1)) and (batch_size_b != IntImm(1))):
   raise RuntimeError('bmm operand A and B should have same batch_size, or batch_size = 1! Current shape A: {} shape B: {} .'.format(a_shapes, b_shapes))
  # Broadcast: take whichever batch dim is not 1.
  batch_size = (batch_size_b if (batch_size_a == IntImm(1)) else batch_size_a)
  return [batch_size, a_shapes[2], b_shapes[1]]
 def _extract_dims(self, for_profiling=False):
  # Map the B/M/N/K problem dims onto input/output tensor axes for profiling.
  return {'B': [common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=0)], 'M': [common.DimInfo(common.Source.INPUT, tensor_idx=0, dim_idx=2), common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=1)], 'N': [common.DimInfo(common.Source.INPUT, tensor_idx=1, dim_idx=1), common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=2)], 'K': [common.DimInfo(common.Source.INPUT, tensor_idx=0, dim_idx=1), common.DimInfo(common.Source.INPUT, tensor_idx=1, dim_idx=2)]}
 def _invert_exec_key(self, key):
  # Recover (B, M, N, K) from a profiler exec key.
  return common.gemm_inverse_key_func(key)
 def _gen_profile_cmd(self, profiler_prefix, cfg, exec_key):
  """Build the profiler command line for this op's (B, M, N, K) problem."""
  def fbuild_cmd(exec_key):
   (B, M, N, K) = self._invert_exec_key(exec_key)
   cmd = []
   cmd.append(B)
   cmd.append(M)
   cmd.append(N)
   cmd.append(K)
   return cmd
  return super()._gen_profile_cmd(profiler_prefix, cfg, exec_key, fbuild_cmd)
 def _align_ab(self, a: Tensor, b: Tensor):
  """Zero-pad the K dim of A and B when its alignment is invalid.

  K must be a static (IntImm) dimension; A carries K at dim 1 and B at dim 2.
  """
  a_shape = a._attrs['shape']
  b_shape = b._attrs['shape']
  ak = a_shape[1]
  bk = b_shape[2]
  if (ak != bk):
   raise RuntimeError(f'A/B shape mismatch, ak: {ak}, bk: {bk}, a_shape: {a_shape}, b_shape: {b_shape}')
  if (not isinstance(bk, IntImm)):
   raise RuntimeError('Last dim K must be static! Current shape: {}'.format(b_shape))
  k = ak._attrs['values'][0]
  # NOTE(review): the alignment check is fed (k % 2); presumably odd K is
  # the only misaligned case here — confirm against alignment.valid_alignment.
  if (not alignment.valid_alignment((k % 2), a.dtype())):
   # Pad K up to the next multiple of 8 with zero tensors, concatenated
   # along each operand's K axis.
   pad_k = int((((k // 8) + 1) * 8))
   pad_a = _create_host_zero_tensor(shape=[a_shape[0], IntImm((pad_k - k)), a_shape[2]], dtype=a.dtype())
   pad_b = _create_host_zero_tensor(shape=[b_shape[0], b_shape[1], IntImm((pad_k - k))], dtype=b.dtype())
   cat_a = concatenate()
   cat_b = concatenate()
   a = cat_a([a, pad_a], dim=1)
   b = cat_b([b, pad_b], dim=2)
  return (a, b)
def test_massW1W1(W):
    """Mass form on the second subspace only populates the (1, 1) block."""
    u = TrialFunction(W)[1]
    v = TestFunction(W)[1]
    mass = assemble(inner(u, v) * dx)
    assert mass.M.sparsity.shape == (2, 2)
    # Every off-target block must be identically zero.
    for block in [(0, 0), (1, 0), (0, 1)]:
        assert np.allclose(mass.M[block].values, 0.0)
    assert not np.allclose(mass.M[(1, 1)].values, 0.0)
def test_open_plugin_settings(preference, plugin_engine):
    """Activating a plugin enables its settings button; clicking it parents the plugin UI."""
    plugin_engine.collect()
    preference.run()
    plugin_list = Q.select(preference.widget, Q.props('name', 'plugin.list'))
    # Toggle the 'Active' checkbox of the first plugin row.
    Q.map(plugin_list, TV.column(Q.props('title', 'Active')), TV.cell_renderer(0), Q.emit('toggled', 0))
    settings_button = Q.select(preference.widget, Q.props('name', 'plugin.settings'))
    assert settings_button.props.sensitive is True
    settings_button.emit('clicked')
    plugin_a = plugin_engine.lookup('PluginA')
    assert plugin_a.plugin_object.parent == preference.widget
def test_chunk_message_ordering():
    """Popping drains the queue in log-time order across chunks and messages."""
    queue = MessageQueue(log_time_order=True)
    push_elements(queue)
    drained: List[QueueItem] = []
    while queue:
        drained.append(queue.pop())
    # (expected type, key): ChunkIndex entries are checked by start time,
    # message tuples by the value at index 2.
    expected = [
        (ChunkIndex, 1),
        (ChunkIndex, 3),
        (tuple, 10),
        (tuple, 20),
        (ChunkIndex, 4),
        (tuple, 30),
    ]
    assert len(drained) >= len(expected)
    for item, (expected_type, key) in zip(drained, expected):
        assert isinstance(item, expected_type)
        if expected_type is ChunkIndex:
            assert item.message_start_time == key
        else:
            assert item[2] == key
def render() -> None:
    """Build the frame-processors checkbox group and register it as a UI component."""
    global FRAME_PROCESSORS_CHECKBOX_GROUP
    frame_processors = facefusion.globals.frame_processors
    FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label=wording.get('frame_processors_checkbox_group_label'),
        choices=sort_frame_processors(frame_processors),
        value=frame_processors,
    )
    register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
class FlowStorePipeSplit(FlowStorePipe):
    """Pipe that records lifecycle hooks as parallel events and pipe hooks linearly."""

    async def open_request(self):
        # Lifecycle events may interleave, so they are stored as parallel.
        self.store_parallel('open_request')

    async def open_ws(self):
        self.store_parallel('open_ws')

    async def close_request(self):
        self.store_parallel('close_request')

    async def close_ws(self):
        self.store_parallel('close_ws')

    async def pipe_request(self, next_pipe, **kwargs):
        # Pipe calls are strictly ordered, so they are stored linearly.
        self.store_linear('pipe_request')
        result = await next_pipe(**kwargs)
        return result

    async def pipe_ws(self, next_pipe, **kwargs):
        self.store_linear('pipe_ws')
        result = await next_pipe(**kwargs)
        return result
class OptionSeriesColumnrangeSonificationTracksPointgrouping(Options):
 """Config wrapper for series.columnrange.sonification.tracks.pointGrouping.

 NOTE(review): each option appears as a getter/setter pair with the same
 name; without @property/@<name>.setter decorators the later def shadows
 the earlier one, leaving only the setter reachable. Presumably the
 decorators were stripped from the original source — confirm upstream.
 """
 def algorithm(self):
  # Getter: grouping algorithm; default 'minmax'.
  return self._config_get('minmax')
 def algorithm(self, text: str):
  # Setter (shadows the getter above — see class NOTE).
  self._config(text, js_type=False)
 def enabled(self):
  # Getter: whether point grouping is enabled; default True.
  return self._config_get(True)
 def enabled(self, flag: bool):
  # Setter (shadows the getter above — see class NOTE).
  self._config(flag, js_type=False)
 def groupTimespan(self):
  # Getter: grouping timespan; default 15.
  return self._config_get(15)
 def groupTimespan(self, num: float):
  # Setter (shadows the getter above — see class NOTE).
  self._config(num, js_type=False)
 def prop(self):
  # Getter: point property used for grouping; default 'y'.
  return self._config_get('y')
 def prop(self, text: str):
  # Setter (shadows the getter above — see class NOTE).
  self._config(text, js_type=False)
class EnumStepTypes(Enums):
 """Enumeration of line interpolation / step types.

 Each method sets the underlying option value. NOTE(review): several
 variants call _set_value() with no argument (linear, basis, bundle,
 cardinal, monotone, step) — presumably _set_value defaults to the calling
 method's name; confirm against the Enums base class. The mix of
 positional and value= keyword calls below is inconsistent but harmless.
 """
 def linear(self):
  self._set_value()
 def linear_closed(self):
  self._set_value('linear-closed')
 def basis(self):
  self._set_value()
 def basis_open(self):
  self._set_value('basis-open')
 def basis_closed(self):
  self._set_value('basis-closed')
 def bundle(self):
  self._set_value()
 def cardinal(self):
  self._set_value()
 def cardinal_open(self):
  self._set_value(value='cardinal-open')
 def cardinal_closed(self):
  self._set_value(value='cardinal-closed')
 def monotone(self):
  self._set_value()
 def step(self):
  self._set_value()
 def step_before(self):
  self._set_value(value='step-before')
 def step_after(self):
  self._set_value(value='step-after')
_numba
def test_spline_jacobian_implementations():
    """numpy and numba Spline engines must produce matching Jacobians."""
    data = CheckerBoard().scatter(size=1500, random_state=1)
    coordinates = (data.easting, data.northing)
    jacobians = {
        engine: Spline(engine=engine).jacobian(coordinates, coordinates)
        for engine in ('numpy', 'numba')
    }
    npt.assert_allclose(jacobians['numpy'], jacobians['numba'])
def sync_benchmark(func: Callable[(..., Any)], n: int) -> Union[(float, str)]:
    """Time *n* sequential calls of *func*.

    Returns elapsed seconds as a float, or the string 'N/A' if any call fails.
    """
    try:
        start = timeit.default_timer()
        for _ in range(n):
            func()
        return timeit.default_timer() - start
    except Exception:
        # Best-effort benchmarking: any failure in the callable yields 'N/A'.
        return 'N/A'
('update', help='Performs an update operation on current bench. Without any flags will backup, pull, setup requirements, build, run patches and restart bench. Using specific flags will only do certain tasks instead of all')
('--pull', is_flag=True, help='Pull updates for all the apps in bench')
('--apps', type=str)
('--patch', is_flag=True, help='Run migrations for all sites in the bench')
('--build', is_flag=True, help='Build JS and CSS assets for the bench')
('--requirements', is_flag=True, help='Update requirements. If run alone, equivalent to `bench setup requirements`')
('--restart-supervisor', is_flag=True, help='Restart supervisor processes after update')
('--restart-systemd', is_flag=True, help='Restart systemd units after update')
('--no-backup', is_flag=True, help="If this flag is set, sites won't be backed up prior to updates. Note: This is not recommended in production.")
('--no-compile', is_flag=True, help="[DEPRECATED] This flag doesn't do anything now.")
('--force', is_flag=True, help='Forces major version upgrades')
('--reset', is_flag=True, help="Hard resets git branch's to their new states overriding any changes and overriding rebase on pull")
def update(pull, apps, patch, build, requirements, restart_supervisor, restart_systemd, no_backup, no_compile, force, reset):
    """CLI entry point: delegate to bench.utils.bench.update, inverting the no_* flags."""
    from bench.utils.bench import update
    update(
        pull=pull,
        apps=apps,
        patch=patch,
        build=build,
        requirements=requirements,
        restart_supervisor=restart_supervisor,
        restart_systemd=restart_systemd,
        backup=(not no_backup),
        compile=(not no_compile),
        force=force,
        reset=reset,
    )
class OptionSeriesBoxplotSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
 """Config wrapper for series.boxplot.sonification.defaultInstrumentOptions.mapping.noteDuration.

 NOTE(review): each option appears as a getter/setter pair with the same
 name; without @property/@<name>.setter decorators the later def shadows
 the earlier one, leaving only the setter reachable. Presumably the
 decorators were stripped from the original source — confirm upstream.
 """
 def mapFunction(self):
  # Getter: mapping function; no default.
  return self._config_get(None)
 def mapFunction(self, value: Any):
  # Setter (shadows the getter above — see class NOTE).
  self._config(value, js_type=False)
 def mapTo(self):
  # Getter: point property to map to; no default.
  return self._config_get(None)
 def mapTo(self, text: str):
  # Setter (shadows the getter above — see class NOTE).
  self._config(text, js_type=False)
 def max(self):
  # Getter: mapping maximum; no default.
  return self._config_get(None)
 def max(self, num: float):
  # Setter (shadows the getter above — see class NOTE).
  self._config(num, js_type=False)
 def min(self):
  # Getter: mapping minimum; no default.
  return self._config_get(None)
 def min(self, num: float):
  # Setter (shadows the getter above — see class NOTE).
  self._config(num, js_type=False)
 def within(self):
  # Getter: 'within' constraint; no default.
  return self._config_get(None)
 def within(self, value: Any):
  # Setter (shadows the getter above — see class NOTE).
  self._config(value, js_type=False)
def generate_gradio_app(model_path_or_url: str, examples: List[str], author_username: str=None) -> str:
 """Return the source code of a Gradio demo app for the given video model.

 The model is loaded here only so its label list can be baked into the
 generated script. The returned f-string interpolates {model.labels},
 {model_path_or_url}, {examples} and the optional author credit at
 generation time, so the emitted app code contains literal values.

 NOTE(review): the author-credit line embeds double quotes inside an
 f-string expression, which requires Python 3.12 (PEP 701) to parse —
 confirm the supported interpreter range.
 """
 from video_transformers import VideoModel
 model = VideoModel.from_pretrained(model_path_or_url)
 return f'''
import gradio as gr
from video_transformers import VideoModel

model: VideoModel = VideoModel.from_pretrained("{model_path_or_url}")

app = gr.Blocks()
with app:
    gr.Markdown("# **<p align='center'>Video Classification with Transformers</p>**")
    gr.Markdown("This space demonstrates the use of hybrid Transformer-based models for video classification.")
    gr.Markdown(f"The model is trained to classify videos belonging to the following classes: {model.labels}")
    with gr.Tabs():
        with gr.TabItem("Upload & Predict"):
            with gr.Box():
                with gr.Row():
                    input_video = gr.Video(label="Input Video", show_label=True)
                    output_label = gr.Label(label="Model Output", show_label=True)
    gr.Markdown("**Predict**")
    with gr.Box():
        with gr.Row():
            submit_button = gr.Button("Submit")
    gr.Markdown("**Examples:**")
    # gr.Markdown("CricketShot, PlayingCello, Punch, ShavingBeard, TennisSwing")
    with gr.Column():
        gr.Examples({examples}, [input_video], [output_label], model.predict, cache_examples=True)
    submit_button.click(model.predict, inputs=input_video, outputs=[output_label])
    gr.Markdown("**Note:** The model is trained to classify videos belonging to the following classes: {model.labels}")
    gr.Markdown("**Credits:**")
    gr.Markdown("This space is powered by [video-transformers]('
    gr.Markdown("{((("This model is finetuned by '" + author_username) + "'.") if author_username else '')}")

app.launch()
'''
def get_random_raw_url_group(num_items):
    """Sample URLs from raw_web_pages and pair each with its filtered form.

    NOTE: ``num_items`` is bound to the TABLESAMPLE percentage in the query,
    not to a row count.
    """
    rows = list(g.session.execute('SELECT url FROM raw_web_pages TABLESAMPLE SYSTEM(:percentage) ORDER BY url;', {'percentage': num_items}))
    return [(linkurl, raw_url_filtered(linkurl)) for (linkurl,) in rows]
class ImageFormatTest(unittest.TestCase):
 """Unit tests for ImageFormat: argument/attribute validation for width,
 height, pixel_aspect and print_resolution, plus derived device_aspect,
 equality, and hashing."""
 def setUp(self):
  """Build a reference 'HD' ImageFormat and the kwargs used to create it."""
  super(ImageFormatTest, self).setUp()
  self.kwargs = {'name': 'HD', 'width': 1920, 'height': 1080, 'pixel_aspect': 1.0, 'print_resolution': 300}
  self.test_image_format = ImageFormat(**self.kwargs)
 def test___auto_name__class_attribute_is_set_to_False(self):
  assert (ImageFormat.__auto_name__ is False)
 def test_width_argument_accepts_int_or_float_only(self):
  test_value = '1920'
  self.kwargs['width'] = test_value
  with pytest.raises(TypeError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.width should be an instance of int or float not str')
 def test_width_attribute_int_or_float(self):
  test_value = '1920'
  with pytest.raises(TypeError) as cm:
   self.test_image_format.width = test_value
  assert (str(cm.value) == 'ImageFormat.width should be an instance of int or float not str')
 def test_width_argument_float_to_int_conversion(self):
  self.kwargs['width'] = 1920.0
  an_image_format = ImageFormat(**self.kwargs)
  assert isinstance(an_image_format.width, int)
 def test_width_attribute_float_to_int_conversion(self):
  self.test_image_format.width = 1920.0
  assert isinstance(self.test_image_format.width, int)
 def test_width_argument_being_zero(self):
  self.kwargs['width'] = 0
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.width cannot be zero or negative')
 def test_width_attribute_being_zero(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.width = 0
  assert (str(cm.value) == 'ImageFormat.width cannot be zero or negative')
 def test_width_argument_being_negative(self):
  self.kwargs['width'] = (- 10)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.width cannot be zero or negative')
 def test_width_attribute_being_negative(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.width = (- 100)
  assert (str(cm.value) == 'ImageFormat.width cannot be zero or negative')
 def test_height_argument_int_or_float(self):
  test_value = '1080'
  self.kwargs['height'] = test_value
  with pytest.raises(TypeError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.height should be an instance of int or float not str')
 def test_height_attribute_int_or_float(self):
  test_value = '1080'
  with pytest.raises(TypeError) as cm:
   self.test_image_format.height = test_value
  assert (str(cm.value) == 'ImageFormat.height should be an instance of int or float not str')
 def test_height_argument_float_to_int_conversion(self):
  self.kwargs['height'] = 1080.0
  an_image_format = ImageFormat(**self.kwargs)
  assert isinstance(an_image_format.height, int)
 def test_height_attribute_float_to_int_conversion(self):
  self.test_image_format.height = 1080.0
  assert isinstance(self.test_image_format.height, int)
 def test_height_argument_being_zero(self):
  self.kwargs['height'] = 0
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.height cannot be zero or negative')
 def test_height_attribute_being_zero(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.height = 0
  assert (str(cm.value) == 'ImageFormat.height cannot be zero or negative')
 def test_height_argument_being_negative(self):
  self.kwargs['height'] = (- 10)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.height cannot be zero or negative')
 def test_height_attribute_being_negative(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.height = (- 100)
  assert (str(cm.value) == 'ImageFormat.height cannot be zero or negative')
 def test_device_aspect_attribute_float(self):
  assert isinstance(self.test_image_format.device_aspect, float)
 def test_device_aspect_ratio_correctly_calculated(self):
  # device_aspect == (width / height) * pixel_aspect for both HD and PAL.
  self.kwargs.update({'name': 'HD', 'width': 1920, 'height': 1080, 'pixel_aspect': 1.0, 'print_resolution': 300})
  an_image_format = ImageFormat(**self.kwargs)
  assert (('%1.4g' % an_image_format.device_aspect) == ('%1.4g' % 1.7778))
  self.kwargs.update({'name': 'PAL', 'width': 720, 'height': 576, 'pixel_aspect': 1.0667, 'print_resolution': 300})
  an_image_format = ImageFormat(**self.kwargs)
  assert (('%1.4g' % an_image_format.device_aspect) == ('%1.4g' % 1.3333))
 def test_device_aspect_attribute_updates(self):
  # Changing width/height/pixel_aspect must recompute device_aspect.
  self.kwargs.update({'name': 'PAL', 'width': 720, 'height': 576, 'pixel_aspect': 1.0667, 'print_resolution': 300})
  an_image_format = ImageFormat(**self.kwargs)
  previous_device_aspect = an_image_format.device_aspect
  an_image_format.width = 1920
  an_image_format.height = 1080
  an_image_format.pixel_aspect = 1.0
  assert (abs((an_image_format.device_aspect - 1.77778)) < 0.001)
  assert (an_image_format.device_aspect != previous_device_aspect)
 def test_device_aspect_attribute_write_protected(self):
  # NOTE(review): the expected message matches CPython <= 3.10; on 3.11+
  # read-only properties raise "property '...' of '...' object has no
  # setter" — confirm the supported interpreter range.
  with pytest.raises(AttributeError) as cm:
   self.test_image_format.device_aspect = 10
  assert (str(cm.value) == "can't set attribute")
 def test_pixel_aspect_int_float(self):
  self.kwargs['pixel_aspect'] = '1.0'
  with pytest.raises(TypeError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect should be an instance of int or float not str')
 def test_pixel_aspect_int_float_2(self):
  self.kwargs['pixel_aspect'] = 1.0
  ImageFormat(**self.kwargs)
 def test_pixel_aspect_int_float_3(self):
  self.kwargs['pixel_aspect'] = 2
  ImageFormat(**self.kwargs)
 def test_pixel_aspect_float_conversion(self):
  self.kwargs['pixel_aspect'] = 1
  an_image_format = ImageFormat(**self.kwargs)
  assert isinstance(an_image_format.pixel_aspect, float)
 def test_pixel_aspect_argument_zero(self):
  self.kwargs['pixel_aspect'] = 0
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_attribute_zero(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.pixel_aspect = 0
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_argument_negative_float(self):
  self.kwargs['pixel_aspect'] = (- 1.0)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_argument_negative_int(self):
  self.kwargs['pixel_aspect'] = (- 1)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_attribute_negative_integer(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.pixel_aspect = (- 1.0)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_attribute_negative_float(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.pixel_aspect = (- 1)
  assert (str(cm.value) == 'ImageFormat.pixel_aspect cannot be zero or a negative value')
 def test_pixel_aspect_attribute_if_being_initialized_correctly(self):
  # Omitting pixel_aspect must fall back to the 1.0 default.
  self.kwargs.pop('pixel_aspect')
  an_image_format = ImageFormat(**self.kwargs)
  default_value = 1.0
  assert (an_image_format.pixel_aspect == default_value)
 def test_print_resolution_omit(self):
  self.kwargs.pop('print_resolution')
  imf = ImageFormat(**self.kwargs)
  assert isinstance(imf.print_resolution, float)
 def test_print_resolution_argument_accepts_int_float_only(self):
  self.kwargs['print_resolution'] = '300.0'
  with pytest.raises(TypeError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.print_resolution should be an instance of int or float not str')
 def test_print_resolution_argument_accepts_int_float_only_2(self):
  self.kwargs['print_resolution'] = 300
  imf = ImageFormat(**self.kwargs)
  assert isinstance(imf.print_resolution, float)
 def test_print_resolution_argument_accepts_int_float_only_3(self):
  self.kwargs['print_resolution'] = 300.0
  imf = ImageFormat(**self.kwargs)
  assert isinstance(imf.print_resolution, float)
 def test_print_resolution_argument_zero(self):
  self.kwargs['print_resolution'] = 0
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_print_resolution_attribute_zero(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.print_resolution = 0
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_print_resolution_argument_negative_int(self):
  self.kwargs['print_resolution'] = (- 300)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_print_resolution_argument_negative_float(self):
  self.kwargs['print_resolution'] = (- 300.0)
  with pytest.raises(ValueError) as cm:
   ImageFormat(**self.kwargs)
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_print_resolution_attribute_negative_int(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.print_resolution = (- 300)
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_print_resolution_attribute_negative_float(self):
  with pytest.raises(ValueError) as cm:
   self.test_image_format.print_resolution = (- 300.0)
  assert (str(cm.value) == 'ImageFormat.print_resolution cannot be zero or negative')
 def test_equality(self):
  image_format1 = ImageFormat(**self.kwargs)
  image_format2 = ImageFormat(**self.kwargs)
  self.kwargs.update({'width': 720, 'height': 480, 'pixel_aspect': 0.888888})
  image_format3 = ImageFormat(**self.kwargs)
  assert (image_format1 == image_format2)
  assert (not (image_format1 == image_format3))
 def test_inequality(self):
  image_format1 = ImageFormat(**self.kwargs)
  image_format2 = ImageFormat(**self.kwargs)
  self.kwargs.update({'name': 'NTSC', 'description': 'The famous NTSC image format', 'width': 720, 'height': 480, 'pixel_aspect': 0.888888})
  image_format3 = ImageFormat(**self.kwargs)
  assert (not (image_format1 != image_format2))
  assert (image_format1 != image_format3)
 def test_plural_class_name(self):
  assert (self.test_image_format.plural_class_name == 'ImageFormats')
 def test_hash_value(self):
  # Hash is the documented combination of id, name, and entity_type hashes.
  assert (self.test_image_format.__hash__() == ((hash(self.test_image_format.id) + (2 * hash(self.test_image_format.name))) + (3 * hash(self.test_image_format.entity_type))))
def test_base_equals_has_expected_repr():
    """repr(BaseEquals) reflects the base value and the with_sub flag."""
    cases = [
        (BaseEquals('foo'), "<BaseEquals (base == 'foo')>"),
        (BaseEquals('foo', with_sub=True), "<BaseEquals (base == 'foo' and sub is not None)>"),
        (BaseEquals('foo', with_sub=False), "<BaseEquals (base == 'foo' and sub is None)>"),
    ]
    for obj, expected in cases:
        assert repr(obj) == expected
class OptionSeriesTimelineSonificationContexttracksMappingFrequency(Options):
    """Accessor class for the sonification context-track frequency mapping
    options.

    BUG FIX: each getter/setter pair was declared as two plain methods with
    the same name, so the second ``def`` silently replaced the first and the
    getters were unreachable (calling e.g. ``obj.mapFunction()`` invoked the
    setter). Restore property semantics with ``@property`` / ``@<name>.setter``.
    """

    @property
    def mapFunction(self):
        """Return the configured mapping function (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Return the configured map-to target (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Return the configured maximum (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Return the configured minimum (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Return the configured 'within' value (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_opaque_paint():
    """opaque_paint replaces matching pixels, honouring invert and channel."""
    pink = Color('pink')
    white = Color('white')
    fuzz_ratio = 0.25
    with Image(filename='WIZARD:') as img:
        img.opaque_paint(target=white, fill=pink,
                         fuzz=fuzz_ratio * img.quantum_range)
        assert img[0, 0] == pink
    with Image(filename='WIZARD:') as img:
        img.opaque_paint(target='white', fill='pink',
                         fuzz=fuzz_ratio * img.quantum_range, invert=True)
        assert img[0, 0] == white
    with Image(filename='WIZARD:') as img:
        signature_before = img.signature
        img.opaque_paint(target='white', fill='pink',
                         fuzz=fuzz_ratio * img.quantum_range, channel='red')
        assert img.signature != signature_before
class BhToggleStringEscapeModeCommand(sublime_plugin.TextCommand):
    """Toggle the bracket string escape mode between 'string' and 'regex'."""

    def run(self, edit):
        """Flip the view-level setting and report the new mode in the status bar."""
        view_settings = self.view.settings()
        default_mode = sublime.load_settings('bh_core.sublime-settings').get(
            'bracket_string_escape_mode', 'string'
        )
        current_mode = view_settings.get('bracket_highlighter.bracket_string_escape_mode', default_mode)
        new_mode = 'string' if current_mode == 'regex' else 'regex'
        view_settings.set('bracket_highlighter.bracket_string_escape_mode', new_mode)
        sublime.status_message('Bracket String Escape Mode: ' + new_mode)
_os(*metadata.platforms)
def main():
    """Emulate COR_PROFILER persistence and mmc->powershell execution.

    Transiently writes the COR_PROFILER_PATH environment registry value
    (reverted on context exit), then runs a renamed test binary as mmc.exe
    spawning powershell, and cleans up the copied file.
    """
    reg_key = 'Environment'
    reg_value = 'COR_PROFILER_PATH'
    reg_data = 'temp.dll'
    # The registry value only needs to exist momentarily to be observed.
    with common.temporary_reg(common.HKCU, reg_key, reg_value, reg_data):
        pass
    mmc_path = 'C:\\Users\\Public\\mmc.exe'
    powershell_path = 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe'
    common.copy_file(EXE_FILE, mmc_path)
    common.execute([mmc_path, '/c', powershell_path], timeout=2, kill=True)
    common.remove_files(mmc_path)
class ModeCategoryContribution(StrictBaseModelWithAlias):
    """Contribution of one category to a mode, with provenance metadata."""
    # Provenance fields; aliased so serialized keys keep the leading
    # underscore (e.g. "_source").
    source: (str | None) = Field(None, alias='_source')
    comment: (str | None) = Field(None, alias='_comment')
    url: ((str | list[str]) | None) = Field(None, alias='_url')
    # Timestamp/date the value refers to -- presumably when the
    # contribution was measured; TODO confirm against callers.
    datetime: ((datetime | date) | None)
    # The contribution value itself, as a percentage.
    value: (Percentage | None)
class DiceRoll(TraitType):
    """Trait describing a roll of two six-sided dice as an (n, m) tuple."""

    default_value = (1, 1)
    info_text = 'a tuple of the form (n,m), where both n and m are integers in the range from 1 to 6 representing a roll of a pair of dice'

    def validate(self, object, name, value):
        """Return `value` when it is a valid pair of die faces, else report
        an error via the TraitType machinery."""
        is_pair = isinstance(value, tuple) and len(value) == 2
        if is_pair and 1 <= value[0] <= 6 and 1 <= value[1] <= 6:
            return value
        self.error(object, name, value)
def generate_delft_training_data(model_name: str, tei_source_path: str, raw_source_path: str, delft_output_path: str, sciencebeam_parser: ScienceBeamParser):
    """Convert training TEI files (plus optional raw files) for `model_name`
    into a single DELFT training-data file at `delft_output_path`.

    Raises RuntimeError when `tei_source_path` matches no files.
    """
    training_tei_parser = get_training_tei_parser_for_model_name(model_name, sciencebeam_parser=sciencebeam_parser)
    data_generator = get_data_generator_for_model_name(model_name, sciencebeam_parser=sciencebeam_parser)
    LOGGER.debug('tei_source_path: %r', tei_source_path)
    tei_file_list = glob(tei_source_path)
    if not tei_file_list:
        raise RuntimeError('no files found for file pattern %r' % tei_source_path)
    LOGGER.info('tei_file_list: %r', tei_file_list)
    raw_file_list: Sequence[Optional[str]]
    if raw_source_path:
        raw_file_list = get_raw_file_list_for_tei_file_list(tei_file_list, raw_source_path=raw_source_path)
    else:
        # No raw source: pair every TEI file with None.
        raw_file_list = [None] * len(tei_file_list)
    LOGGER.info('raw_file_list: %r', raw_file_list)
    LOGGER.info('writing to : %r', delft_output_path)
    with auto_uploading_output_file(delft_output_path, mode='w', encoding='utf-8') as data_fp:
        for document_index, (tei_file, raw_file) in enumerate(zip(tei_file_list, raw_file_list)):
            # Documents are separated by a blank line.
            if document_index > 0:
                data_fp.write('\n\n')
            data_fp.writelines(iter_generate_delft_training_data_lines_for_document(
                tei_file=tei_file,
                raw_file=raw_file,
                training_tei_parser=training_tei_parser,
                data_generator=data_generator,
            ))
def prompt_user(question: str, options: str, default: str='') -> str:
    """Prompt until the user picks one of the single-character `options`.

    '?' is always accepted and prints PROMPT_HELP; an empty response returns
    `default` when one is configured; the default is shown in upper case.
    """
    options = options.lower()
    default = default.lower()
    assert len(default) < 2 and default in options
    if '?' not in options:
        options += '?'
    prompt_options = ','.join(o.upper() if o == default else o for o in options)
    prompt = f'{question} [{prompt_options}]? '
    while True:
        answer = input(prompt).strip().lower()
        if answer == '?':
            for option, help_text in PROMPT_HELP.items():
                click.secho(f'{option} - {help_text}', fg='red', bold=True)
        elif len(answer) == 1 and answer in options:
            return answer
        elif answer:
            click.echo(f'invalid response "{answer}"')
        elif default:
            return default
def min_score(ar, scorer: Optional[topk_scorer]=None):
    """Reduce `ar` to the minimum per-sequence score, scoring each sequence
    with `scorer` (alpha_length_normalized by default)."""
    assert len(ar) > 0, 'dc.min_score is not defined for empty arrays'
    if scorer is None:
        scorer = alpha_length_normalized()

    def fold_min(seqs, score):
        # Fold the running minimum together with the freshly scored sequences.
        candidates = [score] + [scorer(s.logprobs, s) for s in seqs]
        return np.array(candidates).min()

    # Start from float32 max so the first real score always wins.
    return ar.reduce(fold_min, np.finfo(np.float32).max)
def extractSelfTaughtJapanese(item):
    """Feed-item parser for 'Self Taught Japanese'.

    Returns None for items that are filtered out (blog/meta tags, items
    without chapter/volume info, previews) and False when no release type
    matched.
    """
    badwords = ['travel', 'Japanese Study: Intermediate', 'Japanese Study: Advanced',
                'contests', 'E-book publishing', 'test', 'grammar', 'research',
                'Reviews', 'aside']
    tags = item['tags']
    if any(bad in tags for bad in badwords):
        return None
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    return False
class LiteDRAMInterface(Record):
    """Native LiteDRAM user-port interface: one command channel per bank plus
    a shared data path, sized from the PHY and geometry settings."""
    def __init__(self, address_align, settings):
        # One rank-select bit per power of two of nranks.
        rankbits = log2_int(settings.phy.nranks)
        self.address_align = address_align
        # User address = row + column + rank bits, minus the low bits implied
        # by the data-path alignment.
        self.address_width = (((settings.geom.rowbits + settings.geom.colbits) + rankbits) - address_align)
        # Data path carries all PHY phases side by side.
        self.data_width = (settings.phy.dfi_databits * settings.phy.nphases)
        self.nbanks = (settings.phy.nranks * (2 ** settings.geom.bankbits))
        self.nranks = settings.phy.nranks
        self.settings = settings
        # One command layout per bank ("bank0", "bank1", ...) plus the
        # shared data layout.
        layout = [(('bank' + str(i)), cmd_layout(self.address_width)) for i in range(self.nbanks)]
        layout += data_layout(self.data_width)
        Record.__init__(self, layout)
def DoConfigWrite(stmt_cursor, config, field, expr, before=False):
    """Insert a WriteConfig(config.field = expr) next to `stmt_cursor`.

    Returns (ir, fwd, cfg): the rewritten IR, the forwarding function, and
    the config-write safety analysis result.
    """
    # Only simple, effect-free expressions may be written into config state.
    assert isinstance(expr, (LoopIR.Read, LoopIR.StrideExpr, LoopIR.Const))
    anchor = stmt_cursor._node
    write_stmt = LoopIR.WriteConfig(config, field, expr, None, anchor.srcinfo)
    insertion_gap = stmt_cursor.before() if before else stmt_cursor.after()
    ir, fwd = insertion_gap._insert([write_stmt])
    cfg = Check_DeleteConfigWrite(ir, [write_stmt])
    return (ir, fwd, cfg)
class VideoWidget(Widget):
    """Widget that plays a web-hosted video via an HTML5 <video> element."""

    DEFAULT_MIN_SIZE = (100, 100)
    source = event.StringProp('', settable=True, doc='\n The source of the video. This must be a url of a resource\n on the web.\n ')

    def _create_dom(self):
        """Build the <video> node with a single mp4 <source> child."""
        global window
        video_node = window.document.createElement('video')
        video_node.controls = 'controls'
        video_node.textContent = 'Your browser does not support HTML5 video.'
        self.src_node = window.document.createElement('source')
        self.src_node.type = 'video/mp4'
        self.src_node.src = None
        video_node.appendChild(self.src_node)
        return video_node

    def _render_dom(self):
        # The DOM is managed imperatively in _create_dom / __source_changed;
        # there is nothing to re-render declaratively.
        return None

    def __source_changed(self):
        # Point the <source> at the new URL (or clear it) and reload.
        self.src_node.src = self.source or None
        self.node.load()
def test_circular_dependency_2_init(circular_dependency_phi_functions, variable_v, variable_u):
    """After breaking the v6 dependency, the phi dependency graph has the
    expected edge set and contains every phi function as a node."""
    phis = circular_dependency_phi_functions
    phis[6].substitute(variable_v[6], variable_u[5])
    graph = PhiDependencyGraph(phis)
    expected_edges = {
        (phis[1], phis[0]),
        (phis[0], phis[1]),
        (phis[4], phis[3]),
        (phis[5], phis[3]),
        (phis[2], phis[4]),
        (phis[5], phis[4]),
        (phis[6], phis[5]),
        (phis[7], phis[6]),
        (phis[3], phis[7]),
    }
    assert set(graph.edges) == expected_edges and set(graph.nodes) == set(phis)
def extract_step_back(query: str, model: BaseLLM=None) -> Optional[Sentence]:
    """Ask the model for a 'step back' (more generic) rephrasing of `query`.

    Args:
        query: the original question.
        model: callable LLM; a deterministic OpenAI model is built when None.

    Returns:
        The text after the 'AI: ' marker in the model output, stripped, or
        None when the marker is absent.
    """
    _input = _STEP_BACK_PROMPT.format(question=query)
    if _DEBUG:
        print(f'Input: {_input}', file=sys.stderr)
    if model is None:
        # temperature=0 keeps the default model deterministic.
        model = OpenAI(temperature=0)
    output = model(_input)
    if _DEBUG:
        print(f'Output: {output}', file=sys.stderr)
    try:
        return output.split('AI: ')[1].strip()
    except IndexError:
        # Marker missing from the model output.
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and unrelated errors.
        return None
class QueeningQueue(Service, PeerSubscriber, QueenTrackerAPI):
    """Partition connected ETH peers for beam sync node-data requests.

    Peers are kept in a hierarchy:
      - one "queen": the single best peer (lowest ``_peer_sort_key``),
        reserved for priority requests and replaced whenever a better peer
        appears;
      - "knights": up to ``_desired_knights`` peers held in reserve;
      - "peasants": all remaining peers, drawn from for bulk requests.
    """
    # Current best peer, reserved for priority requests; None until crowned.
    _queen_peer: Optional[ETHPeer] = None
    # Set whenever the throne changes hands, to wake get_queen_peer() waiters.
    _queen_updated: asyncio.Event
    _knights: WaitingPeers[ETHPeer]
    _peasants: WaitingPeers[ETHPeer]
    # No message subscriptions needed; this service only tracks peer membership.
    subscription_msg_types: FrozenSet[Type[CommandAPI[Any]]] = frozenset()
    msg_queue_maxsize: int = 2000
    # Seconds between queen-stats debug log lines.
    _report_interval = 30
    def __init__(self, peer_pool: ETHPeerPool) -> None:
        self.logger = get_logger('trinity.sync.beam.queen.QueeningQueue')
        self._peer_pool = peer_pool
        self._knights = WaitingPeers(NodeDataV65)
        self._peasants = WaitingPeers(NodeDataV65)
        self._queen_updated = asyncio.Event()
        self._desired_knights = 0
        self._num_peers = 0
    async def run(self) -> None:
        """Service entrypoint: subscribe to the peer pool and report stats
        until the service is cancelled."""
        with self.subscribe(self._peer_pool):
            self.manager.run_daemon_task(self._report_statistics)
            (await self.manager.wait_finished())
    async def _report_statistics(self) -> None:
        """Periodically log knight/peasant counts and the current queen."""
        while self.manager.is_running:
            (await asyncio.sleep(self._report_interval))
            self.logger.debug('queen-stats: free_knights=%d/%d free_peasants=%d/%d queen=%s', len(self._knights), self._desired_knights, len(self._peasants), ((self._num_peers - self._desired_knights) - 1), self._queen_peer)
    def register_peer(self, peer: BasePeer) -> None:
        """Insert a newly-connected peer into the hierarchy."""
        super().register_peer(peer)
        self._insert_peer(peer)
        self._num_peers += 1
    def deregister_peer(self, peer: BasePeer) -> None:
        """Forget a disconnected peer; vacate the throne if it was the queen."""
        super().deregister_peer(peer)
        if (self._queen_peer == peer):
            self._queen_peer = None
        self._num_peers -= 1
    async def get_queen_peer(self) -> ETHPeer:
        """Return the current queen, blocking until one is crowned.

        While waiting, knights are popped and re-inserted so the best of
        them can take the throne. Logs when starved longer than
        WARN_AFTER_QUEEN_STARVED.
        """
        t = Timer()
        while (self._queen_peer is None):
            try:
                promote_knight = self._knights.pop_nowait()
            except asyncio.QueueEmpty:
                (await self._queen_updated.wait())
                self._queen_updated.clear()
            else:
                # Re-inserting the knight crowns it if it is the best peer.
                self._insert_peer(promote_knight)
        queen_starve_time = t.elapsed
        if (queen_starve_time > WARN_AFTER_QUEEN_STARVED):
            self.logger.debug('Blocked for %.2fs waiting for queen=%s', queen_starve_time, self._queen_peer)
        return self._queen_peer
    def queen(self) -> Optional[ETHPeer]:
        # NOTE(review): reads like it was originally a @property -- confirm
        # against callers before changing.
        return self._queen_peer
    def set_desired_knight_count(self, desired_knights: int) -> None:
        """Set the knight head-count, promoting peasants to fill vacancies."""
        self._desired_knights = desired_knights
        while (len(self._knights) < self._desired_knights):
            try:
                promoted_knight = self._peasants.pop_nowait()
            except asyncio.QueueEmpty:
                break
            else:
                self._knights.put_nowait(promoted_knight)
    def pop_knights(self) -> Iterable[ETHPeer]:
        """Yield up to ``_desired_knights`` peers (knights first, then
        peasants), then demote any remaining knights to peasants."""
        for _ in range(self._desired_knights):
            try:
                (yield self._knights.pop_nowait())
            except asyncio.QueueEmpty:
                try:
                    (yield self._peasants.pop_nowait())
                except asyncio.QueueEmpty:
                    break
        # Any knights still queued are surplus; demote them.
        while len(self._knights):
            try:
                demoted_knight = self._knights.pop_nowait()
            except asyncio.QueueEmpty:
                break
            else:
                self._peasants.put_nowait(demoted_knight)
    async def pop_fastest_peasant(self) -> ETHPeer:
        """Pop the fastest available peasant, skipping dead or busy peers and
        re-routing any peer that should be queen instead."""
        while True:
            peer = (await self._peasants.get_fastest())
            if (not peer.is_alive):
                self.logger.info('Dropping %s from beam peers, as no longer active', peer)
                if (peer == self._queen_peer):
                    self._queen_peer = None
                continue
            if self._should_be_queen(peer):
                self.logger.debug('About to draw peasant %s, but realized it should be queen', peer)
                self._insert_peer(peer)
                continue
            peer_is_requesting = peer.eth_api.get_node_data.is_requesting
            if peer_is_requesting:
                # Peer is mid-request; bench it briefly and try another.
                self.logger.debug('QueenQueuer is skipping active peer %s', peer)
                self.insert_peer(peer, delay=NON_IDEAL_RESPONSE_PENALTY)
                continue
            return peer
    def insert_peer(self, peer: ETHPeer, delay: float=0) -> None:
        """Return a peer to the hierarchy, optionally after ``delay`` seconds."""
        if (not peer.is_alive):
            return
        elif self._should_be_queen(peer):
            self.logger.debug('Fast-tracking peasant to promote to queen: %s', peer)
            self._insert_peer(peer)
        elif (delay > 0):
            loop = asyncio.get_event_loop()
            loop.call_later(delay, functools.partial(self._insert_peer, peer))
        else:
            self._insert_peer(peer)
    def penalize_queen(self, peer: ETHPeer, delay: float=NON_IDEAL_RESPONSE_PENALTY) -> None:
        """Dethrone ``peer`` (if it is the queen) and keep it out of play for
        ``delay`` seconds before re-inserting it."""
        if (peer == self._queen_peer):
            self._queen_peer = None
            self.logger.debug('Penalizing %s for %.2fs, for minor infraction', peer, delay)
            loop = asyncio.get_event_loop()
            loop.call_later(delay, functools.partial(self._insert_peer, peer))
    def _should_be_queen(self, peer: ETHPeer) -> bool:
        """Return True if ``peer`` should occupy the throne right now.

        Raises ValueError when called with a dead peer.
        """
        if (not peer.is_alive):
            raise ValueError(f'{peer} is no longer alive')
        if (self._queen_peer is None):
            return True
        elif (not self._queen_peer.is_alive):
            return True
        elif (peer == self._queen_peer):
            return True
        else:
            # Lower sort key means a better peer.
            new_peer_quality = _peer_sort_key(peer)
            current_queen_quality = _peer_sort_key(self._queen_peer)
            return (new_peer_quality < current_queen_quality)
    def _insert_peer(self, peer: ETHPeer) -> None:
        """Place ``peer`` on the throne or among the peasants, promoting a
        peasant to knight when the knights are understaffed."""
        if (not peer.is_alive):
            self.logger.debug('Peer %s is no longer alive, not adding to queue', peer)
            return
        if self._should_be_queen(peer):
            (old_queen, self._queen_peer) = (self._queen_peer, peer)
            if (peer != old_queen):
                self.logger.debug('Switching queen peer from %s to %s', old_queen, peer)
                self._queen_updated.set()
                if (old_queen is not None):
                    # The displaced queen re-enters through the same logic.
                    self._insert_peer(old_queen)
        else:
            self._peasants.put_nowait(peer)
            if (len(self._knights) < self._desired_knights):
                try:
                    promoted_knight = self._peasants.pop_nowait()
                except asyncio.QueueEmpty:
                    return
                else:
                    self._knights.put_nowait(promoted_knight)
class Ui_Wallbreaker(object):
    """Qt Designer UI for the Wallbreaker window.

    NOTE: this follows the pyuic-generated layout style (setupUi /
    retranslateUi); prefer regenerating from the .ui file over hand edits.
    """
    def setupUi(self, Wallbreaker):
        """Create and lay out all widgets on the Wallbreaker window."""
        Wallbreaker.setObjectName('Wallbreaker')
        Wallbreaker.resize(822, 612)
        self.gridLayout_4 = QtWidgets.QGridLayout(Wallbreaker)
        self.gridLayout_4.setObjectName('gridLayout_4')
        # Top group: read-only output pane for search results.
        self.groupBox = QtWidgets.QGroupBox(Wallbreaker)
        self.groupBox.setObjectName('groupBox')
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName('gridLayout')
        self.txtSearchData = QtWidgets.QPlainTextEdit(self.groupBox)
        self.txtSearchData.setReadOnly(True)
        self.txtSearchData.setObjectName('txtSearchData')
        self.gridLayout.addWidget(self.txtSearchData, 0, 0, 1, 1)
        self.gridLayout_4.addWidget(self.groupBox, 0, 0, 1, 1)
        # Bottom group: action buttons and class/address inputs.
        self.groupBox_2 = QtWidgets.QGroupBox(Wallbreaker)
        self.groupBox_2.setObjectName('groupBox_2')
        self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout_3.setObjectName('gridLayout_3')
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName('verticalLayout')
        self.btnClassSearch = QtWidgets.QPushButton(self.groupBox_2)
        self.btnClassSearch.setMinimumSize(QtCore.QSize(0, 40))
        self.btnClassSearch.setObjectName('btnClassSearch')
        self.verticalLayout.addWidget(self.btnClassSearch)
        self.btnClassDump = QtWidgets.QPushButton(self.groupBox_2)
        self.btnClassDump.setMinimumSize(QtCore.QSize(0, 40))
        self.btnClassDump.setObjectName('btnClassDump')
        self.verticalLayout.addWidget(self.btnClassDump)
        self.btnObjectSearch = QtWidgets.QPushButton(self.groupBox_2)
        self.btnObjectSearch.setMinimumSize(QtCore.QSize(0, 40))
        self.btnObjectSearch.setObjectName('btnObjectSearch')
        self.verticalLayout.addWidget(self.btnObjectSearch)
        self.btnObjectDump = QtWidgets.QPushButton(self.groupBox_2)
        self.btnObjectDump.setMinimumSize(QtCore.QSize(0, 40))
        self.btnObjectDump.setObjectName('btnObjectDump')
        self.verticalLayout.addWidget(self.btnObjectDump)
        self.btnClearUI = QtWidgets.QPushButton(self.groupBox_2)
        self.btnClearUI.setMinimumSize(QtCore.QSize(0, 40))
        self.btnClearUI.setObjectName('btnClearUI')
        self.verticalLayout.addWidget(self.btnClearUI)
        self.gridLayout_3.addLayout(self.verticalLayout, 0, 2, 4, 1)
        self.label_4 = QtWidgets.QLabel(self.groupBox_2)
        self.label_4.setObjectName('label_4')
        self.gridLayout_3.addWidget(self.label_4, 3, 0, 1, 1)
        self.txtAddress = QtWidgets.QLineEdit(self.groupBox_2)
        self.txtAddress.setObjectName('txtAddress')
        self.gridLayout_3.addWidget(self.txtAddress, 3, 1, 1, 1)
        self.txtClassName = QtWidgets.QLineEdit(self.groupBox_2)
        self.txtClassName.setObjectName('txtClassName')
        self.gridLayout_3.addWidget(self.txtClassName, 0, 1, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.groupBox_2)
        self.label_3.setObjectName('label_3')
        self.gridLayout_3.addWidget(self.label_3, 0, 0, 1, 1)
        self.listClasses = QtWidgets.QListWidget(self.groupBox_2)
        self.listClasses.setObjectName('listClasses')
        self.gridLayout_3.addWidget(self.listClasses, 1, 1, 1, 1)
        self.gridLayout_4.addWidget(self.groupBox_2, 1, 0, 1, 1)
        self.retranslateUi(Wallbreaker)
        QtCore.QMetaObject.connectSlotsByName(Wallbreaker)
    def retranslateUi(self, Wallbreaker):
        """Apply translated text to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Wallbreaker.setWindowTitle(_translate('Wallbreaker', 'Wallbreaker'))
        self.groupBox.setTitle(_translate('Wallbreaker', ''))
        self.groupBox_2.setTitle(_translate('Wallbreaker', ''))
        self.btnClassSearch.setText(_translate('Wallbreaker', 'classsearch'))
        self.btnClassDump.setText(_translate('Wallbreaker', 'classdump'))
        self.btnObjectSearch.setText(_translate('Wallbreaker', 'objectsearch'))
        self.btnObjectDump.setText(_translate('Wallbreaker', 'objectdump'))
        self.btnClearUI.setText(_translate('Wallbreaker', ''))
        self.label_4.setText(_translate('Wallbreaker', ':'))
        self.label_3.setText(_translate('Wallbreaker', ':'))
class Header(object):
    """Parsed header of a Process Monitor (PML) backing file.

    Reads the fixed-size header from ``io`` and validates the signature,
    supported version, and that the table offsets were finalized.
    """
    # On-disk size of the header, in bytes.
    SIZE = 936
    def __init__(self, io):
        # Read the whole header once, then parse from the in-memory copy.
        stream = BytesIO(io.read(self.SIZE))
        self.signature = stream.read(4)
        if (self.signature != b'PML_'):
            raise PMLError('not a Process Monitor backing file (signature missing).')
        self.version = read_u32(stream)
        if (self.version not in [9]):
            raise NotImplementedError('Not supporting PML version {}'.format(self.version))
        # Non-zero when the capture was taken on 64-bit Windows.
        self.is_64bit = read_u32(stream)
        self.computer_name = read_utf16(stream, 32)
        self.system_root = read_utf16(stream, 520)
        self.number_of_events = read_u32(stream)
        stream.seek(8, 1)  # skip 8 unknown/reserved bytes
        self.events_offset = read_u64(stream)
        self.events_offsets_array_offset = read_u64(stream)
        self.process_table_offset = read_u64(stream)
        self.strings_table_offset = read_u64(stream)
        self.icon_table_offset = read_u64(stream)
        stream.seek(12, 1)  # skip 12 unknown/reserved bytes
        self.windows_major_number = read_u32(stream)
        self.windows_minor_number = read_u32(stream)
        self.windows_build_number = read_u32(stream)
        self.windows_build_number_after_decimal_point = read_u32(stream)
        self.service_pack_name = read_utf16(stream, 50)
        stream.seek(214, 1)  # skip 214 unknown/reserved bytes
        self.number_of_logical_processors = read_u32(stream)
        self.ram_memory_size = read_u64(stream)
        self.header_size = read_u64(stream)
        self.hosts_and_ports_tables_offset = read_u64(stream)
        # Zero table offsets mean Process Monitor never finalized the trace.
        if ((self.events_offset == 0) or (self.events_offsets_array_offset == 0) or (self.process_table_offset == 0) or (self.strings_table_offset == 0) or (self.icon_table_offset == 0)):
            raise PMLError('PML was not closed cleanly during capture and is corrupt.')
        if ((self.header_size != self.SIZE) or (self.hosts_and_ports_tables_offset == 0)):
            raise PMLError('PML is corrupt and cannot be opened.')
def _parse_major_minor(verstr):
    """Return (major, minor) ints parsed from a version string like '3.16'.

    Non-numeric characters within a component are ignored; missing
    components default to 0.
    """
    parts = []
    for piece in verstr.split('.')[:2]:
        digits = ''.join(ch for ch in piece if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    while len(parts) < 2:
        parts.append(0)
    return tuple(parts)


def get_csv_line(jobname, json, index, data, version_str, serverMode, scale_by_TB=1):
    """Build one CSV row of IOPS/bandwidth/latency stats from fio JSON output.

    Args:
        jobname: label for the first column.
        json: full fio JSON document (used to detect `percentile_list`).
        index: unused; kept for interface compatibility.
        data: per-job stats dict with 'read'/'write'/'trim' sections.
        version_str: fio version string such as 'fio-3.16'.
        serverMode: True when the JSON uses the server-mode layout.
        scale_by_TB: divisor applied to the IOPS/bandwidth columns.

    Returns:
        A list of column values: jobname, six IOPS/BW numbers, then for each
        of read/write/trim the mean/max latency plus percentile columns
        (strings, or 0 when the section has no IOPS).
    """
    clat = 'clat'
    con = 1
    verstr = version_str.split('-')[1]
    # fio >= 3.0 reports completion latency in nanoseconds under 'clat_ns'.
    # BUG FIX: distutils.version.StrictVersion was removed in Python 3.12
    # (PEP 632); compare (major, minor) integer tuples instead.
    if _parse_major_minor(verstr) >= (3, 0):
        clat = 'clat_ns'
        con = 1000
    # Detect a custom percentile_list in either the job or global options.
    if serverMode:
        options1 = ('percentile_list' in json['job options'])
        options2 = ('percentile_list' in json['global options'])
    else:
        options1 = ('percentile_list' in json['jobs'][0]['job options'])
        options2 = ('percentile_list' in json['global options'])
    iotype = ['read', 'write', 'trim']
    if (options1 or options2):
        percent = ['25.000000', '50.000000', '70.000000', '75.000000', '90.000000', '99.000000', '99.900000', '99.990000', '99.999000', '99.999900']
    else:
        percent = ['50.000000', '70.000000', '90.000000', '99.000000', '99.900000', '99.990000', '99.999900']
    line = [jobname, (data['read']['iops'] / scale_by_TB), (data['read']['bw'] / scale_by_TB), (data['write']['iops'] / scale_by_TB), (data['write']['bw'] / scale_by_TB), (data['trim']['iops'] / scale_by_TB), (data['trim']['bw'] / scale_by_TB)]
    for io in iotype:
        # `con` converts ns back to us for fio >= 3.0 output.
        line.append(str((data[io][clat]['mean'] / con)))
        line.append(str((data[io][clat]['max'] / con)))
        if (data[io]['iops'] > 0):
            for p in percent:
                if ('percentile' in data[io][clat]):
                    line.append(str((data[io][clat]['percentile'][p] / con)))
                else:
                    line.append(0)
        else:
            # No I/O of this type: pad the percentile columns with zeros.
            for _p in percent:
                line.append(0)
    return line
class bsn_gentable_clear_request(bsn_header):
    """OpenFlow BSN experimenter message: request clearing a gentable.

    Wire layout: OpenFlow header (version/type/length/xid), experimenter id,
    subtype, table id, 2 bytes padding, then 128-bit checksum and mask.
    """
    version = 6
    type = 4
    experimenter = 6035143
    subtype = 48

    def __init__(self, xid=None, table_id=None, checksum=None, checksum_mask=None):
        # xid defaults to None (filled in by the connection layer);
        # the remaining fields default to 0.
        self.xid = xid
        self.table_id = table_id if table_id is not None else 0
        self.checksum = checksum if checksum is not None else 0
        self.checksum_mask = checksum_mask if checksum_mask is not None else 0

    def pack(self):
        """Serialize to bytes, back-patching the length field."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # placeholder for total length
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        packed.append(struct.pack('!H', self.table_id))
        # BUG FIX: the padding and the final join were str ('\x00' * 2,
        # ''.join), which raises TypeError with struct.pack's bytes on
        # Python 3; bytes literals behave identically on Python 2.
        packed.append(b'\x00' * 2)
        packed.append(util.pack_checksum_128(self.checksum))
        packed.append(util.pack_checksum_128(self.checksum_mask))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return b''.join(packed)

    # BUG FIX: restore @staticmethod (standard for loxi unpack methods);
    # without it, unpack could not be called on an instance.
    @staticmethod
    def unpack(reader):
        """Deserialize a message from `reader`, validating the fixed fields."""
        obj = bsn_gentable_clear_request()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 4)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message (4 bytes already consumed).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 48)
        obj.table_id = reader.read('!H')[0]
        reader.skip(2)
        obj.checksum = util.unpack_checksum_128(reader)
        obj.checksum_mask = util.unpack_checksum_128(reader)
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.table_id != other.table_id):
            return False
        if (self.checksum != other.checksum):
            return False
        if (self.checksum_mask != other.checksum_mask):
            return False
        return True

    def pretty_print(self, q):
        """Write a human-readable dump of this message to pretty-printer `q`."""
        q.text('bsn_gentable_clear_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('table_id = ')
                q.text(('%#x' % self.table_id))
                q.text(',')
                q.breakable()
                q.text('checksum = ')
                q.pp(self.checksum)
                q.text(',')
                q.breakable()
                q.text('checksum_mask = ')
                q.pp(self.checksum_mask)
            q.breakable()
        q.text('}')
def name_graph(sorted_graph: List[Tensor]) -> None:
    """Assign unique names to every op, tensor and shape dim in `sorted_graph`.

    Mutates the module-level counters (func_cnt, tensor_cnt,
    func_name_to_tensor_cnt, user_provided_dim) so names stay unique across
    repeated calls. User-provided dim names are preserved and recorded.
    """
    global func_cnt
    global tensor_cnt
    global func_name_to_tensor_cnt
    global user_provided_dim
    _LOGGER.debug(f'before name_graph: func_cnt={func_cnt!r}, tensor_cnt={tensor_cnt!r}, len(func_name_to_tensor_cnt)={len(func_name_to_tensor_cnt)!r}, len(user_provided_dim)={len(user_provided_dim)!r}')
    for node in sorted_graph:
        funcs = node.src_ops()
        if (len(funcs) == 0):
            # Graph input / constant with no producing op: name it tensor_<n>.
            if (node._attrs['name'] is None):
                tensor_name = unique_name(f'tensor_{tensor_cnt}')
                node._attrs['name'] = tensor_name
                tensor_cnt += 1
                if isinstance(node, IntVarTensor):
                    if (not isinstance(node._attrs['int_var'], IntImm)):
                        raise RuntimeError(f'''We don't support emitting standalone IntVarTensor at this moment.
Encountered {node._attrs['name']}: {node._attrs['int_var']}.''')
                    else:
                        node._attrs['int_var']._attrs['name'] = tensor_name
        else:
            # Name producing ops as <op_kind>_<n>, then name this output
            # tensor after its producer.
            for func in funcs:
                if (func._attrs['name'] is None):
                    func_name = '{op_kind}_{idx}'.format(op_kind=func._attrs['op'], idx=func_cnt)
                    func_name = unique_name(func_name)
                    func._attrs['name'] = func_name
                    func._attrs['original_name'] = func_name
                    func_cnt += 1
                    func_name_to_tensor_cnt[func_name] = 0
                if (node._attrs['name'] is None):
                    # Outputs of a func are numbered <func_name>_0, _1, ...
                    func_tensor_count = func_name_to_tensor_cnt[func_name]
                    node_name = unique_name(f'{func_name}_{func_tensor_count}')
                    node._attrs['name'] = node_name
                    func_name_to_tensor_cnt[func_name] = (func_tensor_count + 1)
                    if isinstance(node, IntVarTensor):
                        shape_name = node._attrs['int_var']._attrs['name']
                        if (shape_name is None):
                            node._attrs['int_var']._attrs['name'] = node_name
        tensor_name = node._attrs['name']
        for (i, dim) in enumerate(node._attrs['shape']):
            if (dim._attrs['name'] is not None):
                # Remember names the user chose, so later passes keep them.
                user_provided_dim.add(dim._attrs['name'])
            if ((dim._attrs['name'] is None) and (not isinstance(dim, JaggedIntVar))):
                dim_name = '{tname}_dim_{idx}'.format(tname=tensor_name, idx=i)
                dim._attrs['name'] = dim_name
    # Second pass: jagged tensors take their name from the total length,
    # and their batch dim gets a derived name when still unnamed.
    for tensor in sorted_graph:
        if tensor.is_jagged():
            jagged_int_var = tensor._attrs['shape'][0]
            jagged_int_var._attrs['name'] = jagged_int_var.total_length()._attrs['name']
            batch_dim = jagged_int_var.batch_dim()
            if (batch_dim._attrs['name'] is None):
                jagged_int_var_name = jagged_int_var._attrs['name']
                batch_dim._attrs['name'] = f'{jagged_int_var_name}_jagged_batch_dim'
    _LOGGER.debug(f'after name_graph: func_cnt={func_cnt!r}, tensor_cnt={tensor_cnt!r}, len(func_name_to_tensor_cnt)={len(func_name_to_tensor_cnt)!r}, len(user_provided_dim)={len(user_provided_dim)!r}')
class OAuth2ClientCredentialsAuthenticationStrategy(OAuth2AuthenticationStrategyBase):
    """OAuth2 client-credentials flow: attach a Bearer token to each request."""

    name = 'oauth2_client_credentials'
    configuration_model = OAuth2BaseConfiguration

    def add_authentication(self, request: PreparedRequest, connection_config: ConnectionConfig) -> PreparedRequest:
        """Set the Authorization header, obtaining or refreshing the token first."""
        stored_token = connection_config.secrets.get('access_token')
        if stored_token:
            # A token already exists: refresh it before use.
            access_token = self._refresh_token(connection_config)
        else:
            # No token yet: run the client-credentials grant.
            access_token = self.get_access_token(connection_config)
        request.headers['Authorization'] = 'Bearer ' + access_token
        return request
def downgrade():
    """Re-create the booked_ticket table removed by the upgrade."""
    op.create_table(
        'booked_ticket',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('ticket_id', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('quantity', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['ticket_id'], [u'tickets.id'], name=u'booked_ticket_ticket_id_fkey', ondelete=u'CASCADE'),
        sa.ForeignKeyConstraint(['user_id'], [u'users.id'], name=u'booked_ticket_user_id_fkey', ondelete=u'CASCADE'),
        sa.PrimaryKeyConstraint('id', name=u'booked_ticket_pkey'),
        sa.UniqueConstraint('user_id', 'ticket_id', name=u'user_ticket_uc'),
    )
(cis_audit.CISAudit, '_get_utcnow', mock_datetime_utcnow)
class TestRunTests():
    """Exercise CISAudit.run_tests across test types and outcomes."""

    test = cis_audit.CISAudit()
    # Baseline arguments shared by every case; each test works on a copy.
    test_args = {
        '_id': '1.1',
        'type': 'test',
        'levels': {'server': 1, 'workstation': 1},
        'description': 'pytest',
    }

    def _run_with(self, **overrides):
        """Run run_tests on a copy of test_args updated with overrides."""
        args = self.test_args.copy()
        args.update(overrides)
        return args, self.test.run_tests([args])

    def test_run_tests_pass(self):
        args, result = self._run_with(function=mock_run_tests_pass)
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Pass', '0ms')]

    def test_run_tests_fail(self):
        args, result = self._run_with(function=mock_run_tests_fail)
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Fail', '0ms')]

    def test_run_tests_error(self):
        args, result = self._run_with(function=mock_run_tests_error)
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Error', '0ms')]

    def test_run_tests_exception(self):
        args, result = self._run_with(function=mock_run_tests_exception)
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Error', '0ms')]

    def test_run_tests_skipped(self):
        args, result = self._run_with(function=mock_run_tests_skipped)
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Skipped', '0ms')]

    def test_run_tests_kwargs(self):
        args = self.test_args.copy()
        args['function'] = mock_run_tests_kwargs
        args['kwargs'] = {'foo': 'bar'}
        del args['levels']
        result = self.test.run_tests([args])
        assert result == [(args['_id'], args['description'], None, 'Pass', '0ms')]

    def test_run_tests_type_header(self):
        args, result = self._run_with(type='header')
        assert result == [(args['_id'], args['description'])]

    def test_run_tests_type_manual(self):
        args, result = self._run_with(type='manual')
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Manual')]

    def test_run_tests_type_none(self, caplog):
        args = self.test_args.copy()
        args.pop('type', None)
        result = self.test.run_tests([args])
        assert result == [('1.1', 'pytest', 1, 'Not Implemented')]
        assert caplog.records[0].msg == 'Test 1.1 does not explicitly define a type, so assuming it is a test'
        assert caplog.records[1].msg == 'Checking whether to run test 1.1'
        assert caplog.records[2].msg == 'Including test 1.1'

    def test_run_tests_type_skip(self, caplog):
        args, result = self._run_with(type='skip')
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Skipped')]

    def test_run_tests_error_not_implemented(self, caplog):
        args = self.test_args.copy()
        args.pop('type')
        result = self.test.run_tests([args])
        assert result == [(args['_id'], args['description'], args['levels']['server'], 'Not Implemented')]
class TestEndToEndGenerator(UseOef):
    def setup_class(cls):
        """Copy the packages tree into a temp working dir and create two
        private keys for the test agents."""
        # Remember the original working directory so teardown can restore it.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        shutil.copytree(Path(ROOT_DIR, 'packages'), Path(cls.t, 'packages'))
        os.chdir(cls.t)
        cls.private_key_path_1 = os.path.join(cls.t, (DEFAULT_PRIVATE_KEY_FILE + '_1'))
        cls.private_key_path_2 = os.path.join(cls.t, (DEFAULT_PRIVATE_KEY_FILE + '_2'))
        create_private_key(DEFAULT_LEDGER, cls.private_key_path_1)
        create_private_key(DEFAULT_LEDGER, cls.private_key_path_2)
def test_generated_protocol_end_to_end(self):
agent_name_1 = 'my_aea_1'
agent_name_2 = 'my_aea_2'
builder_1 = AEABuilder()
builder_1.set_name(agent_name_1)
builder_1.add_private_key(DEFAULT_LEDGER, self.private_key_path_1)
builder_1.set_default_ledger(DEFAULT_LEDGER)
builder_1.add_protocol(Path(ROOT_DIR, 'packages', 'fetchai', 'protocols', 'fipa'))
builder_1.add_protocol(Path(ROOT_DIR, 'packages', 'fetchai', 'protocols', 'oef_search'))
builder_1.add_component(ComponentType.PROTOCOL, Path(PATH_TO_T_PROTOCOL), skip_consistency_check=True)
builder_1.add_connection(Path(ROOT_DIR, 'packages', 'fetchai', 'connections', 'oef'))
builder_1.set_default_connection(OEF_CONNECTION_PUBLIC_ID)
builder_2 = AEABuilder()
builder_2.set_name(agent_name_2)
builder_2.add_private_key(DEFAULT_LEDGER, self.private_key_path_2)
builder_2.set_default_ledger(DEFAULT_LEDGER)
builder_2.add_protocol(Path(ROOT_DIR, 'packages', 'fetchai', 'protocols', 'fipa'))
builder_2.add_protocol(Path(ROOT_DIR, 'packages', 'fetchai', 'protocols', 'oef_search'))
builder_2.add_component(ComponentType.PROTOCOL, Path(PATH_TO_T_PROTOCOL), skip_consistency_check=True)
builder_2.add_connection(Path(ROOT_DIR, 'packages', 'fetchai', 'connections', 'oef'))
builder_2.set_default_connection(OEF_CONNECTION_PUBLIC_ID)
aea_1 = builder_1.build(connection_ids=[OEF_CONNECTION_PUBLIC_ID])
aea_2 = builder_2.build(connection_ids=[OEF_CONNECTION_PUBLIC_ID])
def role_from_first_message_1(message: Message, receiver_address: Address) -> BaseDialogue.Role:
return TProtocolDialogue.Role.ROLE_1
agent_1_dialogues = TProtocolDialogues(self_address=aea_1.identity.address, role_from_first_message=role_from_first_message_1)
def role_from_first_message_1(message: Message, receiver_address: Address) -> BaseDialogue.Role:
return TProtocolDialogue.Role.ROLE_2
agent_2_dialogues = TProtocolDialogues(self_address=aea_2.identity.address, role_from_first_message=role_from_first_message_1)
(message_1, aea_1_dialogue) = agent_1_dialogues.create(counterparty=aea_2.identity.address, performative=TProtocolMessage.Performative.PERFORMATIVE_PT, content_bytes=b'some bytes', content_int=42, content_float=42.7, content_bool=True, content_str='some string')
message_1 = cast(TProtocolMessage, message_1)
(message_2, aea_2_dialogue) = agent_2_dialogues.create(counterparty=aea_1.identity.address, performative=TProtocolMessage.Performative.PERFORMATIVE_PT, content_bytes=b'some other bytes', content_int=43, content_float=43.7, content_bool=False, content_str='some other string')
message_2 = cast(TProtocolMessage, message_2)
skill_context_1 = SkillContext(aea_1.context)
skill_1 = Skill(SkillConfig('fake_skill', 'fetchai', '0.1.0'), skill_context_1)
skill_context_1._skill = skill_1
agent_1_handler = Agent1Handler(skill_context=skill_context_1, name='fake_handler_1', dialogues=agent_1_dialogues)
aea_1.resources._handler_registry.register((PublicId.from_str('fetchai/fake_skill:0.1.0'), TProtocolMessage.protocol_id), agent_1_handler)
skill_context_2 = SkillContext(aea_2.context)
skill_2 = Skill(SkillConfig('fake_skill', 'fetchai', '0.1.0'), skill_context_2)
skill_context_2._skill = skill_2
agent_2_handler = Agent2Handler(message=message_2, dialogues=agent_2_dialogues, skill_context=skill_context_2, name='fake_handler_2')
aea_2.resources._handler_registry.register((PublicId.from_str('fetchai/fake_skill:0.1.0'), TProtocolMessage.protocol_id), agent_2_handler)
t_1 = Thread(target=aea_1.start)
t_2 = Thread(target=aea_2.start)
try:
t_1.start()
t_2.start()
time.sleep(1.0)
aea_1.outbox.put_message(message_1)
time.sleep(5.0)
assert (agent_2_handler.handled_message.message_id == message_1.message_id), 'Message from Agent 1 to 2: message ids do not match'
assert (agent_2_handler.handled_message.dialogue_reference == message_1.dialogue_reference), 'Message from Agent 1 to 2: dialogue references do not match'
assert (agent_2_handler.handled_message.dialogue_reference[0] == message_1.dialogue_reference[0]), 'Message from Agent 1 to 2: dialogue reference[0]s do not match'
assert (agent_2_handler.handled_message.dialogue_reference[1] == message_1.dialogue_reference[1]), 'Message from Agent 1 to 2: dialogue reference[1]s do not match'
assert (agent_2_handler.handled_message.target == message_1.target), 'Message from Agent 1 to 2: targets do not match'
assert (agent_2_handler.handled_message.performative == message_1.performative), 'Message from Agent 1 to 2: performatives do not match'
assert (agent_2_handler.handled_message.content_bytes == message_1.content_bytes), 'Message from Agent 1 to 2: content_bytes do not match'
assert (agent_2_handler.handled_message.content_int == message_1.content_int), 'Message from Agent 1 to 2: content_int do not match'
assert (agent_2_handler.handled_message.content_bool == message_1.content_bool), 'Message from Agent 1 to 2: content_bool do not match'
assert (agent_2_handler.handled_message.content_str == message_1.content_str), 'Message from Agent 1 to 2: content_str do not match'
assert (agent_1_handler.handled_message.message_id == message_2.message_id), 'Message from Agent 1 to 2: dialogue references do not match'
assert (agent_1_handler.handled_message.dialogue_reference == message_2.dialogue_reference), 'Message from Agent 2 to 1: dialogue references do not match'
assert (agent_1_handler.handled_message.dialogue_reference[0] == message_2.dialogue_reference[0]), 'Message from Agent 2 to 1: dialogue reference[0]s do not match'
assert (agent_1_handler.handled_message.dialogue_reference[1] == message_2.dialogue_reference[1]), 'Message from Agent 2 to 1: dialogue reference[1]s do not match'
assert (agent_1_handler.handled_message.target == message_2.target), 'Message from Agent 2 to 1: targets do not match'
assert (agent_1_handler.handled_message.performative == message_2.performative), 'Message from Agent 2 to 1: performatives do not match'
assert (agent_1_handler.handled_message.content_bytes == message_2.content_bytes), 'Message from Agent 2 to 1: content_bytes do not match'
assert (agent_1_handler.handled_message.content_int == message_2.content_int), 'Message from Agent 2 to 1: content_int do not match'
assert (agent_1_handler.handled_message.content_bool == message_2.content_bool), 'Message from Agent 2 to 1: content_bool do not match'
assert (agent_1_handler.handled_message.content_str == message_2.content_str), 'Message from Agent 2 to 1: content_str do not match'
time.sleep(2.0)
finally:
aea_1.stop()
aea_2.stop()
t_1.join()
t_2.join()
def teardown_class(cls):
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass |
def extractNostalgiaOn9ThAvenue(item):
    """Map a 'Nostalgia on 9th Avenue' feed item to a release message.

    Returns None for previews / items without chapter or volume info,
    a release message for 'WATTT'-tagged items, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
class TDE4LensDistortionBase(object):
    """Base class for 3DEqualizer (3DE4) lens distortion models."""

    def __init__(self, distortion_model=None):
        self.distortion_model = distortion_model
        # Token stream of a parsed 3DE lens file; populated by load().
        self.data = None

    def get_data(self, label):
        """Return the token that immediately follows `label` in self.data.

        Scans at most `max_search_length` tokens.  FIX: the bound is now
        checked *before* indexing, so the scan never reads one token past the
        search window as the original condition order did.  If the label is
        not found, the token just after the scan window is returned
        (pre-existing behavior, kept).
        """
        max_search_length = 60
        start_i = 0
        while (start_i < max_search_length) and (self.data[start_i] != label):
            start_i += 1
        # Step over the label itself to its value.
        start_i += 1
        return self.data[start_i]

    def get_distortion(cls, distortion_model):
        """Factory: map a 3DE model name to an instance of its class.

        NOTE(review): takes `cls` but no @classmethod decorator is visible in
        this source — presumably stripped; signature kept as-is.
        """
        distortion_class_lut = {'3DE4 Radial - Standard, Degree 4': TDE4RadialStandardDegree4, '3DE4 Anamorphic - Standard, Degree 4': TDE4AnamorphicStandardDegree4}
        return distortion_class_lut[distortion_model](distortion_model=distortion_model)

    def load(self, data):
        """Parse `data` into self.data; must be implemented by subclasses."""
        # FIX: was `raise NotImplemented(...)` — NotImplemented is a sentinel,
        # not an exception; calling it raises TypeError instead of the
        # intended error.
        raise NotImplementedError('Implement this on the child class.')
def test_matrices_charpoly():
    """charpoly() yields known coefficient lists and rejects non-square input."""
    for M, _S, _is_field in _all_matrices():
        P = _poly_type_from_matrix_type(M)
        # Coefficients are low-to-high degree.
        m_2x2 = M([[1, 2], [3, 4]])
        assert m_2x2.charpoly() == P([-2, -5, 1])
        m_3x3 = M([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
        assert m_3x3.charpoly() == P([3, -12, -16, 1])
        # A rectangular matrix has no characteristic polynomial.
        m_rect = M([[1, 2, 3], [4, 5, 6]])
        assert raises(lambda: m_rect.charpoly(), ValueError)
_deserializable
class DiscordLoader(BaseLoader):
    """Load the full message history of a Discord text channel.

    Thread replies attached to the channel's messages are included.
    Requires the DISCORD_TOKEN environment variable to hold a bot token.
    """

    def __init__(self):
        # Fail fast when no bot token is configured.
        if (not os.environ.get('DISCORD_TOKEN')):
            raise ValueError('DISCORD_TOKEN is not set')
        self.token = os.environ.get('DISCORD_TOKEN')

    def _format_message(message):
        # Flatten a discord.Message into a plain JSON-serializable dict.
        # NOTE(review): no `self` and no @staticmethod — it is only ever
        # called as DiscordLoader._format_message(msg), which works, but a
        # @staticmethod decorator may have been stripped from this source.
        return {'message_id': message.id, 'content': message.content, 'author': {'id': message.author.id, 'name': message.author.name, 'discriminator': message.author.discriminator}, 'created_at': message.created_at.isoformat(), 'attachments': [{'id': attachment.id, 'filename': attachment.filename, 'size': attachment.size, 'url': attachment.url, 'proxy_url': attachment.proxy_url, 'height': attachment.height, 'width': attachment.width} for attachment in message.attachments], 'embeds': [{'title': embed.title, 'type': embed.type, 'description': embed.description, 'url': embed.url, 'timestamp': embed.timestamp.isoformat(), 'color': embed.color, 'footer': {'text': embed.footer.text, 'icon_url': embed.footer.icon_url, 'proxy_icon_url': embed.footer.proxy_icon_url}, 'image': {'url': embed.image.url, 'proxy_url': embed.image.proxy_url, 'height': embed.image.height, 'width': embed.image.width}, 'thumbnail': {'url': embed.thumbnail.url, 'proxy_url': embed.thumbnail.proxy_url, 'height': embed.thumbnail.height, 'width': embed.thumbnail.width}, 'video': {'url': embed.video.url, 'height': embed.video.height, 'width': embed.video.width}, 'provider': {'name': embed.provider.name, 'url': embed.provider.url}, 'author': {'name': embed.author.name, 'url': embed.author.url, 'icon_url': embed.author.icon_url, 'proxy_icon_url': embed.author.proxy_icon_url}, 'fields': [{'name': field.name, 'value': field.value, 'inline': field.inline} for field in embed.fields]} for embed in message.embeds]}

    def load_data(self, channel_id: str):
        """Fetch all messages (and thread replies) from `channel_id`.

        Runs a short-lived discord client to completion, then returns a dict
        with a sha256-derived doc_id and the stringified message list.
        """
        import discord
        messages = []

        class DiscordClient(discord.Client):
            async def on_ready(self) -> None:
                logging.info('Logged on as {0}!'.format(self.user))
                try:
                    channel = self.get_channel(int(channel_id))
                    if (not isinstance(channel, discord.TextChannel)):
                        raise ValueError(f'Channel {channel_id} is not a text channel. Only text channels are supported for now.')
                    # Index the channel's threads so replies can be pulled per
                    # parent message below.
                    threads = {}
                    for thread in channel.threads:
                        threads[thread.id] = thread
                    async for message in channel.history(limit=None):
                        messages.append(DiscordLoader._format_message(message))
                        if (message.id in threads):
                            async for thread_message in threads[message.id].history(limit=None):
                                messages.append(DiscordLoader._format_message(thread_message))
                except Exception as e:
                    logging.error(e)
                    # NOTE(review): close() is awaited here and again in the
                    # `finally` below, so it runs twice on the error path.
                    (await self.close())
                finally:
                    (await self.close())

        intents = discord.Intents.default()
        # Reading message text requires the privileged message-content intent.
        intents.message_content = True
        client = DiscordClient(intents=intents)
        client.run(self.token)
        meta_data = {'url': channel_id}
        messages = str(messages)
        # Deterministic document id derived from content + channel id.
        doc_id = hashlib.sha256((messages + channel_id).encode()).hexdigest()
        return {'doc_id': doc_id, 'data': [{'content': messages, 'meta_data': meta_data}]}
class InferenceCertificate():
    """Serializable record of everything traced during an LMQL query run."""

    def __init__(self, tracer):
        self.tracer = tracer
        # Post-processors applied, in order, to every traced event.
        self.event_processors = [flatten_streamed_chat_responses, flatten_streamed_openai_completion, fold_logit_bias]

    def asdict(self, child=False):
        """Render the certificate (and recursively its children) as a dict.

        Top-level certificates (child=False) additionally carry type, version
        and timestamp metadata; events/children/metrics keys are only emitted
        when non-empty.
        """
        # A NullTracer means tracing was never enabled for this query.
        if (type(self.tracer) is NullTracer):
            return {'type': 'lmql.InferenceCertificate', 'warning': 'Untraced query. Make sure to use lmql.traced() or certificate=True to trace your queries.'}
        return {'name': self.tracer.name, **({'type': 'lmql.InferenceCertificate', 'lmql.version': f'{version} (build on {build_on}, commit {commit})', 'time': time.strftime('%Y-%m-%d %H:%M:%S %z')} if (not child) else {}), **({'events': self.process_events()} if (len(self.tracer.events) > 0) else {}), **({'children': [certificate(c).asdict(child=True) for c in self.tracer.children]} if (len(self.tracer.children) > 0) else {}), **({'metrics': self.tracer.metrics} if (len(self.tracer.metrics) > 0) else {})}

    def __str__(self) -> str:
        return json.dumps(self.asdict(), indent=4)

    def process_events(self):
        """Run every registered event processor over each traced event."""
        events = self.tracer.events
        for p in self.event_processors:
            events = [p(e) for e in events]
        return events

    def __repr__(self) -> str:
        return str(self)
_meta(characters.eirin.LunaString)
class LunaString():
    """UI metadata for Eirin's Luna String skill (thb game client).

    NOTE(review): the bare line above this class
    (`_meta(characters.eirin.LunaString)`) looks like a stripped decorator
    registering this class as the skill's ui_meta — confirm upstream.
    """
    name = ''
    # Runtime UI string containing client markup; content must stay as-is.
    description = ',<style=Card.Name></style>,'

    def clickable(self):
        # The skill button is active whenever a LunaString usage would be accepted.
        return self.accept_cards([characters.eirin.LunaString(self.me)])

    def is_complete(self, sk):
        """Validate the skill's associated cards: exactly one real hand card."""
        from thb.cards.base import VirtualCard
        s = N.skill(characters.eirin.LunaString)
        acards = sk.associated_cards
        if (len(acards) != 1):
            return (False, '')
        c = acards[0]
        # Only cards in the hand or shown-hand zones qualify.
        if (c.resides_in.type not in ('cards', 'showncards')):
            return (False, '')
        # Virtual (composed) cards are rejected.
        if c.is_card(VirtualCard):
            return (False, f'{s}')
        return (True, f'{s}')

    def is_action_valid(self, sk, tl):
        """Check completeness and the per-turn limit, then defer to AttackCard."""
        from thb.cards.classes import AttackCard
        (isc, t) = self.is_complete(sk)
        if (not isc):
            return (isc, t)
        c = sk.associated_cards[0]
        # The skill may only be used once per turn (turn-scoped tag).
        if actions.ttags(self.me)['luna_string_used']:
            s = N.skill(characters.eirin.LunaString)
            return (False, f'{s}')
        return AttackCard().ui_meta.is_action_valid(c, tl)
class ResendActivationCodeViaEmailForm(UserCacheMixin, forms.Form):
    """Request a fresh activation code for a not-yet-activated account."""

    email = forms.EmailField(label=_('Email'))

    def clean_email(self):
        """Validate the address and cache the matching inactive user."""
        email = self.cleaned_data['email']
        account = User.objects.filter(email__iexact=email).first()
        if not account:
            raise ValidationError(_('You entered an invalid email address.'))
        if account.is_active:
            raise ValidationError(_('This account has already been activated.'))
        code = account.activation_set.first()
        if not code:
            raise ValidationError(_('Activation code not found.'))
        # One resend per 24 hours: a code newer than the cutoff blocks the request.
        cutoff = timezone.now() - timedelta(hours=24)
        if code.created_at > cutoff:
            raise ValidationError(_('Activation code has already been sent. You can request a new code in 24 hours.'))
        self.user_cache = account
        return email
(name='api.vm.status.tasks.vm_status_cb', base=MgmtCallbackTask, bind=True)
()
def vm_status_cb(result, task_id, vm_uuid=None):
    """Callback for the vm_status mgmt task: persist the VM's new status.

    NOTE(review): the bare lines above this def look like stripped task
    decorators (`@cq.task(name='api.vm.status.tasks.vm_status_cb', ...)`).
    Parses the worker's message to decide the new Vm status, optionally
    refreshes json_active on update, and raises TaskException on failure.
    NOTE(review): the flattened source leaves block nesting ambiguous; the
    transaction block below is reconstructed — verify against upstream.
    """
    vm = Vm.objects.select_related('slavevm').get(uuid=vm_uuid)
    msg = result.get('message', '')
    # Local name shadows any module-level `json` import within this function.
    json = result.pop('json', None)
    if ((result['returncode'] == 0) and msg and (msg.find('Successfully') == 0)):
        # For update requests, try to refresh the active json from the payload.
        if (result['meta']['apiview']['update'] and (msg.find('Successfully updated') != (- 1))):
            old_json_active = vm.json_active
            try:
                json_active = vm.json.load(json)
                vm_delete_snapshots_of_removed_disks(vm)
                vm.json_active = json_active
                vm.json = json_active
            except Exception as e:
                logger.exception(e)
                logger.error('Could not parse json output from vm_status(%s). Error: %s', vm_uuid, e)
            else:
                with transaction.atomic():
                    vm.save(update_node_resources=True, update_storage_resources=True, update_fields=('enc_json', 'enc_json_active', 'changed'))
                    vm_update_ipaddress_usage(vm)
                    vm_json_active_changed.send(task_id, vm=vm, old_json_active=old_json_active)
        change_time = _get_task_time(result, 'exec_time')
        # Map the worker's success message onto the resulting VM status.
        if (msg.find('Successfully started') >= 0):
            new_status = Vm.RUNNING
        elif (msg.find('Successfully completed stop') >= 0):
            if result['meta']['apiview']['freeze']:
                new_status = Vm.FROZEN
                change_time = _get_task_time(result, 'finish_time')
            else:
                new_status = Vm.STOPPED
        elif (msg.find('Successfully completed reboot') >= 0):
            new_status = Vm.RUNNING
        else:
            logger.error('Did not find successful status change in result from vm_status(%s). Error: %s', vm_uuid, msg)
            raise TaskException(result, ('Unknown status (%s)' % msg))
    else:
        logger.error('Found nonzero returncode in result from vm_status(%s). Error: %s', vm_uuid, msg)
        # A vanished VM is handled specially: a stopping VM is marked STOPPED.
        if is_vm_missing(vm, msg):
            logger.critical('VM %s has vanished from compute node!', vm_uuid)
            if (vm.status == Vm.STOPPING):
                _save_vm_status(task_id, vm, Vm.STOPPED, change_time=_get_task_time(result, 'finish_time'))
        else:
            _vm_status_cb_failed(result, task_id, vm)
        raise TaskException(result, ('Got bad return code (%s). Error: %s' % (result['returncode'], msg)))
    _save_vm_status(task_id, vm, new_status, change_time=change_time)
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
def test_offchain_lookup_raises_for_improperly_formatted_rest_request_response(offchain_lookup_contract, monkeypatch):
    """Offchain lookup must reject a REST response whose payload lacks 'data'."""
    normalized_address = to_hex_if_bytes(offchain_lookup_contract.address)
    # NOTE(review): the mocked_request_url argument below is garbled — the URL
    # f-string literal appears truncated in this source (and as written the
    # line is not valid Python); restore it from the upstream web3.py tests.
    mock_offchain_lookup_request_response(monkeypatch, mocked_request_url=f' mocked_json_data=WEB3PY_AS_HEXBYTES, json_data_field='not_data')
    with pytest.raises(Web3ValidationError, match="missing 'data' field"):
        offchain_lookup_contract.caller.testOffchainLookup(OFFCHAIN_LOOKUP_CONTRACT_TEST_DATA)
def matching_phrases_suffixes(x, allowed_phrases, allow_full_matches=False):
    """Yield the remaining suffix of every allowed phrase that starts with x.

    x is normalised via strip_next_token first.  An exact match yields the
    empty string only when allow_full_matches is set.
    """
    prefix = strip_next_token(x)
    for candidate in allowed_phrases:
        if not candidate.startswith(prefix):
            continue
        suffix = candidate[len(prefix):]
        if suffix:
            yield suffix
        elif allow_full_matches:
            yield ''
class OptionPlotoptionsColumnSonificationContexttracksActivewhen(Options):
    """Generated option wrapper for sonification context-track `activeWhen`.

    NOTE(review): every option below is defined twice (getter then setter
    under the same name).  In the generated upstream code these carry
    @property / @<name>.setter decorators, which appear to have been stripped
    from this source — as written each second def simply shadows the first.
    """

    def crossingDown(self):
        # Getter: no configured default.
        return self._config_get(None)

    def crossingDown(self, num: float):
        # Setter: threshold for activating when the value crosses downward.
        self._config(num, js_type=False)

    def crossingUp(self):
        # Getter: no configured default.
        return self._config_get(None)

    def crossingUp(self, num: float):
        # Setter: threshold for activating when the value crosses upward.
        self._config(num, js_type=False)

    def max(self):
        # Getter: no configured default.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: upper bound of the active range.
        self._config(num, js_type=False)

    def min(self):
        # Getter: no configured default.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: lower bound of the active range.
        self._config(num, js_type=False)

    def prop(self):
        # Getter: no configured default.
        return self._config_get(None)

    def prop(self, text: str):
        # Setter: name of the data property the conditions apply to.
        self._config(text, js_type=False)
def get_parameters(runtime, toolkit, environment):
    """Resolve the runtime/toolkit/environment triple for a test run.

    Rejects unsupported runtime+toolkit combinations; when no environment is
    given, derives a default environment name from the other two values.
    """
    parameters = {'runtime': runtime, 'toolkit': toolkit, 'environment': environment}
    if toolkit not in supported_combinations[runtime]:
        message = 'Python {runtime} and toolkit {toolkit} not supported by test environments'
        raise RuntimeError(message.format(**parameters))
    if environment is None:
        parameters['environment'] = PACKAGE_NAME + '-test-{runtime}-{toolkit}'.format(**parameters)
    return parameters
def import_colors(self, context):
    """Parse '#RRGGBB,...' hex colors from the clipboard into scene color IDs.

    Aborts with an operator error report on the first malformed entry, or when
    the clipboard holds more colors than there are color_ID_color_<i> slots.
    """
    hex_strings = bpy.context.window_manager.clipboard.split(',')
    for i in range(len(hex_strings)):
        # Normalise each entry: trim whitespace and any leading/trailing '#'.
        hex_strings[i] = hex_strings[i].strip().strip('#')
        if ((len(hex_strings[i]) != 6) or (not all(((c in string.hexdigits) for c in hex_strings[i])))):
            self.report({'ERROR_INVALID_INPUT'}, "Incorrect hex format '{}' use a #RRGGBB pattern".format(hex_strings[i]))
            return
        else:
            name = 'color_ID_color_{}'.format(i)
            if hasattr(bpy.context.scene.texToolsSettings, name):
                color = utilities_color.hex_to_color(hex_strings[i])
                setattr(bpy.context.scene.texToolsSettings, name, color)
            else:
                # Ran out of color_ID_color_<i> slots on the scene settings.
                self.report({'ERROR_INVALID_INPUT'}, 'Only {}x colors have been imported instead of {}x'.format(i, len(hex_strings)))
                return
    bpy.context.scene.texToolsSettings.color_ID_count = len(hex_strings)
    bpy.ops.ui.textools_popup('INVOKE_DEFAULT', message='{}x colors imported from clipboard'.format(len(hex_strings)))
class FakeJob():
    """Test double for a job whose state is held by a shared FakeWatcher.

    NOTE(review): `_state` is defined twice and `_state.setter` appears as a
    bare expression — @property decorators were evidently stripped from this
    source; as written the later definitions shadow the earlier ones.
    """
    # Shared across all FakeJob instances; job_id indexes into watcher.jobs.
    watcher = FakeWatcher()

    def __init__(self):
        self.job_id = str(len(self.watcher.jobs))
        self._state = 'UNKNOWN'

    def _state(self):
        # Property getter: read the state back from the shared watcher.
        return self.watcher.get_state(self.job_id)

    _state.setter
    def _state(self, state: str):
        # Property setter: record the state in the shared watcher.
        self.watcher.jobs[self.job_id] = state

    def state(self):
        return self._state
def validateRuleKeys(dat, fname):
    """Assert that every key in ruleset `dat` (from file `fname`) is known."""
    checkBadValues(dat)
    keys = list(dat.keys())
    # NOTE(review): the entry between 'www_no_www_same' and 'max_active_jobs'
    # is garbled (a stray quoted fragment) — a key was evidently mangled when
    # this source was extracted, and as written the list is not valid Python;
    # restore it from the original ruleset schema.
    valid = ['badwords', 'compound_badwords', 'decompose', 'decomposeBefore', 'baseUrl', 'feeds', 'feedPostfix', 'stripTitle', 'tld', 'FOLLOW_GOOGLE_LINKS', 'allImages', 'fileDomains', 'destyle', 'preserveAttrs', 'type', 'extraStartUrls', 'trigger', 'decompose_svg', 'normal_fetch_mode', 'rewalk_disabled', 'send_raw_feed', 'special_case_filters', 'rewriteAttrs', 'rewalk_interval_days', 'disallow_duplicate_path_segments', 'skip_filters', 'www_no_www_same', ' 'max_active_jobs', 'titleTweakLut']
    for key in keys:
        assert (key in valid), ("Key '%s' from ruleset '%s' is not valid!" % (key, fname))
def test_acn_lookup_request_serialization():
    """An ACN LOOKUP_REQUEST message survives an encode/decode round trip."""
    original = AcnMessage(dialogue_reference=('', ''), message_id=1, target=0, performative=AcnMessage.Performative.LOOKUP_REQUEST, agent_address='some_address')
    encoded = AcnMessage.serializer.encode(original)
    decoded = AcnMessage.serializer.decode(encoded)
    assert original == decoded
def generate_dataset_config(train_config, kwargs):
    """Instantiate and customise the config for train_config.dataset.

    The dataset name must be registered in DATASET_PREPROC; the matching
    config class is looked up in the `datasets` module, instantiated, and
    updated with any overrides in `kwargs`.
    """
    names = tuple(DATASET_PREPROC.keys())
    assert train_config.dataset in names, f'Unknown dataset: {train_config.dataset}'
    # FIX: look the config class up directly instead of materialising every
    # member of the module into a throwaway dict via inspect.getmembers.
    dataset_config = getattr(datasets, train_config.dataset)()
    update_config(dataset_config, **kwargs)
    return dataset_config
def assign_field_names(session) -> None:
    """Backfill custom_forms.name from the static form/identifier -> name map.

    NOTE(review): SQL is assembled by string interpolation; the values come
    from the constant CUSTOM_FORM_IDENTIFIER_NAME_MAP, but parameterised
    queries would be safer should that map ever carry external data.
    """
    statements = [
        f"UPDATE custom_forms SET name = '{name}' WHERE form = '{form}' and field_identifier = '{identifier}';"
        for form, dict_map in CUSTOM_FORM_IDENTIFIER_NAME_MAP.items()
        for identifier, name in dict_map.items()
    ]
    for statement in statements:
        session.execute(statement)
class IssueQueryResultType(graphene.ObjectType):
    """GraphQL node exposing one issue instance of a run."""
    # Bare class-level annotation; populated by the query layer, not graphene.
    concatenated_features: str

    class Meta():
        interfaces = (graphene.relay.Node,)

    issue_id = graphene.ID()
    issue_instance_id = graphene.ID()
    run_id = graphene.ID()
    code = graphene.Int()
    message = graphene.String()
    callable = graphene.String()
    status = graphene.String()
    filename = graphene.String()
    location = graphene.String()
    sources = graphene.List(graphene.String)
    source_names = graphene.List(graphene.String)
    sinks = graphene.List(graphene.String)
    sink_names = graphene.List(graphene.String)
    features = graphene.List(graphene.String)
    is_new_issue = graphene.Boolean()
    detected_time = graphene.DateTime()
    min_trace_length_to_sources = graphene.Int()
    min_trace_length_to_sinks = graphene.Int()
    warning_message = graphene.String()
    similar_issues = graphene.List(SimilarIssueResultType)

    def resolve_sources(self, info: ResolveInfo) -> List[str]:
        return list(sources(info.context['session'], self.issue_instance_id))

    def resolve_source_names(self, info: ResolveInfo) -> List[str]:
        return list(source_names(info.context['session'], self.issue_instance_id))

    def resolve_sinks(self, info: ResolveInfo) -> List[str]:
        return list(sinks(info.context['session'], self.issue_instance_id))

    def resolve_sink_names(self, info: ResolveInfo) -> List[str]:
        return list(sink_names(info.context['session'], self.issue_instance_id))

    def resolve_features(self, info: ResolveInfo) -> List[str]:
        return sorted(self.features)

    def resolve_warning_message(self, info: ResolveInfo) -> str:
        # Empty string when no warning text is registered for this code.
        warning_message = get_warning_message(info.context['session'], self.code)
        if warning_message:
            return warning_message.message
        return ''

    def resolve_similar_issues(self, info: ResolveInfo) -> Set[SimilarIssue]:
        """Collect issues in the same run whose similarity score exceeds 0.5."""
        other_issues = Instance(info.context['session'], DBID(self.run_id)).get()
        for other_issue in other_issues:
            # Never compare the issue against itself.
            if (other_issue.issue_instance_id.resolved() == self.issue_instance_id.resolved()):
                continue
            similarity = self.similarity_with(other_issue)
            if (similarity.score > 0.5):
                # NOTE(review): relies on self.similar_issues supporting .add()
                # at resolve time — presumably pre-populated as a set upstream.
                self.similar_issues.add(similarity)
        return self.similar_issues
class CTypesGenericPtr(CTypesData):
    """ctypes-backend base class for cffi pointer types.

    NOTE(review): several methods here take `cls` (_newp, _cast_from,
    _new_pointer_at, _to_ctypes, _from_ctypes, _initialize) — in upstream
    cffi these carry @classmethod decorators, which appear to have been
    stripped from this source.
    """
    __slots__ = ['_address', '_as_ctype_ptr']
    # Whether values of this pointer type may be converted across classes
    # without an explicit cast (see _convert_to_address).
    _automatic_casts = False
    kind = 'pointer'

    def _newp(cls, init):
        return cls(init)

    def _cast_from(cls, source):
        """Cast None / cdata / integers to a pointer of this type."""
        if (source is None):
            address = 0
        elif isinstance(source, CTypesData):
            address = source._cast_to_integer()
        elif isinstance(source, (int, long)):
            # NOTE(review): `long` is the Python 2 builtin; presumably aliased
            # at module level for py3 compatibility — confirm.
            address = source
        else:
            raise TypeError(('bad type for cast to %r: %r' % (cls, type(source).__name__)))
        return cls._new_pointer_at(address)

    def _new_pointer_at(cls, address):
        # Build the instance directly from a raw address, bypassing __init__.
        self = cls.__new__(cls)
        self._address = address
        self._as_ctype_ptr = ctypes.cast(address, cls._ctype)
        return self

    def _get_own_repr(self):
        try:
            return self._addr_repr(self._address)
        except AttributeError:
            return '???'

    def _cast_to_integer(self):
        return self._address

    def __nonzero__(self):
        # Python 2 truthiness: a NULL pointer is falsy.
        return bool(self._address)

    def __bool__(self):
        # Python 3 truthiness: a NULL pointer is falsy.
        return bool(self._address)

    def _to_ctypes(cls, value):
        if (not isinstance(value, CTypesData)):
            raise TypeError(('unexpected %s object' % type(value).__name__))
        address = value._convert_to_address(cls)
        return ctypes.cast(address, cls._ctype)

    def _from_ctypes(cls, ctypes_ptr):
        address = (ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0)
        return cls._new_pointer_at(address)

    def _initialize(cls, ctypes_ptr, value):
        if value:
            ctypes_ptr.contents = cls._to_ctypes(value).contents

    def _convert_to_address(self, BClass):
        # Same class (or unconstrained target), or either side allows
        # automatic casts: hand out the raw address directly.
        if ((BClass in (self.__class__, None)) or BClass._automatic_casts or self._automatic_casts):
            return self._address
        else:
            return CTypesData._convert_to_address(self, BClass)
(('yaml' in cfgdiff.supported_formats), 'requires PyYAML')
class YAMLDiffTestcase(CfgDiffTestCase):
    """cfgdiff YAML backend: identical files diff clean, different ones do not."""

    def test_yaml_same(self):
        pair = ('./tests/test_same_1-a.yaml', './tests/test_same_1-b.yaml')
        self._test_same(cfgdiff.YAMLDiff, *pair)

    def test_yaml_different(self):
        pair = ('./tests/test_different_1-a.yaml', './tests/test_different_1-b.yaml')
        self._test_different(cfgdiff.YAMLDiff, *pair)
class Read(object):
    """Lightweight handle for a fastq read file inside a directory.

    `pth` is the joined path, or None when either component is missing.
    """

    def __init__(self, dir, file):
        # Parameter names are kept for caller compatibility even though they
        # shadow builtins.
        self.dir = dir
        self.file = file
        self.pth = os.path.join(dir, file) if dir is not None and file is not None else None

    def __str__(self):
        return '{} fastq read'.format(self.file)

    def __repr__(self):
        return '<{}.{} instance at {}>'.format(self.file, self.__class__.__name__, hex(id(self)))
def validate_model(model, val_loader):
    """Run the model over the validation loader and print unweighted recall."""
    print('Validating the model')
    model.eval()
    y_true, y_pred = [], []
    # No gradients needed for evaluation.
    with torch.no_grad():
        for x, mel in val_loader:
            x, mel = Variable(x).cuda(), Variable(mel).cuda()
            logits = model.forward_eval(mel)
            # Flatten the targets to one label per prediction.
            y_true.extend(x.cpu().view(-1).numpy().tolist())
            y_pred.extend(return_classes(logits).tolist())
    recall = get_metrics(y_pred, y_true)
    print('Unweighted Recall for the validation set: ', recall)
    print('\n')
.parametrize('prk,info,length,okm', [('c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5', 'f0f1f2f3f4f5f6f7f8f9', 42, '3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bfd5b'), ('06a6b88c5853361a06104c9ceb35b45cefa193f40c15fc244', 'b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff', 82, 'b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87'), ('19ef24a32c717b167f33a91d6f648bdfafdb6377ac434c1c293ccb04', '', 42, '8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8')])
def test_hkdf_expand(prk, info, length, okm):
    """hkdf_expand reproduces the expected output keying material (RFC 5869 vectors)."""
    expanded = hkdf_expand(bytes.fromhex(prk), bytes.fromhex(info), length)
    assert expanded == bytes.fromhex(okm)
def test_context_processing(app, client):
    """Blueprint-scoped context processors apply only to blueprint pages.

    NOTE(review): the bare expression lines below (`_bp.app_context_processor`,
    `_bp.context_processor`, `_bp.route('/bp')`, `('/')`) are evidently
    decorators whose '@' (and part of the target name) was stripped from this
    source — they should read e.g. `@answer_bp.app_context_processor`,
    `@answer_bp.context_processor`, `@answer_bp.route('/bp')` and
    `@app.route('/')`; as written this function does not register anything.
    """
    answer_bp = flask.Blueprint('answer_bp', __name__)
    # Renders both context keys when present, so assertions can distinguish
    # which processors applied to which page.
    template_string = (lambda : flask.render_template_string('{% if notanswer %}{{ notanswer }} is not the answer. {% endif %}{% if answer %}{{ answer }} is the answer.{% endif %}'))
    _bp.app_context_processor
    def not_answer_context_processor():
        return {'notanswer': 43}
    _bp.context_processor
    def answer_context_processor():
        return {'answer': 42}
    _bp.route('/bp')
    def bp_page():
        return template_string()
    ('/')
    def app_page():
        return template_string()
    app.register_blueprint(answer_bp)
    app_page_bytes = client.get('/').data
    answer_page_bytes = client.get('/bp').data
    # The app page sees only the app-wide value; the blueprint page sees both.
    assert (b'43' in app_page_bytes)
    assert (b'42' not in app_page_bytes)
    assert (b'42' in answer_page_bytes)
    assert (b'43' in answer_page_bytes)
def extractSayhellomtlSpace(item):
    """Map a sayhellomtl feed item to a release message via its tag mapping.

    Returns None for previews / items without chapter or volume info,
    a release message for a recognised tag, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tag, series, release_kind in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=release_kind)
    return False
class StubServerProtocolTCP(StubServerProtocol):
    """asyncio TCP protocol for the stub DNS server (DNS-over-TCP framing)."""

    def connection_made(self, transport):
        self.transport = transport
        self.addr = transport.get_extra_info('peername')
        # Reassembly buffer for partially received TCP segments.
        self.buffer = b''

    def data_received(self, data):
        # handle_dns_tcp_data consumes complete length-prefixed DNS messages
        # (invoking the callback per message) and returns the unconsumed tail.
        self.buffer = utils.handle_dns_tcp_data((self.buffer + data), self.receive_helper)

    def receive_helper(self, dnsq):
        # Dispatch each parsed query asynchronously so reading isn't blocked.
        asyncio.ensure_future(self.make_request(self.addr, dnsq))

    def on_answer(self, addr, msg):
        # DNS-over-TCP framing: two-byte big-endian length prefix.
        self.transport.write((struct.pack('!H', len(msg)) + msg))

    def eof_received(self):
        self.transport.close()
def _get_probability_of_entries_kept(k: int, table_index: int) -> float:
if (table_index > 5):
return 1
pow_2_k = (2 ** k)
if (table_index == 5):
return (1 - ((1 - (2 / pow_2_k)) ** pow_2_k))
else:
return (1 - ((1 - (2 / pow_2_k)) ** (_get_probability_of_entries_kept(k, (table_index + 1)) * pow_2_k))) |
class URLPathVersioning(BaseVersioning):
    """Versioning scheme that reads the API version from a URL path kwarg."""

    invalid_version_message = _('Invalid version in URL path.')

    def determine_version(self, request, *args, **kwargs):
        """Return the version from the URL kwargs, validated against the allowed set."""
        requested = kwargs.get(self.version_param)
        version = self.default_version if requested is None else requested
        if not self.is_allowed_version(version):
            raise exceptions.NotFound(self.invalid_version_message)
        return version

    def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
        """Reverse the URL, injecting the request's version into the kwargs."""
        if request.version is not None:
            # Explicit caller kwargs win over the injected version on collision.
            merged = {self.version_param: request.version}
            merged.update(kwargs or {})
            kwargs = merged
        return super().reverse(viewname, args, kwargs, request, format, **extra)
def extractIdletranslationsWordpressCom(item):
    """Map an idletranslations.wordpress.com feed item to a release message.

    Returns None for previews / items without chapter or volume info,
    a release message when a known series tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [('Kaifuku Jutsushi no Yarinaoshi', 'Kaifuku Jutsushi no Yarinaoshi ~ Sokushi Mahou to Skill Copy no Choetsu Heal', 'translated'), ('Nagai Koto', 'Nagai Koto', 'translated')]
    for tag, series, release_kind in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=release_kind)
    return False
('aea.cli.utils.package_utils.is_item_present', return_value=False)
.parametrize('vendor', [True, False])
def test_is_item_present_unified(mock_, vendor):
    """With the underlying presence check patched to False, the unified check is False."""
    ctx = mock.MagicMock()
    # Vendor case: agent author matches the item author; non-vendor: it differs.
    ctx.agent_config.author = 'some_author' if vendor else 'another_author'
    public_id = mock.MagicMock(author='some_author')
    assert not is_item_present_unified(ctx, 'some_component_type', public_id)
class PaymentACK():
    """BIP270 payment acknowledgement message.

    NOTE(review): from_dict and from_json take `cls` — @classmethod
    decorators appear to have been stripped from this source.
    """
    # Upper bound on accepted JSON payloads (11 MB) as DoS protection.
    MAXIMUM_JSON_LENGTH = ((11 * 1000) * 1000)

    def __init__(self, payment: Payment, memo: Optional[str]=None) -> None:
        self.payment = payment
        self.memo = memo

    def to_dict(self) -> Dict[(str, Any)]:
        """Serialise to a JSON-compatible dict; memo is omitted when falsy."""
        data: Dict[(str, Any)] = {'payment': self.payment.to_dict()}
        if self.memo:
            data['memo'] = self.memo
        return data

    def from_dict(cls, data: Dict[(str, Any)]) -> 'PaymentACK':
        """Build a PaymentACK from a decoded JSON dict, validating fields."""
        if ('payment' not in data):
            raise Bip270Exception("Missing required json 'payment' field")
        memo = data.get('memo')
        # memo must be a plain string when present.
        if ((memo is not None) and (type(memo) is not str)):
            raise Bip270Exception("Invalid json 'memo' field")
        payment = Payment.from_dict(data['payment'], ack=True)
        return cls(payment, memo)

    def to_json(self) -> str:
        data = self.to_dict()
        return json.dumps(data)

    def from_json(cls, s: str) -> 'PaymentACK':
        """Parse JSON text into a PaymentACK, enforcing the size limit first."""
        if (len(s) > cls.MAXIMUM_JSON_LENGTH):
            raise Bip270Exception(f'Invalid payment ACK, too large')
        data = json.loads(s)
        return cls.from_dict(data)
def _make_tensor_usage_records_simple_multistream(par_ops_seq: List[List[Operator]]) -> List[TensorUsageRecord]:
    """Build tensor usage records (first/last op step and byte size) for a
    multistream schedule, where each entry of ``par_ops_seq`` is a group of
    ops executed in parallel at the same step.

    Params are skipped, views are folded into their original tensors, and
    graph outputs are kept alive until the final step.
    """
    num_of_ops = len(par_ops_seq)
    # Default record is an empty interval (first > last) so min/max below
    # work without special-casing the first occurrence.
    tensor_records = defaultdict((lambda : TensorUsageRecord(tensor=None, first_op_idx=num_of_ops, last_op_idx=(- 1), size=None)))
    for (op_idx, par_ops) in enumerate(par_ops_seq):
        for op in par_ops:
            for tensor in (op._attrs['inputs'] + op._attrs['outputs']):
                if tensor._attrs['is_param']:
                    # Params are not managed by this memory planner.
                    continue
                name = tensor._attrs['name']
                this_tensor = tensor_records[name].tensor
                if (this_tensor is None):
                    tensor_records[name].tensor = tensor
                else:
                    # The same name must always refer to the same tensor.
                    assert (tensor == this_tensor), f'existing tensor: {this_tensor}, new tensor: {tensor}, op: {op}'
                first_op_idx = tensor_records[name].first_op_idx
                last_op_idx = tensor_records[name].last_op_idx
                tensor_records[name].first_op_idx = min(first_op_idx, op_idx)
                tensor_records[name].last_op_idx = max(last_op_idx, op_idx)
                if tensor._attrs['is_output']:
                    # Graph outputs must survive until the last step.
                    tensor_records[name].last_op_idx = (num_of_ops - 1)
                size = tensor_records[name].size
                tensor_size = tensor.size_bytes(alignment=64)
                if (size is None):
                    tensor_records[name].size = tensor_size
                else:
                    # Size must be consistent across all uses.
                    assert (size == tensor_size)
    # Fold each view's lifetime into its original tensor, then drop the
    # view records: views share the original's storage.
    tensor_views = []
    for (name, tensor_record) in tensor_records.items():
        this_tensor = tensor_record.tensor
        if this_tensor._attrs['is_view_of']:
            orig_tensor = _find_original_tensor(this_tensor)
            if orig_tensor._attrs['is_param']:
                # View of a param: nothing to plan for.
                continue
            orig_tensor_name = orig_tensor._attrs['name']
            assert (orig_tensor_name in tensor_records)
            tensor_records[orig_tensor_name].last_op_idx = max(tensor_records[orig_tensor_name].last_op_idx, tensor_record.last_op_idx)
            tensor_views.append(name)
    for name in tensor_views:
        del tensor_records[name]
    records = tensor_records.values()
    # Sanity checks; TensorUsageRecord unpacks as a 4-tuple here.
    for (tensor, first_op_idx, last_op_idx, size) in records:
        assert (tensor is not None)
        assert (0 <= first_op_idx < num_of_ops)
        assert (0 <= last_op_idx < num_of_ops)
        assert (first_op_idx <= last_op_idx)
        assert (size is not None)
    return list(records)
def test_email_address_parsing():
    """Edge cases for email extraction: the 'complete' candidate list may
    keep junk characters while the strict list degrades to empty strings.

    NOTE(review): several input literals here look truncated by text
    extraction; they are preserved byte-for-byte.
    """
    text = 'my email is: foo"'
    results = find_iocs(text)
    assert results['email_addresses_complete'] == ['foo"']
    assert results['email_addresses'] == ['']
    text = 'Abc\\'
    results = find_iocs(text)
    print(results['email_addresses_complete'])
    print(results['email_addresses'])
    assert results['email_addresses_complete'] == ['Abc\\']
    assert results['email_addresses'] == ['']
    text = '"'
    results = find_iocs(text)
    assert results['email_addresses_complete'] == ['']
    assert results['email_addresses'] == ['']
    text = ''
    results = find_iocs(text)
    assert results['email_addresses_complete'] == []
    assert results['email_addresses'] == []
    text = '"'
    results = find_iocs(text)
    assert results['email_addresses_complete'] == ['"']
    assert results['email_addresses'] == ['']
    text = 'smtp.mailfrom='
    results = find_iocs(text)
    assert results['email_addresses_complete'] == ['smtp.mailfrom=']
    assert results['email_addresses'] == ['']
    text = '""'
    results = find_iocs(text)
    assert results['email_addresses_complete'] == ['"']
    assert results['email_addresses'] == ['']
    text = '.'
    results = find_iocs(text, parse_domain_from_email_address=False)
    assert results['email_addresses'] == ['']
    assert results['domains'] == []
    text = '"'
    results = find_iocs(text)
    assert results['email_addresses'] == ['']
    text = '-----'
    results = find_iocs(text)
    assert results['email_addresses'] == ['']
    text = 'foo- f-'
    results = find_iocs(text)
    assert len(results['email_addresses']) == 2
    assert 'foo-' in results['email_addresses']
    assert 'f-' in results['email_addresses']
@pytest.mark.skip
@using('turbomole')
def test_turbomole_cos(this_dir):
    """Grow and optimize a chain-of-states between the first and last
    benchmark geometries using ground-state Turbomole calculations.

    NOTE(review): the two decorators above were garbled in this file
    ('.skip' / "('turbomole')") and have been reconstructed -- confirm
    against upstream.
    """

    def calc_getter(charge, mult):
        # Turbomole calculator bound to the prepared control directory.
        calc_kwargs = {'charge': charge, 'mult': mult, 'control_path': (this_dir / 'control_cos'), 'pal': 2}
        return Turbomole(**calc_kwargs)

    def gs_calc_getter():
        # Neutral singlet ground state.
        return calc_getter(charge=0, mult=1)

    bench = Benchmark('xtb_rx', calc_getter=calc_getter)
    geoms = bench.get_geoms(11, set_calculator=True)
    for i, geom in enumerate(geoms):
        en = geom.energy
        print(f'{i:02d}: {en:.6f} au')
    # Fixed: '(start, _, end) = geoms' cannot unpack the full geometry
    # list; only the two endpoints are used as initial images.
    start, *_, end = geoms
    images = (start, end)
    cos_kwargs = {'calc_getter': gs_calc_getter, 'max_nodes': 9, 'climb': True}
    # Fixed: cos_kwargs was built but never used.
    cos = GrowingString(images, **cos_kwargs)
    opt_kwargs = {'rms_force': 0.002, 'rms_force_only': True, 'dump': True}
    opt = StringOptimizer(cos, **opt_kwargs)
    opt.run()
    assert opt.is_converged
    ens = [image.energy for image in cos.images]
    assert max(ens) == pytest.approx(-178.0)
    assert opt.cur_cycle == 9
def extractMoonlightMltBlogspotCom(item):
    """Parse a release feed item from moonlight-mlt.blogspot.com.

    Returns a release message dict for a recognized tag, None for
    previews or items without a chapter/volume, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (feed tag, canonical series name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionPlotoptionsBubbleSonificationContexttracksActivewhen(Options):
    """Generated option wrapper for the Highcharts
    ``plotOptions.bubble.sonification.contextTracks.activeWhen`` settings.

    NOTE(review): each option below appears as a same-named getter/setter
    pair; the ``@property`` / ``@<name>.setter`` decorators appear to have
    been stripped from this file (as written, each later ``def`` shadows
    the earlier one) -- confirm against the generated upstream source.
    """
    def crossingDown(self):
        # Getter; no default value.
        return self._config_get(None)
    def crossingDown(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def crossingUp(self):
        return self._config_get(None)
    def crossingUp(self, num: float):
        self._config(num, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def prop(self):
        return self._config_get(None)
    def prop(self, text: str):
        self._config(text, js_type=False)
class Solution2():
    """Iterative (explicit-stack) variant of root-to-leaf path sum."""

    def pathSum(self, root: TreeNode, s: int) -> List[List[int]]:
        """Return all root-to-leaf paths whose node values sum to ``s``."""
        if root is None:
            return []
        paths = []
        # Stack entries: (node, path to its parent, remaining target).
        stack = [(root, [], s)]
        while stack:
            node, prefix, remaining = stack.pop()
            if node is None:
                continue
            extended = prefix + [node.val]
            if node.left is None and node.right is None:
                # Leaf: keep the path only if the target is met exactly.
                if node.val == remaining:
                    paths.append(extended)
                continue
            stack.append((node.left, extended, remaining - node.val))
            stack.append((node.right, extended, remaining - node.val))
        return paths
def backprop_dish(dY, X, *, inplace: bool=False, threads_per_block=128, num_blocks=128):
    """Backward pass of the dish activation on GPU float arrays.

    Writes the gradient into ``dY`` when ``inplace`` is True, otherwise
    into a freshly allocated array of the same shape/dtype.
    """
    _is_float_array(dY)
    _is_float_array(X, shape=dY.shape)
    grad = dY if inplace else _alloc_like(dY, zeros=False)
    # Pick the kernel matching the input precision.
    kernel = (backprop_dish_kernel_float if dY.dtype == 'float32'
              else backprop_dish_kernel_double)
    kernel((num_blocks,), (threads_per_block,), (grad, dY, X, grad.size))
    return grad
def _get_images() -> Dict[(str, str)]:
    """Build and push the test docker images, returning name -> pushed tag.

    Honors AMBASSADOR_DOCKER_IMAGE for the emissary image; otherwise the
    emissary image is built alongside the test images.
    """
    images: Dict[(str, str)] = {}
    names = ['test-auth', 'test-shadow', 'test-stats', 'kat-client', 'kat-server']
    env_image = os.environ.get('AMBASSADOR_DOCKER_IMAGE')
    if env_image:
        images['emissary'] = env_image
    else:
        names.append('emissary')
    targets = [f'docker/{name}.docker.push.remote' for name in names]
    try:
        subprocess.run(['make'] + targets, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    except subprocess.CalledProcessError as err:
        # Surface make's combined output in the failure message.
        raise Exception(f'{err.stdout}{err}') from err
    for name in names:
        # The pushed tag is recorded on the second line of the marker file.
        with open(f'docker/{name}.docker.push.remote', 'r') as fh:
            images[name] = fh.readlines()[1].strip()
    return images
class TreeItemDelegate(QtGui.QStyledItemDelegate):
    """Item delegate that routes size-hint and paint calls to a node's
    custom renderer, falling back to the default Qt behaviour when the
    node has no renderer for the column."""

    def sizeHint(self, option, index):
        """Return the renderer-provided size, or Qt's default."""
        item = self.editor._tree.itemFromIndex(index)
        (expanded, node, instance) = self.editor._get_node_data(item)
        # Fixed: 'column' was computed twice; the first assignment was dead.
        column = index.column()
        renderer = node.get_renderer(object, column=column)
        if renderer is None:
            return super().sizeHint(option, index)
        size_context = (option, index)
        size = renderer.size(self.editor, node, column, instance, size_context)
        if size is None:
            # Renderer declined to size: use a minimal default row height.
            return QtCore.QSize(1, 21)
        else:
            return QtCore.QSize(*size)

    def updateEditorGeometry(self, editor, option, index):
        """Fit the inline editor exactly over the item's cell rect."""
        editor.setGeometry(option.rect)

    def paint(self, painter, option, index):
        """Paint via the node's renderer; fall back to (or combine with)
        the default painting as the renderer requests."""
        item = self.editor._tree.itemFromIndex(index)
        (expanded, node, instance) = self.editor._get_node_data(item)
        column = index.column()
        renderer = node.get_renderer(object, column=column)
        if (renderer is None) and self.editor.factory.word_wrap:
            renderer = DEFAULT_WRAP_RENDERER
        if renderer is None:
            super().paint(painter, option, index)
        else:
            if not renderer.handles_all:
                # Renderer only augments Qt's default painting.
                super().paint(painter, option, index)
            paint_context = (painter, option, index)
            size = renderer.paint(self.editor, node, column, instance, paint_context)
            if size is not None:
                # Painting yielded a new size: refresh the layout lazily.
                do_later(self.sizeHintChanged.emit, index)
def test_transform_gif(tmp_path, fx_asset):
    """transform() on an animated GIF resizes every frame and preserves
    both the frame count and per-frame delays, including after a save/
    reload round trip."""
    source_path = str(fx_asset / 'nocomments-delay-100.gif')
    output_path = str(tmp_path / 'test_transform_gif.gif')
    with Image(filename=source_path) as img:
        assert len(img.sequence) == 46
        assert img.size == (350, 197)
        for frame in img.sequence:
            assert frame.delay == 100
        img.transform(resize='175x98!')
        assert len(img.sequence) == 46
        assert img.size == (175, 98)
        for frame in img.sequence:
            assert frame.size == (175, 98)
            assert frame.delay == 100
        img.save(filename=output_path)
    with Image(filename=output_path) as reloaded:
        assert len(reloaded.sequence) == 46
        assert reloaded.size == (175, 98)
        for frame in reloaded.sequence:
            assert frame.size == (175, 98)
            assert frame.delay == 100
class TestPythonChained(util.PluginTestCase):
    """Spell-check Python source content through the text filter chained
    into the Python filter (with comment grouping enabled)."""
    def setup_fs(self):
        # Matrix config: run pyspelling.filters.text first, then
        # pyspelling.filters.python with group_comments enabled.
        config = self.dedent("\n matrix:\n - name: python\n sources:\n - '{}/**/*.txt'\n aspell:\n lang: en\n d: en_US\n hunspell:\n d: en_US\n pipeline:\n - pyspelling.filters.text:\n - pyspelling.filters.python:\n group_comments: true\n ").format(self.tempdir)
        self.mktemp('.python.yml', config, 'utf-8')
    def test_python_after_text(self):
        """All misspellings in docstrings and comments are reported; the
        correctly spelled filler words are not."""
        bad_docstring = ['helo', 'begn']
        bad_comments = ['flga', 'graet']
        bad_comments2 = ['recieve', 'teh']
        bad_words = ((bad_docstring + bad_comments) + bad_comments2)
        good_words = ['yes', 'word']
        # A module docstring plus two grouped comment lines inside a function.
        template = self.dedent('\n """\n {}\n """\n def function():\n # {}\n # {}\n ').format('\n'.join((bad_docstring + good_words)), ' '.join((bad_comments + good_words)), ' '.join((bad_comments2 + good_words)))
        self.mktemp('test.txt', template, 'utf-8')
        self.assert_spellcheck('.python.yml', bad_words)
def test_plot_style_copy_style():
    """copyStyleFrom copies the visual attributes but, by default, neither
    the name nor the enabled state; copy_enabled_state=True also carries
    over the enabled flag."""
    source = PlotStyle('Test', 'red', 0.5, '.', 'o', 2.5)
    source.setEnabled(False)
    clone = PlotStyle('Copy')
    clone.copyStyleFrom(source)
    assert source.name != clone.name
    for attr in ('color', 'alpha', 'line_style', 'marker', 'width', 'size'):
        assert getattr(source, attr) == getattr(clone, attr)
    assert source.isEnabled() != clone.isEnabled()
    full_clone = PlotStyle('Another Copy')
    full_clone.copyStyleFrom(source, copy_enabled_state=True)
    assert source.isEnabled() == full_clone.isEnabled()
class TestCursorPagination(CursorPaginationTestsMixin):
    """Run the shared cursor-pagination test mixin against an in-memory
    queryset mock, so no database model is required."""
    def setup_method(self):
        # Minimal model stand-in exposing only the 'created' ordering field.
        class MockObject():
            def __init__(self, idx):
                self.created = idx
        # Queryset stand-in supporting just what CursorPagination uses:
        # filter(Q), order_by(...), and slicing.
        class MockQuerySet():
            def __init__(self, items):
                self.items = items
            def filter(self, q):
                # The lookup kwargs may land in slot 1 or slot 2 of
                # Q.deconstruct() (presumably depending on Q internals /
                # Django version -- TODO confirm); try both.
                q_args = dict(q.deconstruct()[1])
                if (not q_args):
                    q_args = dict(q.deconstruct()[2])
                created__gt = q_args.get('created__gt')
                created__lt = q_args.get('created__lt')
                if (created__gt is not None):
                    return MockQuerySet([item for item in self.items if ((item.created is None) or (item.created > int(created__gt)))])
                assert (created__lt is not None)
                return MockQuerySet([item for item in self.items if ((item.created is None) or (item.created < int(created__lt)))])
            def order_by(self, *ordering):
                # Items are kept pre-sorted ascending; a leading '-' means
                # the paginator wants the reversed order.
                if ordering[0].startswith('-'):
                    return MockQuerySet(list(reversed(self.items)))
                return self
            def __getitem__(self, sliced):
                return self.items[sliced]
        class ExamplePagination(pagination.CursorPagination):
            page_size = 5
            page_size_query_param = 'page_size'
            max_page_size = 20
            ordering = 'created'
        self.pagination = ExamplePagination()
        # Long runs of equal values deliberately exercise the cursor's
        # position+offset handling.
        self.queryset = MockQuerySet([MockObject(idx) for idx in [1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 4, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 9, 9, 9, 9, 9, 9]])
    def get_pages(self, url):
        """Fetch ``url`` and return (previous page items, current page
        items, next page items, previous URL, next URL)."""
        request = Request(factory.get(url))
        queryset = self.pagination.paginate_queryset(self.queryset, request)
        current = [item.created for item in queryset]
        next_url = self.pagination.get_next_link()
        previous_url = self.pagination.get_previous_link()
        if (next_url is not None):
            request = Request(factory.get(next_url))
            queryset = self.pagination.paginate_queryset(self.queryset, request)
            next = [item.created for item in queryset]
        else:
            next = None
        if (previous_url is not None):
            request = Request(factory.get(previous_url))
            queryset = self.pagination.paginate_queryset(self.queryset, request)
            previous = [item.created for item in queryset]
        else:
            previous = None
        return (previous, current, next, previous_url, next_url)
class OptionSeriesArcdiagramSonificationDefaultspeechoptions(Options):
    """Generated option wrapper for the Highcharts
    ``series.arcdiagram.sonification.defaultSpeechOptions`` settings.

    NOTE(review): scalar options below appear as same-named getter/setter
    pairs; the ``@property`` / ``@<name>.setter`` decorators appear to have
    been stripped from this file (as written, each later ``def`` shadows
    the earlier one) -- confirm against the generated upstream source.
    """
    def activeWhen(self) -> 'OptionSeriesArcdiagramSonificationDefaultspeechoptionsActivewhen':
        # Nested sub-options object.
        return self._config_sub_data('activeWhen', OptionSeriesArcdiagramSonificationDefaultspeechoptionsActivewhen)
    def language(self):
        # Default: 'en-US'.
        return self._config_get('en-US')
    def language(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionSeriesArcdiagramSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesArcdiagramSonificationDefaultspeechoptionsMapping)
    def pointGrouping(self) -> 'OptionSeriesArcdiagramSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesArcdiagramSonificationDefaultspeechoptionsPointgrouping)
    def preferredVoice(self):
        return self._config_get(None)
    def preferredVoice(self, text: str):
        self._config(text, js_type=False)
    def showPlayMarker(self):
        # Default: True.
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        # Default track type: 'speech'.
        return self._config_get('speech')
    def type(self, text: str):
        self._config(text, js_type=False)
def get_episodes(html, url):
    """Extract gallery episodes from a DeviantArt page.

    Delegates to the JSON API for '_napi' URLs; otherwise parses the
    embedded initial state and, when the gallery has more pages, primes
    ``next_page_cache`` with the API URL for the next batch.
    Returns episodes in oldest-first order.
    """
    if '_napi' in url:
        return get_episodes_from_api(html, url)
    state = get_state(html)
    check_login(state)
    deviation = state['']['deviation']
    gallery = None
    for key, value in state[''].items():
        if key.startswith('folder-deviations-gallery'):
            gallery = value
            break
    episodes = []
    for id in gallery['items']:
        d = deviation[str(id)]
        episodes.append(Episode(f"{id} - {d['title']}", d['url']))
    if gallery['hasMore']:
        username = next(iter(state['']['user'].values()))['username']
        params = {'username': username, 'offset': gallery['nextOffset'], 'limit': gallery['itemsPerFetch'], 'mode': 'newest'}
        folder_id = gallery['streamParams']['folderId']
        if folder_id < 0:
            # Negative folder ids denote the "all" pseudo-folder.
            params['all_folder'] = 'true'
        else:
            params['folderid'] = folder_id
        # NOTE(review): the URL literal here was corrupted in this file
        # ("update_qs(' params)"); reconstructed from DeviantArt's _napi
        # gallery endpoint -- confirm against upstream.
        next_page_cache[url] = update_qs(
            'https://www.deviantart.com/_napi/da-user-profile/api/gallery/contents?', params)
    return episodes[::-1]
class FeatureBar(HasPrivateTraits):
    """Pop-up window that displays a DockControl's active feature icons and
    dispatches clicks, drags and drops on them (wx backend).

    Private state (``_feature``, ``_dragging``, ``_xy``,
    ``_tooltip_feature``) relies on HasPrivateTraits' implicit
    underscore-prefixed traits.
    """

    # The window over which the feature bar pops up.
    parent = Instance(wx.Window)

    # The dock control whose active features are displayed.
    dock_control = Instance(DockControl)

    # The wx window implementing the feature bar itself.
    control = Instance(wx.Window)

    # Event fired when the user is done interacting with the bar.
    completed = Event()

    # Background fill color; None paints no background.
    # NOTE(review): the original default was garbled in this file
    # ("Color(, allow_none=True)"); 0xDBEEF7 is a reconstruction --
    # confirm against upstream.
    bg_color = Color(0xDBEEF7, allow_none=True)

    # Border color; None draws no border.
    border_color = Color(2458543, allow_none=True)

    # Lay the feature icons out horizontally (True) or vertically (False)?
    horizontal = Bool(True)

    def hide(self):
        """Hide the feature bar window, if one has been created."""
        if self.control is not None:
            self.control.Hide()

    def show(self):
        """Create (on first use), size, position and show the feature bar
        for the current dock control."""
        dock_control, parent = self.dock_control, self.parent
        if (dock_control is None) or (parent is None):
            return
        control = self.control
        if control is None:
            self.control = control = wx.Frame(None, -1, '', style=wx.BORDER_NONE)
            control.Bind(wx.EVT_ERASE_BACKGROUND, self._erase_background)
            control.Bind(wx.EVT_PAINT, self._paint)
            control.Bind(wx.EVT_LEFT_DOWN, self._left_down)
            control.Bind(wx.EVT_LEFT_UP, self._left_up)
            control.Bind(wx.EVT_RIGHT_DOWN, self._right_down)
            control.Bind(wx.EVT_RIGHT_UP, self._right_up)
            control.Bind(wx.EVT_MOTION, self._mouse_move)
            control.Bind(wx.EVT_ENTER_WINDOW, self._mouse_enter)
            control.SetDropTarget(PythonDropTarget(self))
        # Accumulate the icon layout size; 32x32 is the empty-bar minimum.
        size = wx.Size(32, 32)
        width = height = 0
        horizontal = self.horizontal
        for feature in dock_control.active_features:
            bitmap = feature.bitmap
            if bitmap is not None:
                if horizontal:
                    width += (bitmap.GetWidth() + 3)
                    height = max(height, bitmap.GetHeight())
                else:
                    width = max(width, bitmap.GetWidth())
                    height += (bitmap.GetHeight() + 3)
        if width > 0:
            # Add the margins around the icon strip.
            if horizontal:
                size = wx.Size(width + 5, height + 8)
            else:
                size = wx.Size(width + 8, height + 5)
        control.SetSize(size)
        # Position relative to the parent's screen origin.
        px, py = parent.GetScreenPosition()
        fx, fy = dock_control.feature_popup_position
        control.SetPosition(wx.Point(px + fx, py + fy))
        control.Show()

    def _paint(self, event):
        """Paint the background, border and feature icons."""
        window = self.control
        dx, dy = window.GetSize().Get()
        dc = wx.PaintDC(window)
        bg_color = self.bg_color
        border_color = self.border_color
        if (bg_color is not None) or (border_color is not None):
            if border_color is None:
                dc.SetPen(wx.TRANSPARENT_PEN)
            else:
                dc.SetPen(wx.Pen(border_color, 1, wx.SOLID))
            if bg_color is None:
                # Fixed: was wx.TRANSPARENT_PEN -- SetBrush needs a brush.
                dc.SetBrush(wx.TRANSPARENT_BRUSH)
            else:
                dc.SetBrush(wx.Brush(bg_color, wx.SOLID))
            dc.DrawRectangle(0, 0, dx, dy)
        # Draw the icons with a 3px gap, inset 4px from the edge.
        if self.horizontal:
            x = 4
            for feature in self.dock_control.active_features:
                bitmap = feature.bitmap
                if bitmap is not None:
                    dc.DrawBitmap(bitmap, x, 4, True)
                    x += (bitmap.GetWidth() + 3)
        else:
            y = 4
            for feature in self.dock_control.active_features:
                bitmap = feature.bitmap
                if bitmap is not None:
                    dc.DrawBitmap(bitmap, 4, y, True)
                    y += (bitmap.GetHeight() + 3)

    def _erase_background(self, event):
        """Suppress background erase to avoid flicker (painting covers it)."""
        pass

    def _left_down(self, event):
        """Record the feature (if any) under a left press and arm dragging."""
        self._feature = self._feature_at(event)
        self._dragging = False
        self._xy = (event.GetX(), event.GetY())

    def _left_up(self, event):
        """Complete a left click on a feature (if released over the same one)."""
        self._dragging = None
        feature, self._feature = self._feature, None
        if feature is not None:
            if feature is self._feature_at(event):
                self.control.ReleaseMouse()
                self.completed = True
                feature._set_event(event)
                feature.click()

    def _right_down(self, event):
        """Record the feature (if any) under a right press and arm dragging."""
        self._feature = self._feature_at(event)
        self._dragging = False
        self._xy = (event.GetX(), event.GetY())

    def _right_up(self, event):
        """Complete a right click on a feature (if released over the same one)."""
        self._dragging = None
        feature, self._feature = self._feature, None
        if feature is not None:
            if feature is self._feature_at(event):
                self.control.ReleaseMouse()
                self.completed = True
                feature._set_event(event)
                feature.right_click()

    def _mouse_move(self, event):
        """Track hover tooltips, detect bar exit, and start feature drags."""
        if self._dragging is None:
            # Not in a press: manage the tooltip for the hovered feature.
            feature = self._feature_at(event)
            if feature is not self._tooltip_feature:
                self._tooltip_feature = feature
                tooltip = ''
                if feature is not None:
                    tooltip = feature.tooltip
                # Toggle to force wx to refresh the tooltip text.
                wx.ToolTip.Enable(False)
                wx.ToolTip.Enable(True)
                self.control.SetToolTip(wx.ToolTip(tooltip))
            # Leaving the bar ends the interaction.
            x, y = event.GetX(), event.GetY()
            dx, dy = self.control.GetSize().Get()
            if (x < 0) or (y < 0) or (x >= dx) or (y >= dy):
                self.control.ReleaseMouse()
                self._tooltip_feature = None
                self.completed = True
            return
        if not self._dragging:
            # Require a small movement threshold before starting a drag.
            x, y = self._xy
            if (abs(x - event.GetX()) + abs(y - event.GetY())) < 3:
                return
            self._dragging = True
            feature = self._feature
            if feature is not None:
                feature._set_event(event)
                # Dispatch to the drag variant matching button/modifiers,
                # e.g. 'control_drag', 'shift_right_drag'.
                prefix = button = ''
                if event.RightIsDown():
                    button = 'right_'
                if event.ControlDown():
                    prefix = 'control_'
                elif event.AltDown():
                    prefix = 'alt_'
                elif event.ShiftDown():
                    prefix = 'shift_'
                object = getattr(feature, '%s%sdrag' % (prefix, button))()
                if object is not None:
                    self.control.ReleaseMouse()
                    self._feature = None
                    self.completed = True
                    self.dock_control.pre_drag_all(object)
                    PythonDropSource(self.control, object)
                    self.dock_control.post_drag_all()
                    self._dragging = None

    def _mouse_enter(self, event):
        """Capture the mouse while the cursor is over the feature bar."""
        self.control.CaptureMouse()

    def wx_dropped_on(self, x, y, data, drag_result):
        """Handle an object dropped on the bar; route it to the feature
        under the drop point when that feature accepts it."""
        feature = self._can_drop_on_feature(x, y, data)
        self.completed = True
        self.dock_control.post_drag(FEATURE_EXTERNAL_DRAG)
        if feature is not None:
            if isinstance(data, IFeatureTool):
                dock_control = feature.dock_control
                data.feature_dropped_on_dock_control(dock_control)
                data.feature_dropped_on(dock_control.object)
            else:
                # Renamed from (wx, wy): the original shadowed the wx module.
                sx, sy = self.control.GetScreenPosition()
                feature.trait_set(x=sx + x, y=sy + y)
                feature.drop(data)
            return drag_result
        return wx.DragNone

    def wx_drag_over(self, x, y, data, drag_result):
        """Allow the drag only while over a feature that accepts the data."""
        if self._can_drop_on_feature(x, y, data) is not None:
            return drag_result
        return wx.DragNone

    def wx_drag_leave(self, data):
        """End the interaction when the drag leaves the bar."""
        self.completed = True
        self.dock_control.post_drag(FEATURE_EXTERNAL_DRAG)

    def _can_drop_on_feature(self, x, y, data):
        """Return the feature at (x, y) if it accepts ``data``, else None."""
        feature = self._feature_at(FakeEvent(x, y))
        if (feature is not None) and feature.can_drop(data):
            return feature
        return None

    def _feature_at(self, event):
        """Return the feature whose icon is under the event position, or None."""
        if self.horizontal:
            x = 4
            for feature in self.dock_control.active_features:
                bitmap = feature.bitmap
                if bitmap is not None:
                    bdx = bitmap.GetWidth()
                    if self._is_in(event, x, 4, bdx, bitmap.GetHeight()):
                        return feature
                    x += (bdx + 3)
        else:
            y = 4
            for feature in self.dock_control.active_features:
                bitmap = feature.bitmap
                if bitmap is not None:
                    bdy = bitmap.GetHeight()
                    if self._is_in(event, 4, y, bitmap.GetWidth(), bdy):
                        return feature
                    y += (bdy + 3)
        return None

    def _is_in(self, event, x, y, dx, dy):
        """Is the event position inside the (x, y, dx, dy) rectangle?"""
        return (x <= event.GetX() < (x + dx)) and (y <= event.GetY() < (y + dy))
def test_expanding_sum_multiple_vars(df_time):
    """ExpandingWindowFeatures with two variables and a single 'sum'
    function appends one expanding-sum column per variable."""
    expected = df_time.copy()
    expected['ambient_temp_expanding_sum'] = [np.nan, 31.31, 62.82, 94.97, 127.36, 159.98, 192.48, 225.0, 257.68, 291.44, 325.57, 359.65, 393.35, 427.24, 461.28]
    expected['irradiation_expanding_sum'] = [np.nan, 0.51, 1.3, 1.95, 2.71, 3.13, 3.62, 4.19, 4.75, 5.49, 6.38, 6.85, 7.39, 7.79, 8.24]
    transformer = ExpandingWindowFeatures(variables=['ambient_temp', 'irradiation'], functions='sum')
    assert_frame_equal(transformer.fit_transform(df_time), expected)
class HeatPerturbation(AbstractPerturbation):
    """Abstract base for temperature-dependent perturbations to a medium."""

    # Inclusive temperature interval (K) over which the model is valid.
    temperature_range: Tuple[(pd.NonNegativeFloat, pd.NonNegativeFloat)] = pd.Field((0, inf), title='Temperature range', description='Temparature range in which perturbation model is valid.', units=KELVIN)

    def sample(self, temperature: Union[(ArrayLike[float], SpatialDataArray)]) -> Union[(ArrayLike[float], ArrayLike[Complex], SpatialDataArray)]:
        """Sample the perturbation at the given temperature points.

        NOTE(review): this body contained only the residue '_ax_if_none'
        (apparently a garbled decorator); it has been replaced with an
        abstract-style stub since subclasses override it -- confirm
        against upstream whether this was declared @abstractmethod.
        """
        raise NotImplementedError

    def plot(self, temperature: ArrayLike[float], val: FieldVal='real', ax: Ax=None) -> Ax:
        """Plot sampled perturbation values versus temperature on ``ax``.

        NOTE(review): upstream presumably decorated this with an
        add-ax-if-none helper so that ax=None creates axes; as written,
        a valid ``ax`` must be supplied -- confirm against upstream.
        """
        temperature_numpy = np.array(temperature)
        values = self.sample(temperature_numpy)
        values = self._get_val(values, val)
        ax.plot(temperature_numpy, values)
        ax.set_xlabel('temperature (K)')
        ax.set_ylabel(f'{val}(perturbation value)')
        ax.set_title('temperature dependence')
        ax.set_aspect('auto')
        return ax
@_page.route('/table/delete_records', methods=['POST'])
def delete_records():
    """Mark the posted record ids as deleted and return a JSON status.

    NOTE(review): the route decorator's leading '@' was missing in this
    file and has been restored.
    """
    res = check_uuid(all_data['uuid'], request.json['uuid'])
    if res is not None:  # fixed: was '!= None'
        return jsonify(res)
    for record_id in request.json['ids']:
        # Only rows that actually exist can be tombstoned.
        if record_id in all_data['data']:
            all_data['deleted_rows'][record_id] = 1
    return jsonify(status='success', msg='')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.