code stringlengths 281 23.7M |
|---|
class DictElementLayer(Layer):
    """Layer that selects a single element, by key, from a DictLayer's output.

    Output value, shape and dtype are whatever the wrapped DictLayer reports
    for ``key``.
    """

    def __init__(self, incoming, key, name=None):
        assert isinstance(incoming, DictLayer)
        assert (key in incoming.keys())
        # Wire up the lasagne-style Layer attributes by hand rather than
        # delegating to Layer.__init__.
        self.input_layer = incoming
        self.input_shape = incoming.output_shape
        self.key = key
        self.name = name
        self.params = OrderedDict()
        self.get_output_kwargs = []

    def get_output_for(self, inputs_dict, **flags):
        # The upstream DictLayer produces a dict; just index into it.
        return inputs_dict[self.key]

    def get_output_shape_for(self, input_shapes_dict, **flags):
        return input_shapes_dict[self.key]

    def output_dtype(self):
        dtypes = get_layer_dtype(self.input_layer)
        assert isinstance(dtypes, dict)
        return dtypes[self.key]
class NASNetALarge(nn.Module):
    """NASNet-A Large network.

    Topology: a 3x3 stem conv, two stem cells, then three stages of six
    cells each (one FirstCell followed by five NormalCells), separated by
    two reduction cells, ending with ReLU, global pooling and a linear
    classifier. Each cell consumes the outputs of the two preceding cells.
    """

    def __init__(self, num_classes=1000, in_chans=1, stem_size=96, channel_multiplier=2, num_features=4032, output_stride=32, drop_rate=0.0, global_pool='avg', pad_type='same'):
        super(NASNetALarge, self).__init__()
        self.num_classes = num_classes
        self.stem_size = stem_size
        self.num_features = num_features
        self.channel_multiplier = channel_multiplier
        self.drop_rate = drop_rate
        # Only the standard stride-32 configuration is supported.
        assert (output_stride == 32)
        # Base per-cell channel count: 4032 // 24 = 168 for the defaults.
        channels = (self.num_features // 24)
        # NOTE(review): in_chans defaults to 1; image models typically default
        # to 3 (RGB) — confirm this default is intended.
        self.conv0 = ConvBnAct(in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, norm_kwargs=dict(eps=0.001, momentum=0.1), act_layer=None)
        # Stem cells work at reduced channel counts relative to the first stage.
        self.cell_stem_0 = CellStem0(self.stem_size, num_channels=(channels // (channel_multiplier ** 2)), pad_type=pad_type)
        self.cell_stem_1 = CellStem1(self.stem_size, num_channels=(channels // channel_multiplier), pad_type=pad_type)
        # Stage 1: six cells at the base channel width.
        self.cell_0 = FirstCell(in_chs_left=channels, out_chs_left=(channels // 2), in_chs_right=(2 * channels), out_chs_right=channels, pad_type=pad_type)
        self.cell_1 = NormalCell(in_chs_left=(2 * channels), out_chs_left=channels, in_chs_right=(6 * channels), out_chs_right=channels, pad_type=pad_type)
        self.cell_2 = NormalCell(in_chs_left=(6 * channels), out_chs_left=channels, in_chs_right=(6 * channels), out_chs_right=channels, pad_type=pad_type)
        self.cell_3 = NormalCell(in_chs_left=(6 * channels), out_chs_left=channels, in_chs_right=(6 * channels), out_chs_right=channels, pad_type=pad_type)
        self.cell_4 = NormalCell(in_chs_left=(6 * channels), out_chs_left=channels, in_chs_right=(6 * channels), out_chs_right=channels, pad_type=pad_type)
        self.cell_5 = NormalCell(in_chs_left=(6 * channels), out_chs_left=channels, in_chs_right=(6 * channels), out_chs_right=channels, pad_type=pad_type)
        # Reduction 1: halves spatial size, doubles channel width.
        self.reduction_cell_0 = ReductionCell0(in_chs_left=(6 * channels), out_chs_left=(2 * channels), in_chs_right=(6 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        # Stage 2: six cells at 2x channel width.
        self.cell_6 = FirstCell(in_chs_left=(6 * channels), out_chs_left=channels, in_chs_right=(8 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        self.cell_7 = NormalCell(in_chs_left=(8 * channels), out_chs_left=(2 * channels), in_chs_right=(12 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        self.cell_8 = NormalCell(in_chs_left=(12 * channels), out_chs_left=(2 * channels), in_chs_right=(12 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        self.cell_9 = NormalCell(in_chs_left=(12 * channels), out_chs_left=(2 * channels), in_chs_right=(12 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        self.cell_10 = NormalCell(in_chs_left=(12 * channels), out_chs_left=(2 * channels), in_chs_right=(12 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        self.cell_11 = NormalCell(in_chs_left=(12 * channels), out_chs_left=(2 * channels), in_chs_right=(12 * channels), out_chs_right=(2 * channels), pad_type=pad_type)
        # Reduction 2: halves spatial size again, doubles width again.
        self.reduction_cell_1 = ReductionCell1(in_chs_left=(12 * channels), out_chs_left=(4 * channels), in_chs_right=(12 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        # Stage 3: six cells at 4x channel width.
        self.cell_12 = FirstCell(in_chs_left=(12 * channels), out_chs_left=(2 * channels), in_chs_right=(16 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.cell_13 = NormalCell(in_chs_left=(16 * channels), out_chs_left=(4 * channels), in_chs_right=(24 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.cell_14 = NormalCell(in_chs_left=(24 * channels), out_chs_left=(4 * channels), in_chs_right=(24 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.cell_15 = NormalCell(in_chs_left=(24 * channels), out_chs_left=(4 * channels), in_chs_right=(24 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.cell_16 = NormalCell(in_chs_left=(24 * channels), out_chs_left=(4 * channels), in_chs_right=(24 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.cell_17 = NormalCell(in_chs_left=(24 * channels), out_chs_left=(4 * channels), in_chs_right=(24 * channels), out_chs_right=(4 * channels), pad_type=pad_type)
        self.act = nn.ReLU(inplace=True)
        # Feature taps for downstream feature-pyramid use (num_chs assume defaults).
        self.feature_info = [dict(num_chs=96, reduction=2, module='conv0'), dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), dict(num_chs=4032, reduction=32, module='act')]
        (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def get_classifier(self):
        """Return the final classification layer."""
        return self.last_linear

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head for a new number of classes."""
        self.num_classes = num_classes
        (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        """Run the backbone; every cell receives the two previous cell outputs."""
        x_conv0 = self.conv0(x)
        x_stem_0 = self.cell_stem_0(x_conv0)
        x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
        x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
        x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
        x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
        x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
        x_cell_4 = self.cell_4(x_cell_3, x_cell_2)
        x_cell_5 = self.cell_5(x_cell_4, x_cell_3)
        x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4)
        x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4)
        x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
        x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
        x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
        x_cell_10 = self.cell_10(x_cell_9, x_cell_8)
        x_cell_11 = self.cell_11(x_cell_10, x_cell_9)
        x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10)
        x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10)
        x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
        x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
        x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
        x_cell_16 = self.cell_16(x_cell_15, x_cell_14)
        x_cell_17 = self.cell_17(x_cell_16, x_cell_15)
        x = self.act(x_cell_17)
        return x

    def forward(self, x):
        """Backbone -> global pool -> (optional dropout) -> linear classifier."""
        x = self.forward_features(x)
        x = self.global_pool(x)
        if (self.drop_rate > 0):
            x = F.dropout(x, self.drop_rate, training=self.training)
        x = self.last_linear(x)
        return x
def instance_norm(input, name='instance_norm'):
    """Instance normalization (TF1 graph style).

    Normalizes each channel over its spatial dimensions (H, W) per sample,
    then applies a learned per-channel scale and offset created inside the
    given variable scope.
    """
    with tf.variable_scope(name):
        # One scale/offset pair per channel (NHWC layout: channels last).
        depth = input.get_shape()[3]
        scale = tf.get_variable('scale', [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32))
        offset = tf.get_variable('offset', [depth], initializer=tf.constant_initializer(0.0))
        mean, variance = tf.nn.moments(input, axes=[1, 2], keep_dims=True)
        # rsqrt(var + eps) avoids a divide and guards against zero variance.
        inv_std = tf.rsqrt(variance + 1e-05)
        normalized = (input - mean) * inv_std
        return scale * normalized + offset
class ELF_KO_Test(unittest.TestCase):
    """Demigod kernel-module (.ko) emulation tests.

    Each test loads a small kernel module under Qiling, intercepts printk,
    and verifies the messages the module would have logged.

    NOTE(review): two decorators in this class were corrupted in the source —
    a bare ``(IS_FAST_TEST, 'fast test')`` tuple and a bare ``_kernel_api(...)``
    call statement. They are restored below as the decorators they clearly were.
    """

    @unittest.skipIf(IS_FAST_TEST, 'fast test')
    def test_demigod_m0hamed_x86(self):
        checklist = []

        # Full replacement for printk: record the format string, skip output.
        @_kernel_api(params={'format': STRING})
        def __my_printk(ql: Qiling, address: int, params):
            ql.log.info(f'my printk: params={params!r}')
            checklist.append(params['format'])
            return 0
        ql = Qiling(['../examples/rootfs/x86_linux/kernel/m0hamed_rootkit.ko'], '../examples/rootfs/x86_linux', verbose=QL_VERBOSE.DEBUG)
        ql.os.set_api('printk', __my_printk)
        ba = ql.loader.load_address
        # Execute only the address range that triggers the printk call.
        ql.run((ba + 480), (ba + 506))
        self.assertEqual('DONT YOU EVER TRY TO READ THIS FILE OR I AM GOING TO DESTROY YOUR MOST SECRET DREAMS', checklist.pop(0))
        self.assertEqual(len(checklist), 0)

    def test_demigod_hello_x8664(self):
        checklist = []

        # On-enter hook: observe printk's arguments, then let it run normally.
        def __onenter_printk(ql: Qiling, address: int, params):
            ql.log.info(f'about to enter printk: params={params!r}')
            checklist.append(params['format'])
        ql = Qiling(['../examples/rootfs/x8664_linux/kernel/hello.ko'], '../examples/rootfs/x8664_linux', verbose=QL_VERBOSE.DEBUG)
        ql.os.set_api('printk', __onenter_printk, QL_INTERCEPT.ENTER)
        ba = ql.loader.load_address
        ql.run((ba + 100), (ba + 126))
        ql.run((ba + 127), (ba + 144))
        self.assertIn('Hello', checklist.pop(0))
        self.assertIn('Goodbye', checklist.pop(0))
        self.assertEqual(len(checklist), 0)

    def test_demigod_hello_mips32(self):
        checklist = []

        # On-exit hook: printk has already run; record what it was given.
        def __onexit_printk(ql: Qiling, address: int, params, retval: int):
            ql.log.info(f'done with printk: params={params!r}')
            checklist.append(params['format'])
        ql = Qiling(['../examples/rootfs/mips32_linux/kernel/hello.ko'], '../examples/rootfs/mips32_linux', verbose=QL_VERBOSE.DEBUG)
        ql.os.set_api('printk', __onexit_printk, QL_INTERCEPT.EXIT)
        ba = ql.loader.load_address
        ql.run((ba + 96), (ba + 132))
        ql.run((ba + 136), (ba + 152))
        self.assertIn('Hello', checklist.pop(0))
        self.assertEqual(len(checklist), 0)
def create_tasks_from_benchmarks(benchmark_dict):
    """Build a ``{'dataset-pair': task}`` mapping for every benchmark pair.

    ``benchmark_dict`` maps dataset names to lists of language-pair strings;
    each pair becomes one translation task keyed ``f'{dataset}-{pair}'``.
    """

    def version_of(dataset, language_pair):
        # Pairs whose target language is 'zh' or 'ja' get version 1 —
        # presumably due to segmentation differences; verify with task defs.
        return 1 if language_pair[-2:] in ('zh', 'ja') else 0

    tasks = {}
    for dataset, language_pairs in benchmark_dict.items():
        for language_pair in language_pairs:
            key = f'{dataset}-{language_pair}'
            tasks[key] = create_translation_task(dataset, language_pair, version_of(dataset, language_pair))
    return tasks
class BM25Selector():
    """Select source-corpus examples that are BM25-nearest to target queries,
    using an Elasticsearch index over the source corpus.

    Module-level globals (glob_data_source, glob_text_column_name, glob_top_k)
    are set so worker processes (get_neighbour_examples) can read them —
    presumably inherited via fork; TODO confirm behavior on spawn platforms.
    """

    def __init__(self, source, target, index_name, num_proc=4, enable_rake=False):
        # source/target look like HuggingFace-datasets objects: they support
        # integer indexing and add/load_elasticsearch_index — confirm.
        self.source = source
        self.target = target
        self.num_proc = num_proc
        global glob_data_source
        glob_data_source = source
        global glob_text_column_name
        glob_text_column_name = 'text'
        self.text_column_name = 'text'
        self.index_name = index_name
        self.enable_rake = enable_rake
        if (not es.indices.exists(index=self.index_name)):
            # First run: build the Elasticsearch index over the source corpus.
            print('index name not exist, creating new index for the training corpora')
            self.source.add_elasticsearch_index(self.text_column_name, es_client=es, es_index_name=self.index_name, es_index_config=es_config)
            glob_data_source = self.source
        else:
            # Index already exists: just attach to it.
            self.source.load_elasticsearch_index(self.text_column_name, es_client=es, es_index_name=self.index_name)
            glob_data_source = self.source

    def build_queries(self):
        """Return one query string per target example.

        With RAKE enabled, each query is the top-20 ranked keyphrases of the
        target text joined by spaces; otherwise the raw target text is used.
        """
        queries = []
        if self.enable_rake:
            r = Rake()
            for idx in tqdm(range(len(self.target))):
                r.extract_keywords_from_text(self.target[idx]['text'])
                phrases = r.get_ranked_phrases_with_scores()
                # phrases are (score, phrase) pairs; keep the top-20 phrases.
                query = ' '.join([p[1] for p in phrases[:20]])
                queries.append(query)
        else:
            queries = [self.target[idx]['text'] for idx in tqdm(range(len(self.target)))]
        return queries

    def build_dataset(self, top_k, output_file_path, batch_size=4):
        """Retrieve neighbours for every query, deduplicate by best rank,
        and write text/id/rank columns to a CSV at output_file_path.
        """
        global glob_top_k
        glob_top_k = top_k  # read by get_neighbour_examples in the workers
        queries = self.build_queries()
        query_neighbours = []
        query_ranks = []
        for i in tqdm(range(0, len(queries), batch_size)):
            if ((i + batch_size) >= len(queries)):
                batched_queries = queries[i:]
            else:
                batched_queries = queries[i:(i + batch_size)]
            # NOTE(review): a fresh Pool per batch is costly; left unchanged.
            with Pool(processes=self.num_proc) as pool:
                results = pool.map(get_neighbour_examples, batched_queries)
            for result in results:
                query_neighbours.extend(result['id'])
                # Rank = position in each result list (0 is the best match).
                query_ranks.extend(list(range(len(result['id']))))
        # Deduplicate neighbours, keeping the best (smallest) rank seen.
        unique_query = {}
        for (nid, rank) in zip(query_neighbours, query_ranks):
            if (nid not in unique_query):
                unique_query[nid] = rank
            else:
                unique_query[nid] = min(unique_query[nid], rank)
        query_neighbours = []
        ranks = []
        for (nid, rank) in unique_query.items():
            query_neighbours.append(nid)
            ranks.append(rank)
        texts = [self.source[idx][self.text_column_name] for idx in query_neighbours]
        # 'id' column is a fresh 0..n-1 numbering, not the source ids.
        ids = list(range(len(texts)))
        df = pd.DataFrame({'text': texts, 'id': ids, 'rank': ranks})
        df.to_csv(output_file_path, index=False)
class ProxyEnv(Env):
    """Environment wrapper that delegates the Env API to a wrapped instance.

    Unknown attribute lookups fall through to the wrapped environment via
    __getattr__, so subclasses only need to override what they change.
    """

    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env
        self.action_space = self._wrapped_env.action_space
        self.observation_space = self._wrapped_env.observation_space

    @property
    def wrapped_env(self):
        # NOTE(review): restored @property — terminate() and __str__ below use
        # self.wrapped_env attribute-style; as a plain method they would have
        # operated on the bound-method object instead of the environment.
        return self._wrapped_env

    def reset(self, **kwargs):
        """Reset the wrapped environment and return its initial observation."""
        return self._wrapped_env.reset(**kwargs)

    def step(self, action):
        """Step the wrapped environment with the given action."""
        return self._wrapped_env.step(action)

    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)

    @property
    def horizon(self):
        # Restored @property for the same reason as wrapped_env.
        return self._wrapped_env.horizon

    def terminate(self):
        # Best-effort: only forward if the wrapped env supports termination.
        if hasattr(self.wrapped_env, 'terminate'):
            self.wrapped_env.terminate()

    def __getattr__(self, attr):
        # Guard against infinite recursion (e.g. during unpickling) before
        # _wrapped_env exists in __dict__.
        if (attr == '_wrapped_env'):
            raise AttributeError()
        return getattr(self._wrapped_env, attr)

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.wrapped_env)
class MPRIS2(DBusProperty, DBusIntrospectable, MPRISObject):
    """MPRIS 2.x D-Bus interface for Quod Libet.

    Exposes the org.mpris.MediaPlayer2 root and Player interfaces on the
    session bus and keeps their properties in sync with the application's
    player and player options.

    NOTE(review): the exported method/signal decorator lines were corrupted
    in the source (leading '@dbus.service' stripped, leaving bare '.method(...)'
    lines, which are syntax errors); they are restored below.
    """

    BUS_NAME = 'org.mpris.MediaPlayer2.quodlibet'
    PATH = '/org/mpris/MediaPlayer2'
    ROOT_IFACE = 'org.mpris.MediaPlayer2'
    ROOT_ISPEC = '\n<method name="Raise"/>\n<method name="Quit"/>'
    ROOT_PROPS = '\n<property name="CanQuit" type="b" access="read"/>\n<property name="CanRaise" type="b" access="read"/>\n<property name="CanSetFullscreen" type="b" access="read"/>\n<property name="HasTrackList" type="b" access="read"/>\n<property name="Identity" type="s" access="read"/>\n<property name="DesktopEntry" type="s" access="read"/>\n<property name="SupportedUriSchemes" type="as" access="read"/>\n<property name="SupportedMimeTypes" type="as" access="read"/>'
    PLAYER_IFACE = 'org.mpris.MediaPlayer2.Player'
    PLAYER_ISPEC = '\n<method name="Next"/>\n<method name="Previous"/>\n<method name="Pause"/>\n<method name="PlayPause"/>\n<method name="Stop"/>\n<method name="Play"/>\n<method name="Seek">\n  <arg direction="in" name="Offset" type="x"/>\n</method>\n<method name="SetPosition">\n  <arg direction="in" name="TrackId" type="o"/>\n  <arg direction="in" name="Position" type="x"/>\n</method>\n<method name="OpenUri">\n  <arg direction="in" name="Uri" type="s"/>\n</method>\n<signal name="Seeked">\n  <arg name="Position" type="x"/>\n</signal>'
    PLAYER_PROPS = '\n<property name="PlaybackStatus" type="s" access="read"/>\n<property name="LoopStatus" type="s" access="readwrite"/>\n<property name="Rate" type="d" access="readwrite"/>\n<property name="Shuffle" type="b" access="readwrite"/>\n<property name="Metadata" type="a{sv}" access="read"/>\n<property name="Volume" type="d" access="readwrite"/>\n<property name="Position" type="x" access="read">\n  <annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="false"/>\n</property>\n<property name="MinimumRate" type="d" access="read"/>\n<property name="MaximumRate" type="d" access="read"/>\n<property name="CanGoNext" type="b" access="read"/>\n<property name="CanGoPrevious" type="b" access="read"/>\n<property name="CanPlay" type="b" access="read"/>\n<property name="CanPause" type="b" access="read"/>\n<property name="CanSeek" type="b" access="read"/>\n<property name="CanControl" type="b" access="read">\n  <annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="false"/>\n</property>'

    def __init__(self):
        DBusIntrospectable.__init__(self)
        DBusProperty.__init__(self)
        self.set_introspection(MPRIS2.ROOT_IFACE, MPRIS2.ROOT_ISPEC)
        self.set_properties(MPRIS2.ROOT_IFACE, MPRIS2.ROOT_PROPS)
        self.set_introspection(MPRIS2.PLAYER_IFACE, MPRIS2.PLAYER_ISPEC)
        self.set_properties(MPRIS2.PLAYER_IFACE, MPRIS2.PLAYER_PROPS)
        bus = dbus.SessionBus()
        name = dbus.service.BusName(self.BUS_NAME, bus)
        MPRISObject.__init__(self, bus, self.PATH, name)
        # Cached Metadata dict; rebuilt lazily after __invalidate_metadata().
        self.__metadata = None
        # Open temp cover-art file kept alive while its URI is advertised.
        self.__cover = None
        player_options = app.player_options
        self.__repeat_id = player_options.connect('notify::repeat', self.__repeat_changed)
        self.__random_id = player_options.connect('notify::shuffle', self.__shuffle_changed)
        self.__single_id = player_options.connect('notify::single', self.__single_changed)
        self.__lsig = app.librarian.connect('changed', self.__library_changed)
        self.__vsig = app.player.connect('notify::volume', self.__volume_changed)
        self.__seek_sig = app.player.connect('seek', self.__seeked)

    def remove_from_connection(self, *arg, **kwargs):
        """Disconnect all signal handlers and release resources on unexport."""
        super().remove_from_connection(*arg, **kwargs)
        player_options = app.player_options
        player_options.disconnect(self.__repeat_id)
        player_options.disconnect(self.__random_id)
        player_options.disconnect(self.__single_id)
        app.librarian.disconnect(self.__lsig)
        app.player.disconnect(self.__vsig)
        app.player.disconnect(self.__seek_sig)
        if (self.__cover is not None):
            self.__cover.close()
            self.__cover = None
        self.__invalidate_metadata()

    def __volume_changed(self, *args):
        self.emit_properties_changed(self.PLAYER_IFACE, ['Volume'])

    def __repeat_changed(self, *args):
        self.emit_properties_changed(self.PLAYER_IFACE, ['LoopStatus'])

    def __shuffle_changed(self, *args):
        self.emit_properties_changed(self.PLAYER_IFACE, ['Shuffle'])

    def __single_changed(self, *args):
        # 'single' affects Track-vs-Playlist looping, hence LoopStatus.
        self.emit_properties_changed(self.PLAYER_IFACE, ['LoopStatus'])

    def __seeked(self, player, song, ms):
        # Player reports milliseconds; MPRIS wants microseconds.
        self.Seeked((ms * 1000))

    def __library_changed(self, library, songs):
        self.__invalidate_metadata()
        # Only notify when the currently playing song was among the changes.
        if ((not songs) or (app.player.info not in songs)):
            return
        self.emit_properties_changed(self.PLAYER_IFACE, ['Metadata'])

    @dbus.service.method(ROOT_IFACE)
    def Raise(self):
        app.present()

    @dbus.service.method(ROOT_IFACE)
    def Quit(self):
        app.quit()

    @dbus.service.signal(PLAYER_IFACE, signature='x')
    def Seeked(self, position):
        pass

    @dbus.service.method(PLAYER_IFACE)
    def Next(self):
        # Preserve the paused state across the track change.
        player = app.player
        paused = player.paused
        player.next()
        player.paused = paused

    @dbus.service.method(PLAYER_IFACE)
    def Previous(self):
        player = app.player
        paused = player.paused
        player.previous()
        player.paused = paused

    @dbus.service.method(PLAYER_IFACE)
    def Pause(self):
        app.player.paused = True

    @dbus.service.method(PLAYER_IFACE)
    def Play(self):
        app.player.play()

    @dbus.service.method(PLAYER_IFACE)
    def PlayPause(self):
        app.player.playpause()

    @dbus.service.method(PLAYER_IFACE)
    def Stop(self):
        app.player.stop()

    @dbus.service.method(PLAYER_IFACE, in_signature='x')
    def Seek(self, offset):
        # MPRIS offset is in microseconds; the player works in milliseconds.
        new_pos = (app.player.get_position() + (offset / 1000))
        app.player.seek(new_pos)

    @dbus.service.method(PLAYER_IFACE, in_signature='ox')
    def SetPosition(self, track_id, position):
        # Ignore stale requests that refer to a track no longer playing.
        if (track_id == self.__get_current_track_id()):
            app.player.seek((position / 1000))

    def paused(self):
        self.emit_properties_changed(self.PLAYER_IFACE, ['PlaybackStatus'])
    # Pause and unpause emit the same property change.
    unpaused = paused

    def song_started(self, song):
        self.__invalidate_metadata()
        # Tell clients the position was reset for the new song.
        self.Seeked(0)
        self.emit_properties_changed(self.PLAYER_IFACE, ['PlaybackStatus', 'Metadata'])

    def __get_current_track_id(self):
        path = '/net/sacredchao/QuodLibet'
        if (not app.player.info):
            return dbus.ObjectPath(((path + '/') + 'NoTrack'))
        return dbus.ObjectPath(((path + '/') + str(id(app.player.info))))

    def __invalidate_metadata(self):
        self.__metadata = None

    def __get_metadata(self):
        # Lazily rebuild the cached metadata dict.
        if (self.__metadata is None):
            self.__metadata = self.__get_metadata_real()
        assert (self.__metadata is not None)
        return self.__metadata

    def __get_metadata_real(self):
        """Build the MPRIS Metadata a{sv} dict for the current song."""
        metadata = {}
        metadata['mpris:trackid'] = self.__get_current_track_id()

        def ignore_overflow(dbus_type, value):
            # D-Bus types have fixed widths; fall back to 0 rather than raise.
            try:
                return dbus_type(value)
            except OverflowError:
                return 0
        song = app.player.info
        if (not song):
            return metadata
        # Song length is in seconds; MPRIS wants microseconds.
        metadata['mpris:length'] = ignore_overflow(dbus.Int64, (song('~#length') * (10 ** 6)))
        if (self.__cover is not None):
            self.__cover.close()
            self.__cover = None
        cover = app.cover_manager.get_cover(song)
        if cover:
            # Keep temp cover files open so they aren't deleted while in use.
            is_temp = cover.name.startswith(tempfile.gettempdir())
            if is_temp:
                self.__cover = cover
            metadata['mpris:artUrl'] = fsn2uri(cover.name)
        # Multi-valued tags -> lists of strings.
        list_val = {'artist': 'artist', 'albumArtist': 'albumartist', 'comment': 'comment', 'composer': 'composer', 'genre': 'genre', 'lyricist': 'lyricist'}
        for (xesam, tag) in list_val.items():
            vals = song.list(tag)
            if vals:
                metadata[('xesam:' + xesam)] = list(map(unival, vals))
        # Single-valued tags -> plain strings.
        sing_val = {'album': 'album', 'title': 'title', 'asText': '~lyrics'}
        for (xesam, tag) in sing_val.items():
            vals = song.comma(tag)
            if vals:
                metadata[('xesam:' + xesam)] = unival(vals)
        metadata['xesam:url'] = song('~uri')
        # Numeric tags -> Int32 (overflow coerced to 0).
        num_val = {'audioBPM': 'bpm', 'discNumber': 'disc', 'trackNumber': 'track', 'useCount': 'playcount'}
        for (xesam, tag) in num_val.items():
            val = song(('~#' + tag), None)
            if (val is not None):
                metadata[('xesam:' + xesam)] = ignore_overflow(dbus.Int32, val)
        metadata['xesam:userRating'] = ignore_overflow(dbus.Double, song('~#rating'))
        iso_8601_format = '%Y-%m-%dT%H:%M:%S'
        tuple_time = time.gmtime(song('~#lastplayed'))
        iso_time = time.strftime(iso_8601_format, tuple_time)
        metadata['xesam:lastUsed'] = iso_time
        year = song('~year')
        if year:
            try:
                tuple_time = time.strptime(year, '%Y')
                iso_time = time.strftime(iso_8601_format, tuple_time)
            except ValueError:
                # Non-numeric year tag: just omit contentCreated.
                pass
            else:
                metadata['xesam:contentCreated'] = iso_time
        return metadata

    def set_property(self, interface, name, value):
        """Handle writes to writable MPRIS Player properties."""
        player = app.player
        player_options = app.player_options
        if (interface == self.PLAYER_IFACE):
            if (name == 'LoopStatus'):
                if (value == 'Playlist'):
                    player_options.repeat = True
                    app.window.order.repeater = RepeatListForever
                elif (value == 'Track'):
                    player_options.repeat = True
                    app.window.order.repeater = RepeatSongForever
                elif (value == 'None'):
                    player_options.repeat = False
                    app.window.order.repeater = RepeatListForever
            elif (name == 'Rate'):
                # Rate changes are not supported (always 1.0).
                pass
            elif (name == 'Shuffle'):
                player_options.shuffle = value
            elif (name == 'Volume'):
                player.volume = value

    def get_property(self, interface, name):
        """Handle reads of MPRIS root and Player properties."""
        player = app.player
        player_options = app.player_options
        if (interface == self.ROOT_IFACE):
            if (name == 'CanQuit'):
                return True
            elif (name == 'CanRaise'):
                return True
            elif (name == 'CanSetFullscreen'):
                return False
            elif (name == 'HasTrackList'):
                return False
            elif (name == 'Identity'):
                return app.name
            elif (name == 'DesktopEntry'):
                return 'io.github.quodlibet.QuodLibet'
            elif (name == 'SupportedUriSchemes'):
                # 'can' rejects everything, so no schemes are advertised
                # (OpenUri is not implemented).
                def can(s):
                    return False
                # NOTE(review): this list literal was garbled in the source;
                # harmless either way since every entry is filtered out.
                schemes = ['http', 'https', 'ftp', 'file', 'mms']
                return filter(can, schemes)
            elif (name == 'SupportedMimeTypes'):
                from quodlibet import formats
                return formats.mimes
        elif (interface == self.PLAYER_IFACE):
            if (name == 'PlaybackStatus'):
                if (not player.song):
                    return 'Stopped'
                return ('Playing', 'Paused')[int(player.paused)]
            elif (name == 'LoopStatus'):
                if (not player_options.repeat):
                    return 'None'
                else:
                    if player_options.single:
                        return 'Track'
                    return 'Playlist'
            elif (name == 'Rate'):
                return 1.0
            elif (name == 'Shuffle'):
                return player_options.shuffle
            elif (name == 'Metadata'):
                return self.__get_metadata()
            elif (name == 'Position'):
                # Player position is milliseconds; MPRIS wants microseconds.
                return (player.get_position() * 1000)
            elif (name == 'Volume'):
                return player.volume
            elif (name == 'MinimumRate'):
                return 1.0
            elif (name == 'MaximumRate'):
                return 1.0
            elif (name == 'CanGoNext'):
                return True
            elif (name == 'CanGoPrevious'):
                return True
            elif (name == 'CanPlay'):
                return True
            elif (name == 'CanPause'):
                return True
            elif (name == 'CanSeek'):
                return True
            elif (name == 'CanControl'):
                return True
def spy_auto_quant(auto_quant: AutoQuant):
    """Temporarily wrap ``auto_quant._auto_quant_main`` so a caller can observe
    the EvalManager (and through it the PTQ results) used during a run.

    NOTE(review): this function yields, so it is meant to be used as a context
    manager — the ``@contextlib.contextmanager`` decorator appears to have been
    stripped from the source; confirm and restore it at the definition site.
    """

    class Spy():
        # Captures the eval manager that _auto_quant_main is called with.
        def __init__(self):
            self._eval_manager = None

        def get_all_ptq_results(self) -> List[PtqResult]:
            # Empty until the wrapped _auto_quant_main has run at least once.
            if (self._eval_manager is None):
                return []
            return [sess.ptq_result for sess in self._eval_manager._ptq_sessions]
    spy = Spy()
    # Keep a reference to the real implementation for delegation and restore.
    _auto_quant_main = auto_quant._auto_quant_main

    def _auto_quant_main_wrapper(fp32_sess, target_acc, starting_op_names, output_op_names, eval_manager, results_dir='/tmp'):
        # Record the eval manager, then delegate to the real implementation.
        spy._eval_manager = eval_manager
        return _auto_quant_main(fp32_sess, target_acc, starting_op_names, output_op_names, eval_manager, results_dir)
    try:
        setattr(auto_quant, '_auto_quant_main', _auto_quant_main_wrapper)
        (yield spy)
    finally:
        # Always restore the original implementation, even on error.
        setattr(auto_quant, '_auto_quant_main', _auto_quant_main)
class _IterableCursor():
    """Async helper that exposes an execution context as a cursor.

    Supports both ``await`` (yielding a concrete cursor) and ``async for``
    (via a lazy iterator over _iterate).
    """

    def __init__(self, context):
        self._context = context

    async def _iterate(self):
        # Prefer a server-side prepared statement when the dialect supports it.
        if self._context.dialect.support_prepare:
            prepared = (await self._context.cursor.prepare(self._context))
            # NOTE(review): only parameters[0] is used here — presumably this
            # path does not support executemany-style parameter lists; confirm.
            return prepared.iterate(*self._context.parameters[0], timeout=self._context.timeout)
        return self._context.cursor.iterate(self._context)

    async def _get_cursor(self):
        # _iterate returns an awaitable that itself yields the cursor,
        # hence the double await.
        return (await (await self._iterate()))

    def __aiter__(self):
        return _LazyIterator(self._iterate)

    def __await__(self):
        return self._get_cursor().__await__()
class TFResNetBasicLayer(tf.keras.layers.Layer):
    """ResNet basic residual block: two 3x3 conv layers plus a shortcut.

    When the block changes channel count or stride, the shortcut is a
    projection (TFResNetShortCut); otherwise it is a linear pass-through.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int=1, activation: str='relu', **kwargs) -> None:
        super().__init__(**kwargs)
        needs_projection = (in_channels != out_channels) or (stride != 1)
        self.conv1 = TFResNetConvLayer(out_channels, stride=stride, name='layer.0')
        # Second conv has no activation: the block activates after the add.
        self.conv2 = TFResNetConvLayer(out_channels, activation=None, name='layer.1')
        if needs_projection:
            self.shortcut = TFResNetShortCut(out_channels, stride=stride, name='shortcut')
        else:
            self.shortcut = tf.keras.layers.Activation('linear', name='shortcut')
        self.activation = ACT2FN[activation]

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        residual = hidden_state
        out = self.conv1(hidden_state, training=training)
        out = self.conv2(out, training=training)
        residual = self.shortcut(residual, training=training)
        out = out + residual
        return self.activation(out)
class TestPynagImporter(unittest.TestCase):
    """Exercise the CSV importer: string/file parsing and object creation."""

    def setUp(self):
        # Fresh temp path per test; only created by tests that write to it.
        self.filename = tempfile.mktemp()

    def tearDown(self):
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def _write_fixture(self):
        # Dump the canned CSV test case into the temp file.
        with open(self.filename, 'w') as handle:
            handle.write(_TYPICAL_TESTCASE)

    def test_parse_csv_string(self):
        parsed = importer.parse_csv_string(_TYPICAL_TESTCASE)
        self.assertEqual(2, len(parsed))
        self.assertEqual('generic-service', parsed[0]['use'])
        self.assertEqual('generic-host', parsed[1]['use'])

    def test_parse_csv_string_empty(self):
        self.assertEqual([], importer.parse_csv_string(''))

    def test_parse_csv_file(self):
        self._write_fixture()
        from_file = importer.parse_csv_file(self.filename)
        from_string = importer.parse_csv_string(_TYPICAL_TESTCASE)
        self.assertEqual(from_string, from_file)

    def test_dict_to_objects(self):
        rows = importer.parse_csv_string(_TYPICAL_TESTCASE)
        objects = importer.dict_to_pynag_objects(rows)
        self.assertEqual(2, len(objects))
        self.assertEqual('generic-service', objects[0].use)
        self.assertEqual('generic-host', objects[1].use)
        self.assertEqual('service', objects[0].object_type)
        self.assertEqual('host', objects[1].object_type)

    def test_import_from_csv_file(self):
        self._write_fixture()
        objects = importer.import_from_csv_file(filename=self.filename, seperator=',')
        self.assertEqual(2, len(objects))
        self.assertEqual('generic-service', objects[0].use)
        self.assertEqual('generic-host', objects[1].use)
        self.assertEqual('service', objects[0].object_type)
        self.assertEqual('host', objects[1].object_type)
def parseEtree(inFileName, silence=False, print_warnings=True, mapping=None, reverse_mapping=None, nsmap=None):
    """Parse an XML document into generateDS objects and an lxml element tree.

    Args:
        inFileName: path or file-like object of the XML document.
        silence: when False, pretty-print the rebuilt XML to stdout.
        print_warnings: when True, write collected parser warnings to stderr.
        mapping / reverse_mapping: optional dicts filled in during export.
        nsmap: optional namespace map passed through to to_etree.

    Returns:
        (rootObj, rootElement, mapping, reverse_node_mapping)
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    gds_collector = GdsCollector_()
    rootNode = doc.getroot()
    (rootTag, rootClass) = get_root_tag(rootNode)
    if (rootClass is None):
        # Unknown root element: fall back to the generic 'root' class.
        rootTag = 'root'
        rootClass = root
    rootObj = rootClass.factory()
    rootObj.build(rootNode, gds_collector_=gds_collector)
    if (mapping is None):
        mapping = {}
    if (reverse_mapping is None):
        reverse_mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping, reverse_mapping_=reverse_mapping, nsmap_=nsmap)
    reverse_node_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if (not SaveElementTreeNode):
        # Drop references to the parsed tree unless the caller wants it kept.
        doc = None
        rootNode = None
    if (not silence):
        content = etree_.tostring(rootElement, pretty_print=True, xml_declaration=True, encoding='utf-8')
        sys.stdout.write(str(content))
        sys.stdout.write('\n')
    if (print_warnings and (len(gds_collector.get_messages()) > 0)):
        separator = (('-' * 50) + '\n')
        sys.stderr.write(separator)
        sys.stderr.write('----- Warnings -- count: {} -----\n'.format(len(gds_collector.get_messages())))
        gds_collector.write_messages(sys.stderr)
        sys.stderr.write(separator)
    return (rootObj, rootElement, mapping, reverse_node_mapping)
def make_layers(cfg, in_channels=3, batch_norm=False, dilation=1):
    """Build a VGG-style feature extractor from a layer config list.

    Each integer in ``cfg`` adds a 3x3 conv (dilated and padded by
    ``dilation``) followed by an optional BatchNorm and a ReLU; the string
    'M' adds a 2x2 stride-2 max-pool.

    Returns:
        nn.Sequential containing the stacked modules.
    """
    modules = []
    channels = in_channels
    for item in cfg:
        if item == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        # padding == dilation keeps the spatial size constant for 3x3 kernels.
        modules.append(nn.Conv2d(channels, item, kernel_size=3, padding=dilation, dilation=dilation))
        if batch_norm:
            modules.append(nn.BatchNorm2d(item))
        modules.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*modules)
class ArgNamesPlugin(Plugin):
    """Test plugin that rewrites the return type of a fixed set of callables
    using the class name extracted from their arguments."""

    # Fully-qualified names whose hooks are installed; everything else is
    # left untouched.
    _FUNCTION_FULLNAMES = frozenset({'mod.func', 'mod.func_unfilled', 'mod.func_star_expr', 'mod.ClassInit', 'mod.Outer.NestedClassInit'})
    _METHOD_FULLNAMES = frozenset({'mod.Class.method', 'mod.Class.myclassmethod', 'mod.Class.mystaticmethod', 'mod.ClassUnfilled.method', 'mod.ClassStarExpr.method', 'mod.ClassChild.method', 'mod.ClassChild.myclassmethod'})

    def get_function_hook(self, fullname: str) -> (Callable[([FunctionContext], Type)] | None):
        if fullname not in self._FUNCTION_FULLNAMES:
            return None
        return extract_classname_and_set_as_return_type_function

    def get_method_hook(self, fullname: str) -> (Callable[([MethodContext], Type)] | None):
        if fullname not in self._METHOD_FULLNAMES:
            return None
        return extract_classname_and_set_as_return_type_method
class TestVaisalaGLD360TextFileHandler(unittest.TestCase):
    """Tests for the Vaisala GLD360 lightning text-file handler."""

    def test_vaisala_gld360(self):
        """Feed three sample rows and verify every dataset the handler exposes."""
        expected_power = np.array([12.3, 13.2, -31.0])
        expected_lat = np.array([30.5342, -0.5727, 12.1529])
        expected_lon = np.array([-90.1152, 104.0688, -10.8756])
        expected_time = np.array(['2017-06-20T00:00:00.', '2017-06-20T00:00:00.', '2017-06-20T00:00:00.'], dtype='datetime64[ns]')
        filename = StringIO(u'2017-06-20 00:00:00.007178 30.5342 -90.1152 12.3 kA\n2017-06-20 00:00:00.020162 -0.5727 104.0688 13.2 kA\n2017-06-20 00:00:00.023183 12.1529 -10.8756 -31.0 kA')
        self.handler = VaisalaGLD360TextFileHandler(filename, {}, {})
        filename.close()

        def dataset_values(name, info):
            # Fetch a dataset by name and return its raw values.
            return self.handler.get_dataset(make_dataid(name=name), info).values

        np.testing.assert_allclose(dataset_values('power', {'units': 'kA'}), expected_power, rtol=1e-05)
        np.testing.assert_allclose(dataset_values('latitude', {}), expected_lat, rtol=1e-05)
        np.testing.assert_allclose(dataset_values('longitude', {}), expected_lon, rtol=1e-05)
        np.testing.assert_array_equal(dataset_values('time', {}), expected_time)
class Interface():
    """Key-binding and UI-state manager for the manual alignments tool.

    Holds the mutable state (navigation position, per-overlay display
    options, edit status) and maps keyboard controls to their actions.
    """

    def __init__(self, alignments, frames):
        logger.debug('Initializing %s: (alignments: %s, frames: %s)', self.__class__.__name__, alignments, frames)
        self.alignments = alignments
        self.frames = frames
        self.controls = self.set_controls()
        self.state = self.set_state()
        # Maps the numeric skip-mode state to its human-readable label.
        self.skip_mode = {1: 'Standard', 2: 'No Faces', 3: 'Multi-Faces', 4: 'Has Faces'}
        logger.debug('Initialized %s', self.__class__.__name__)

    def set_controls(self):
        """Return the mapping of key presses to actions, arguments and help text."""
        controls = {
            'z': {'action': self.iterate_frame, 'args': ('navigation', -1), 'help': 'Previous Frame'},
            'x': {'action': self.iterate_frame, 'args': ('navigation', 1), 'help': 'Next Frame'},
            '[': {'action': self.iterate_frame, 'args': ('navigation', -100), 'help': '100 Frames Back'},
            ']': {'action': self.iterate_frame, 'args': ('navigation', 100), 'help': '100 Frames Forward'},
            '{': {'action': self.iterate_frame, 'args': ('navigation', 'first'), 'help': 'Go to First Frame'},
            '}': {'action': self.iterate_frame, 'args': ('navigation', 'last'), 'help': 'Go to Last Frame'},
            27: {'action': 'quit', 'key_text': 'ESC', 'args': ('navigation', None), 'help': 'Exit', 'key_type': ord},
            '/': {'action': self.iterate_state, 'args': ('navigation', 'frame-size'), 'help': 'Cycle Frame Zoom'},
            's': {'action': self.iterate_state, 'args': ('navigation', 'skip-mode'), 'help': 'Skip Mode (All, No Faces, Multi Faces, Has Faces)'},
            ' ': {'action': self.save_alignments, 'key_text': 'SPACE', 'args': ('edit', None), 'help': 'Save Alignments'},
            'r': {'action': self.reload_alignments, 'args': ('edit', None), 'help': 'Reload Alignments (Discard all changes)'},
            'd': {'action': self.delete_alignment, 'args': ('edit', None), 'help': 'Delete Selected Alignment'},
            'm': {'action': self.toggle_state, 'args': ('edit', 'active'), 'help': 'Change Mode (View, Edit)'},
            range(10): {'action': self.set_state_value, 'key_text': '0 to 9', 'args': ['edit', 'selected'], 'help': 'Select/Deselect Face at this Index', 'key_type': range},
            'c': {'action': self.copy_alignments, 'args': ('edit', -1), 'help': 'Copy Alignments from Previous Frame with Alignments'},
            'v': {'action': self.copy_alignments, 'args': ('edit', 1), 'help': 'Copy Alignments from Next Frame with Alignments'},
            'y': {'action': self.toggle_state, 'args': ('image', 'display'), 'help': 'Toggle Image'},
            'u': {'action': self.iterate_state, 'args': ('bounding_box', 'color'), 'help': 'Cycle Bounding Box Color'},
            'i': {'action': self.iterate_state, 'args': ('extract_box', 'color'), 'help': 'Cycle Extract Box Color'},
            'o': {'action': self.iterate_state, 'args': ('landmarks', 'color'), 'help': 'Cycle Landmarks Color'},
            'p': {'action': self.iterate_state, 'args': ('landmarks_mesh', 'color'), 'help': 'Cycle Landmarks Mesh Color'},
            'h': {'action': self.iterate_state, 'args': ('bounding_box', 'size'), 'help': 'Cycle Bounding Box thickness'},
            'j': {'action': self.iterate_state, 'args': ('extract_box', 'size'), 'help': 'Cycle Extract Box thickness'},
            'k': {'action': self.iterate_state, 'args': ('landmarks', 'size'), 'help': 'Cycle Landmarks - point size'},
            'l': {'action': self.iterate_state, 'args': ('landmarks_mesh', 'size'), 'help': 'Cycle Landmarks Mesh - thickness'}}
        logger.debug('Controls: %s', controls)
        return controls

    def set_state(self):
        """Build and return the initial state dictionary.

        Bug fix: the original definition was missing ``self`` even though
        the method is invoked as ``self.set_state()`` in ``__init__``, which
        raised ``TypeError`` at construction time.
        """
        state = {'bounding_box': dict(), 'extract_box': dict(), 'landmarks': dict(), 'landmarks_mesh': dict(), 'image': dict(), 'navigation': {'skip-mode': 1, 'frame-size': 1, 'frame_idx': 0, 'max_frame': 0, 'last_request': 0, 'frame_name': None}, 'edit': {'updated': False, 'update_faces': False, 'selected': None, 'active': 0, 'redraw': False}}
        color = 0
        for key in sorted(state.keys()):
            if key not in ('bounding_box', 'extract_box', 'landmarks', 'landmarks_mesh', 'image'):
                continue
            state[key]['display'] = True
            if key == 'image':
                continue
            # Give each drawable overlay a distinct default color index.
            color += 1
            state[key]['size'] = 1
            state[key]['color'] = color
        logger.debug('State: %s', state)
        return state

    def save_alignments(self, *args):
        """Persist alignments to disk if there are unsaved changes."""
        logger.debug('Saving Alignments')
        if not self.state['edit']['updated']:
            logger.debug('Save received, but state not updated. Not saving')
            return
        self.alignments.save()
        self.state['edit']['updated'] = False
        self.set_redraw(True)

    def reload_alignments(self, *args):
        """Discard unsaved changes by reloading alignments from disk."""
        logger.debug('Reloading Alignments')
        if not self.state['edit']['updated']:
            logger.debug('Reload received, but state not updated. Not reloading')
            return
        self.alignments.reload()
        self.state['edit']['updated'] = False
        self.state['edit']['update_faces'] = True
        self.set_redraw(True)

    def delete_alignment(self, *args):
        """Delete the currently selected face's alignment (edit mode only)."""
        logger.debug('Deleting Alignments')
        selected_face = self.get_selected_face_id()
        if (self.get_edit_mode() == 'View') or (selected_face is None):
            logger.debug("Delete received, but edit mode is 'View'. Not deleting")
            return
        frame = self.get_frame_name()
        if self.alignments.delete_face_at_index(frame, selected_face):
            self.state['edit']['selected'] = None
            self.state['edit']['updated'] = True
            self.state['edit']['update_faces'] = True
            self.set_redraw(True)

    def copy_alignments(self, *args):
        """Copy alignments from the nearest frame with faces (direction in args[1])."""
        logger.debug('Copying Alignments')
        if self.get_edit_mode() != 'Edit':
            logger.debug("Copy received, but edit mode is not 'Edit'. Not copying")
            return
        frame_id = self.get_next_face_idx(args[1])
        if not (0 <= frame_id <= self.state['navigation']['max_frame']):
            return
        current_frame = self.get_frame_name()
        get_frame = self.frames.file_list_sorted[frame_id]['frame_fullname']
        alignments = self.alignments.get_faces_in_frame(get_frame)
        for alignment in alignments:
            self.alignments.add_face(current_frame, alignment)
        self.state['edit']['updated'] = True
        self.state['edit']['update_faces'] = True
        self.set_redraw(True)

    def toggle_state(self, item, category):
        """Flip a boolean state flag and request a redraw."""
        logger.debug('Toggling state: (item: %s, category: %s)', item, category)
        self.state[item][category] = not self.state[item][category]
        logger.debug('State toggled: (item: %s, category: %s, value: %s)', item, category, self.state[item][category])
        self.set_redraw(True)

    def iterate_state(self, item, category):
        """Cycle a state value through 1..max for its category and redraw."""
        logger.debug('Cycling state: (item: %s, category: %s)', item, category)
        # Each category cycles over a different number of options.
        if category == 'color':
            max_val = 7
        elif category == 'frame-size':
            max_val = 6
        elif category == 'skip-mode':
            max_val = 4
        else:
            max_val = 3
        val = self.state[item][category]
        val = (val + 1) if (val != max_val) else 1
        self.state[item][category] = val
        logger.debug('Cycled state: (item: %s, category: %s, value: %s)', item, category, self.state[item][category])
        self.set_redraw(True)

    def set_state_value(self, item, category, value):
        """Set a state value; setting the same value again clears it (toggle)."""
        logger.debug('Setting state value: (item: %s, category: %s, value: %s)', item, category, value)
        state = self.state[item][category]
        value = str(value) if (value is not None) else value
        if state == value:
            self.state[item][category] = None
        else:
            self.state[item][category] = value
        logger.debug('Setting state value: (item: %s, category: %s, value: %s)', item, category, self.state[item][category])
        self.set_redraw(True)

    def iterate_frame(self, *args):
        """Move the frame index by args[1] (an int offset, 'first' or 'last')."""
        logger.debug('Iterating frame: (args: %s)', args)
        iteration = args[1]
        max_frame = self.state['navigation']['max_frame']
        if iteration in ('first', 'last'):
            next_frame = 0 if (iteration == 'first') else max_frame
            self.state['navigation']['frame_idx'] = next_frame
            self.state['navigation']['last_request'] = 0
            self.set_redraw(True)
            return
        current_frame = self.state['navigation']['frame_idx']
        next_frame = current_frame + iteration
        # Clamp to [0, max_frame] depending on the direction of travel.
        end = 0 if (iteration < 0) else max_frame
        if (max_frame == 0) or ((end > 0) and (next_frame >= end)) or ((end == 0) and (next_frame <= end)):
            next_frame = end
        self.state['navigation']['frame_idx'] = next_frame
        self.state['navigation']['last_request'] = iteration
        # Changing frame clears any face selection.
        self.set_state_value('edit', 'selected', None)

    def get_color(self, item):
        """Return the color index configured for the given overlay."""
        return self.state[item]['color']

    def get_size(self, item):
        """Return the size/thickness configured for the given overlay."""
        return self.state[item]['size']

    def get_frame_scaling(self):
        """Return the zoom factor for the current frame-size state."""
        factors = (1, 1.25, 1.5, 2, 0.5, 0.75)
        idx = self.state['navigation']['frame-size'] - 1
        return factors[idx]

    def get_edit_mode(self):
        """Return 'Edit' when edit mode is active, otherwise 'View'."""
        if self.state['edit']['active']:
            return 'Edit'
        return 'View'

    def get_skip_mode(self):
        """Return the label of the current skip mode."""
        return self.skip_mode[self.state['navigation']['skip-mode']]

    def get_state_color(self):
        """Return the BGR status color: red=unsaved, yellow=editing, white=idle."""
        color = (255, 255, 255)
        if self.state['edit']['updated']:
            color = (0, 0, 255)
        elif self.state['edit']['active']:
            color = (0, 255, 255)
        return color

    def get_frame_name(self):
        """Return the name of the frame currently being displayed."""
        return self.state['navigation']['frame_name']

    def get_selected_face_id(self):
        """Return the selected face index as int, or None when nothing is selected."""
        try:
            return int(self.state['edit']['selected'])
        except TypeError:
            return None

    def redraw(self):
        """Return True when a redraw has been requested."""
        return self.state['edit']['redraw']

    def set_redraw(self, request):
        """Set or clear the redraw request flag."""
        self.state['edit']['redraw'] = request

    def get_next_face_idx(self, increment):
        """Return the index of the nearest frame (in `increment` direction) with faces."""
        navigation = self.state['navigation']
        frame_list = self.frames.file_list_sorted
        frame_idx = navigation['frame_idx'] + increment
        while True:
            # Stop at either end of the frame list.
            if not (0 <= frame_idx <= navigation['max_frame']):
                break
            frame = frame_list[frame_idx]['frame_fullname']
            if not self.alignments.frame_has_faces(frame):
                frame_idx += increment
            else:
                break
        return frame_idx
class DataSaver(BaseLearner):
    """Wrap a learner so the full result is saved while only part is learned.

    ``arg_picker`` extracts the value forwarded to the wrapped learner; the
    complete result of each evaluation is kept in ``extra_data`` keyed by
    the point it was told at.

    Bug fix: the ``_docstring_from(...)`` lines were bare expression
    statements in the original (their '@' was lost), so the returned
    decorators were silently discarded; they are restored as decorators.
    """

    def __init__(self, learner: LearnerType, arg_picker: Callable) -> None:
        self.learner = learner
        # Full results keyed by the point they were told at.
        self.extra_data: OrderedDict[Any, Any] = OrderedDict()
        self.function = learner.function
        self.arg_picker = arg_picker

    def new(self) -> "DataSaver":
        """Return a new DataSaver wrapping a fresh copy of the learner."""
        return DataSaver(self.learner.new(), self.arg_picker)

    @_docstring_from(BaseLearner.ask)
    def ask(self, *args, **kwargs):
        return self.learner.ask(*args, **kwargs)

    @_docstring_from(BaseLearner.loss)
    def loss(self, *args, **kwargs):
        return self.learner.loss(*args, **kwargs)

    @_docstring_from(BaseLearner.remove_unfinished)
    def remove_unfinished(self, *args, **kwargs):
        return self.learner.remove_unfinished(*args, **kwargs)

    def __getattr__(self, attr: str) -> Any:
        # Delegate everything else to the wrapped learner.
        return getattr(self.learner, attr)

    @_docstring_from(BaseLearner.tell)
    def tell(self, x: Any, result: Any) -> None:
        # Keep the full result; feed only the picked value to the learner.
        y = self.arg_picker(result)
        self.extra_data[x] = result
        self.learner.tell(x, y)

    @_docstring_from(BaseLearner.tell_pending)
    def tell_pending(self, x: Any) -> None:
        self.learner.tell_pending(x)

    def to_dataframe(self, with_default_function_args: bool = True, function_prefix: str = 'function.', extra_data_name: str = 'extra_data', **kwargs: Any) -> pandas.DataFrame:
        """Return the learner's dataframe with an extra column of saved results.

        Raises ImportError when pandas is not available.
        """
        if not with_pandas:
            raise ImportError('pandas is not installed.')
        df = self.learner.to_dataframe(with_default_function_args=with_default_function_args, function_prefix=function_prefix, **kwargs)
        df[extra_data_name] = [self.extra_data[_to_key(x)] for (_, x) in df[df.attrs['inputs']].iterrows()]
        return df

    def load_dataframe(self, df: pandas.DataFrame, with_default_function_args: bool = True, function_prefix: str = 'function.', extra_data_name: str = 'extra_data', input_names: tuple[str, ...] = (), **kwargs) -> None:
        """Load learner data and the saved extra results from a dataframe."""
        self.learner.load_dataframe(df, with_default_function_args=with_default_function_args, function_prefix=function_prefix, **kwargs)
        keys = df.attrs.get('inputs', list(input_names))
        for (_, x) in df[keys + [extra_data_name]].iterrows():
            key = _to_key(x[:-1])
            self.extra_data[key] = x[-1]

    def _get_data(self) -> tuple[Any, OrderedDict[Any, Any]]:
        return (self.learner._get_data(), self.extra_data)

    def _set_data(self, data: tuple[Any, OrderedDict[Any, Any]]) -> None:
        (learner_data, self.extra_data) = data
        self.learner._set_data(learner_data)

    def __getstate__(self) -> tuple[LearnerType, Callable, OrderedDict]:
        return (self.learner, self.arg_picker, self.extra_data)

    def __setstate__(self, state: tuple[LearnerType, Callable, OrderedDict]) -> None:
        (learner, arg_picker, extra_data) = state
        self.__init__(learner, arg_picker)
        self.extra_data = extra_data

    @_docstring_from(BaseLearner.save)
    def save(self, fname, compress=True) -> None:
        BaseLearner.save(self, fname, compress)

    @_docstring_from(BaseLearner.load)
    def load(self, fname, compress=True) -> None:
        BaseLearner.load(self, fname, compress)
def test_list_build_sources():
    # End-to-end check of listing GitLab build sources for a namespace.
    with get_gitlab_trigger() as trigger:
        sources = trigger.list_build_sources_for_namespace('someorg')
        # NOTE(review): the expected literal below is garbled — the
        # 'last_updated' values and 'url' strings were stripped (likely during
        # extraction), leaving invalid syntax. Restore the timestamps and
        # repository URLs from the upstream fixture before running this test.
        assert (sources == [{'last_updated': , 'name': 'someproject', 'url': ' 'private': True, 'full_name': 'someorg/someproject', 'has_admin_permissions': False, 'description': ''}, {'last_updated': , 'name': 'anotherproject', 'url': ' 'private': False, 'full_name': 'someorg/anotherproject', 'has_admin_permissions': True, 'description': ''}])
class QiitaOAuth2TestIdentifiedByPermanentId(QiitaOAuth2Test):
    """Qiita OAuth2 tests with users identified by their permanent id."""

    def _enable_permanent_id(self):
        # Switch the backend to identify users by permanent id.
        self.strategy.set_settings({'SOCIAL_AUTH_QIITA_IDENTIFIED_BY_PERMANENT_ID': True})

    def _assert_identified(self, user):
        # Exactly one social auth entry, keyed by the permanent id.
        self.assertEqual(len(user.social), 1)
        social = user.social[0]
        self.assertEqual(social.uid, '12345')
        self.assertEqual(social.extra_data['permanent_id'], 12345)

    def test_login(self):
        self._enable_permanent_id()
        self._assert_identified(self.do_login())

    def test_partial_pipeline(self):
        self._enable_permanent_id()
        self._assert_identified(self.do_partial_pipeline())
class TestPreInstallCommands():
    """Validation of the `pre-install-commands` environment option."""

    @staticmethod
    def _build_environment(isolation, isolated_data_dir, platform, env_config=None):
        # Construct a MockEnvironment from a minimal project, optionally with
        # a `default` environment configuration.
        project_config = {'project': {'name': 'my_app', 'version': '0.0.1'}}
        if env_config is not None:
            project_config['tool'] = {'hatch': {'envs': {'default': env_config}}}
        project = Project(isolation, config=project_config)
        return MockEnvironment(isolation, project.metadata, 'default', project.config.envs['default'], {}, isolated_data_dir, isolated_data_dir, platform, 0)

    def test_default(self, isolation, isolated_data_dir, platform):
        environment = self._build_environment(isolation, isolated_data_dir, platform)
        # Accessing twice also checks the value is stable across reads.
        assert environment.pre_install_commands == environment.pre_install_commands == []

    def test_not_array(self, isolation, isolated_data_dir, platform):
        environment = self._build_environment(isolation, isolated_data_dir, platform, {'pre-install-commands': 9000})
        with pytest.raises(TypeError, match='Field `tool.hatch.envs.default.pre-install-commands` must be an array'):
            _ = environment.pre_install_commands

    def test_entry_not_string(self, isolation, isolated_data_dir, platform):
        environment = self._build_environment(isolation, isolated_data_dir, platform, {'pre-install-commands': [9000]})
        with pytest.raises(TypeError, match='Command #1 of field `tool.hatch.envs.default.pre-install-commands` must be a string'):
            _ = environment.pre_install_commands

    def test_correct(self, isolation, isolated_data_dir, platform):
        environment = self._build_environment(isolation, isolated_data_dir, platform, {'pre-install-commands': ['baz test']})
        assert environment.pre_install_commands == ['baz test']
def jsonable_encoder(obj: Any, include: Optional[Union['SetIntStr', 'DictIntStrAny']] = None, exclude: Optional[Union['SetIntStr', 'DictIntStrAny']] = None, by_alias: bool = True, skip_defaults: Optional[bool] = None, exclude_unset: bool = True, exclude_none: bool = True) -> Any:
    """Convert *obj* to a JSON-encodable form.

    Objects exposing a pydantic-style ``json``/``model_dump_json`` interface
    are serialized via ``to_json``; anything else is returned unchanged.

    Fixes: the original annotated ``include`` as non-optional despite the
    ``None`` default, left ``exclude`` unannotated, and typed
    ``skip_defaults`` (a deprecated alias folded into ``exclude_unset``)
    as plain ``bool`` with a ``None`` default.
    """
    if hasattr(obj, 'json') or hasattr(obj, 'model_dump_json'):
        # `skip_defaults` is the legacy spelling of `exclude_unset`.
        return to_json(obj, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=bool(exclude_unset or skip_defaults), exclude_none=exclude_none)
    return obj
@pytest.mark.parametrize('directory', ['demo', 'non-canonical-name'])
def test_search_for_directory_setup_with_base(provider: Provider, directory: str, fixture_dir: FixtureDirGetter) -> None:
    """Resolving a directory dependency with an explicit base yields the demo package.

    Note: the original first line was a bare ``.parametrize(...)`` — a mangled
    decorator whose ``@pytest.mark`` prefix was lost; it is restored here.
    """
    # The project root doubles as the dependency's base directory.
    project_root = ((fixture_dir('git') / 'github.com') / 'demo') / directory
    dependency = DirectoryDependency('demo', project_root, base=project_root)
    package = provider.search_for_direct_origin_dependency(dependency)
    assert package.name == 'demo'
    assert package.version.text == '0.1.2'
    required = [r for r in package.requires if not r.is_optional()]
    optional = [r for r in package.requires if r.is_optional()]
    assert required == [get_dependency('pendulum', '>=1.4.4')]
    assert optional == [get_dependency('tomlkit'), get_dependency('cleo')]
    assert package.extras == {'foo': [get_dependency('cleo')], 'bar': [get_dependency('tomlkit')]}
    assert package.root_dir == project_root
# NOTE(review): the bare `_tf` below looks like a mangled decorator (most
# likely `@require_tf` from transformers.testing_utils, with the '@' and name
# prefix lost during extraction) — confirm against the upstream test file.
_tf
class TFXLNetModelLanguageGenerationTest(unittest.TestCase):
    def test_lm_generate_xlnet_base_cased(self):
        """Greedy generation from pretrained xlnet-base-cased must reproduce a known continuation."""
        model = TFXLNetLMHeadModel.from_pretrained('xlnet-base-cased')
        # Prompt: a fixed passage encoded as token ids.
        input_ids = tf.convert_to_tensor([[67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3]], dtype=tf.int32)
        # Expected ids: the prompt followed by the deterministic greedy continuation.
        expected_output_ids = [67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3, 19, 12943, 4354, 153, 27, 442, 22, 2771, 4901, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771]
        # do_sample=False makes generation greedy and therefore deterministic.
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
class ZipRunIterator(AbstractRunIterator):
    """Iterate several run iterators in lockstep, yielding merged runs.

    Each yielded range covers a span over which every component iterator is
    constant; the yielded value is the list of the components' current values.
    """

    def __init__(self, range_iterators):
        # Component iterators whose runs are zipped together.
        self.range_iterators = range_iterators

    def ranges(self, start, end):
        """Yield (start, end, values) runs covering [start, end).

        Exhaustion of any component iterator silently terminates the
        generator (the StopIteration is swallowed).
        """
        try:
            iterators = [i.ranges(start, end) for i in self.range_iterators]
            # Prime every component with its first run.
            (starts, ends, values) = zip(*[next(i) for i in iterators])
            starts = list(starts)
            ends = list(ends)
            values = list(values)
            while (start < end):
                # The merged run ends where the earliest component run ends.
                min_end = min(ends)
                # NOTE(review): the same `values` list object is yielded each
                # iteration and mutated below — callers that keep yielded
                # values must copy them.  Confirm this is intentional.
                (yield (start, min_end, values))
                start = min_end
                # Advance every component whose run finished at min_end.
                for (i, iterator) in enumerate(iterators):
                    if (ends[i] == min_end):
                        (starts[i], ends[i], values[i]) = next(iterator)
        except StopIteration:
            return

    def __getitem__(self, index):
        # Project the component at `index` from every range iterator.
        return [i[index] for i in self.range_iterators]
@pytest.mark.end_to_end()
def test_task_function_with_partialed_args(tmp_path, runner):
    """A task built with functools.partial is collected and executed.

    Note: the original first line was a bare ``.end_to_end()`` — a mangled
    decorator whose ``@pytest.mark`` prefix was lost; it is restored here.
    """
    # NOTE(review): the indentation inside this source literal appears to have
    # been collapsed by extraction; textwrap.dedent cannot restore nested
    # indentation, so compare with the upstream test if it fails.
    source = '\n import pytask\n import functools\n\n def func(produces, content):\n produces.write_text(content)\n\n task_func = pytask.mark.produces("out.txt")(\n functools.partial(func, content="hello")\n )\n '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert result.exit_code == ExitCode.OK
    assert 'Collected 1 task.' in result.output
    assert '1 Succeeded' in result.output
    assert tmp_path.joinpath('out.txt').exists()
def _init_profile(profile: QWebEngineProfile) -> None:
    """Initialize a newly created QWebEngineProfile.

    Wires up the settings setter, qute:// scheme handler, request
    interceptor, download manager, cookie filter, optional notification
    bridge and history signals for the given profile.
    """
    # Attach the settings helper to the profile object so it stays reachable.
    profile.setter = ProfileSetter(profile)
    profile.setter.init_profile()
    _qute_scheme_handler.install(profile)
    _req_interceptor.install(profile)
    _download_manager.install(profile)
    cookies.install_filter(profile)
    # The notification bridge is optional (None when unsupported).
    if (notification.bridge is not None):
        notification.bridge.install(profile)
    # Keep the profile's visited-link store in sync with the history database.
    history.web_history.history_cleared.connect(profile.clearAllVisitedLinks)
    history.web_history.url_cleared.connect((lambda url: profile.clearVisitedLinks([url])))
    _global_settings.init_settings()
class Document():
    """A named collection of elements rendered through the document template."""

    def __init__(self, name: str):
        """Create an empty document called *name*."""
        self.elements = []
        self.parent_builder = None
        self.name = name
        # Cache for the rendered HTML; populated lazily by generate_html().
        self._generated_html = None

    def generate_html(self) -> str:
        """Render the document to HTML, memoizing the result."""
        if self._generated_html is None:
            template = templates.environment.get_template('document.html')
            self._generated_html = template.render(elements=self.elements, document=self)
        return self._generated_html

    def add_element(self, element) -> None:
        """Append *element* to the document's element list."""
        self.elements.append(element)
def greedyUpto(lit_str_):
    """Return a parser that greedily matches input up to and including *lit_str_*.

    The returned callable takes a string and returns ``(1, original, matched)``
    when the literal is found (``matched`` is the whole match, stripped), or
    ``(0, original, None)`` otherwise.

    Bug fixes: the original referenced the undefined name ``lit_str``
    (NameError) and its hand-rolled escaping replaced every non-word
    character with the *first* one matched (crashing when the literal did
    not start with one); ``re.escape`` is the intended behaviour.
    """
    # Escape regex metacharacters so the literal can be embedded in a pattern.
    lit_str_ = re.escape(lit_str_)

    def gen(str_):
        """Match *str_* up to the literal; return (ok, original, matched-or-None)."""
        patt2 = re.compile('^(.*)\\s*' + lit_str_ + '\\s*')
        m2 = patt2.match(str_)
        if m2 is None:
            return (0, str_, None)
        return (1, str_, m2.group(0).strip())

    return gen
def sample_system(sysc, Ts, method='zoh', alpha=None, prewarp_frequency=None, name=None, copy_names=True, **kwargs):
    """Convert a continuous-time system to discrete time.

    Thin wrapper around ``sysc.sample``; raises ValueError when *sysc* is
    not a continuous-time system.
    """
    if isctime(sysc):
        return sysc.sample(Ts, method=method, alpha=alpha, prewarp_frequency=prewarp_frequency, name=name, copy_names=copy_names, **kwargs)
    raise ValueError('First argument must be continuous time system')
# NOTE(review): the line below looks like a mangled decorator — the leading
# '@' (and possibly part of the decorator name that registers this class in
# the filter stack) was lost during extraction.  Confirm against upstream.
_filter('duplicate')
class DuplicateFilter(BaseFilter, FileManagerAware):
    """Filter that matches files having at least one duplicate (by hash)."""

    def __init__(self, _):
        # Precompute the duplicate set once; the constructor argument is unused.
        self.duplicates = self.get_duplicates()

    def __call__(self, fobj):
        # A file passes the filter when it belongs to a duplicate group.
        return (fobj in self.duplicates)

    def __str__(self):
        return '<Filter: duplicate>'

    def get_duplicates(self):
        """Return the set of files in the current directory that have duplicates."""
        duplicates = set()
        for dups in group_by_hash(self.fm.thisdir.files_all):
            # Any hash group with two or more members is a set of duplicates.
            if (len(dups) >= 2):
                duplicates.update(dups)
        return duplicates
def save_plot_history(history, save_path, pickle_only=True):
    """Persist a training *history* dict and optionally CSV + metric plots.

    Always attempts (best-effort) to pickle the history to
    ``save_path + 'history.pickle'``.  When ``pickle_only`` is False, also
    writes the history as CSV (locally and to *save_path*) and saves
    accuracy/loss plots.

    Bug fix: the original opened the pickle file without ever closing it;
    a ``with`` block now guarantees the handle is released.
    """
    print('saving history in pickle format...')
    historyFile = save_path + 'history.pickle'
    try:
        with open(historyFile, 'wb') as file_:
            pickle.dump(history, file_)
        print('saved', historyFile)
    except Exception as e:
        # Best-effort: report the failure and continue (original behaviour).
        print(e)
    if pickle_only:
        return
    print('saving history in csv format...')
    historyInDrivePath = save_path + 'history.csv'
    pd.DataFrame(history).to_csv(historyInDrivePath)
    pd.DataFrame(history).to_csv('history.csv')
    print('plotting and saving train test graphs...')
    _plot_history_metric(history, 'acc', 'val_acc', 'model accuracy', 'accuracy', 'accuracy.png', save_path)
    _plot_history_metric(history, 'loss', 'val_loss', 'model loss', 'loss', 'loss.png', save_path)


def _plot_history_metric(history, train_key, val_key, title, ylabel, filename, save_path):
    """Plot train/validation curves for one metric; save locally and to *save_path*."""
    plt.figure(figsize=(10, 6))
    plt.plot(history[train_key])
    plt.plot(history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.grid(True)
    plt.savefig(filename, bbox_inches='tight')
    plt.savefig(save_path + filename, bbox_inches='tight')
    plt.close()
def imp_hash_table_ref_cont(ht, old, env, cont, _vals):
    """Continuation for an impersonated hash-ref.

    The handler must produce exactly two values: the (possibly replaced)
    key and a post-processing procedure applied to the looked-up value.
    """
    if _vals.num_values() != 2:
        raise SchemeException('hash-ref handler produced the wrong number of results')
    key, post = _vals.get_all_values()
    # Chain the post-processing continuation, then perform the real lookup.
    post_cont = imp_hash_table_post_ref_cont(post, ht, old, env, cont)
    return ht.hash_ref(key, env, post_cont)
def _create_independent_chains_initial_circuit(parameters: FermiHubbardParameters) -> cirq.Circuit:
    """Build the initial-state circuit for independent spin-up/spin-down chains.

    Each chain's preparation circuit is constructed separately, run in
    parallel, and the combined circuit is aligned for Givens rotations.
    """
    layout = parameters.layout
    initial = cast(IndependentChainsInitialState, parameters.initial_state)
    combined = run_in_parallel(
        _create_chain_initial_circuit(parameters, layout.up_qubits, initial.up),
        _create_chain_initial_circuit(parameters, layout.down_qubits, initial.down),
    )
    return align_givens_circuit(combined)
def get_test_data(num_train=1000, num_test=500, input_shape=(10,), output_shape=(2,), classification=True, num_classes=2):
    """Generate synthetic Gaussian data split into train and test sets.

    For classification, targets are integer class labels and each sample is
    drawn around its label.  For regression, a random center per sample is
    used for both inputs and targets.  Returns ((X_train, y_train),
    (X_test, y_test)).
    """
    total = num_train + num_test
    X = np.zeros((total,) + input_shape)
    if classification:
        y = np.random.randint(0, num_classes, size=(total,))
        for idx in range(total):
            # Inputs cluster around the sample's class label.
            X[idx] = np.random.normal(loc=y[idx], scale=0.7, size=input_shape)
    else:
        centers = np.random.random((total,))
        y = np.zeros((total,) + output_shape)
        for idx in range(total):
            # Inputs and targets share the same random center.
            X[idx] = np.random.normal(loc=centers[idx], scale=0.7, size=input_shape)
            y[idx] = np.random.normal(loc=centers[idx], scale=0.7, size=output_shape)
    return ((X[:num_train], y[:num_train]), (X[num_train:], y[num_train:]))
class UnpackTest(object):
    """Tests for utils.unpack normalising values into (data, code, headers)."""

    def test_single_value(self):
        # A bare value gets the default code and empty headers.
        assert utils.unpack('test') == ('test', 200, {})

    def test_single_value_with_default_code(self):
        # The caller-supplied default code is used for bare values.
        assert utils.unpack('test', 500) == ('test', 500, {})

    def test_value_code(self):
        assert utils.unpack(('test', 201)) == ('test', 201, {})

    def test_value_code_headers(self):
        assert utils.unpack(('test', 201, {'Header': 'value'})) == ('test', 201, {'Header': 'value'})

    def test_value_headers_default_code(self):
        # A None code falls back to the default while headers are kept.
        assert utils.unpack(('test', None, {'Header': 'value'})) == ('test', 200, {'Header': 'value'})

    def test_too_many_values(self):
        # Tuples longer than three elements are rejected.
        with pytest.raises(ValueError):
            utils.unpack((None, None, None, None))
def _init_representations():
    """Populate the global ``representations`` table with formatter callables.

    Each entry maps a type to a list of renderers of increasing verbosity.

    NOTE(review): the original version-check constant was lost during
    extraction (``sys.hexversion < ``).  The guarded entries use
    Python-2-only ``types`` members (``ClassType``, ``InstanceType``,
    ``im_func``), so the threshold is presumably 0x03000000 (Python 3.0);
    confirm against upstream history.
    """
    global representations
    if sys.hexversion < 0x03000000:
        # Python 2 only: old-style classes, instances and bound methods.
        classobj = [(lambda c: ('classobj(%s)' % repr(c)))]
        representations[types.ClassType] = classobj
        instance = [(lambda f: ('instance(%s)' % repr(f.__class__)))]
        representations[types.InstanceType] = instance
        instancemethod = [(lambda i: ('instancemethod (%s)' % repr(i.im_func))), (lambda i: ('instancemethod (%s, %s)' % (repr(i.im_class), repr(i.im_func))))]
        representations[types.MethodType] = instancemethod
    # Frames: progressively more detailed renderings (name, line, filename).
    frame = [(lambda f: ('frame (codename: %s)' % f.f_code.co_name)), (lambda f: ('frame (codename: %s, codeline: %s)' % (f.f_code.co_name, f.f_code.co_firstlineno))), (lambda f: ('frame (codename: %s, filename: %s, codeline: %s)' % (f.f_code.co_name, f.f_code.co_filename, f.f_code.co_firstlineno)))]
    representations[types.FrameType] = frame
    _dict = [(lambda d: str(type(d))), (lambda d: ('dict, len=%s' % len(d)))]
    representations[dict] = _dict
    function = [(lambda f: ('function (%s)' % f.__name__)), (lambda f: ('function (%s.%s)' % (f.__module__, f.__name__)))]
    representations[types.FunctionType] = function
    _list = [(lambda l: str(type(l))), (lambda l: ('list, len=%s' % len(l)))]
    representations[list] = _list
    # Modules may lack __name__; fall back to __file__ or the object id.
    module = [(lambda m: ('module(%s)' % getattr(m, '__name__', getattr(m, '__file__', ('nameless, id: %d' % id(m))))))]
    representations[types.ModuleType] = module
    _set = [(lambda s: str(type(s))), (lambda s: ('set, len=%s' % len(s)))]
    representations[set] = _set
class RepublishTFStaticForRosbag():
    """Republish accumulated /tf_static transforms on a latched topic.

    Useful when playing rosbags, where late subscribers would otherwise
    miss static transforms published before they connected.
    """

    def __init__(self):
        # Aggregate message holding the latest transform per child frame.
        self._tf_msg = TFMessage()
        self._pub = rospy.Publisher('/tf_static_republished', TFMessage, queue_size=1, latch=True)
        self._sub = rospy.Subscriber('/tf_static', TFMessage, self._sub_callback)
        # Periodic republish so the latched message stays fresh.
        self._timer = rospy.Timer(rospy.Duration(1), self._timer_callback)

    def _sub_callback(self, tf_msg):
        """Merge incoming static transforms, keeping only the latest per child frame.

        Bug fix: the original appended unconditionally, so repeated
        /tf_static messages grew the aggregate without bound and
        republished stale duplicates.
        """
        for transform in tf_msg.transforms:
            self._tf_msg.transforms = [
                existing for existing in self._tf_msg.transforms
                if existing.child_frame_id != transform.child_frame_id
            ]
            self._tf_msg.transforms.append(transform)
        self._pub.publish(self._tf_msg)

    def _timer_callback(self, event):
        """Periodically republish the aggregate; warn while it is still empty."""
        if not self._tf_msg.transforms:
            rospy.logwarn_throttle(10, 'tf_msg is empty')
            return
        self._pub.publish(self._tf_msg)
class F37_TestCase(F14_TestCase):
    """zfcp command parsing rules introduced in Fedora 37."""

    def runTest(self):
        # Everything valid in F14 must still parse.
        F14_TestCase.runTest(self)
        # NPIV-style: --devnum alone is now sufficient.
        self.assert_parse('zfcp --devnum=1', 'zfcp --devnum=1\n')
        # Incomplete option combinations must be rejected.
        for invalid in ('zfcp --wwpn=2 --fcplun=3',
                        'zfcp --devnum=1 --wwpn=2',
                        'zfcp --devnum=1 --fcplun=3'):
            self.assert_parse_error(invalid)
@pytest.mark.parametrize('given_actual_arguments, expected_messages', [({'FOO': None}, ['Expected argument "foo" is not in matched arguments [\'FOO\']']), ({'foo': 42}, ['Expected argument "foo" is of type "int" instead "str"']), ({'foo': 'foo'}, ['Expected argument "foo" with value "fooo" does not match value "foo"'])], ids=['Missing argument', 'Wrong type argument', 'Wrong value argument'])
def test_checking_step_arguments_errors(given_actual_arguments, expected_messages):
    """Each mismatch between expected and actual step arguments yields the right message.

    Note: the original first line was a bare ``.parametrize(...)`` — a
    mangled decorator whose ``@pytest.mark`` prefix was lost; restored here.
    """
    expected_arguments = {'foo': 'fooo'}
    messages = matches.check_step_arguments(expected_arguments, given_actual_arguments)
    assert messages == expected_messages
def test_highlighted(qtbot):
    """The completion highlighter must color matched text red."""
    document = QTextDocument()
    completiondelegate._Highlighter(document, 'Hello', Qt.GlobalColor.red)
    document.setPlainText('Hello World')
    edit = QTextEdit()
    qtbot.add_widget(edit)
    edit.setDocument(document)
    foreground_colors = [fmt.foreground().color() for fmt in document.allFormats()]
    assert QColor('red') in foreground_colors
def run_base_model_nfm(dfTrain, dfTest, folds, pnn_params):
    """Parse the data, then train an AFM model on every cross-validation fold.

    `pnn_params` is mutated in place: `feature_size` and `field_size` are
    filled in from the parsed data before model construction.

    NOTE(review): despite the `nfm` name and `pnn_params` argument, the
    model instantiated is AFM — confirm the naming against upstream.
    """
    fd = FeatureDictionary(dfTrain=dfTrain, dfTest=dfTest, numeric_cols=config.NUMERIC_COLS, ignore_cols=config.IGNORE_COLS)
    data_parser = DataParser(feat_dict=fd)
    (Xi_train, Xv_train, y_train) = data_parser.parse(df=dfTrain, has_label=True)
    (Xi_test, Xv_test, ids_test) = data_parser.parse(df=dfTest)
    print(dfTrain.dtypes)
    pnn_params['feature_size'] = fd.feat_dim
    pnn_params['field_size'] = len(Xi_train[0])

    def _select(rows, indices):
        # Gather the rows at `indices` (PEP 8: prefer def over an assigned lambda).
        return [rows[i] for i in indices]

    for train_idx, valid_idx in folds:
        (Xi_train_, Xv_train_, y_train_) = (_select(Xi_train, train_idx), _select(Xv_train, train_idx), _select(y_train, train_idx))
        (Xi_valid_, Xv_valid_, y_valid_) = (_select(Xi_train, valid_idx), _select(Xv_train, valid_idx), _select(y_train, valid_idx))
        afm = AFM(**pnn_params)
        afm.fit(Xi_train_, Xv_train_, y_train_, Xi_valid_, Xv_valid_, y_valid_)
class SequenceEntry(Channel):
    """One entry (element) of an AWG sequence.

    Wraps the instrument's ``SEQuence:ELEM<n>`` SCPI subtree: the entry's
    sample length and loop count, plus one :class:`AnalogChannel` child per
    analog output channel.
    """
    def __init__(self, parent, number_of_channels, sequence_number):
        # sequence_number doubles as this Channel's id, substituted into
        # SCPI commands via insert_id().
        super().__init__(parent, sequence_number)
        self.number_of_channels = number_of_channels
        # Bounds for the dynamic validators below are queried from the
        # instrument once at construction time.
        self.length_values = [self.length_min, self.length_max]
        self.loop_count_values = [self.loop_count_min, self.loop_count_max]
        for i in range(1, (self.number_of_channels + 1)):
            self.add_child(self.AnalogChannel, i, collection='ch', sequence_number=sequence_number)
    def insert_id(self, command):
        # Replace the {ent} placeholder with this entry's sequence number.
        return command.format(ent=self.id)
    # Entry sample length; validated against the instrument-reported range.
    length = Instrument.control('SEQuence:ELEM{ent}:LENGth?', 'SEQuence:ELEM{ent}:LENGth %s', 'This property sets or returns the number of samples of the entry.\n ', validator=strict_range, dynamic=True)
    length_max = Instrument.measurement('SEQuence:ELEM{ent}:LENGth? MAXimum', 'This property queries the maximum entry samples length.', get_process=(lambda v: int(v)))
    length_min = Instrument.measurement('SEQuence:ELEM{ent}:LENGth? MINimum', 'This property queries the minimum entry samples length.', get_process=(lambda v: int(v)))
    # Number of repetitions of this entry's waveform.
    loop_count = Instrument.control('SEQuence:ELEM{ent}:LOOP:COUNt?', 'SEQuence:ELEM{ent}:LOOP:COUNt %s', 'This property sets or returns the number of waveform repetitions for\n the entry.\n ', validator=strict_range, dynamic=True)
    loop_count_max = Instrument.measurement('SEQuence:ELEM{ent}:LOOP:COUNt? MAXimum', 'This property queries the maximum number of waveform repetitions for\n the entry.', get_process=(lambda v: int(v)))
    loop_count_min = Instrument.measurement('SEQuence:ELEM{ent}:LOOP:COUNt? MINimum', 'This property queries the minimum number of waveform repetitions for\n the entry.', get_process=(lambda v: int(v)))
    class AnalogChannel(Channel):
        """Per-channel settings (voltage levels and waveform) of a sequence entry."""
        def __init__(self, parent, id, sequence_number):
            super().__init__(parent, id)
            self.seq_num = sequence_number
            # Valid waveform names come from the grandparent instrument's
            # waveform dictionary.
            self.waveform_values = list(self.parent.parent.waveforms.keys())
            self.calculate_voltage_range()
        def insert_id(self, command):
            # Commands need both the entry number ({ent}) and channel ({ch}).
            return command.format(ent=self.seq_num, ch=self.id)
        voltage_amplitude = Instrument.control('SEQuence:ELEM{ent}:AMPlitude{ch}?', 'SEQuence:ELEM{ent}:AMPlitude{ch} %s', 'This property sets or returns the voltage peak-to-peak\n amplitude.', validator=strict_range, dynamic=True)
        voltage_amplitude_max = Instrument.measurement('SEQuence:ELEM{ent}:AMPlitude{ch}? MAXimum', 'This property queries the maximum amplitude voltage level that\n can be set.')
        voltage_amplitude_min = Instrument.measurement('SEQuence:ELEM{ent}:AMPlitude{ch}? MINimum', 'This property queries the minimum amplitude voltage level that\n can be set.')
        voltage_offset = Instrument.control('SEQuence:ELEM{ent}:OFFset{ch}?', 'SEQuence:ELEM{ent}:OFFset{ch} %s', 'This property sets or returns the voltage offset.', validator=strict_range, dynamic=True)
        voltage_offset_max = Instrument.measurement('SEQuence:ELEM{ent}:OFFset{ch}? MAXimum', 'This property queries the maximum voltage offset that can be\n set.')
        voltage_offset_min = Instrument.measurement('SEQuence:ELEM{ent}:OFFset{ch}? MINimum', 'This property queries the minimum voltage offset that can be\n set.')
        voltage_high = Instrument.control('SEQuence:ELEM{ent}:VOLTage:HIGH{ch}?', 'SEQuence:ELEM{ent}:VOLTage:HIGH{ch} %s', 'This property sets or returns the high voltage level of the\n waveform.', validator=strict_range, dynamic=True)
        voltage_high_max = Instrument.measurement('SEQuence:ELEM{ent}:VOLTage:HIGH{ch}? MAXimum', 'This property queries the maximum high voltage level of the\n waveform that can be set to the output waveform.')
        voltage_high_min = Instrument.measurement('SEQuence:ELEM{ent}:VOLTage:HIGH{ch}? MINimum', 'This property queries the minimum high voltage level of the\n waveform that can be set to the output waveform.')
        voltage_low = Instrument.control('SEQuence:ELEM{ent}:VOLTage:LOW{ch}?', 'SEQuence:ELEM{ent}:VOLTage:LOW{ch} %s', 'This property sets or returns the low voltage level of the\n waveform.', validator=strict_range, dynamic=True)
        voltage_low_max = Instrument.measurement('SEQuence:ELEM{ent}:VOLTage:LOW{ch}? MAXimum', 'This property queries the maximum low voltage level of the\n waveform that can be set to the output waveform.')
        voltage_low_min = Instrument.measurement('SEQuence:ELEM{ent}:VOLTage:LOW{ch}? MINimum', 'This property queries the minimum low voltage level of the\n waveform that can be set to the output waveform.')
        waveform = Instrument.control('SEQuence:ELEM{ent}:WAVeform{ch}?', 'SEQuence:ELEM{ent}:WAVeform{ch} %s', 'This property sets or returns the waveform. Its possible select\n a waveform only from those in the waveform list. In waveform list\n are already present 10 predefined waveform: Sine, Ramp, Square,\n Sync, DC, Gaussian, Lorentz, Haversine, Exp_Rise and Exp_Decay but\n user can import in the list others customized waveforms.', validator=strict_discrete_set, set_process=(lambda v: f'"{v}"'), dynamic=True)
        def calculate_voltage_range(self):
            # Refresh validator bounds from the instrument-reported extremes.
            self.voltage_amplitude_values = [self.voltage_amplitude_min, self.voltage_amplitude_max]
            self.voltage_offset_values = [self.voltage_offset_min, self.voltage_offset_max]
            self.voltage_high_values = [self.voltage_high_min, self.voltage_high_max]
            self.voltage_low_values = [self.voltage_low_min, self.voltage_low_max]
class QuadraticExpression(QuadraticProgramElement):
    """Quadratic expression ``x^T Q x`` over the variables of a quadratic program.

    Coefficients are stored as an upper-triangular ``dok_matrix``; indices may
    be given either as variable positions (int) or variable names (str).
    """

    def __init__(self, quadratic_program: Any, coefficients: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]) -> None:
        """Create a quadratic expression.

        Args:
            quadratic_program: parent program providing the variable index.
            coefficients: dense/sparse matrix, nested list, or dict mapping
                (i, j) index pairs to coefficients.
        """
        super().__init__(quadratic_program)
        # Routed through the `coefficients` property setter, which normalizes
        # every supported input format to an upper-triangular dok_matrix.
        self.coefficients = coefficients

    def __getitem__(self, key: Tuple[(Union[(int, str)], Union[(int, str)])]) -> float:
        """Return the coefficient for the (i, j) pair, resolving names and
        canonicalizing the pair to the upper triangle."""
        (i, j) = key
        if isinstance(i, str):
            i = self.quadratic_program.variables_index[i]
        if isinstance(j, str):
            j = self.quadratic_program.variables_index[j]
        return self.coefficients[(min(i, j), max(i, j))]

    def __setitem__(self, key: Tuple[(Union[(int, str)], Union[(int, str)])], value: float) -> None:
        """Set the coefficient for the (i, j) pair (stored upper-triangular)."""
        (i, j) = key
        if isinstance(i, str):
            i = self.quadratic_program.variables_index[i]
        if isinstance(j, str):
            j = self.quadratic_program.variables_index[j]
        self.coefficients[(min(i, j), max(i, j))] = value

    def _coeffs_to_dok_matrix(self, coefficients: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]) -> dok_matrix:
        """Convert any supported coefficient format to an upper-triangular dok_matrix.

        Raises:
            QiskitOptimizationError: if the input format is not supported.
        """
        if isinstance(coefficients, (list, ndarray, spmatrix)):
            coefficients = dok_matrix(coefficients)
        elif isinstance(coefficients, dict):
            n = self.quadratic_program.get_num_vars()
            coeffs = dok_matrix((n, n))
            for ((i, j), value) in coefficients.items():
                if isinstance(i, str):
                    i = self.quadratic_program.variables_index[i]
                if isinstance(j, str):
                    j = self.quadratic_program.variables_index[j]
                coeffs[(i, j)] = value
            coefficients = coeffs
        else:
            raise QiskitOptimizationError(f'Unsupported format for coefficients: {coefficients}')
        return self._triangle_matrix(coefficients)

    @staticmethod
    def _triangle_matrix(mat: dok_matrix) -> dok_matrix:
        # Fold the strict lower triangle onto the upper triangle so the
        # stored matrix is upper-triangular with equivalent quadratic form.
        lower = tril(mat, (- 1), format='dok')
        return ((mat + lower.transpose()) - lower).todok()

    @staticmethod
    def _symmetric_matrix(mat: dok_matrix) -> dok_matrix:
        # Split the strict upper triangle evenly across both triangles.
        upper = (triu(mat, 1, format='dok') / 2)
        return ((mat + upper.transpose()) - upper).todok()

    @property
    def coefficients(self) -> dok_matrix:
        """The upper-triangular coefficient matrix."""
        return self._coefficients

    @coefficients.setter
    def coefficients(self, coefficients: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]) -> None:
        self._coefficients = self._coeffs_to_dok_matrix(coefficients)

    def to_array(self, symmetric: bool=False) -> ndarray:
        """Return the coefficients as a dense array (optionally symmetrized)."""
        coeffs = (self._symmetric_matrix(self._coefficients) if symmetric else self._coefficients)
        return coeffs.toarray()

    def to_dict(self, symmetric: bool=False, use_name: bool=False) -> Dict[(Union[(Tuple[(int, int)], Tuple[(str, str)])], float)]:
        """Return the coefficients as a dict keyed by index pairs or name pairs."""
        coeffs = (self._symmetric_matrix(self._coefficients) if symmetric else self._coefficients)
        if use_name:
            return {(self.quadratic_program.variables[i].name, self.quadratic_program.variables[j].name): v for ((i, j), v) in coeffs.items()}
        else:
            return {(int(i), int(j)): v for ((i, j), v) in coeffs.items()}

    def evaluate(self, x: Union[(ndarray, List, Dict[(Union[(int, str)], float)])]) -> float:
        """Evaluate ``x^T Q x`` at *x*."""
        x = self._cast_as_array(x)
        # Restored `@` matrix-multiplication operators that were lost in
        # transcription; `@` works for ndarray and scipy sparse operands.
        val = x @ self.coefficients @ x
        return val

    def evaluate_gradient(self, x: Union[(ndarray, List, Dict[(Union[(int, str)], float)])]) -> ndarray:
        """Evaluate the gradient ``(Q^T + Q) x`` at *x*."""
        x = self._cast_as_array(x)
        val = (self.coefficients.transpose() + self.coefficients) @ x
        return val

    def _cast_as_array(self, x: Union[(ndarray, List, Dict[(Union[(int, str)], float)])]) -> Union[(dok_matrix, np.ndarray)]:
        """Normalize dict/list input into a dense ndarray over all variables."""
        if isinstance(x, dict):
            x_aux = np.zeros(self.quadratic_program.get_num_vars())
            for (i, v) in x.items():
                if isinstance(i, str):
                    i = self.quadratic_program.variables_index[i]
                x_aux[i] = v
            x = x_aux
        if isinstance(x, list):
            x = np.array(x)
        return x

    @property
    def bounds(self) -> ExpressionBounds:
        """Lower and upper bound of the expression given the variable bounds.

        Raises:
            QiskitOptimizationError: if any participating variable is unbounded.
        """
        l_b = u_b = 0.0
        for ((ind1, ind2), coeff) in self.to_dict().items():
            x = self.quadratic_program.get_variable(ind1)
            if ((x.lowerbound == (- INFINITY)) or (x.upperbound == INFINITY)):
                raise QiskitOptimizationError(f'Quadratic expression contains an unbounded variable: {x.name}')
            y = self.quadratic_program.get_variable(ind2)
            if ((y.lowerbound == (- INFINITY)) or (y.upperbound == INFINITY)):
                raise QiskitOptimizationError(f'Quadratic expression contains an unbounded variable: {y.name}')
            lst = []
            if (ind1 == ind2):
                # Squared term: include 0 if the variable range straddles zero.
                if ((x.lowerbound * x.upperbound) <= 0.0):
                    lst.append(0.0)
                lst.extend([(x.lowerbound ** 2), (x.upperbound ** 2)])
            else:
                # Cross term: extremes occur at bound combinations.
                lst.extend([(x.lowerbound * y.lowerbound), (x.lowerbound * y.upperbound), (x.upperbound * y.lowerbound), (x.upperbound * y.upperbound)])
            lst2 = [(coeff * val) for val in lst]
            l_b += min(lst2)
            u_b += max(lst2)
        return ExpressionBounds(lowerbound=l_b, upperbound=u_b)

    def __repr__(self):
        from ..translators.prettyprint import expr2str, DEFAULT_TRUNCATE
        return f'<{self.__class__.__name__}: {expr2str(quadratic=self, truncate=DEFAULT_TRUNCATE)}>'

    def __str__(self):
        from ..translators.prettyprint import expr2str
        return f'{expr2str(quadratic=self)}'
class TfEnv(ProxyEnv):
    """ProxyEnv wrapper exposing the wrapped env's spaces as TF spaces."""

    # NOTE(review): the `_property` markers in the transcribed source were
    # garbled `@property` decorators (a bare `_property` name would raise
    # NameError at class-body execution); restored below.
    @property
    def observation_space(self):
        """Observation space converted with to_tf_space."""
        return to_tf_space(self.wrapped_env.observation_space)

    @property
    def action_space(self):
        """Action space converted with to_tf_space."""
        return to_tf_space(self.wrapped_env.action_space)

    @property
    def spec(self):
        """EnvSpec built from the converted observation/action spaces."""
        return EnvSpec(observation_space=self.observation_space, action_space=self.action_space)

    @property
    def vectorized(self):
        """Whether the wrapped env supports vectorized execution."""
        return getattr(self.wrapped_env, 'vectorized', False)

    def vec_env_executor(self, n_envs, max_path_length):
        """Create a vectorized executor wrapping the inner env's executor."""
        return VecTfEnv(self.wrapped_env.vec_env_executor(n_envs=n_envs, max_path_length=max_path_length))

    @classmethod
    def wrap(cls, env_cls, **extra_kwargs):
        """Return a lazily-constructed wrapped class (cls parameter implies classmethod)."""
        return WrappedCls(cls, env_cls, extra_kwargs)
def fill_and_finalize_subplot(category, data_to_plot, accept_classes, axis, max_depth):
    """Populate *axis* with the plot matching *category*.

    PR and AP categories delegate to dedicated plot builders; the DDTP-metric
    categories are drawn per accepted class with a standard subplot. Raises
    ValueError for any other category.
    """
    if category == 'PR':
        create_PR_plot(axis, data_to_plot, accept_classes)
    elif category == 'AP':
        create_AP_plot(axis, data_to_plot, accept_classes, max_depth)
    elif category in ('Center_Dist', 'Size_Similarity', 'OS_Yaw', 'OS_Pitch_Roll'):
        axis.set_title(category.replace('_', ' ') + ' (DDTP Metric)')
        is_center_dist = category == 'Center_Dist'
        if is_center_dist:
            axis.set_ylim([0, 25])
            axis.set_ylabel('Distance [m]')
        else:
            axis.set_ylim([0.0, 1.01])
            axis.set_ylabel('Similarity')
        for label in accept_classes:
            entry = data_to_plot[category][label]
            x_vals, y_vals = get_x_y_vals(entry['data'])
            items_scaling = get_available_items_scaling(entry['items'])
            if is_center_dist:
                # Center distance is stored normalized; convert back to meters.
                y_vals = [(1 - y) * max_depth for y in y_vals]
            fill_standard_subplot(axis, x_vals, y_vals, label, items_scaling, max_depth)
    else:
        raise ValueError('Unsupported category, got {}.'.format(category))
def get_stack_info(frames, transformer=transform, capture_locals=True, frame_allowance=25):
    """Build a serializable description of a traceback's frames.

    Each item of *frames* is either a (frame, lineno) pair or a frame object.
    Frames whose locals define a truthy ``__traceback_hide__`` are skipped.
    Returns ``{'frames': [...]}`` with the list trimmed by slim_frame_data.
    """
    # Hide this helper itself from any stack capture that honors the flag.
    __traceback_hide__ = True
    result = []
    for frame_info in frames:
        if isinstance(frame_info, (list, tuple)):
            (frame, lineno) = frame_info
        else:
            frame = frame_info
            lineno = frame_info.f_lineno
        f_locals = getattr(frame, 'f_locals', {})
        if _getitem_from_frame(f_locals, '__traceback_hide__'):
            continue
        f_globals = getattr(frame, 'f_globals', {})
        f_code = getattr(frame, 'f_code', None)
        if f_code:
            abs_path = frame.f_code.co_filename
            function = frame.f_code.co_name
        else:
            abs_path = None
            function = None
        loader = _getitem_from_frame(f_globals, '__loader__')
        module_name = _getitem_from_frame(f_globals, '__name__')
        if lineno:
            # Convert 1-based frame lineno to the 0-based index used by
            # get_lines_from_file; converted back when reported below.
            lineno -= 1
        if ((lineno is not None) and abs_path):
            (pre_context, context_line, post_context) = get_lines_from_file(abs_path, lineno, 5, loader, module_name)
        else:
            (pre_context, context_line, post_context) = (None, None, None)
        try:
            # Derive a path relative to the top-level package directory.
            base_filename = sys.modules[module_name.split('.', 1)[0]].__file__
            filename = abs_path.split(base_filename.rsplit(os.sep, 2)[0], 1)[(- 1)].lstrip(os.sep)
        except Exception:
            # Fall back to the absolute path on any lookup/split failure.
            filename = abs_path
        if (not filename):
            filename = abs_path
        frame_result = {'abs_path': abs_path, 'filename': filename, 'module': (module_name or None), 'function': (function or '<unknown>'), 'lineno': (lineno + 1)}
        if capture_locals:
            f_vars = get_frame_locals(frame, transformer=transformer)
            if f_vars:
                frame_result['vars'] = f_vars
        if (context_line is not None):
            frame_result.update({'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context})
        result.append(frame_result)
    stackinfo = {'frames': slim_frame_data(result, frame_allowance=frame_allowance)}
    return stackinfo
def _build_warm_up_scheduler(optimizer, epochs=50, last_epoch=-1):
    """Compose a warm-up LR schedule followed by the normal schedule.

    The warm-up phase length comes from cfg.TRAIN.LR_WARMUP.EPOCH; the normal
    schedule covers the remaining epochs.
    """
    warmup_epochs = cfg.TRAIN.LR_WARMUP.EPOCH
    warmup_sched = _build_lr_scheduler(optimizer, cfg.TRAIN.LR_WARMUP, warmup_epochs, last_epoch)
    normal_sched = _build_lr_scheduler(optimizer, cfg.TRAIN.LR, epochs - warmup_epochs, last_epoch)
    return WarmUPScheduler(optimizer, warmup_sched, normal_sched, epochs, last_epoch)
class SpecificEquityTrades(object):
    """Iterable source of synthetic trade events for a fixed set of sids.

    Events are generated lazily over [start, end] with the given delta;
    ``rewind`` restarts the stream from the beginning.
    """

    def __init__(self, trading_calendar, asset_finder, sids, start, end, delta, count=500):
        # NOTE(review): asset_finder is accepted but not stored — presumably
        # kept for interface compatibility; confirm against callers.
        self.trading_calendar = trading_calendar
        self.count = count
        self.start = start
        self.end = end
        self.delta = delta
        self.sids = sids
        self.generator = self.create_fresh_generator()

    def __iter__(self):
        return self

    def next(self):
        # BUG FIX: previously called self.generator.next(), which only exists
        # on Python 2 generators; delegate to __next__ so the Py2-style alias
        # also works on Python 3.
        return self.__next__()

    def __next__(self):
        return next(self.generator)

    def rewind(self):
        """Restart the event stream from the beginning."""
        self.generator = self.create_fresh_generator()

    def update_source_id(self, gen):
        """Yield events from *gen*, stamping each with this source's hash."""
        for event in gen:
            # NOTE(review): get_hash is not defined in this class — presumably
            # provided by a subclass or mixin; confirm.
            event.source_id = self.get_hash()
            (yield event)

    def create_fresh_generator(self):
        """Build a generator of one trade event per (date, sid) pair."""
        date_generator = date_gen(start=self.start, end=self.end, delta=self.delta, trading_calendar=self.trading_calendar)
        # Prices and amounts cycle deterministically with the event index.
        return (create_trade(sid=sid, price=(float((i % 10)) + 1.0), amount=(((i * 50) % 900) + 100), datetime=date) for ((i, date), sid) in itertools.product(enumerate(date_generator), self.sids))
def _is_valid_bn_fold(conv: LayerType, fold_backward: bool) -> bool:
valid = True
if (not fold_backward):
if isinstance(conv, (torch.nn.Conv2d, torch.nn.Conv1d, torch.nn.Conv3d)):
valid &= all(((item == 0) for item in conv.padding))
valid &= (conv.groups == 1)
elif isinstance(conv, torch.nn.ConvTranspose2d):
valid = False
elif isinstance(conv, torch.nn.ConvTranspose2d):
valid &= (conv.groups in (1, conv.in_channels))
return valid |
def test_config_parsing_errors() -> None:
    """All expected config errors are reported for the broken pylintrc fixture."""
    fixture = os.path.join(os.path.dirname(__file__), 'file_fixtures', 'test_with_errors.pylintrc')
    reporter = python_ta.reset_linter(config=fixture).reporter
    reported_ids = {msg.msg_id for msg_list in reporter.messages.values() for msg in msg_list}
    assert all(error in reported_ids for error in CONFIG_ERRORS_TO_CHECK)
def run_worker(rank, world_size, num_gpus, train_loader, test_loader):
    """Initialize RPC for this trainer rank, run the training loop, shut down."""
    print(f'Worker rank {rank} initializing RPC')
    worker_name = f'trainer_{rank}'
    rpc.init_rpc(name=worker_name, rank=rank, world_size=world_size)
    print(f'Worker {rank} done initializing RPC')
    run_training_loop(rank, num_gpus, train_loader, test_loader)
    # Block until all RPC work across the group is complete.
    rpc.shutdown()
class DescribeColorFormat():
    """Unit tests for ColorFormat (pytest fixture-driven).

    NOTE(review): the bare ``(params=[...])`` lines in the transcribed source
    were stripped ``@pytest.fixture(params=...)`` decorators (the bare form is
    a SyntaxError); restored below.
    """

    def it_knows_its_color_type(self, type_fixture):
        (color_format, expected_value) = type_fixture
        assert (color_format.type == expected_value)

    def it_knows_its_RGB_value(self, rgb_get_fixture):
        (color_format, expected_value) = rgb_get_fixture
        assert (color_format.rgb == expected_value)

    def it_can_change_its_RGB_value(self, rgb_set_fixture):
        (color_format, new_value, expected_xml) = rgb_set_fixture
        color_format.rgb = new_value
        assert (color_format._element.xml == expected_xml)

    def it_knows_its_theme_color(self, theme_color_get_fixture):
        (color_format, expected_value) = theme_color_get_fixture
        assert (color_format.theme_color == expected_value)

    def it_can_change_its_theme_color(self, theme_color_set_fixture):
        (color_format, new_value, expected_xml) = theme_color_set_fixture
        color_format.theme_color = new_value
        assert (color_format._element.xml == expected_xml)

    # --- fixtures ---------------------------------------------------------

    @pytest.fixture(params=[('w:r', None), ('w:r/w:rPr', None), ('w:r/w:rPr/w:color{w:val=auto}', None), ('w:r/w:rPr/w:color{w:val=4224FF}', '4224ff'), ('w:r/w:rPr/w:color{w:val=auto,w:themeColor=accent1}', None), ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}', 'f00ba9')])
    def rgb_get_fixture(self, request):
        (r_cxml, rgb) = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_value = (None if (rgb is None) else RGBColor.from_string(rgb))
        return (color_format, expected_value)

    @pytest.fixture(params=[('w:r', RGBColor(10, 20, 30), 'w:r/w:rPr/w:color{w:val=0A141E}'), ('w:r/w:rPr', RGBColor(1, 2, 3), 'w:r/w:rPr/w:color{w:val=010203}'), ('w:r/w:rPr/w:color{w:val=123abc}', RGBColor(42, 24, 99), 'w:r/w:rPr/w:color{w:val=2A1863}'), ('w:r/w:rPr/w:color{w:val=auto}', RGBColor(16, 17, 18), 'w:r/w:rPr/w:color{w:val=101112}'), ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', RGBColor(24, 42, 99), 'w:r/w:rPr/w:color{w:val=182A63}'), ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', None, 'w:r/w:rPr'), ('w:r', None, 'w:r')])
    def rgb_set_fixture(self, request):
        (r_cxml, new_value, expected_cxml) = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_xml = xml(expected_cxml)
        return (color_format, new_value, expected_xml)

    @pytest.fixture(params=[('w:r', None), ('w:r/w:rPr', None), ('w:r/w:rPr/w:color{w:val=auto}', None), ('w:r/w:rPr/w:color{w:val=4224FF}', None), ('w:r/w:rPr/w:color{w:themeColor=accent1}', 'ACCENT_1'), ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=dark1}', 'DARK_1')])
    def theme_color_get_fixture(self, request):
        (r_cxml, value) = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_value = (None if (value is None) else getattr(MSO_THEME_COLOR, value))
        return (color_format, expected_value)

    @pytest.fixture(params=[('w:r', 'ACCENT_1', 'w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent1}'), ('w:r/w:rPr', 'ACCENT_2', 'w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent2}'), ('w:r/w:rPr/w:color{w:val=101112}', 'ACCENT_3', 'w:r/w:rPr/w:color{w:val=101112,w:themeColor=accent3}'), ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', 'LIGHT_2', 'w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=light2}'), ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', None, 'w:r/w:rPr'), ('w:r', None, 'w:r')])
    def theme_color_set_fixture(self, request):
        (r_cxml, member, expected_cxml) = request.param
        color_format = ColorFormat(element(r_cxml))
        new_value = (None if (member is None) else getattr(MSO_THEME_COLOR, member))
        expected_xml = xml(expected_cxml)
        return (color_format, new_value, expected_xml)

    @pytest.fixture(params=[('w:r', None), ('w:r/w:rPr', None), ('w:r/w:rPr/w:color{w:val=auto}', MSO_COLOR_TYPE.AUTO), ('w:r/w:rPr/w:color{w:val=4224FF}', MSO_COLOR_TYPE.RGB), ('w:r/w:rPr/w:color{w:themeColor=dark1}', MSO_COLOR_TYPE.THEME), ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}', MSO_COLOR_TYPE.THEME)])
    def type_fixture(self, request):
        (r_cxml, expected_value) = request.param
        color_format = ColorFormat(element(r_cxml))
        return (color_format, expected_value)
class MultiprocessingDriver():
    """Shared-state driver that proxies storage and lock operations over IPC."""

    def __init__(self):
        # Local cache of shared memory dicts, keyed by memory id.
        self._memories = {}

    def __reduce__(self):
        # Pickled copies are rebuilt fresh in the receiving process.
        return (rebuild_driver, tuple())

    def _command(self, command, arg):
        # All remote operations go through the current process's IPC channel.
        ipc = multiprocessing.current_process().ipc
        return ipc.command(command, arg)

    def get(self, memory_id):
        """Return ``(memory, is_new)``; fetched over IPC on first access."""
        is_new = False
        if (memory_id not in self._memories):
            (memory, is_new) = self._command('shared.get', memory_id)
            self._memories[memory_id] = memory
        return (self._memories[memory_id], is_new)

    def lock_acquire(self, lock_id):
        self._command('shared.lock_acquire', lock_id)

    def lock_release(self, lock_id):
        self._command('shared.lock_release', lock_id)

    def lock_status(self, lock_id):
        return self._command('shared.lock_status', lock_id)

    def import_data(self, data):
        """Merge a snapshot produced by export_data into shared state."""
        for (memory_id, values) in data['storage'].items():
            # BUG FIX: get() returns a (memory, is_new) tuple, and each memory
            # must be updated with its own exported values — previously the
            # tuple itself was "updated" with the entire snapshot dict.
            (memory, _) = self.get(memory_id)
            memory.update(values)
        if len(data['locks']):
            self._command('shared.lock_import', data['locks'])

    def export_data(self):
        """Snapshot all cached memories plus the server-side lock table."""
        result = {'storage': {}}
        for (memory_id, data) in self._memories.items():
            result['storage'][memory_id] = dict(data)
        result['locks'] = self._command('shared.lock_export', None)
        return result
def mouseRelease(widget, pos, button, modifier=None):
    """Send a synthetic MouseButtonRelease event to *widget* at *pos*."""
    # Graphics views receive input on their viewport, not the view itself.
    if isinstance(widget, QtWidgets.QGraphicsView):
        widget = widget.viewport()
    modifier = QtCore.Qt.KeyboardModifier.NoModifier if modifier is None else modifier
    global_pos = QtCore.QPointF(widget.mapToGlobal(pos.toPoint()))
    release_event = QtGui.QMouseEvent(QtCore.QEvent.Type.MouseButtonRelease, pos, global_pos, button, QtCore.Qt.MouseButton.NoButton, modifier)
    QtWidgets.QApplication.sendEvent(widget, release_event)
def load_pretrained(model, url, filter_fn=None, strict=True):
    """Load pretrained weights from *url* into *model*.

    With an empty URL the model keeps its random initialization and a warning
    is logged. An optional *filter_fn* may rewrite the state dict first.
    """
    if not url:
        logging.warning('Pretrained model URL is empty, using random initialization. Did you intend to use a `tf_` variant of the model?')
        return
    weights = load_state_dict_from_url(url, progress=False, map_location='cpu')
    if filter_fn is not None:
        weights = filter_fn(weights)
    model.load_state_dict(weights, strict=strict)
@app.route('/convert', methods=['GET', 'POST'])
def convert():
    """Render the submitted Jinja2 template against user-provided values.

    Values come from generated dummy data, JSON, or YAML depending on the
    form's ``input_type``; any template or value error is returned to the
    client as a plain-text message. (The bare ``('/convert', ...)`` line in
    the transcribed source was a stripped ``@app.route`` decorator.)
    """
    jinja2_env = Environment()
    custom_filters = get_custom_filters()
    app.logger.debug(('Add the following customer filters to Jinja environment: %s' % ', '.join(custom_filters.keys())))
    jinja2_env.filters.update(custom_filters)
    try:
        jinja2_tpl = jinja2_env.from_string(request.form['template'])
    except (exceptions.TemplateSyntaxError, exceptions.TemplateError) as e:
        return 'Syntax error in jinja2 template: {0}'.format(e)
    dummy_values = ['Lorem', 'Ipsum', 'Amet', 'Elit', 'Expositum', 'Dissimile', 'Superiori', 'Laboro', 'Torquate', 'sunt']
    values = {}
    if bool(int(request.form['dummyvalues'])):
        # Fill every undeclared template variable with a random dummy word.
        vars_to_fill = meta.find_undeclared_variables(jinja2_env.parse(request.form['template']))
        for v in vars_to_fill:
            values[v] = choice(dummy_values)
    elif (request.form['input_type'] == 'json'):
        try:
            values = json.loads(request.form['values'])
        except ValueError as e:
            return 'Value error in JSON: {0}'.format(e)
    elif (request.form['input_type'] == 'yaml'):
        try:
            # SECURITY: yaml.load on untrusted input can construct arbitrary
            # Python objects — prefer yaml.safe_load (left as-is for review).
            values = yaml.load(request.form['values'])
        except (ValueError, yaml.parser.ParserError, TypeError) as e:
            return 'Value error in YAML: {0}'.format(e)
    else:
        return 'Undefined input_type: {0}'.format(request.form['input_type'])
    try:
        rendered_jinja2_tpl = jinja2_tpl.render(values)
    except (exceptions.TemplateRuntimeError, ValueError, TypeError) as e:
        return 'Error in your values input filed: {0}'.format(e)
    if bool(int(request.form['showwhitespaces'])):
        # NOTE(review): this replaces spaces with an empty string, deleting
        # them — the visible-whitespace marker character was presumably lost
        # in transcoding; confirm the intended replacement against upstream.
        rendered_jinja2_tpl = rendered_jinja2_tpl.replace(' ', u'')
    return escape(rendered_jinja2_tpl).replace('\n', '<br />')
def test_window_by_position():
    """Windows of size 5 bp are created starting at every variant position."""
    ds = simulate_genotype_call_dataset(n_variant=5, n_sample=3, seed=0)
    assert not has_windows(ds)
    ds['variant_position'] = (['variants'], np.array([1, 4, 6, 8, 12]))
    ds = window_by_position(ds, size=5, window_start_position='variant_position')
    assert has_windows(ds)
    expected = {window_contig: [0, 0, 0, 0, 0], window_start: [0, 1, 2, 3, 4], window_stop: [2, 4, 4, 5, 5]}
    for variable, values in expected.items():
        np.testing.assert_equal(ds[variable].values, values)
class DictProperty(object):
    """Descriptor that stores a property's value inside a dict attribute.

    Used as a decorator factory: the decorated function computes the value on
    first access, after which it is cached in ``getattr(obj, attr)[key]``.
    Assignment and deletion operate on that dict unless ``read_only`` is set.
    """

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Adopt the wrapped function's metadata and default the storage key
        # to its name.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            # Compute lazily and cache in the instance's dict.
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError('Read-Only property.')
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError('Read-Only property.')
        del getattr(obj, self.attr)[self.key]
def run_cmd(cmd, throw_on_error=True, env=None, stream_output=False, **kwargs):
    """Run *cmd* as a subprocess with the current environment plus *env*.

    With ``stream_output=True`` the child's output goes to the parent's
    streams and only the exit code is returned; otherwise output is captured
    and ``(exit_code, stdout, stderr)`` is returned. Raises on a non-zero
    exit when *throw_on_error* is set.
    """
    cmd_env = os.environ.copy()
    if env:
        cmd_env.update(env)
    if stream_output:
        proc = subprocess.Popen(cmd, env=cmd_env, **kwargs)
        rc = proc.wait()
        if throw_on_error and rc != 0:
            raise Exception('Non-zero exitcode: %s' % rc)
        return rc
    proc = subprocess.Popen(cmd, env=cmd_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    stdout, stderr = proc.communicate()
    rc = proc.wait()
    if throw_on_error and rc != 0:
        raise Exception('Non-zero exitcode: %s\n\nSTDOUT:\n%s\n\nSTDERR:%s' % (rc, stdout, stderr))
    return (rc, stdout, stderr)
def get_gpu_memory_info(device, unit='G', number_only=False):
    """Report total/used/free memory and usage ratio for a GPU device.

    Values are formatted by num_to_str in the requested *unit*; the ratio is
    a percentage (numeric when *number_only*, else a formatted string).
    """
    device_index = get_device_index(device, optional=True)
    handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
    mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
    total, used, free = (num_to_str(v, unit, number_only=number_only) for v in (mem.total, mem.used, mem.free))
    ratio = mem.used / mem.total
    ratio = ratio * 100 if number_only else f'{(ratio * 100):.1f}%'
    return (total, used, free, ratio)
class TimedStorage(Generic[(KeyType, ValueType)]):
    """Dict-like storage whose entries expire at a DHT timestamp.

    A min-heap ordered by expiration time backs lazy eviction: outdated (or
    size-overflow) entries are purged on access. While ``frozen`` is True,
    expiration is suspended.
    """
    frozen = False

    def __init__(self, maxsize: Optional[int]=None):
        self.maxsize = (maxsize or float('inf'))
        self.data: Dict[(KeyType, ValueWithExpiration[ValueType])] = dict()
        self.expiration_heap: List[HeapEntry[KeyType]] = []
        # Maps each key to its *current* heap entry; superseded heap entries
        # are detected by comparing against this mapping.
        self.key_to_heap: Dict[(KeyType, HeapEntry[KeyType])] = dict()

    def _remove_outdated(self):
        # Pop expired entries (or shrink past maxsize), skipping heap entries
        # that a newer store() for the same key has superseded.
        while ((not self.frozen) and self.expiration_heap and ((self.expiration_heap[ROOT].expiration_time < get_dht_time()) or (len(self.data) > self.maxsize))):
            heap_entry = heapq.heappop(self.expiration_heap)
            if (self.key_to_heap.get(heap_entry.key) == heap_entry):
                del self.data[heap_entry.key], self.key_to_heap[heap_entry.key]

    def store(self, key: KeyType, value: ValueType, expiration_time: DHTExpiration) -> bool:
        """Store value under key; return True iff the value was (re)placed.

        Already-expired values are rejected (unless frozen); an existing key
        is only replaced by a strictly later expiration time.
        """
        if ((expiration_time < get_dht_time()) and (not self.frozen)):
            return False
        self.key_to_heap[key] = HeapEntry(expiration_time, key)
        heapq.heappush(self.expiration_heap, self.key_to_heap[key])
        if (key in self.data):
            if (self.data[key].expiration_time < expiration_time):
                self.data[key] = ValueWithExpiration(value, expiration_time)
                return True
            return False
        self.data[key] = ValueWithExpiration(value, expiration_time)
        self._remove_outdated()
        return True

    def get(self, key: KeyType) -> Optional[ValueWithExpiration[ValueType]]:
        """Return the value with its expiration, or None if absent/expired."""
        self._remove_outdated()
        if (key in self.data):
            return self.data[key]
        return None

    def items(self) -> Iterator[Tuple[(KeyType, ValueWithExpiration[ValueType])]]:
        """Iterate over (key, value-with-expiration) pairs of live entries."""
        self._remove_outdated()
        return ((key, value_and_expiration) for (key, value_and_expiration) in self.data.items())

    def top(self) -> Tuple[(Optional[KeyType], Optional[ValueWithExpiration[ValueType]])]:
        """Return the entry that will expire soonest, or (None, None)."""
        self._remove_outdated()
        if self.data:
            # Discard stale heap entries until the root reflects a live key.
            while (self.key_to_heap.get(self.expiration_heap[ROOT].key) != self.expiration_heap[ROOT]):
                heapq.heappop(self.expiration_heap)
            top_key = self.expiration_heap[ROOT].key
            return (top_key, self.data[top_key])
        return (None, None)

    def clear(self):
        self.data.clear()
        self.key_to_heap.clear()
        self.expiration_heap.clear()

    def discard(self, key: KeyType):
        # The heap entry is left behind on purpose; _remove_outdated() and
        # top() skip entries missing from key_to_heap.
        if (key in self.key_to_heap):
            del self.data[key], self.key_to_heap[key]

    def __contains__(self, key: KeyType):
        self._remove_outdated()
        return (key in self.data)

    def __len__(self):
        self._remove_outdated()
        return len(self.data)

    def __delitem__(self, key: KeyType):
        self.discard(key)

    def __bool__(self):
        return bool(self.data)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.data})'

    def freeze(self):
        """Context manager that suspends entry expiration while active.

        BUG FIX: the transcribed body was a bare generator (its stripped
        ``@contextmanager`` decorator made calling it return a generator, not
        a context manager). Restored via a locally-imported wrapper so no
        top-of-file import is required.
        """
        import contextlib

        @contextlib.contextmanager
        def _frozen():
            (prev_frozen, self.frozen) = (self.frozen, True)
            try:
                (yield self)
            finally:
                self.frozen = prev_frozen
        return _frozen()
def prompt_user_for_input_game_log(window: QtWidgets.QWidget) -> (Path | None):
    """Ask the user to select an existing Randovania seed log file."""
    from randovania.layout.layout_description import LayoutDescription
    file_filter = f'Randovania Game, *.{LayoutDescription.file_extension()}'
    return _prompt_user_for_file(window, caption='Select a Randovania seed log.', filter=file_filter, new_file=False)
def test_top_down_OCHuman_dataset_compatibility():
    """Compatibility checks for TopDownOCHumanDataset.

    Verifies that non-GT bounding boxes are rejected, the legacy config form
    emits a DeprecationWarning, basic metadata is loaded, and mAP evaluation
    works while unsupported metrics raise KeyError.
    """
    dataset = 'TopDownOCHumanDataset'
    dataset_class = DATASETS.get(dataset)
    # Avoid real annotation/COCO loading; this test exercises config handling.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    channel_cfg = dict(num_output_channels=17, dataset_joints=17, dataset_channel=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]], inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
    data_cfg = dict(image_size=[192, 256], heatmap_size=[48, 64], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'], soft_nms=False, nms_thr=1.0, oks_thr=0.9, vis_thr=0.2, use_gt_bbox=True, det_bbox_thr=0.0, bbox_file='')
    # OCHuman only supports ground-truth bboxes; use_gt_bbox=False must assert.
    with pytest.raises(AssertionError):
        data_cfg_copy = copy.deepcopy(data_cfg)
        data_cfg_copy['use_gt_bbox'] = False
        with pytest.warns(DeprecationWarning):
            _ = dataset_class(ann_file='tests/data/ochuman/test_ochuman.json', img_prefix='tests/data/ochuman/', data_cfg=data_cfg_copy, pipeline=[], test_mode=True)
    # Legacy (non-metainfo) construction still works but warns.
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(ann_file='tests/data/ochuman/test_ochuman.json', img_prefix='tests/data/ochuman/', data_cfg=data_cfg, pipeline=[], test_mode=True)
    assert (custom_dataset.test_mode is True)
    assert (custom_dataset.dataset_name == 'ochuman')
    image_id = 1
    assert (image_id in custom_dataset.img_ids)
    assert (len(custom_dataset.img_ids) == 3)
    _ = custom_dataset[0]
    # Round-trip the GT database through evaluation: perfect predictions
    # should score AP == 1.0, and unsupported metrics must raise.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
class SingleDataset(Dataset):
    """Dataset over the samples a selector picks for one anomaly index.

    The selector's get_data() must return a (samples, targets) pair of
    equal-length sequences; an optional transform is applied per sample.
    """

    def __init__(self, anom_idx, x, y, data_selector, transform=None):
        self.transform = transform
        self.selected_data = data_selector.get_data(anom_idx, x, y)

    def __getitem__(self, index):
        sample = self.selected_data[0][index]
        label = self.selected_data[1][index]
        if self.transform is not None:
            sample = self.transform(sample)
        return (sample, label)

    def __len__(self):
        return len(self.selected_data[0])
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, ValueError=ValueError, dict=dict, float=float, GeneratorType=GeneratorType, id=id, int=int, isinstance=isinstance, list=list, long=int, str=str, tuple=tuple):
def _iterencode_list(lst, _current_indent_level):
if (not lst):
(yield '[]')
return
if (markers is not None):
markerid = id(lst)
if (markerid in markers):
raise ValueError('Circular reference detected')
markers[markerid] = lst
buf = '['
if (_indent is not None):
_current_indent_level += 1
newline_indent = ('\n' + (' ' * (_indent * _current_indent_level)))
separator = (_item_separator + newline_indent)
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, str):
(yield (buf + _encoder(value)))
elif (value is None):
(yield (buf + 'null'))
elif (value is True):
(yield (buf + 'true'))
elif (value is False):
(yield (buf + 'false'))
elif isinstance(value, int):
(yield (buf + str(value)))
elif isinstance(value, float):
(yield (buf + _floatstr(value)))
else:
(yield buf)
if isinstance(value, (list, tuple, GeneratorType)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
(yield chunk)
if first:
(yield buf)
if (newline_indent is not None):
_current_indent_level -= 1
(yield ('\n' + (' ' * (_indent * _current_indent_level))))
(yield ']')
if (markers is not None):
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if (not dct):
(yield '{}')
return
if (markers is not None):
markerid = id(dct)
if (markerid in markers):
raise ValueError('Circular reference detected')
markers[markerid] = dct
(yield '{')
if (_indent is not None):
_current_indent_level += 1
newline_indent = ('\n' + (' ' * (_indent * _current_indent_level)))
item_separator = (_item_separator + newline_indent)
(yield newline_indent)
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = list(dct.items())
items.sort(key=(lambda kv: kv[0]))
else:
items = iter(dct.items())
for (key, value) in items:
if isinstance(key, str):
pass
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, int):
key = str(key)
elif (key is True):
key = 'true'
elif (key is False):
key = 'false'
elif (key is None):
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError(('key %r is not a string' % (key,)))
if first:
first = False
else:
(yield item_separator)
(yield _encoder(key))
(yield _key_separator)
if isinstance(value, str):
(yield _encoder(value))
elif (value is None):
(yield 'null')
elif (value is True):
(yield 'true')
elif (value is False):
(yield 'false')
elif isinstance(value, int):
(yield str(value))
elif isinstance(value, float):
(yield _floatstr(value))
else:
if isinstance(value, collections.abc.Mapping):
chunks = _iterencode_dict(value, _current_indent_level)
elif isinstance(value, collections.abc.Iterable):
chunks = _iterencode_list(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
(yield chunk)
if (newline_indent is not None):
_current_indent_level -= 1
(yield ('\n' + (' ' * (_indent * _current_indent_level))))
(yield '}')
if (markers is not None):
del markers[markerid]
def _iterencode(o, _current_indent_level):
    """Yield the JSON text chunks encoding an arbitrary object *o*.

    Scalars are emitted directly; mappings and iterables are delegated
    to _iterencode_dict / _iterencode_list; anything else is converted
    once via the closure's _default() and re-encoded.
    """
    if isinstance(o, str):
        (yield _encoder(o))
    elif (o is None):
        (yield 'null')
    # True/False are tested by identity before the int branch because
    # bool is a subclass of int.
    elif (o is True):
        (yield 'true')
    elif (o is False):
        (yield 'false')
    elif isinstance(o, int):
        (yield str(o))
    elif isinstance(o, float):
        (yield _floatstr(o))
    elif isinstance(o, collections.abc.Mapping):
        for chunk in _iterencode_dict(o, _current_indent_level):
            (yield chunk)
    elif isinstance(o, collections.abc.Iterable):
        for chunk in _iterencode_list(o, _current_indent_level):
            (yield chunk)
    else:
        # Unknown type: guard against cycles through _default(), then
        # encode whatever _default() returned.
        if (markers is not None):
            markerid = id(o)
            if (markerid in markers):
                raise ValueError('Circular reference detected')
            markers[markerid] = o
        o = _default(o)
        for chunk in _iterencode(o, _current_indent_level):
            (yield chunk)
        if (markers is not None):
            del markers[markerid]
return _iterencode |
# NOTE(review): the decorator prefix was lost in extraction (a bare
# `.parametrize(...)` line is invalid syntax); restored as the standard
# pytest parametrization over ENV_IDS.
@pytest.mark.parametrize('env_id', ENV_IDS)
def test_envs(env_id):
    """Smoke-test one registered environment under every observation mode.

    For each obs_mode: build the env, reset it, take five random actions,
    then close and drop it so resources are released before the next mode.
    """
    OBS_MODES = ['state_dict', 'state', 'rgbd', 'pointcloud']
    for obs_mode in OBS_MODES:
        env: BaseEnv = gym.make(env_id, obs_mode=obs_mode)
        env.reset()
        action_space = env.action_space
        for _ in range(5):
            env.step(action_space.sample())
        env.close()
        del env
# NOTE(review): decorator prefix restored — the bare `.parametrize(...)`
# line left by extraction is invalid syntax.
@pytest.mark.parametrize('base,other', [pytest.param(Version.parse('3.0.0'), Version.parse('3.0.0-1'), id='post'), pytest.param(Version.parse('3.0.0'), Version.parse('3.0.0+local.1'), id='local')])
def test_allows_post_releases_with_min(base: Version, other: Version) -> None:
    """A min-inclusive range must admit post/local releases of its minimum."""
    # Renamed from `range` to avoid shadowing the builtin.
    version_range = VersionRange(min=base, include_min=True)
    assert version_range.allows(other)
class Agilent34450A(Instrument):
    """Represents the HP/Agilent/Keysight 34450A digital multimeter and
    provides a high-level interface for its measurement functions
    (voltage, current, resistance, frequency, temperature, diode,
    capacitance, continuity).
    """

    # Map Python booleans onto the instrument's 1/0 flags.
    BOOLS = {True: 1, False: 0}
    # Human-readable mode name -> SCPI configure suffix.
    MODES = {'current': 'CURR', 'ac current': 'CURR:AC', 'voltage': 'VOLT', 'ac voltage': 'VOLT:AC', 'resistance': 'RES', '4w resistance': 'FRES', 'current frequency': 'FREQ:ACI', 'voltage frequency': 'FREQ:ACV', 'continuity': 'CONT', 'diode': 'DIOD', 'temperature': 'TEMP', 'capacitance': 'CAP'}

    # BUGFIX: the @property / @mode.setter decorators were missing, so the
    # second `mode` definition silently shadowed the first and assignments
    # like `self.mode = 'ac current'` just rebound an attribute instead of
    # configuring the instrument.
    @property
    def mode(self):
        """The active measurement mode, as one of the MODES keys."""
        get_command = ':configure?'
        vals = self._conf_parser(self.values(get_command))
        # Invert MODES to translate the instrument's reply back to a key.
        inv_modes = {v: k for (k, v) in self.MODES.items()}
        mode = inv_modes[vals[0]]
        return mode

    @mode.setter
    def mode(self, value):
        if (value in self.MODES):
            if (value not in ['current frequency', 'voltage frequency']):
                self.write((':configure:' + self.MODES[value]))
            else:
                # Frequency modes first configure the matching AC function
                # (recursing through this setter), then switch to FREQ.
                if (value == 'current frequency'):
                    self.mode = 'ac current'
                else:
                    self.mode = 'ac voltage'
                self.write(':configure:freq')
        else:
            raise ValueError(f'Value {value} is not a supported mode for this device.')

    current = Instrument.measurement(':READ?', ' Reads a DC current measurement in Amps, based on the\n active :attr:`~.Agilent34450A.mode`. ')
    current_ac = Instrument.measurement(':READ?', ' Reads an AC current measurement in Amps, based on the\n active :attr:`~.Agilent34450A.mode`. ')
    current_range = Instrument.control(':SENS:CURR:RANG?', ':SENS:CURR:RANG:AUTO 0;:SENS:CURR:RANG %s', ' A property that controls the DC current range in\n Amps, which can take values 100E-6, 1E-3, 10E-3, 100E-3, 1, 10,\n as well as "MIN", "MAX", or "DEF" (100 mA).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.0001, 0.001, 0.01, 0.1, 1, 10, 'MIN', 'DEF', 'MAX'])
    current_auto_range = Instrument.control(':SENS:CURR:RANG:AUTO?', ':SENS:CURR:RANG:AUTO %d', ' A boolean property that toggles auto ranging for DC current. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    current_resolution = Instrument.control(':SENS:CURR:RES?', ':SENS:CURR:RES %s', ' A property that controls the resolution in the DC current\n readings, which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", and "DEF" (3.00E-5). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    current_ac_range = Instrument.control(':SENS:CURR:AC:RANG?', ':SENS:CURR:AC:RANG:AUTO 0;:SENS:CURR:AC:RANG %s', ' A property that controls the AC current range in Amps, which can take\n values 10E-3, 100E-3, 1, 10, as well as "MIN", "MAX", or "DEF" (100 mA).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.01, 0.1, 1, 10, 'MIN', 'MAX', 'DEF'])
    current_ac_auto_range = Instrument.control(':SENS:CURR:AC:RANG:AUTO?', ':SENS:CURR:AC:RANG:AUTO %d', ' A boolean property that toggles auto ranging for AC current. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    current_ac_resolution = Instrument.control(':SENS:CURR:AC:RES?', ':SENS:CURR:AC:RES %s', ' An property that controls the resolution in the AC current\n readings, which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", or "DEF" (1.50E-6). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    voltage = Instrument.measurement(':READ?', ' Reads a DC voltage measurement in Volts, based on the\n active :attr:`~.Agilent34450A.mode`. ')
    voltage_ac = Instrument.measurement(':READ?', ' Reads an AC voltage measurement in Volts, based on the\n active :attr:`~.Agilent34450A.mode`. ')
    voltage_range = Instrument.control(':SENS:VOLT:RANG?', ':SENS:VOLT:RANG:AUTO 0;:SENS:VOLT:RANG %s', ' A property that controls the DC voltage range in Volts, which\n can take values 100E-3, 1, 10, 100, 1000, as well as "MIN", "MAX", or\n "DEF" (10 V). Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.1, 1, 10, 100, 1000, 'MAX', 'MIN', 'DEF'])
    voltage_auto_range = Instrument.control(':SENS:VOLT:RANG:AUTO?', ':SENS:VOLT:RANG:AUTO %d', ' A boolean property that toggles auto ranging for DC voltage. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    voltage_resolution = Instrument.control(':SENS:VOLT:RES?', ':SENS:VOLT:RES %s', ' A property that controls the resolution in the DC voltage\n readings, which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", or "DEF" (1.50E-6). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    voltage_ac_range = Instrument.control(':SENS:VOLT:AC:RANG?', ':SENS:VOLT:RANG:AUTO 0;:SENS:VOLT:AC:RANG %s', ' A property that controls the AC voltage range in Volts, which can\n take values 100E-3, 1, 10, 100, 750, as well as "MIN", "MAX", or "DEF"\n (10 V).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.1, 1, 10, 100, 750, 'MAX', 'MIN', 'DEF'])
    voltage_ac_auto_range = Instrument.control(':SENS:VOLT:AC:RANG:AUTO?', ':SENS:VOLT:AC:RANG:AUTO %d', ' A boolean property that toggles auto ranging for AC voltage. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    voltage_ac_resolution = Instrument.control(':SENS:VOLT:AC:RES?', ':SENS:VOLT:AC:RES %s', ' A property that controls the resolution in the AC voltage readings,\n which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", or "DEF" (1.50E-6). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    resistance = Instrument.measurement(':READ?', ' Reads a resistance measurement in Ohms for 2-wire\n configuration, based on the active\n :attr:`~.Agilent34450A.mode`. ')
    resistance_4w = Instrument.measurement(':READ?', ' Reads a resistance measurement in Ohms for\n 4-wire configuration, based on the active\n :attr:`~.Agilent34450A.mode`. ')
    # BUGFIX: the 10E6 and 100E6 entries below were corrupted to `.0, .0`;
    # restored to match the documented value list.
    resistance_range = Instrument.control(':SENS:RES:RANG?', ':SENS:RES:RANG:AUTO 0;:SENS:RES:RANG %s', ' A property that controls the 2-wire resistance range in Ohms, which can\n take values 100, 1E3, 10E3, 100E3, 1E6, 10E6, 100E6, as well as "MIN", "MAX",\n or "DEF" (1E3).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[100, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, 100000000.0, 'MAX', 'MIN', 'DEF'])
    resistance_auto_range = Instrument.control(':SENS:RES:RANG:AUTO?', ':SENS:RES:RANG:AUTO %d', ' A boolean property that toggles auto ranging for 2-wire resistance. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    resistance_resolution = Instrument.control(':SENS:RES:RES?', ':SENS:RES:RES %s', ' A property that controls the resolution in the 2-wire\n resistance readings, which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", or "DEF" (1.50E-6). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    resistance_4w_range = Instrument.control(':SENS:FRES:RANG?', ':SENS:FRES:RANG:AUTO 0;:SENS:FRES:RANG %s', ' A property that controls the 4-wire resistance range\n in Ohms, which can take values 100, 1E3, 10E3, 100E3, 1E6, 10E6, 100E6,\n as well as "MIN", "MAX", or "DEF" (1E3).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[100, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, 100000000.0, 'MAX', 'MIN', 'DEF'])
    resistance_4w_auto_range = Instrument.control(':SENS:FRES:RANG:AUTO?', ':SENS:FRES:RANG:AUTO %d', ' A boolean property that toggles auto ranging for 4-wire resistance. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    resistance_4w_resolution = Instrument.control(':SENS:FRES:RES?', ':SENS:FRES:RES %s', ' A property that controls the resolution in the 4-wire\n resistance readings, which can take values 3.00E-5, 2.00E-5, 1.50E-6 (5 1/2 digits),\n as well as "MIN", "MAX", or "DEF" (1.50E-6). ', validator=strict_discrete_set, values=[3e-05, 2e-05, 1.5e-06, 'MAX', 'MIN', 'DEF'])
    frequency = Instrument.measurement(':READ?', ' Reads a frequency measurement in Hz, based on the\n active :attr:`~.Agilent34450A.mode`. ')
    frequency_current_range = Instrument.control(':SENS:FREQ:CURR:RANG?', ':SENS:FREQ:CURR:RANG:AUTO 0;:SENS:FREQ:CURR:RANG %s', ' A property that controls the current range in Amps for frequency on AC current\n measurements, which can take values 10E-3, 100E-3, 1, 10, as well as "MIN",\n "MAX", or "DEF" (100 mA).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.01, 0.1, 1, 10, 'MIN', 'MAX', 'DEF'])
    frequency_current_auto_range = Instrument.control(':SENS:FREQ:CURR:RANG:AUTO?', ':SENS:FREQ:CURR:RANG:AUTO %d', ' Boolean property that toggles auto ranging for AC current in frequency measurements.', validator=strict_discrete_set, values=BOOLS, map_values=True)
    frequency_voltage_range = Instrument.control(':SENS:FREQ:VOLT:RANG?', ':SENS:FREQ:VOLT:RANG:AUTO 0;:SENS:FREQ:VOLT:RANG %s', ' A property that controls the voltage range in Volts for frequency on AC voltage\n measurements, which can take values 100E-3, 1, 10, 100, 750,\n as well as "MIN", "MAX", or "DEF" (10 V).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[0.1, 1, 10, 100, 750, 'MAX', 'MIN', 'DEF'])
    frequency_voltage_auto_range = Instrument.control(':SENS:FREQ:VOLT:RANG:AUTO?', ':SENS:FREQ:VOLT:RANG:AUTO %d', 'Boolean property that toggles auto ranging for AC voltage in frequency measurements. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    frequency_aperture = Instrument.control(':SENS:FREQ:APER?', ':SENS:FREQ:APER %s', ' A property that controls the frequency aperture in seconds,\n which sets the integration period and measurement speed. Takes values\n 100 ms, 1 s, as well as "MIN", "MAX", or "DEF" (1 s). ', validator=strict_discrete_set, values=[0.1, 1, 'MIN', 'MAX', 'DEF'])
    temperature = Instrument.measurement(':READ?', ' Reads a temperature measurement in Celsius, based on the active :attr:`~.Agilent34450A.mode`.\n ')
    diode = Instrument.measurement(':READ?', ' Reads a diode measurement in Volts, based on the active :attr:`~.Agilent34450A.mode`.\n ')
    capacitance = Instrument.measurement(':READ?', ' Reads a capacitance measurement in Farads, based on the active :attr:`~.Agilent34450A.mode`.\n ')
    capacitance_range = Instrument.control(':SENS:CAP:RANG?', ':SENS:CAP:RANG:AUTO 0;:SENS:CAP:RANG %s', ' A property that controls the capacitance range\n in Farads, which can take values 1E-9, 10E-9, 100E-9, 1E-6, 10E-6, 100E-6,\n 1E-3, 10E-3, as well as "MIN", "MAX", or "DEF" (1E-6).\n Auto-range is disabled when this property is set. ', validator=strict_discrete_set, values=[1e-09, 1e-08, 1e-07, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 'MAX', 'MIN', 'DEF'])
    capacitance_auto_range = Instrument.control(':SENS:CAP:RANG:AUTO?', ':SENS:CAP:RANG:AUTO %d', ' A boolean property that toggles auto ranging for capacitance. ', validator=strict_discrete_set, values=BOOLS, map_values=True)
    continuity = Instrument.measurement(':READ?', ' Reads a continuity measurement in Ohms,\n based on the active :attr:`~.Agilent34450A.mode`. ')

    def __init__(self, adapter, name='HP/Agilent/Keysight 34450A Multimeter', **kwargs):
        """Connect to the instrument and clear any pending errors.

        :param adapter: VISA adapter (or resource name) for the connection.
        :param name: Human-readable instrument name.
        """
        super().__init__(adapter, name, timeout=10000, **kwargs)
        self.check_errors()

    def configure_voltage(self, voltage_range='AUTO', ac=False, resolution='DEF'):
        """Configure a voltage measurement.

        :param voltage_range: A discrete range value or 'AUTO'.
        :param ac: True for AC voltage, False for DC voltage.
        :param resolution: A discrete resolution value or 'DEF'.
        """
        if (ac is True):
            self.mode = 'ac voltage'
            self.voltage_ac_resolution = resolution
            if (voltage_range == 'AUTO'):
                self.voltage_ac_auto_range = True
            else:
                self.voltage_ac_range = voltage_range
        elif (ac is False):
            self.mode = 'voltage'
            self.voltage_resolution = resolution
            if (voltage_range == 'AUTO'):
                self.voltage_auto_range = True
            else:
                self.voltage_range = voltage_range
        else:
            raise TypeError('Value of ac should be a boolean.')

    def configure_current(self, current_range='AUTO', ac=False, resolution='DEF'):
        """Configure a current measurement.

        :param current_range: A discrete range value or 'AUTO'.
        :param ac: True for AC current, False for DC current.
        :param resolution: A discrete resolution value or 'DEF'.
        """
        if (ac is True):
            self.mode = 'ac current'
            self.current_ac_resolution = resolution
            if (current_range == 'AUTO'):
                self.current_ac_auto_range = True
            else:
                self.current_ac_range = current_range
        elif (ac is False):
            self.mode = 'current'
            self.current_resolution = resolution
            if (current_range == 'AUTO'):
                self.current_auto_range = True
            else:
                self.current_range = current_range
        else:
            raise TypeError('Value of ac should be a boolean.')

    def configure_resistance(self, resistance_range='AUTO', wires=2, resolution='DEF'):
        """Configure a 2-wire or 4-wire resistance measurement.

        :param resistance_range: A discrete range value or 'AUTO'.
        :param wires: 2 or 4, selecting the wiring configuration.
        :param resolution: A discrete resolution value or 'DEF'.
        """
        if (wires == 2):
            self.mode = 'resistance'
            self.resistance_resolution = resolution
            if (resistance_range == 'AUTO'):
                self.resistance_auto_range = True
            else:
                self.resistance_range = resistance_range
        elif (wires == 4):
            self.mode = '4w resistance'
            self.resistance_4w_resolution = resolution
            if (resistance_range == 'AUTO'):
                self.resistance_4w_auto_range = True
            else:
                self.resistance_4w_range = resistance_range
        else:
            # Message typo fixed ("wireresistance meaurement").
            raise ValueError('Incorrect wires value, Agilent 34450A only supports 2 or 4 wire resistance measurement.')

    def configure_frequency(self, measured_from='voltage_ac', measured_from_range='AUTO', aperture='DEF'):
        """Configure a frequency measurement taken from an AC source.

        :param measured_from: 'voltage_ac' or 'current_ac'.
        :param measured_from_range: A discrete range value or 'AUTO'.
        :param aperture: Aperture in seconds, or 'MIN'/'MAX'/'DEF'.
        """
        if (measured_from == 'voltage_ac'):
            self.mode = 'voltage frequency'
            if (measured_from_range == 'AUTO'):
                self.frequency_voltage_auto_range = True
            else:
                self.frequency_voltage_range = measured_from_range
        elif (measured_from == 'current_ac'):
            self.mode = 'current frequency'
            if (measured_from_range == 'AUTO'):
                self.frequency_current_auto_range = True
            else:
                self.frequency_current_range = measured_from_range
        else:
            raise ValueError('Incorrect value for measured_from parameter. Use "voltage_ac" or "current_ac".')
        self.frequency_aperture = aperture

    def configure_temperature(self):
        """Switch the instrument to temperature mode."""
        self.mode = 'temperature'

    def configure_diode(self):
        """Switch the instrument to diode mode."""
        self.mode = 'diode'

    def configure_capacitance(self, capacitance_range='AUTO'):
        """Configure a capacitance measurement.

        :param capacitance_range: A discrete range value or 'AUTO'.
        """
        self.mode = 'capacitance'
        if (capacitance_range == 'AUTO'):
            self.capacitance_auto_range = True
        else:
            self.capacitance_range = capacitance_range

    def configure_continuity(self):
        """Switch the instrument to continuity mode."""
        self.mode = 'continuity'

    def beep(self):
        """Sound a single beep on the instrument."""
        self.write(':SYST:BEEP')

    def _conf_parser(self, conf_values):
        """Normalize a ':configure?' reply into a flat list of tokens.

        Joins list replies, strips quotes/whitespace/commas, and converts
        numeric tokens to float (non-numeric tokens are kept as strings;
        conversion failures are logged and left as-is).
        """
        if isinstance(conf_values, list):
            one_long_string = ', '.join(map(str, conf_values))
        else:
            one_long_string = conf_values
        list_of_elements = re.split('["\\s,]', one_long_string)
        list_without_empty_elements = list(filter((lambda v: (v != '')), list_of_elements))
        for (i, v) in enumerate(list_without_empty_elements):
            try:
                list_without_empty_elements[i] = float(v)
            except ValueError as e:
                # Best-effort: keep the raw string token, just log it.
                log.error(e)
        return list_without_empty_elements
class MultigroupProperty(bpy.types.PropertyGroup, ArrayGetSet, SizeOffsetGetSet):
    """Blender property group for a door/window "multigroup": a row of door
    ('d') and window ('w') components sharing frame, arch, fill and array
    settings.  The `components` string (e.g. 'wdw') defines the layout.
    """

    # Sub-property groups for the arch, repetition array and placement.
    arch: PointerProperty(type=ArchProperty)
    array: PointerProperty(type=ArrayProperty)
    size_offset: PointerProperty(type=SizeOffsetProperty)
    # Fill-style settings, kept separately for doors and for windows.
    panel_fill_door: PointerProperty(type=FillPanel)
    louver_fill_door: PointerProperty(type=FillLouver)
    glass_fill_door: PointerProperty(type=FillGlassPanes)
    bar_fill_window: PointerProperty(type=FillBars)
    panel_fill_window: PointerProperty(type=FillPanel)
    louver_fill_window: PointerProperty(type=FillLouver)
    glass_fill_window: PointerProperty(type=FillGlassPanes)
    frame_thickness: FloatProperty(name='Frame Thickness', min=get_scaled_unit(0.01), max=get_scaled_unit(1.0), default=get_scaled_unit(0.1), unit='LENGTH', description='Thickness of door/window Frame')
    frame_depth: FloatProperty(name='Frame Depth', step=1, min=get_scaled_unit((- 1.0)), max=get_scaled_unit(1.0), default=get_scaled_unit(0.0), unit='LENGTH', description='Depth of door/window Frame')
    window_height: FloatProperty(name='Window Height', step=1, min=get_scaled_unit(0.1), max=get_scaled_unit(1000.0), default=get_scaled_unit(1.0), unit='LENGTH', description='Height of windows')
    dw_depth: FloatProperty(name='Door/Window Depth', min=get_scaled_unit(0.0), max=get_scaled_unit(1.0), default=get_scaled_unit(0.05), unit='LENGTH', description='Depth of door/window')
    add_arch: BoolProperty(name='Add Arch', default=False, description='Add arch over door/window')
    components: StringProperty(name='Components', default='dw', description="Components (Door and Windows): example: 'wdw' for a door surrounded by windows")
    # Enum item lists are plain class attributes (not bpy annotations).
    fill_types_door = [('NONE', 'None', '', 0), ('PANELS', 'Panels', '', 1), ('GLASS_PANES', 'Glass_Panes', '', 2), ('LOUVER', 'Louver', '', 3)]
    show_door_fill: BoolProperty(name='Show Door Fill', default=True, description='Show fill type properties for door')
    fill_type_door: EnumProperty(name='Fill Type Door', items=fill_types_door, default='NONE', description='Type of fill for door')
    fill_types_window = [('NONE', 'None', '', 0), ('PANELS', 'Panels', '', 1), ('GLASS_PANES', 'Glass_Panes', '', 2), ('LOUVER', 'Louver', '', 3), ('BAR', 'Bar', '', 4)]
    show_window_fill: BoolProperty(name='Show Window Fill', default=True, description='Show fill type properties for window')
    fill_type_window: EnumProperty(name='Fill Type Window', items=fill_types_window, default='NONE', description='Type of fill for window')

    def init(self, wall_dimensions):
        """Initialize derived defaults from the host wall's dimensions.

        Stores wall_dimensions as an ID property, then seeds the
        size/offset and arch sub-properties from it.
        """
        self['wall_dimensions'] = wall_dimensions
        # Doors get a taller default (1.8) than windows (1.0).
        def_h = (1.8 if ('d' in str(self.components)) else 1.0)
        self.size_offset.init(((self['wall_dimensions'][0] / self.count), self['wall_dimensions'][1]), default_size=(2.0, def_h), default_offset=(0.0, 0.0), spread=self.array.spread)
        if ('d' not in str(self.components)):
            # Window-only: arch height is limited by remaining space above
            # the (offset) window.
            self.arch.init((((wall_dimensions[1] / 2) - self.size_offset.offset.y) - (self.size_offset.size.y / 2)))
        else:
            self.arch.init((wall_dimensions[1] - self.size_offset.size.y))

    def draw(self, context, layout):
        """Draw the UI panel; box/row order defines the on-screen layout."""
        box = layout.box()
        self.size_offset.draw(context, box)
        # Window height is only meaningful when both doors and windows exist.
        if (('w' in str(self.components)) and ('d' in str(self.components))):
            box.prop(self, 'window_height')
        box = layout.box()
        col = box.column(align=True)
        col.label(text='Components')
        col.prop(self, 'components', text='')
        col = box.column(align=True)
        row = col.row(align=True)
        row.prop(self, 'dw_depth')
        row = col.row(align=True)
        row.prop(self, 'frame_depth')
        row.prop(self, 'frame_thickness')
        self.array.draw(context, box)
        box = layout.box()
        col = box.column(align=True)
        col.prop(self, 'add_arch')
        if self.add_arch:
            self.arch.draw(context, box)
        # Door fill selector + per-type sub-panel.
        box = layout.box()
        sp = box.split(factor=0.05, align=True)
        sp.prop(self, 'show_door_fill', text='')
        sp.prop_menu_enum(self, 'fill_type_door')
        fill_map = {'PANELS': self.panel_fill_door, 'LOUVER': self.louver_fill_door, 'GLASS_PANES': self.glass_fill_door}
        fill = fill_map.get(self.fill_type_door)
        if (fill and self.show_door_fill):
            fill.draw(box)
        # Window fill selector + per-type sub-panel.
        box = layout.box()
        sp = box.split(factor=0.05, align=True)
        sp.prop(self, 'show_window_fill', text='')
        sp.prop_menu_enum(self, 'fill_type_window')
        fill_map = {'BAR': self.bar_fill_window, 'PANELS': self.panel_fill_window, 'LOUVER': self.louver_fill_window, 'GLASS_PANES': self.glass_fill_window}
        fill = fill_map.get(self.fill_type_window)
        if (fill and self.show_window_fill):
            fill.draw(box)
class MemMinionIfcFL(Interface):
    """Function-level memory minion interface exposing read/write/amo
    callee methods, with automatic adapter insertion when connected to a
    CL or RTL master.
    """

    def construct(s, read=None, write=None, amo=None):
        """Create the three callee ports, bound to the given handlers."""
        s.read = CalleeIfcFL(method=read)
        s.write = CalleeIfcFL(method=write)
        s.amo = CalleeIfcFL(method=amo)

    def __str__(s):
        return f'r{s.read}|w{s.write}|a{s.amo}'

    def connect(s, other, parent):
        """Connect to a master interface, inserting a level adapter.

        Returns True when a CL or RTL master was adapted and connected,
        False otherwise (letting the framework try other strategies).
        Each adapter is attached to *parent* under a uniquely numbered
        attribute name.
        """
        if isinstance(other, MemMasterIfcCL):
            m = MemIfcCL2FLAdapter(other.ReqType, other.RespType)
            if hasattr(parent, 'MemIfcCL2FL_count'):
                count = parent.MemIfcCL2FL_count
                setattr(parent, ('MemIfcCL2FL_' + str(count)), m)
            else:
                parent.MemIfcCL2FL_count = 0
                parent.MemIfcCL2FL_0 = m
            # BUGFIX: the adapter's FL side must connect to this minion
            # interface (s), not back to the master (other) — mirroring
            # the RTL branch below.
            connect_pairs(other, m.left, m.right, s)
            parent.MemIfcCL2FL_count += 1
            return True
        elif isinstance(other, MemMasterIfcRTL):
            m = MemIfcRTL2FLAdapter(other.ReqType, other.RespType)
            if hasattr(parent, 'MemIfcRTL2FL_count'):
                count = parent.MemIfcRTL2FL_count
                setattr(parent, ('MemIfcRTL2FL_' + str(count)), m)
            else:
                parent.MemIfcRTL2FL_count = 0
                parent.MemIfcRTL2FL_0 = m
            connect_pairs(other, m.left, m.right, s)
            parent.MemIfcRTL2FL_count += 1
            return True
        return False
def cos_sim(a, b):
    """Return the pairwise cosine-similarity matrix of *a* and *b*.

    Inputs are coerced to torch tensors and promoted to 2-D (a single
    vector becomes a 1-row matrix).  The result has shape
    (a.rows, b.rows), with entry [i, j] = cos(a[i], b[j]).
    """
    a = a if isinstance(a, torch.Tensor) else torch.tensor(a)
    b = b if isinstance(b, torch.Tensor) else torch.tensor(b)
    if a.dim() == 1:
        a = a.unsqueeze(0)
    if b.dim() == 1:
        b = b.unsqueeze(0)
    # L2-normalize rows, so the dot product below is the cosine.
    unit_a = torch.nn.functional.normalize(a, p=2, dim=1)
    unit_b = torch.nn.functional.normalize(b, p=2, dim=1)
    return unit_a @ unit_b.t()
# NOTE(review): the decorator names were stripped by extraction, leaving
# bare argument tuples; restored as the qutebrowser command-registration
# decorators the argument lists correspond to.
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('position', completion=miscmodels.inspector_position)
def devtools(tab: apitypes.Tab, position: apitypes.InspectorPosition = None) -> None:
    """Toggle the developer tools (web inspector) for the current tab.

    :param tab: The tab to operate on (injected by the framework).
    :param position: Where to show the inspector, or None for the default.
    """
    try:
        tab.private_api.toggle_inspector(position)
    except apitypes.InspectorError as e:
        # Surface backend failures as a user-visible command error.
        raise cmdutils.CommandError(e)
class BaseDataPreparing(object):
    """Prepare slot-filling / NER data.

    Converts raw text+tag files to id sequences with a char-level
    tokenizer (and optionally a word-level tokenizer), builds embedding
    matrices, and caches everything as .npy files under the configured
    train/valid data directory.
    """

    def __init__(self, vocab_file, slot_file, config, pretrained_embedding_file=None, word_embedding_file=None, word_seq_embedding_file=None, load_w2v_embedding=True, load_word_embedding=True, gen_new_data=False, is_inference=False):
        """Resolve data paths, build tokenizers and kick off preprocessing.

        :param config: dict-like with data_dir / file-name keys.
        :param gen_new_data: regenerate .npy caches instead of loading them.
        :param is_inference: only transform the test set.
        """
        self.gen_new_data = gen_new_data
        self.train_data_file = os.path.join(config.get('data_dir'), config.get('data_file_name'))
        self.dev_data_file = os.path.join(config.get('data_dir'), config.get('orig_dev'))
        self.test_data_file = os.path.join(config.get('data_dir'), config.get('orig_test'))
        self.train_valid_split_data_path = config.get('train_valid_data_dir')
        self.tokenizer = CustomTokenizer(vocab_file, slot_file)
        self.word_tokenizer = WordTokenizer()
        self.slot_list = list(self.tokenizer.slot2id.values())
        self.slot_label_size = len(self.tokenizer.slot2id)
        if load_w2v_embedding:
            self.word_embedding = gen_char_embedding(pretrained_embedding_file, self.tokenizer.vocab, output_file=word_embedding_file)
        self.init_final_data_path(config, load_word_embedding)
        self.train_samples_nums = 0
        self.eval_samples_nums = 0
        self.is_inference = is_inference
        print('preprocessing data....')
        if (not is_inference):
            if load_word_embedding:
                self.load_word_char_from_orig_data(gen_new_data)
            else:
                self.gen_train_dev_from_orig_data(gen_new_data)
        else:
            self.trans_test_data()
        if load_word_embedding:
            self.word_seq_embedding = gen_char_embedding(pretrained_embedding_file, self.word_tokenizer.vocab, output_file=word_seq_embedding_file)

    def init_final_data_path(self, config, load_word_embedding):
        """Compute the .npy cache paths, creating the cache dir if needed."""
        root_path = ((config.get('data_dir') + '/') + config.get('train_valid_data_dir'))
        if (not os.path.exists(root_path)):
            os.mkdir(root_path)
        self.train_X_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('train_data_text_name'))
        self.valid_X_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('valid_data_text_name'))
        self.train_Y_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('train_data_tag_name'))
        self.valid_Y_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('valid_data_tag_name'))
        self.test_X_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('test_data_text_name'))
        self.test_Y_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('test_data_tag_name'))
        if load_word_embedding:
            self.train_word_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('train_data_text_word_name'))
            self.valid_word_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('valid_data_text_word_name'))
            self.test_word_path = os.path.join(config.get('data_dir'), config.get('train_valid_data_dir'), config.get('test_data_text_word_name'))

    def tranform_singlg_data_example(self, text):
        """Tokenize one text line and convert tokens to vocabulary ids.

        NOTE: the misspelled name ("tranform_singlg") is kept for
        backward compatibility with existing callers.
        """
        word_list = self.tokenizer.tokenize(text)
        word_id_list = self.tokenizer.convert_tokens_to_ids(word_list)
        return word_id_list

    def translate_id_2_slot(self, text, label_list):
        """Decode per-char label ids back into a list of {entity: type} dicts.

        Consecutive non-'O' labels accumulate into one entity; 'PAD' ends
        decoding.  The entity type is the text after the last '-' of the
        label string (e.g. 'B-PER' -> 'PER').
        """
        entity_list = []
        text_list = [w for w in text]
        tmp_entity = ''
        tmp_entity_type = ''
        for (char, label) in zip(text_list, label_list):
            label_string = self.tokenizer.id2slot.get(label)
            if (label_string == 'O'):
                if (tmp_entity != ''):
                    entity_list.append({tmp_entity: tmp_entity_type})
                    tmp_entity = ''
                    tmp_entity_type = ''
            elif (label_string == 'PAD'):
                break
            else:
                tmp_entity += char
                tmp_entity_type = re.split('-', label_string)[(- 1)]
        return entity_list

    def trans_test_data(self):
        """Transform and cache the test split (inference path)."""
        (test_data_X, test_data_Y) = self.trans_orig_data_to_training_data(self.test_data_file)
        test_data_X_word = self.seg_word_for_data(self.test_data_file)
        np.save(self.test_X_path, test_data_X)
        np.save(self.test_Y_path, test_data_Y)
        np.save(self.test_word_path, test_data_X_word)

    def trans_orig_data_to_training_data(self, datas_file):
        """Read a file of alternating text/tag lines into (X, Y) id lists."""
        data_X = []
        data_Y = []
        with codecs.open(datas_file, 'r', 'utf-8') as fr:
            for (index, line) in enumerate(fr):
                line = line.strip('\n')
                # Even lines are text, odd lines are the tag sequence.
                if ((index % 2) == 0):
                    data_X.append(self.tranform_singlg_data_example(line))
                else:
                    slot_list = self.tokenizer.tokenize(line)
                    slot_list = [slots.upper() for slots in slot_list]
                    slot_id_list = self.tokenizer.convert_slot_to_ids(slot_list)
                    data_Y.append(slot_id_list)
        return (data_X, data_Y)

    def gen_train_dev_from_orig_data(self, gen_new):
        """Build (or reload) the char-level train/dev/test caches."""
        if gen_new:
            (train_data_X, train_data_Y) = self.trans_orig_data_to_training_data(self.train_data_file)
            (dev_data_X, dev_data_Y) = self.trans_orig_data_to_training_data(self.dev_data_file)
            (test_data_X, test_data_Y) = self.trans_orig_data_to_training_data(self.test_data_file)
            self.train_samples_nums = len(train_data_X)
            self.eval_samples_nums = len(dev_data_X)
            np.save(self.train_X_path, train_data_X)
            np.save(self.valid_X_path, dev_data_X)
            np.save(self.train_Y_path, train_data_Y)
            np.save(self.valid_Y_path, dev_data_Y)
            np.save(self.test_X_path, test_data_X)
            np.save(self.test_Y_path, test_data_Y)
        else:
            # Caches exist: only reload enough to know the sample counts.
            train_data_X = np.load(self.train_X_path)
            dev_data_X = np.load(self.valid_X_path)
            self.train_samples_nums = len(train_data_X)
            self.eval_samples_nums = len(dev_data_X)

    def gen_one_sample_words_on_chars(self, text):
        """Expand word-level ids so every char carries its word's id.

        E.g. a 2-char word contributes its word id twice.  Asserts the
        expansion matches the number of space-separated chars in *text*.
        """
        (word_ids_split, word_str_split) = self.word_tokenizer.seg(text)
        word_ids_seq_char_list = []
        for (word, word_ids) in zip(word_str_split, word_ids_split):
            word_len = len(word)
            word_ids_seq_char_list.extend(([word_ids] * word_len))
        assert (len(word_ids_seq_char_list) == len(text.split(' ')))
        return word_ids_seq_char_list

    def seg_word_for_data(self, data_file):
        """Produce per-char word ids for every text line of *data_file*."""
        all_word_ids_char_list = []
        with codecs.open(data_file, 'r', 'utf-8') as fr:
            for (index, line) in enumerate(fr):
                line = line.strip('\n')
                if ((index % 2) == 0):
                    all_word_ids_char_list.append(self.gen_one_sample_words_on_chars(line))
        return all_word_ids_char_list

    def load_word_char_from_orig_data(self, gen_new):
        """Build (or reload) the char-level AND word-level caches."""
        if gen_new:
            (train_data_X, train_data_Y) = self.trans_orig_data_to_training_data(self.train_data_file)
            train_data_X_word = self.seg_word_for_data(self.train_data_file)
            (dev_data_X, dev_data_Y) = self.trans_orig_data_to_training_data(self.dev_data_file)
            dev_data_X_word = self.seg_word_for_data(self.dev_data_file)
            (test_data_X, test_data_Y) = self.trans_orig_data_to_training_data(self.test_data_file)
            test_data_X_word = self.seg_word_for_data(self.test_data_file)
            self.train_samples_nums = len(train_data_X)
            self.eval_samples_nums = len(dev_data_X)
            np.save(self.train_X_path, train_data_X)
            np.save(self.valid_X_path, dev_data_X)
            np.save(self.train_Y_path, train_data_Y)
            np.save(self.valid_Y_path, dev_data_Y)
            np.save(self.test_X_path, test_data_X)
            np.save(self.test_Y_path, test_data_Y)
            np.save(self.train_word_path, train_data_X_word)
            np.save(self.valid_word_path, dev_data_X_word)
            np.save(self.test_word_path, test_data_X_word)
        else:
            train_data_X = np.load(self.train_X_path)
            dev_data_X = np.load(self.valid_X_path)
            self.train_samples_nums = len(train_data_X)
            self.eval_samples_nums = len(dev_data_X)
def knn_point(k, xyz1, xyz2):
    """Find the k nearest neighbors in *xyz1* for each query point in *xyz2*.

    Computes all pairwise squared Euclidean distances by broadcasting
    the two point clouds, then keeps the k smallest per query via
    select_top_k.  Returns (val, idx): the k distances and the indices
    of the corresponding points in xyz1.
    """
    batch = xyz1.get_shape()[0].value
    n_ref = xyz1.get_shape()[1].value
    n_dim = xyz1.get_shape()[2].value
    n_query = xyz2.get_shape()[1].value
    # Broadcast both clouds to (batch, n_query, n_ref, n_dim).
    ref_tiled = tf.tile(tf.reshape(xyz1, (batch, 1, n_ref, n_dim)), [1, n_query, 1, 1])
    query_tiled = tf.tile(tf.reshape(xyz2, (batch, n_query, 1, n_dim)), [1, 1, n_ref, 1])
    dist = tf.reduce_sum(((ref_tiled - query_tiled) ** 2), (- 1))
    (top_idx, top_dist) = select_top_k(k, dist)
    idx = tf.slice(top_idx, [0, 0, 0], [(- 1), (- 1), k])
    val = tf.slice(top_dist, [0, 0, 0], [(- 1), (- 1), k])
    return (val, idx)
def make_grouped_dataset(dir):
    """Collect image paths grouped by directory.

    Walks *dir* (following symlinks) in sorted order and returns a list
    of lists: one sorted list of image-file paths per directory that
    contains at least one image.

    :raises AssertionError: if *dir* is not a directory.
    """
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    images = []
    # The original sorted the walk results twice; once is enough.
    for (root, _dirs, fnames) in sorted(os.walk(dir, followlinks=True)):
        paths = [os.path.join(root, f) for f in sorted(fnames) if is_image_file(f)]
        if paths:
            images.append(paths)
    return images
def setUpModule():
    """Build the shared test fixtures for this module once.

    Constructs an H2O molecule in a 6-31G basis, runs RHF and then
    QCISD on it; results are published through the module globals
    (mol, mf, mycc) consumed by the test cases.
    """
    global mol, mf, mycc
    mol = gto.Mole()
    mol.verbose = 7
    # Discard SCF/CC logging during tests.
    mol.output = '/dev/null'
    mol.atom = [[8, (0.0, 0.0, 0.0)], [1, (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]]
    mol.basis = '631g'
    mol.build()
    mf = scf.RHF(mol)
    mf.kernel()
    mycc = qcisd.QCISD(mf)
    mycc.kernel()
def get_labeled_data(pos_pos, pos_neg, neg_pos, neg_neg, total, train_s):
    """Split four labeled sample groups into train and test sets.

    Each group carries a fixed (first, second) binary label pair.
    The first *train_s* samples of each group go to the train set and
    samples train_s..total go to the test set, preserving the group
    order pos_pos, pos_neg, neg_pos, neg_neg.

    :returns: (x_train, x_test), lists of (sample, label1, label2) tuples.
    """
    x_train = []
    x_test = []
    # One data-driven loop replaces the original eight copy-pasted loops.
    for (samples, first_label, second_label) in ((pos_pos, 1, 1), (pos_neg, 1, 0), (neg_pos, 0, 1), (neg_neg, 0, 0)):
        x_train.extend(((x, first_label, second_label) for x in samples[:train_s]))
        x_test.extend(((x, first_label, second_label) for x in samples[train_s:total]))
    return (x_train, x_test)
# NOTE(review): decorator prefix restored — the bare `.parametrize(...)`
# line left by extraction is invalid syntax.
@pytest.mark.parametrize(('max_workers', 'cpu_count', 'side_effect', 'expected_workers'), [(None, 3, None, 7), (3, 4, None, 3), (8, 3, None, 7), (None, 8, NotImplementedError(), 5), (2, 8, NotImplementedError(), 2), (8, 8, NotImplementedError(), 5)])
def test_executor_should_be_initialized_with_correct_workers(tmp_venv: VirtualEnv, pool: RepositoryPool, config: Config, io: BufferedIO, mocker: MockerFixture, max_workers: (int | None), cpu_count: (int | None), side_effect: (Exception | None), expected_workers: int) -> None:
    """Executor worker count respects config, CPU count and fallbacks."""
    config.merge({'installer': {'max-workers': max_workers}})
    # Simulate platforms where os.cpu_count() is unavailable via side_effect.
    mocker.patch('os.cpu_count', return_value=cpu_count, side_effect=side_effect)
    executor = Executor(tmp_venv, pool, config, io)
    assert (executor._max_workers == expected_workers)
def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training):
    """Tokenize SWAG-style multiple-choice examples into BERT input features.

    For each example, every candidate ending is paired with the context as
    ``[CLS] context [SEP] start_ending + ending [SEP]``, truncated to
    ``max_seq_length`` and zero-padded.

    :param examples: iterable of objects with ``context_sentence``,
        ``start_ending``, ``endings``, ``swag_id`` and ``label`` attributes.
    :param tokenizer: WordPiece-style tokenizer with ``tokenize`` and
        ``convert_tokens_to_ids``.
    :param max_seq_length: fixed length of every encoded choice.
    :param is_training: if True, the gold label is also logged.
    :return: list of InputFeatures, one per example.
    """
    features = []
    for (example_index, example) in tqdm(enumerate(examples)):
        context_tokens = tokenizer.tokenize(example.context_sentence)
        start_ending_tokens = tokenizer.tokenize(example.start_ending)
        choices_features = []
        for (ending_index, ending) in enumerate(example.endings):
            # Copy so truncation of this choice never mutates the shared context.
            context_tokens_choice = context_tokens[:]
            ending_tokens = (start_ending_tokens + tokenizer.tokenize(ending))
            # Reserve 3 slots for [CLS] and the two [SEP] markers.
            _truncate_seq_pair(context_tokens_choice, ending_tokens, (max_seq_length - 3))
            tokens = ((((['[CLS]'] + context_tokens_choice) + ['[SEP]']) + ending_tokens) + ['[SEP]'])
            # Segment 0 covers [CLS]+context+[SEP]; segment 1 covers ending+[SEP].
            segment_ids = (([0] * (len(context_tokens_choice) + 2)) + ([1] * (len(ending_tokens) + 1)))
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = ([1] * len(input_ids))
            # Zero-pad ids, mask and segments to the fixed length.
            padding = ([0] * (max_seq_length - len(input_ids)))
            input_ids += padding
            input_mask += padding
            segment_ids += padding
            assert (len(input_ids) == max_seq_length)
            assert (len(input_mask) == max_seq_length)
            assert (len(segment_ids) == max_seq_length)
            choices_features.append((tokens, input_ids, input_mask, segment_ids))
        label = example.label
        # Log the first few examples in full for manual inspection.
        if (example_index < 5):
            logger.info('*** Example ***')
            logger.info('swag_id: {}'.format(example.swag_id))
            for (choice_idx, (tokens, input_ids, input_mask, segment_ids)) in enumerate(choices_features):
                logger.info('choice: {}'.format(choice_idx))
                logger.info('tokens: {}'.format(' '.join(tokens)))
                logger.info('input_ids: {}'.format(' '.join(map(str, input_ids))))
                logger.info('input_mask: {}'.format(' '.join(map(str, input_mask))))
                logger.info('segment_ids: {}'.format(' '.join(map(str, segment_ids))))
            if is_training:
                logger.info('label: {}'.format(label))
        features.append(InputFeatures(example_id=example.swag_id, choices_features=choices_features, label=label))
    return features
def find_structure_handler(a: Attribute, type: Any, c: BaseConverter, prefer_attrs_converters: bool=False) -> (Callable[([Any, Any], Any)] | None):
    """Pick the structuring hook for one attrs attribute, or None.

    Returns None when the attribute's own attrs converter should win
    (either because it is preferred, or because the converter's type has no
    usable hook).  Falls back to ``c.structure`` when no type is known.
    """
    if ((a.converter is not None) and prefer_attrs_converters):
        # Attrs converter takes precedence: signal "no cattrs hook".
        handler = None
    elif ((a.converter is not None) and (not prefer_attrs_converters) and (type is not None)):
        handler = c.get_structure_hook(type)
        # raise_error means "no real hook registered" — let the converter run.
        if (handler == raise_error):
            handler = None
    elif (type is not None):
        if (is_bare_final(type) and (a.default is not NOTHING) and (not isinstance(a.default, Factory))):
            # Bare Final with a concrete default: infer the type from the default.
            type = a.default.__class__
            handler = c.get_structure_hook(type)
            # _structure_call needs the target type as a second argument, so
            # wrap it; _h/closure binds the current handler, `type` is stable
            # after this point.
            if (handler == c._structure_call):
                def handler(v, _, _h=handler):
                    return _h(v, type)
        else:
            handler = c.get_structure_hook(type)
    else:
        # No type information at all: fall back to generic structuring.
        handler = c.structure
    return handler
class CacheControlMixin():
    """View mixin that stamps cache headers onto every response.

    Subclasses may override ``cache_timeout`` (or ``get_cache_timeout`` for
    dynamic values) to control the cache lifetime in seconds.
    """

    # Default cache lifetime, in seconds.
    cache_timeout = 60

    def get_cache_timeout(self):
        """Return the cache timeout in seconds; override for dynamic values."""
        return self.cache_timeout

    def dispatch(self, *args, **kwargs):
        """Delegate to the parent dispatch, then add cache headers."""
        resp = super().dispatch(*args, **kwargs)
        timeout = self.get_cache_timeout()
        patch_response_headers(resp, timeout)
        return resp
def timeConversion(time):
    """Convert a 12-hour clock string 'hh:mm:ssAM'/'hh:mm:ssPM' to 24-hour.

    :param time: string like '07:05:45PM'.
    :return: 24-hour time string like '19:05:45'.
    """
    meridiem = time[(- 2):]
    hour = time[:2]
    rest = time[2:(- 2)]  # ':mm:ss'
    if (meridiem == 'AM'):
        # Midnight hour maps to 00; other AM hours are unchanged.
        return (('00' + rest) if (hour == '12') else (hour + rest))
    if (hour == '12'):
        # Noon stays 12.
        return (hour + rest)
    # Afternoon/evening: shift the hour by 12.
    return (str((int(hour) + 12)) + rest)
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer encoder block.

    Two residual branches applied in sequence: LayerNorm -> multi-head
    attention -> dropout, then LayerNorm -> feed-forward -> dropout.
    Extra keyword arguments are forwarded to MultiHeadAttention.
    """

    def __init__(self, emb_size=225, drop_p=0.5, forward_expansion=4, forward_drop_p=0.0, **kwargs):
        attention_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            MultiHeadAttention(emb_size, **kwargs),
            nn.Dropout(drop_p),
        ))
        feedforward_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            FeedForwardBlock(emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
            nn.Dropout(drop_p),
        ))
        super().__init__(attention_branch, feedforward_branch)
def _make_transport(endpoint: config.EndpointConfiguration) -> TSocket:
    """Build a Thrift socket transport for the given endpoint.

    :param endpoint: configuration whose ``family`` selects TCP vs Unix socket.
    :raises Exception: if the address family is neither AF_INET nor AF_UNIX.
    """
    if (endpoint.family == socket.AF_INET):
        # TCP: address is a (host, port) pair.
        return TSocket(*endpoint.address)
    if (endpoint.family == socket.AF_UNIX):
        # Unix domain socket: address is a filesystem path.
        return TSocket(unix_socket=endpoint.address)
    raise Exception(f'unsupported endpoint family {endpoint.family!r}')
def spladder(options):
    """Top-level SplAdder pipeline driver.

    Parses options, builds per-sample splicing graphs, optionally merges
    them, quantifies graph coverage and gene expression, and finally
    extracts and analyzes alternative-splicing events.  Most stages are
    skipped when their output file already exists, making the pipeline
    restartable.
    """
    options = settings.parse_args(options)
    if (not options.no_reset_conf):
        options = settings.set_confidence_level(options)
    fn_out_merge = get_filename('fn_out_merge', options)
    fn_out_merge_val = get_filename('fn_out_merge_val', options)
    _prep_workdir(options)
    options = prep_annotation(options)
    # --- graph construction (skipped if the merged graph already exists) ---
    if (not os.path.exists(fn_out_merge)):
        if options.sparse_bam:
            prep_sparse_bam_filtered(options)
        if (options.merge in ['single', 'merge_graphs', 'merge_all']):
            # One splicing graph per sample.
            for idx in range(len(options.samples)):
                out_fname = ('%s/spladder/genes_graph_conf%i.%s.pickle' % (options.outdir, options.confidence, options.samples[idx]))
                if (not os.path.exists(out_fname)):
                    spladder_core(options.bam_fnames[idx], out_fname, options)
        if (options.merge in ['merge_bams', 'merge_all']):
            # Single graph built from all BAMs at once.
            out_fname = ('%s/spladder/genes_graph_conf%i.merge_bams.pickle' % (options.outdir, options.confidence))
            if (not os.path.exists(out_fname)):
                spladder_core(options.bam_fnames, out_fname, options)
        if (options.merge in ['merge_graphs', 'merge_all']):
            run_merge(options.samples, options)
    # --- optional post-merge validation by edge count ---
    if ((options.merge == 'merge_graphs') and options.validate_sg and (not os.path.exists(fn_out_merge_val))):
        (genes, inserted) = pickle.load(open(fn_out_merge, 'rb'))
        genes = filter_by_edgecount(genes, options)
        pickle.dump((genes, inserted), open(fn_out_merge_val, 'wb'), (- 1))
        del genes
    if options.sparse_bam:
        prep_sparse_bam_full(options)
    # --- quantification: per-sample in 'single'/collect mode, else once ---
    if ((options.merge == 'single') or (options.qmode == 'collect')):
        idxs = list(range(len(options.samples)))
    else:
        idxs = [0]
    for idx in idxs:
        if (len(options.chunked_merge) > 0):
            # In chunked merging, only the final level triggers quantification.
            (curr_level, max_level, chunk_start, chunk_end) = [int(_) for _ in options.chunked_merge[0]]
            if (curr_level < max_level):
                break
        if (options.merge == 'single'):
            fn_in_count = get_filename('fn_count_in', options, options.samples[idx])
            fn_out_count = get_filename('fn_count_out', options, options.samples[idx])
        elif ((options.merge == 'merge_graphs') and (options.qmode == 'single')):
            fn_in_count = get_filename('fn_count_in', options)
            fn_out_count = get_filename('fn_count_out', options, options.samples[0])
        else:
            fn_in_count = get_filename('fn_count_in', options)
            fn_out_count = get_filename('fn_count_out', options)
        fn_out_gene_count = (re.sub('.count.hdf5$', '', fn_out_count) + '.gene_exp.hdf5')
        if options.quantify_graph:
            if (not os.path.exists(fn_out_count)):
                if (options.merge == 'single'):
                    count_graph_coverage_wrapper(fn_in_count, fn_out_count, options.bam_fnames, options, sample_idx=idx)
                elif ((options.merge == 'merge_graphs') and (options.qmode == 'single')):
                    count_graph_coverage_wrapper(fn_in_count, fn_out_count, options.bam_fnames, options, qmode='single')
                elif ((options.merge == 'merge_graphs') and (options.qmode == 'collect')):
                    # Gather previously computed per-sample quantifications.
                    collect_single_quantification_results(fn_out_count, idxs, options)
                else:
                    count_graph_coverage_wrapper(fn_in_count, fn_out_count, options.bam_fnames, options)
            if (not os.path.exists(fn_out_gene_count)):
                if (options.merge == 'single'):
                    compute_gene_expression(options, fn_in_count, fn_out_count, fn_out_gene_count, sample_idx=[0])
                else:
                    compute_gene_expression(options, fn_in_count, fn_out_count, fn_out_gene_count)
    # --- event extraction and analysis ---
    if options.extract_as:
        collect_events(options)
        if options.quantify_graph:
            for idx in idxs:
                for event_type in options.event_types:
                    if (options.merge == 'single'):
                        analyze_events(event_type, options.bam_fnames, options, sample_idx=idx)
                    else:
                        analyze_events(event_type, options.bam_fnames, options)
def draw_record(record, save_path):
    """Save one bar chart of loss values per entry in *record*.

    :param record: mapping from metric name to a sequence of loss values
        (one value per ball round).
    :param save_path: path prefix; each figure is written to
        ``{save_path}{key}_bar.png``.
    """
    for (key, value) in record.items():
        fig = plt.figure(figsize=(12, 6))
        # Use the unpacked value directly instead of re-indexing record[key].
        ball_round = np.arange(len(value))
        plt.title(f'{key} loss')
        plt.xlabel('Ball round')
        plt.ylabel('Loss')
        plt.grid()
        plt.bar(ball_round, value)
        plt.savefig(f'{save_path}{key}_bar.png')
        plt.close(fig)
def openqa_collate(samples):
    """Collate a list of open-domain QA samples into one padded batch.

    Tensor fields are padded with ``collate_tokens`` (pad value 0, except
    start/end positions which pad with -1); list-valued metadata fields are
    passed through as plain Python lists.  Returns an empty dict for an
    empty batch.
    """
    if (len(samples) == 0):
        return {}
    input_ids = collate_tokens([s['input_ids'] for s in samples], 0)
    # Binary mask marking every valid answer-start position per sample;
    # -1 entries in s['start'] are padding and are skipped.
    start_masks = torch.zeros(input_ids.size())
    for (b_idx, s) in enumerate(samples):
        for _ in s['start']:
            if (_ != (- 1)):
                start_masks[(b_idx, _)] = 1
    # Model inputs: padded token ids, segment/paragraph/question masks,
    # answer span targets, and the separate question/context encodings
    # (input_ids_q / input_ids_c) with their attention masks.
    net_input = {'input_ids': input_ids, 'segment_ids': collate_tokens([s['segment_ids'] for s in samples], 0), 'paragraph_mask': collate_tokens([s['paragraph_mask'] for s in samples], 0), 'question_mask': collate_tokens([s['question_mask'] for s in samples], 0), 'start_positions': collate_tokens([s['start'] for s in samples], (- 1)), 'end_positions': collate_tokens([s['end'] for s in samples], (- 1)), 'no_ans_targets': collate_tokens([s['no_answer'] for s in samples], 0), 'input_mask': collate_tokens([torch.ones_like(s['input_ids']) for s in samples], 0), 'start_masks': start_masks, 'input_ids_q': collate_tokens([s['input_ids_q'] for s in samples], 0), 'input_mask_q': collate_tokens([torch.ones_like(s['input_ids_q']) for s in samples], 0), 'input_ids_c': collate_tokens([s['input_ids_c'] for s in samples], 0), 'input_mask_c': collate_tokens([torch.ones_like(s['input_ids_c']) for s in samples], 0)}
    # Metadata needed for decoding predictions back to text spans.
    return {'id': [s['qid'] for s in samples], 'q': [s['q'] for s in samples], 'doc_tokens': [s['doc_tokens'] for s in samples], 'q_subtoks': [s['q_subtoks'] for s in samples], 'wp_tokens': [s['wp_tokens'] for s in samples], 'tok_to_orig_index': [s['tok_to_orig_index'] for s in samples], 'para_offset': [s['para_offset'] for s in samples], 'true_answers': [s['true_answers'] for s in samples], 'net_input': net_input}
def main(epsilon):
    """Measure classifier label accuracy under a PGD L-inf attack.

    Loads the discriminator and dataset, runs an L-inf PGD attack of
    strength *epsilon* on up to 100 batches, and prints
    ``epsilon, accuracy`` where accuracy is the fraction of attacked
    samples still classified correctly.
    """
    dis = load_model()
    dis.eval()
    loader = make_dataset()
    # NOTE: the original also initialized an unused `correct_real`; removed.
    correct_label = 0
    total = 0
    for (i, (x_real, y_real)) in enumerate(loader):
        if (i == 100):
            # Cap evaluation at 100 batches.
            break
        (x_real, y_real) = (x_real.cuda(), y_real.cuda())
        (v_y_real, v_x_real) = (Variable(y_real), Variable(x_real))
        adv_input = attack_label_Linf_PGD(v_x_real, v_y_real, dis, opt.steps, epsilon)
        with torch.no_grad():
            (_, d_multi) = dis(adv_input)
        (_, idx) = torch.max(d_multi.data, dim=1)
        label_correct = idx.eq(y_real)
        correct_label += torch.sum(label_correct)
        total += y_real.numel()
    print(f'{epsilon}, {(correct_label / total)}')
class MixedArguments():
    """Argument list whose first entry is a fixed pyname.

    The remaining pynames are delegated to a wrapped arguments object,
    which sees the parameter list minus its first element.
    """

    def __init__(self, pyname, arguments, scope):
        # `scope` is accepted for interface compatibility but not stored.
        self.pyname = pyname
        self.args = arguments

    def get_pynames(self, parameters):
        """Prepend the fixed pyname to the wrapped arguments' pynames."""
        tail = self.args.get_pynames(parameters[1:])
        return [self.pyname] + tail

    def get_arguments(self, parameters):
        """Resolve each pyname to its object; None entries stay None."""
        return [None if name is None else name.get_object()
                for name in self.get_pynames(parameters)]

    def get_instance_pyname(self):
        return self.pyname
class GaussianDiffusion():
    """Gaussian diffusion process: training losses and sampling.

    Precomputes every beta/alpha-derived schedule quantity as 1-D numpy
    float64 arrays of length ``num_timesteps``, then provides the forward
    (q) and reverse (p) distributions, ancestral and DDIM sampling loops,
    training losses, and bits-per-dim evaluation.  Tensors are handled via
    ``th`` (torch); schedules stay in numpy and are gathered per-timestep
    with ``_extract_into_tensor``.
    """
    def __init__(self, *, betas, model_mean_type, model_var_type, loss_type):
        """Initialize schedules from a 1-D array of betas in (0, 1]."""
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        # float64 keeps the cumulative products numerically accurate.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert (len(betas.shape) == 1), 'betas must be 1-D'
        assert ((betas > 0).all() and (betas <= 1).all())
        self.num_timesteps = int(betas.shape[0])
        alphas = (1.0 - betas)
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        # Shifted cumulative products: prev prepends 1.0, next appends 0.0.
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
        # Terms for q(x_t | x_0) and for recovering x_0 from eps.
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Posterior q(x_{t-1} | x_t, x_0) variance and mean coefficients.
        self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # Log-variance clipped at t=0 because the posterior variance is 0 there.
        self.posterior_log_variance_clipped = (np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) if (len(self.posterior_variance) > 1) else np.array([]))
        self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))
    def q_mean_variance(self, x_start, t):
        """Return mean, variance and log-variance of q(x_t | x_0)."""
        mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
        log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return (mean, variance, log_variance)
    def q_sample(self, x_start, t, noise=None):
        """Diffuse x_start for t steps: sample from q(x_t | x_0)."""
        if (noise is None):
            noise = th.randn_like(x_start)
        assert (noise.shape == x_start.shape)
        return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
    def q_posterior_mean_variance(self, x_start, x_t, t):
        """Return mean/variance/clipped-log-variance of q(x_{t-1} | x_t, x_0)."""
        assert (x_start.shape == x_t.shape)
        posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
        return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """Apply the model to get p(x_{t-1} | x_t) and the x_0 prediction.

        Returns a dict with 'mean', 'variance', 'log_variance',
        'pred_xstart', and the model's optional 'extra' output.
        """
        if (model_kwargs is None):
            model_kwargs = {}
        (B, C) = x.shape[:2]
        assert (t.shape == (B,))
        model_output = model(x, t, **model_kwargs)
        if isinstance(model_output, tuple):
            (model_output, extra) = model_output
        else:
            extra = None
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            # Model predicts both the mean output and variance values;
            # the channel dimension carries both halves.
            assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            # Learned-range: interpolate log-variance between the posterior
            # (min) and beta (max) bounds, with model_var_values in [-1, 1].
            min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
            max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
            frac = ((model_var_values + 1) / 2)
            model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
            model_variance = th.exp(model_log_variance)
        else:
            # Fixed variance: either the larger (beta) or smaller (posterior).
            (model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
        def process_xstart(x):
            # Optional denoising hook, then optional clamping to [-1, 1].
            if (denoised_fn is not None):
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp((- 1), 1)
            return x
        if (self.model_mean_type == ModelMeanType.START_X):
            pred_xstart = process_xstart(model_output)
        else:
            # Model predicts epsilon; convert to an x_0 prediction first.
            pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
        (model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
        return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart, 'extra': extra}
    def _predict_xstart_from_eps(self, x_t, t, eps):
        """Recover x_0 from x_t and predicted noise eps."""
        assert (x_t.shape == eps.shape)
        return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))
    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        """Recover eps from x_t and a predicted x_0 (inverse of the above)."""
        return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """Shift the reverse-process mean by the guidance gradient.

        cond_fn computes the gradient of a conditioning log-probability
        with respect to x; the mean moves by variance * gradient.
        """
        gradient = cond_fn(x, t, **model_kwargs)
        new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
        return new_mean
    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """Condition the score (eps) and rederive pred_xstart and mean."""
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
        eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)))
        out = p_mean_var.copy()
        out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
        (out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
        return out
    def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
        """Draw one ancestral sample x_{t-1} from the model at timestep t."""
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        noise = th.randn_like(x)
        # No noise is added at t == 0 (the final denoising step).
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        if (cond_fn is not None):
            out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
        sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}
    def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        """Run the full ancestral sampling chain; return the final sample."""
        final = None
        for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress):
            final = sample
        return final['sample']
    def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        """Generator yielding intermediate p_sample outputs from T-1 down to 0."""
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Iterate timesteps in reverse order.
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.p_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
                (yield out)
                img = out['sample']
    def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
        """Sample x_{t-1} using DDIM; eta=0 gives the deterministic path."""
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        if (cond_fn is not None):
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        # Re-derive eps after possible clipping/conditioning of pred_xstart.
        eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
        noise = th.randn_like(x)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
        # No noise at t == 0.
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        sample = (mean_pred + ((nonzero_mask * sigma) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}
    def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
        """Sample x_{t+1} from x_t using the reverse DDIM ODE (eta must be 0)."""
        assert (eta == 0.0), 'Reverse ODE only for deterministic path'
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        if (cond_fn is not None):
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
        return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}
    def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        """Run the full DDIM sampling chain; return the final sample."""
        final = None
        for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta):
            final = sample
        return final['sample']
    def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        """Generator yielding intermediate ddim_sample outputs, T-1 down to 0."""
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.ddim_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
                (yield out)
                img = out['sample']
    def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
        """Variational-bound term for one timestep, in bits per dimension.

        Uses the KL between the true and model posteriors for t > 0 and the
        decoder negative log-likelihood at t == 0.
        """
        (true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
        out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
        kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
        # Divide by log(2) to convert nats to bits.
        kl = (mean_flat(kl) / np.log(2.0))
        decoder_nll = (- continuous_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
        assert (decoder_nll.shape == x_start.shape)
        decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
        output = th.where((t == 0), decoder_nll, kl)
        return {'output': output, 'pred_xstart': out['pred_xstart']}
    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """Compute per-sample training losses for one batch of timesteps."""
        if (model_kwargs is None):
            model_kwargs = {}
        if (noise is None):
            noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
            terms['loss'] = self._vb_terms_bpd(model=model, x_start=x_start, x_t=x_t, t=t, clip_denoised=False, model_kwargs=model_kwargs)['output']
            if (self.loss_type == LossType.RESCALED_KL):
                terms['loss'] *= self.num_timesteps
        elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
            model_output = model(x_t, t, **model_kwargs)
            if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
                (B, C) = x_t.shape[:2]
                assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
                (model_output, model_var_values) = th.split(model_output, C, dim=1)
                # Learn the variance with the VB term but detach the mean
                # output so the VB loss does not affect the MSE gradient.
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
                if (self.loss_type == LossType.RESCALED_MSE):
                    terms['vb'] *= (self.num_timesteps / 1000.0)
            # Regression target depends on what the model was trained to predict.
            target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
            assert (model_output.shape == target.shape == x_start.shape)
            terms['mse'] = mean_flat(((target - model_output) ** 2))
            if ('vb' in terms):
                terms['loss'] = (terms['mse'] + terms['vb'])
            else:
                terms['loss'] = terms['mse']
        else:
            raise NotImplementedError(self.loss_type)
        return terms
    def _prior_bpd(self, x_start):
        """Prior KL term (bits/dim): KL(q(x_T | x_0) || N(0, I))."""
        batch_size = x_start.shape[0]
        t = th.tensor(([(self.num_timesteps - 1)] * batch_size), device=x_start.device)
        (qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return (mean_flat(kl_prior) / np.log(2.0))
    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        """Evaluate the full variational bound over all timesteps.

        Returns total/prior bits-per-dim plus per-timestep vb, x_0 MSE and
        eps MSE tensors (batch x num_timesteps).
        """
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        for t in list(range(self.num_timesteps))[::(- 1)]:
            t_batch = th.tensor(([t] * batch_size), device=device)
            noise = th.randn_like(x_start)
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            with th.no_grad():
                out = self._vb_terms_bpd(model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
            vb.append(out['output'])
            xstart_mse.append(mean_flat(((out['pred_xstart'] - x_start) ** 2)))
            eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
            mse.append(mean_flat(((eps - noise) ** 2)))
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = (vb.sum(dim=1) + prior_bpd)
        return {'total_bpd': total_bpd, 'prior_bpd': prior_bpd, 'vb': vb, 'xstart_mse': xstart_mse, 'mse': mse}
class DHT(mp.Process):
    """Runs a DHTNode inside a separate process and proxies calls to it.

    The parent process sends (method, args, kwargs) tuples over a duplex
    pipe; the child runs an asyncio event loop that dispatches them to the
    wrapped DHTNode and delivers results through MPFuture objects.
    """
    _node: DHTNode
    def __init__(self, initial_peers: Optional[Sequence[Union[(Multiaddr, str)]]]=None, *, start: bool, p2p: Optional[P2P]=None, daemon: bool=True, num_workers: int=DEFAULT_NUM_WORKERS, record_validators: Iterable[RecordValidatorBase]=(), shutdown_timeout: float=3, await_ready: bool=True, **kwargs):
        """Configure the process; if ``start`` is True, launch it immediately.

        Extra **kwargs are forwarded to DHTNode.create in the child process.
        """
        self._parent_pid = os.getpid()
        super().__init__()
        if (not ((initial_peers is None) or (isinstance(initial_peers, Sequence) and all((isinstance(item, (Multiaddr, str)) for item in initial_peers))))):
            raise TypeError('initial_peers should be of type Optional[Sequence[Union[Multiaddr, str]]]')
        self.initial_peers = initial_peers
        self.kwargs = kwargs
        self.num_workers = num_workers
        self._record_validator = CompositeValidator(record_validators)
        # Duplex pipe: parent writes requests on _outer_pipe, child reads on
        # _inner_pipe.
        (self._inner_pipe, self._outer_pipe) = mp.Pipe(duplex=True)
        self.shutdown_timeout = shutdown_timeout
        self._ready = MPFuture()
        self.daemon = daemon
        # Lazily-populated caches (see peer_id / client_mode / replicate_p2p).
        self._peer_id = None
        self._client_mode = None
        self._p2p_replica = None
        self._daemon_listen_maddr = (p2p.daemon_listen_maddr if (p2p is not None) else None)
        if start:
            self.run_in_background(await_ready=await_ready)
    def run(self) -> None:
        """Child-process entry point: create the node and serve requests."""
        loop = switch_to_uvloop()
        # The semaphore is released whenever the pipe becomes readable, so the
        # request loop can await either a message or a timeout.
        pipe_semaphore = asyncio.Semaphore(value=0)
        loop.add_reader(self._inner_pipe.fileno(), pipe_semaphore.release)
        async def _run():
            try:
                if (self._daemon_listen_maddr is not None):
                    replicated_p2p = (await P2P.replicate(self._daemon_listen_maddr))
                else:
                    replicated_p2p = None
                self._node = (await DHTNode.create(initial_peers=self.initial_peers, num_workers=self.num_workers, record_validator=self._record_validator, p2p=replicated_p2p, **self.kwargs))
            except Exception as e:
                # Propagate startup failures to the parent via the ready future.
                logger.debug(e, exc_info=True)
                self._ready.set_exception(e)
                return
            self._ready.set_result(None)
            while True:
                try:
                    (await asyncio.wait_for(pipe_semaphore.acquire(), timeout=self._node.protocol.wait_timeout))
                except asyncio.TimeoutError:
                    pass
                if (not self._inner_pipe.poll()):
                    continue
                try:
                    (method, args, kwargs) = self._inner_pipe.recv()
                except (OSError, ConnectionError, RuntimeError) as e:
                    logger.exception(e)
                    (await asyncio.sleep(self._node.protocol.wait_timeout))
                    continue
                task = asyncio.create_task(getattr(self, method)(*args, **kwargs))
                # _shutdown is awaited synchronously so the loop exits cleanly.
                if (method == '_shutdown'):
                    (await task)
                    break
        loop.run_until_complete(_run())
    def run_in_background(self, await_ready: bool=True, timeout: Optional[float]=None) -> None:
        """Start the DHT process; optionally block until it is ready."""
        self.start()
        if await_ready:
            self.wait_until_ready(timeout)
    def wait_until_ready(self, timeout: Optional[float]=None) -> None:
        """Block until the child node is created (re-raises startup errors)."""
        self._ready.result(timeout=timeout)
    def shutdown(self) -> None:
        """Ask the child process to stop; terminate it if it does not."""
        if self.is_alive():
            self._outer_pipe.send(('_shutdown', [], {}))
            self.join(self.shutdown_timeout)
            if self.is_alive():
                logger.warning('DHT did not shut down within the grace period; terminating it the hard way.')
                self.terminate()
    async def _shutdown(self):
        # Runs in the child process.
        (await self._node.shutdown())
    def get(self, key: DHTKey, latest: bool=False, return_future: bool=False, **kwargs) -> Union[(Optional[ValueWithExpiration[DHTValue]], MPFuture)]:
        """Look up *key* in the DHT; return the value or a future for it."""
        future = MPFuture()
        self._outer_pipe.send(('_get', [], dict(key=key, latest=latest, future=future, **kwargs)))
        return (future if return_future else future.result())
    async def _get(self, key: DHTKey, latest: bool, future: MPFuture, **kwargs):
        # Child-side counterpart of get(); resolves the caller's future.
        try:
            result = (await self._node.get(key, latest=latest, **kwargs))
            if (not future.done()):
                future.set_result(result)
        except BaseException as e:
            if (not future.done()):
                future.set_exception(e)
            raise
    def store(self, key: DHTKey, value: DHTValue, expiration_time: DHTExpiration, subkey: Optional[Subkey]=None, return_future: bool=False, **kwargs) -> Union[(bool, MPFuture)]:
        """Store *value* under *key* until *expiration_time*."""
        future = MPFuture()
        self._outer_pipe.send(('_store', [], dict(key=key, value=value, expiration_time=expiration_time, subkey=subkey, future=future, **kwargs)))
        return (future if return_future else future.result())
    async def _store(self, key: DHTKey, value: DHTValue, expiration_time: DHTExpiration, subkey: Optional[Subkey], future: MPFuture, **kwargs):
        # Child-side counterpart of store(); resolves the caller's future.
        try:
            result = (await self._node.store(key, value, expiration_time, subkey=subkey, **kwargs))
            if (not future.done()):
                future.set_result(result)
        except BaseException as e:
            if (not future.done()):
                future.set_exception(e)
            raise
    def run_coroutine(self, coro: Callable[([DHT, DHTNode], Awaitable[ReturnType])], return_future: bool=False) -> Union[(ReturnType, MPFuture[ReturnType])]:
        """Execute an arbitrary coroutine inside the DHT process.

        The coroutine receives (this DHT proxy, the live DHTNode).
        """
        future = MPFuture()
        self._outer_pipe.send(('_run_coroutine', [], dict(coro=coro, future=future)))
        return (future if return_future else future.result())
    async def _run_coroutine(self, coro: Callable[([DHT, DHTNode], Awaitable[ReturnType])], future: MPFuture[ReturnType]):
        try:
            future.set_result((await coro(self, self._node)))
        except BaseException as e:
            logger.exception('Caught an exception when running a coroutine:')
            future.set_exception(e)
    def add_validators(self, record_validators: Iterable[RecordValidatorBase]) -> None:
        """Append record validators; only valid after the process is ready."""
        if (not self._ready.done()):
            raise RuntimeError("Can't append new validators before the DHT process has started. Consider adding them to the initial list via DHT.__init__(record_validators=...)")
        self.run_coroutine(partial(DHT._add_validators, record_validators=record_validators))
    # NOTE(review): the helpers below take (_dht, node) because they are
    # invoked via run_coroutine inside the child process, not as bound methods.
    async def _add_validators(_dht: DHT, node: DHTNode, record_validators: Iterable[RecordValidatorBase]) -> None:
        node.protocol.record_validator.extend(record_validators)
    def peer_id(self) -> PeerID:
        """Return (and cache) the node's peer id."""
        if (self._peer_id is None):
            self._peer_id = self.run_coroutine(DHT._get_peer_id)
        return self._peer_id
    async def _get_peer_id(_dht: DHT, node: DHTNode) -> PeerID:
        return node.peer_id
    def client_mode(self) -> bool:
        """Return (and cache) whether the node runs in client mode."""
        if (self._client_mode is None):
            self._client_mode = self.run_coroutine(DHT._get_client_mode)
        return self._client_mode
    async def _get_client_mode(_dht: DHT, node: DHTNode) -> bool:
        return node.protocol.client_mode
    def get_visible_maddrs(self, latest: bool=False) -> List[Multiaddr]:
        """Return multiaddresses on which the node is reachable."""
        return self.run_coroutine(partial(DHT._get_visible_maddrs, latest=latest))
    async def _get_visible_maddrs(_dht: DHT, node: DHTNode, latest: bool=False) -> List[Multiaddr]:
        return (await node.get_visible_maddrs(latest=latest))
    async def replicate_p2p(self) -> P2P:
        """Return (and cache) a P2P instance connected to the node's daemon."""
        if (self._p2p_replica is None):
            daemon_listen_maddr = self.run_coroutine(DHT._get_p2p_daemon_listen_maddr)
            self._p2p_replica = (await P2P.replicate(daemon_listen_maddr))
        return self._p2p_replica
    async def _get_p2p_daemon_listen_maddr(_dht: DHT, node: DHTNode) -> Multiaddr:
        return node.p2p.daemon_listen_maddr
    def __del__(self):
        # Only the parent process that created the DHT may shut it down.
        if ((self._parent_pid == os.getpid()) and self.is_alive()):
            self.shutdown()
class CalcChangeLocalDroneMutationCommand(wx.Command):
    """Undoable wx command that edits the mutation values of a local drone.

    ``Do`` saves the drone's current mutator values (unless a saved state was
    supplied) and applies the new mutation; ``Undo`` re-runs the command with
    the saved values.
    """
    def __init__(self, fitID, position, mutation, oldMutation=None):
        wx.Command.__init__(self, True, 'Change Local Drone Mutation')
        self.fitID = fitID
        # Index of the drone within fit.drones.
        self.position = position
        # Mapping of attribute ID -> new mutator value to apply.
        self.mutation = mutation
        # Previous values, captured on Do() when not provided.
        self.savedMutation = oldMutation
    def Do(self):
        """Apply the mutation; return False when there is nothing to change."""
        pyfalog.debug('Doing changing of local drone mutation at position {} to {} for fit ID {}'.format(self.position, self.mutation, self.fitID))
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        drone = fit.drones[self.position]
        # Only mutated drones carry mutators to edit.
        if (not drone.isMutated):
            return False
        if (self.savedMutation is None):
            # Snapshot current values so Undo can restore them.
            self.savedMutation = {}
            for mutator in drone.mutators.values():
                self.savedMutation[mutator.attrID] = mutator.value
        # No-op change: report failure so it is not pushed onto the undo stack.
        if (self.mutation == self.savedMutation):
            return False
        for mutator in drone.mutators.values():
            if (mutator.attrID not in self.mutation):
                continue
            if (mutator.value != self.mutation[mutator.attrID]):
                mutator.value = self.mutation[mutator.attrID]
        return True
    def Undo(self):
        """Restore the saved mutation by executing a fresh command with it."""
        pyfalog.debug('Undoing changing of local drone mutation at position {} to {} for fit ID {}'.format(self.position, self.mutation, self.fitID))
        cmd = CalcChangeLocalDroneMutationCommand(fitID=self.fitID, position=self.position, mutation=self.savedMutation)
        return cmd.Do()
class Laplace(Radial):
    """Laplace (exponential) radial kernel: k(x, y) = exp(-||x - y||) after rescaling."""

    def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Evaluate the kernel on rescaled inputs."""
        xs = self._rescale(x)
        ys = self._rescale(y)
        dist = torch.sqrt(pdist2(xs, ys))
        return torch.exp(-dist)

    def string_id(self):
        """Short identifier embedding the scale parameter."""
        return f'Laplace[{self._scale_str}]'

    def effective_dim(self, x) -> float:
        """The Laplace kernel's feature space is infinite-dimensional."""
        return float('inf')
class Canvas(Component):
    """Root GUI component: each frame, dispatches hover updates to descendant
    GUI elements whose rect currently contains the mouse cursor."""

    def Update(self, loop):
        """Scan descendant transforms and schedule HoverUpdate tasks for hovered GUI components."""
        for child in self.transform.GetDescendants():
            gui = child.GetComponent(GuiComponent)
            if gui is None:
                continue
            rt = child.GetComponent(RectTransform)
            bounds = rt.GetRect() + rt.offset
            cursor = Vector2(Input.mousePosition)
            # Chained comparison: cursor strictly inside the rect
            # (assumes Vector2 defines component-wise ordering -- TODO confirm).
            if bounds.min < cursor < bounds.max:
                createTask(loop, gui.HoverUpdate())
def default_evaluation_params():
    """Return the default parameter set for the text-detection evaluation protocol.

    Includes area-overlap constraints, match-type weights, filename patterns
    for ground-truth/detection files, and the CRLF flag.
    """
    params = {
        'AREA_RECALL_CONSTRAINT': 0.8,
        'AREA_PRECISION_CONSTRAINT': 0.4,
        'EV_PARAM_IND_CENTER_DIFF_THR': 1,
        'MTYPE_OO_O': 1.0,
        'MTYPE_OM_O': 0.8,
        'MTYPE_OM_M': 1.0,
        'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
        'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
        'CRLF': False,
    }
    return params
class SubState():
    """One step of a multi-stage card-play decision chain.

    A full move is built through a sequence of smaller decisions
    (decision type -> response -> sequence length -> "minor" attached cards).
    This class tracks the current stage in ``mode``, accumulates chosen card
    values in ``intention``, and exposes the legal-action mask for the stage.
    Appears to implement Dou Dizhu move construction -- TODO confirm.
    """
    def __init__(self, act, prob_state, all_state, curr_handcards_char, last_cards_value, last_category):
        # ACT_TYPE.PASSIVE: responding to another player's cards;
        # ACT_TYPE.ACTIVE: leading a new round.
        self.act = act
        self.prob_state = prob_state
        self.all_state = all_state
        self.finished = False
        # Entry stage depends on whether we lead or respond.
        self.mode = (MODE.PASSIVE_DECISION if (self.act == ACT_TYPE.PASSIVE) else MODE.ACTIVE_DECISION)
        # Card values chosen so far for this move.
        self.intention = np.array([])
        self.last_cards_value = last_cards_value
        # 0: minor cards are singles, 1: minor cards are pairs.
        self.minor_type = 0
        self.category = last_category
        # Number of minor cards still to be selected.
        self.minor_length = 0
        self.curr_handcards_char = curr_handcards_char
        self.active_decision = 0
        self.active_response = 0
        # Category of the move being built; -1 until decided.
        self.card_type = (- 1)
    def get_mask(self):
        """Return the legal-action mask for the current (act, mode) stage.

        NOTE(review): returns None for unlisted (act, mode) combinations --
        presumably unreachable; confirm against callers.
        """
        if (self.act == ACT_TYPE.PASSIVE):
            (decision_mask, response_mask, bomb_mask, _) = get_mask_alter(self.curr_handcards_char, to_char(self.last_cards_value), self.category)
            if (self.mode == MODE.PASSIVE_DECISION):
                return decision_mask
            elif (self.mode == MODE.PASSIVE_RESPONSE):
                return response_mask
            elif (self.mode == MODE.PASSIVE_BOMB):
                return bomb_mask
            elif (self.mode == MODE.MINOR_RESPONSE):
                (input_single, input_pair, _, _) = get_masks(self.curr_handcards_char, None)
                if (self.minor_type == 1):
                    # Pad the pair mask with two zeros so it matches the
                    # singles-mask length -- presumably the two joker slots
                    # which cannot form pairs; TODO confirm.
                    mask = np.append(input_pair, [0, 0])
                else:
                    mask = input_single
                # Minor cards must not repeat values already in the move.
                for v in set(self.intention):
                    mask[(v - 3)] = 0
                return mask
        elif (self.act == ACT_TYPE.ACTIVE):
            (decision_mask, response_mask, _, length_mask) = get_mask_alter(self.curr_handcards_char, [], self.category)
            if (self.mode == MODE.ACTIVE_DECISION):
                return decision_mask
            elif (self.mode == MODE.ACTIVE_RESPONSE):
                return response_mask[self.active_decision]
            elif (self.mode == MODE.ACTIVE_SEQ):
                return length_mask[self.active_decision][self.active_response]
            elif (self.mode == MODE.MINOR_RESPONSE):
                # Same minor-card masking as the passive branch above.
                (input_single, input_pair, _, _) = get_masks(self.curr_handcards_char, None)
                if (self.minor_type == 1):
                    mask = np.append(input_pair, [0, 0])
                else:
                    mask = input_single
                for v in set(self.intention):
                    mask[(v - 3)] = 0
                return mask
    def step(self, action):
        """Advance the state machine by one chosen action.

        Mutates ``mode``, ``intention``, ``card_type`` etc.; sets
        ``finished`` once the move is fully specified.
        """
        if (self.act == ACT_TYPE.PASSIVE):
            if (self.mode == MODE.PASSIVE_DECISION):
                # action 0: pass, 1: play a bomb, 2: play the joker pair
                # ("big bang"), 3: respond with a regular move.
                if ((action == 0) or (action == 2)):
                    self.finished = True
                    if (action == 2):
                        self.intention = np.array([16, 17])
                        self.card_type = Category.BIGBANG.value
                    else:
                        self.card_type = Category.EMPTY.value
                    return
                elif (action == 1):
                    self.mode = MODE.PASSIVE_BOMB
                    return
                elif (action == 3):
                    self.mode = MODE.PASSIVE_RESPONSE
                    return
                else:
                    raise Exception('unexpected action')
            elif (self.mode == MODE.PASSIVE_BOMB):
                # action encodes the bomb rank; +3 maps index to card value.
                self.intention = np.array(([(action + 3)] * 4))
                self.finished = True
                self.card_type = Category.QUADRIC.value
                return
            elif (self.mode == MODE.PASSIVE_RESPONSE):
                self.intention = give_cards_without_minor(action, self.last_cards_value, self.category, None)
                if ((self.category == Category.THREE_ONE.value) or (self.category == Category.THREE_TWO.value) or (self.category == Category.THREE_ONE_LINE.value) or (self.category == Category.THREE_TWO_LINE.value) or (self.category == Category.FOUR_TAKE_TWO.value)):
                    # These categories carry attached "minor" cards, chosen in
                    # subsequent MINOR_RESPONSE steps.
                    if ((self.category == Category.THREE_TWO.value) or (self.category == Category.THREE_TWO_LINE.value)):
                        self.minor_type = 1
                    self.mode = MODE.MINOR_RESPONSE
                    # Remove the major cards from the hand-probability state.
                    discard_onehot_from_s_60(self.prob_state, Card.val2onehot60(self.intention))
                    self.minor_length = get_seq_length(self.category, self.last_cards_value)
                    if (self.minor_length is None):
                        self.minor_length = (2 if (self.category == Category.FOUR_TAKE_TWO.value) else 1)
                    self.card_type = self.category
                    return
                else:
                    self.finished = True
                    self.card_type = self.category
                    return
            elif (self.mode == MODE.MINOR_RESPONSE):
                # One single (or pair, if minor_type == 1) per step.
                minor_value_cards = ([(action + 3)] * (1 if (self.minor_type == 0) else 2))
                discard_onehot_from_s_60(self.prob_state, Card.val2onehot60(minor_value_cards))
                self.intention = np.append(self.intention, minor_value_cards)
                assert (self.minor_length > 0)
                self.minor_length -= 1
                if (self.minor_length == 0):
                    self.finished = True
                    return
                else:
                    return
        elif (self.act == ACT_TYPE.ACTIVE):
            if (self.mode == MODE.ACTIVE_DECISION):
                # Decision index maps to a category; +1 skips Category.EMPTY.
                self.category = (action + 1)
                self.active_decision = action
                self.mode = MODE.ACTIVE_RESPONSE
                self.card_type = self.category
                return
            elif (self.mode == MODE.ACTIVE_RESPONSE):
                if ((self.category == Category.SINGLE_LINE.value) or (self.category == Category.DOUBLE_LINE.value) or (self.category == Category.TRIPLE_LINE.value) or (self.category == Category.THREE_ONE_LINE.value) or (self.category == Category.THREE_TWO_LINE.value)):
                    # Sequence categories still need a length decision.
                    self.active_response = action
                    self.mode = MODE.ACTIVE_SEQ
                    return
                elif ((self.category == Category.THREE_ONE.value) or (self.category == Category.THREE_TWO.value) or (self.category == Category.FOUR_TAKE_TWO.value)):
                    # Non-sequence categories with attached minor cards.
                    if ((self.category == Category.THREE_TWO.value) or (self.category == Category.THREE_TWO_LINE.value)):
                        self.minor_type = 1
                    self.mode = MODE.MINOR_RESPONSE
                    self.intention = give_cards_without_minor(action, np.array([]), self.category, None)
                    discard_onehot_from_s_60(self.prob_state, Card.val2onehot60(self.intention))
                    self.minor_length = (2 if (self.category == Category.FOUR_TAKE_TWO.value) else 1)
                    return
                else:
                    self.intention = give_cards_without_minor(action, np.array([]), self.category, None)
                    self.finished = True
                    return
            elif (self.mode == MODE.ACTIVE_SEQ):
                self.minor_length = (action + 1)
                self.intention = give_cards_without_minor(self.active_response, np.array([]), self.category, (action + 1))
                if ((self.category == Category.THREE_ONE_LINE.value) or (self.category == Category.THREE_TWO_LINE.value)):
                    # NOTE(review): the THREE_TWO check below can never match
                    # inside this branch; only THREE_TWO_LINE is reachable here.
                    if ((self.category == Category.THREE_TWO.value) or (self.category == Category.THREE_TWO_LINE.value)):
                        self.minor_type = 1
                    self.mode = MODE.MINOR_RESPONSE
                    discard_onehot_from_s_60(self.prob_state, Card.val2onehot60(self.intention))
                else:
                    self.finished = True
                return
            elif (self.mode == MODE.MINOR_RESPONSE):
                # Same minor-card handling as the passive branch.
                minor_value_cards = ([(action + 3)] * (1 if (self.minor_type == 0) else 2))
                discard_onehot_from_s_60(self.prob_state, Card.val2onehot60(minor_value_cards))
                self.intention = np.append(self.intention, minor_value_cards)
                assert (self.minor_length > 0)
                self.minor_length -= 1
                if (self.minor_length == 0):
                    self.finished = True
                    return
                else:
                    return
class TimeoutHTTPAdapter(HTTPAdapter):
    """requests transport adapter that applies a default timeout to every
    request unless the caller supplies an explicit one."""

    def __init__(self, *args, **kwargs):
        # Pop our extra keyword before delegating; HTTPAdapter.__init__ does
        # not accept 'timeout'. Default of 0 preserved from the original.
        self.timeout = kwargs.pop('timeout', 0)
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        """Inject the stored default timeout when none was supplied."""
        if kwargs.get('timeout') is None:
            kwargs['timeout'] = self.timeout
        return super().send(request, **kwargs)
def getConfig(argv=None):
    """Parse command-line options for training/testing the fine-grained model.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing a list makes the function callable from tests without
            touching ``sys.argv`` (backward-compatible: existing callers
            pass nothing and get the old behavior).

    Returns:
        argparse.Namespace with all parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('action', choices=('train', 'test'))
    parser.add_argument('--dataset', metavar='DIR', default='bird', help='name of the dataset')
    parser.add_argument('--image-size', '-i', default=512, type=int, metavar='N', help='image size (default: 512)')
    parser.add_argument('--input-size', '-cs', default=448, type=int, metavar='N', help='the input size of the model (default: 448)')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
    parser.add_argument('--optim', default='sgd', type=str, help='the name of optimizer(adam,sgd)')
    parser.add_argument('--scheduler', default='plateau', type=str, help='the name of scheduler(step,plateau)')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float, metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-05, type=float, metavar='W', help='weight decay (default: 1e-5)')
    parser.add_argument('--parts', default=32, type=int, metavar='N', help='number of parts (default: 32)')
    parser.add_argument('--alpha', default=0.95, type=float, metavar='N', help='weight for BAP loss')
    parser.add_argument('--model-name', default='inception', type=str, help='model name')
    # NOTE(review): store_true with default=True means these two flags can
    # never be switched off from the command line; kept as-is for backward
    # compatibility -- consider store_false variants or BooleanOptionalAction.
    parser.add_argument('--use-gpu', action='store_true', default=True, help='whether use gpu or not, default True')
    parser.add_argument('--multi-gpu', action='store_true', default=True, help='whether use multiple gpus or not, default True')
    parser.add_argument('--gpu-ids', default='0,1', help='gpu id list(eg: 0,1,2...)')
    parser.add_argument('--epochs', default=80, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('-b', '--batch-size', default=16, type=int, metavar='N', help='mini-batch size (default: 16)')
    # Fixed: help text previously claimed "(default: 10)" while the actual default is 100.
    parser.add_argument('--print-freq', '-pf', default=100, type=int, metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('--checkpoint-path', default='checkpoint', type=str, metavar='checkpoint_path', help='path to save checkpoint (default: checkpoint)')
    args = parser.parse_args(argv)
    return args
class Writer(SummaryWriter):
    """TensorBoard SummaryWriter with convenience helpers for logging
    losses, audio waveforms, and parameter histograms."""

    def __init__(self, logdir, sample_rate=16000):
        super(Writer, self).__init__(logdir)
        self.sample_rate = sample_rate
        self.logdir = logdir

    def logging_loss(self, losses, step):
        """Write each entry of the *losses* mapping as a scalar at *step*."""
        for name in losses:
            self.add_scalar('{}'.format(name), losses[name], step)

    def logging_audio(self, target, prediction, step):
        """Log predicted then target audio, each with its waveform image."""
        for suffix, signal in (('predicted', prediction), ('target', target)):
            self.add_audio('raw_audio_' + suffix, signal, step, self.sample_rate)
            self.add_image('waveform_' + suffix, plot_waveform_to_numpy(signal), step)

    def logging_histogram(self, model, step):
        """Log a histogram for every named parameter of *model*."""
        for (tag, value) in model.named_parameters():
            self.add_histogram(tag.replace('.', '/'), value.cpu().detach().numpy(), step)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.