code stringlengths 281 23.7M |
|---|
def get_detached_file_descriptor(filepath):
    """Open *filepath* for shared reading on Windows and return a CRT file
    descriptor detached from the Win32 handle; on platforms where pywin32
    is unavailable, return the path unchanged so callers open it normally.
    """
    try:
        import win32file
    except ImportError:
        # Not on Windows (or pywin32 missing): nothing special to do.
        return filepath
    import msvcrt
    import os
    # Full sharing so other processes may read/write/delete the file
    # while we hold it open.
    share_mode = (win32file.FILE_SHARE_DELETE
                  | win32file.FILE_SHARE_READ
                  | win32file.FILE_SHARE_WRITE)
    handle = win32file.CreateFile(
        str(filepath), win32file.GENERIC_READ, share_mode,
        None, win32file.OPEN_EXISTING, 0, None)
    # Detach so closing the PyHANDLE wrapper does not close the OS handle;
    # ownership transfers to the CRT descriptor.
    return msvcrt.open_osfhandle(handle.Detach(), os.O_RDONLY)
def load_controller(args, index):
    """Instantiate the controller named in ``args.controller[index]``.

    Returns None for the literal name 'none'; otherwise constructs the
    matching controller class and, when a load directory is configured
    for this index, restores its saved state from there.  Raises
    NotImplementedError for unknown controller names.
    """
    name = args.controller[index]
    if name == 'none':
        return None
    if name == 'fudge_controller':
        controller = FudgeController(args, index)
    elif name == 'longformer_classifier':
        controller = LongformerClassifier(args, index)
    else:
        raise NotImplementedError
    load_dir = args.controller_load_dir[index]
    if len(load_dir) > 0:
        controller.load(load_dir)
    return controller
class bdist(Command):
    """distutils command: create one or more built (binary) distributions
    by dispatching to format-specific sub-commands (bdist_dumb, bdist_rpm).
    """
    description = 'create a built (binary) distribution'
    # distutils option table: (long option, short option, help text).
    user_options = [('bdist-base=', 'b', 'temporary directory for creating built distributions'), ('plat-name=', 'p', ('platform name to embed in generated filenames (default: %s)' % get_platform())), ('formats=', None, 'formats for distribution (comma-separated list)'), ('dist-dir=', 'd', 'directory to put final built distributions in [default: dist]'), ('skip-build', None, 'skip rebuilding everything (for testing/debugging)'), ('owner=', 'u', 'Owner name used when creating a tar file [default: current user]'), ('group=', 'g', 'Group name used when creating a tar file [default: current group]')]
    boolean_options = ['skip-build']
    help_options = [('help-formats', None, 'lists available distribution formats', show_formats)]
    # Sub-commands that do not accept a 'format' attribute.
    no_format_option = ('bdist_rpm',)
    # Default archive format per os.name.
    default_format = {'posix': 'gztar', 'nt': 'zip'}
    # Maps format name -> (sub-command name, human-readable description).
    format_commands = ListCompat({'rpm': ('bdist_rpm', 'RPM distribution'), 'gztar': ('bdist_dumb', "gzip'ed tar file"), 'bztar': ('bdist_dumb', "bzip2'ed tar file"), 'xztar': ('bdist_dumb', "xz'ed tar file"), 'ztar': ('bdist_dumb', 'compressed tar file'), 'tar': ('bdist_dumb', 'tar file'), 'zip': ('bdist_dumb', 'ZIP file')})
    # Alias kept for backward compatibility with older callers.
    format_command = format_commands

    def initialize_options(self):
        """Set every option to its 'not yet decided' default."""
        self.bdist_base = None
        self.plat_name = None
        self.formats = None
        self.dist_dir = None
        self.skip_build = 0
        self.group = None
        self.owner = None

    def finalize_options(self):
        """Resolve unset options from the 'build' command and platform defaults."""
        if (self.plat_name is None):
            if self.skip_build:
                # Nothing will be rebuilt, so use the current platform.
                self.plat_name = get_platform()
            else:
                self.plat_name = self.get_finalized_command('build').plat_name
        # bdist_base defaults to 'bdist.<platform>' under the build base dir.
        if (self.bdist_base is None):
            build_base = self.get_finalized_command('build').build_base
            self.bdist_base = os.path.join(build_base, ('bdist.' + self.plat_name))
        self.ensure_string_list('formats')
        if (self.formats is None):
            try:
                self.formats = [self.default_format[os.name]]
            except KeyError:
                raise DistutilsPlatformError(("don't know how to create built distributions on platform %s" % os.name))
        if (self.dist_dir is None):
            self.dist_dir = 'dist'

    def run(self):
        """Run one sub-command per requested format."""
        # Validate all formats up front so we fail before doing any work.
        commands = []
        for format in self.formats:
            try:
                commands.append(self.format_commands[format][0])
            except KeyError:
                raise DistutilsOptionError(("invalid format '%s'" % format))
        for i in range(len(self.formats)):
            cmd_name = commands[i]
            sub_cmd = self.reinitialize_command(cmd_name)
            if (cmd_name not in self.no_format_option):
                sub_cmd.format = self.formats[i]
            if (cmd_name == 'bdist_dumb'):
                # Only the dumb builder honors tar ownership options.
                sub_cmd.owner = self.owner
                sub_cmd.group = self.group
            # If this sub-command is reused for a later format, keep its
            # temporary files so the work is not redone.
            if (cmd_name in commands[(i + 1):]):
                sub_cmd.keep_temp = 1
            self.run_command(cmd_name)
class Function(object):
    """A node in a caffe NetSpec graph: one layer invocation.

    Records the layer type, its input Tops, and its parameters, and knows
    how to serialize itself (recursing into its inputs first) into a
    caffe_pb2.LayerParameter.
    """

    def __init__(self, type_name, inputs, params):
        self.type_name = type_name  # caffe layer type, e.g. 'Convolution'
        self.inputs = inputs        # Top instances feeding this layer
        self.params = params
        # 'ntop' and 'in_place' are meta-parameters: pop them out so they
        # are not serialized into the layer's protobuf parameters.
        self.ntop = self.params.get('ntop', 1)
        if ('ntop' in self.params):
            del self.params['ntop']
        self.in_place = self.params.get('in_place', False)
        if ('in_place' in self.params):
            del self.params['in_place']
        self.tops = tuple((Top(self, n) for n in range(self.ntop)))

    def _get_name(self, names, autonames):
        """Return (and cache) the layer name: the first top's name when the
        layer has tops, otherwise an auto-generated '<type><counter>'."""
        if ((self not in names) and (self.ntop > 0)):
            names[self] = self._get_top_name(self.tops[0], names, autonames)
        elif (self not in names):
            autonames[self.type_name] += 1
            names[self] = (self.type_name + str(autonames[self.type_name]))
        return names[self]

    def _get_top_name(self, top, names, autonames):
        """Return (and cache) an auto-generated '<type><counter>' name for *top*."""
        if (top not in names):
            autonames[top.fn.type_name] += 1
            names[top] = (top.fn.type_name + str(autonames[top.fn.type_name]))
        return names[top]

    def _update(self, params):
        """Merge extra parameters into this layer's param dict."""
        self.params.update(params)

    def _to_proto(self, layers, names, autonames):
        """Serialize this layer into *layers* (dict: Function -> proto),
        recursing into inputs first so bottom blob names exist."""
        if (self in layers):
            return
        bottom_names = []
        for inp in self.inputs:
            inp._to_proto(layers, names, autonames)
            bottom_names.append(layers[inp.fn].top[inp.n])
        layer = caffe_pb2.LayerParameter()
        layer.type = self.type_name
        layer.bottom.extend(bottom_names)
        if self.in_place:
            # In-place layers write back into their bottom blobs.
            layer.top.extend(layer.bottom)
        else:
            for top in self.tops:
                layer.top.append(self._get_top_name(top, names, autonames))
        layer.name = self._get_name(names, autonames)
        for (k, v) in six.iteritems(self.params):
            if k.endswith('param'):
                assign_proto(layer, k, v)
            else:
                # Try the type-specific parameter message first (e.g.
                # convolution_param); fall back to assigning directly.
                try:
                    assign_proto(getattr(layer, (_param_names[self.type_name] + '_param')), k, v)
                except (AttributeError, KeyError):
                    assign_proto(layer, k, v)
        layers[self] = layer
class BlankCosmeticPatchesDialog(BaseCosmeticPatchesDialog, Ui_BlankCosmeticPatchesDialog):
    """Cosmetic-patches dialog for games that expose no cosmetic options."""
    # Held only so cosmetic_patches() can hand back what was passed in.
    _cosmetic_patches: BlankCosmeticPatches

    def __init__(self, parent: (QtWidgets.QWidget | None), current: BaseCosmeticPatches):
        super().__init__(parent)
        self.setupUi(self)
        assert isinstance(current, BlankCosmeticPatches)
        self._cosmetic_patches = current
        self.on_new_cosmetic_patches(current)
        self.connect_signals()

    def connect_signals(self) -> None:
        """No extra widgets here: only the base dialog's signals apply."""
        super().connect_signals()

    def on_new_cosmetic_patches(self, patches: BlankCosmeticPatches) -> None:
        """Nothing to update: blank patches carry no state."""
        pass

    def cosmetic_patches(self) -> BlankCosmeticPatches:
        """Return the (unmodified) patches this dialog was opened with."""
        return self._cosmetic_patches

    def reset(self) -> None:
        self.on_new_cosmetic_patches(BlankCosmeticPatches())
def _computations_as_categorical(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
    """Convert the requested columns of *df* to categorical dtypes.

    Per column, the validated spec value means:
    ``None`` -> plain unordered categorical; the sort-order sentinel
    string -> ordered categories derived by factorizing the column
    (sorted or appearance order); anything else -> ordered categorical
    with exactly the given categories.
    """
    spec = _as_categorical_checks(df, **kwargs)
    dtype_map = {}
    for col, choice in spec.items():
        if choice is None:
            dtype_map[col] = pd.CategoricalDtype()
            continue
        if isinstance(choice, str):
            sort_flag = choice == _CategoryOrder.SORT.value
            _, uniques = df[col].factorize(sort=sort_flag)
            if uniques.empty:
                raise ValueError(
                    f'Kindly ensure there is at least one non-null value in {col}.')
            dtype_map[col] = pd.CategoricalDtype(categories=uniques, ordered=True)
        else:
            dtype_map[col] = pd.CategoricalDtype(categories=choice, ordered=True)
    return df.astype(dtype_map)
class CdPlayer():
    """Home-theater CD player facade element: tracks the inserted title
    and the current track, printing its actions for demonstration."""
    description: str
    currentTrack: int = 0
    # Forward reference: evaluating the bare name at class-definition time
    # would require Amplifier to be imported here.
    amplifier: 'Amplifier'
    title: str

    def __init__(self, description: str, amplifier: 'Amplifier'):
        self.description = description
        self.amplifier = amplifier
        # No CD inserted until play() is called; previously `title` was
        # never initialized, so play_track() before play() raised
        # AttributeError instead of printing the "no cd" message.
        self.title = None

    def on(self) -> None:
        print(f'{self.description} on')

    def off(self) -> None:
        print(f'{self.description} off')

    def play(self, title: str) -> None:
        """Insert *title* and start playing from the first track."""
        self.title = title
        self.currentTrack = 0
        print(f'{self.description} playing "{title}"')

    def play_track(self, track: int) -> None:
        """Jump to *track*; refuses when no CD is inserted."""
        if self.title is None:
            print(f"{self.description} can't play track {self.currentTrack}, no cd inserted")
        else:
            self.currentTrack = track
            # BUG FIX: printed self.currentChapter, an attribute this class
            # never defines (copy/paste from the DVD player) -> AttributeError.
            print(f'{self.description} playing track {self.currentTrack}')

    def stop(self) -> None:
        self.currentTrack = 0
        print(f'{self.description} stopped')

    def pause(self) -> None:
        # BUG FIX: was assigning self.currentChapter instead of currentTrack.
        self.currentTrack = 0
        print(f'{self.description} paused "{self.title}"')

    def toString(self) -> str:
        return self.description
def test_tested_unlisted(covtest):
    """A fully-covered module that is absent from perfect_files must be
    flagged with a 'perfect_file' message."""
    covtest.makefile('\n def func():\n pass\n ')
    covtest.run()
    expected = check_coverage.Message(check_coverage.MsgType.perfect_file, 'module.py', 'module.py has 100% coverage but is not in perfect_files!')
    assert (covtest.check(perfect_files=[]) == [expected])
class StoryFactory(DjangoModelFactory):
    """factory_boy factory producing published success-story fixtures."""

    class Meta():
        model = Story
        # Reuse an existing Story with the same name instead of duplicating.
        django_get_or_create = ('name',)
    category = factory.SubFactory(StoryCategoryFactory)
    # Name is derived from the faked company so the two stay consistent.
    name = factory.LazyAttribute((lambda o: f'Success Story of {o.company_name}'))
    company_name = factory.Faker('company')
    company_url = factory.Faker('url')
    author = factory.Faker('name')
    author_email = factory.Faker('email')
    pull_quote = factory.Faker('sentence', nb_words=10)
    content = factory.Faker('paragraph', nb_sentences=5)
    is_published = True
def handle_code(code, vk_packet):
    """Translate one '{...}' keyboard code into a list of key actions.

    *code* is either a named key (e.g. 'ENTER'), a single literal
    character, a 'PAUSE <seconds>' directive, or '<keys> <count>'
    repetition.  With *vk_packet* false, single printable characters are
    sent as virtual keys when an ascii->VK mapping exists.
    Raises KeySequenceError for malformed counts/pause times and
    RuntimeError for unrecognizable codes.
    """
    code_keys = []
    # Named key, e.g. {ENTER}.
    if (code in CODES):
        code_keys.append(VirtualKeyAction(CODES[code]))
    # Single literal character, e.g. {%}.
    elif (len(code) == 1):
        if ((not vk_packet) and (code in ascii_vk)):
            code_keys.append(VirtualKeyAction(ascii_vk[code]))
        else:
            code_keys.append(KeyAction(code))
    # '<something> <count>' repetition or 'PAUSE <seconds>'.
    elif (' ' in code):
        (to_repeat, count) = code.rsplit(None, 1)
        if (to_repeat == 'PAUSE'):
            try:
                pause_time = float(count)
            except ValueError:
                raise KeySequenceError(('invalid pause time %s' % count))
            code_keys.append(PauseAction(pause_time))
        else:
            try:
                count = int(count)
            except ValueError:
                raise KeySequenceError('invalid repetition count {}'.format(count))
            if (to_repeat in CODES):
                code_keys.extend(([VirtualKeyAction(CODES[to_repeat])] * count))
            else:
                # Recursively parse the repeated chunk, then replicate it.
                to_repeat = parse_keys(to_repeat, vk_packet=vk_packet)
                if isinstance(to_repeat, list):
                    keys = (to_repeat * count)
                else:
                    keys = ([to_repeat] * count)
                code_keys.extend(keys)
    else:
        raise RuntimeError('Unknown code: {}'.format(code))
    return code_keys
class TestFormResourceBaseReviewForm(TestCase):
    """Rendering tests for ResourceBaseReviewForm."""

    def test_review_form_comment_includes_resource_name(self):
        """The comment field's placeholder must embed the resource name."""
        form = ResourceBaseReviewForm(resource_name='test resource')
        self.assertIn('placeholder="Please provide clear feedback if you decided to not approve this test resource." required id="id_comment"', form.as_table())
def adapt_network_for_any_size_input(network_definition, multiple):
    """Wrap *network_definition* so it accepts images of arbitrary size.

    The wrapper resizes the input batch so height/width become (rounded)
    multiples of *multiple* (the network's stride), runs the network,
    takes the per-pixel argmax of its first output, and resizes that
    prediction back to the original resolution with nearest-neighbour
    interpolation (so class labels stay discrete).  Remaining outputs
    pass through untouched.

    NOTE: removed a leftover ``pdb.set_trace()`` debugging breakpoint
    that froze every call, and scoped ``args = args[1:]`` to the branch
    where the image actually came from the positional arguments.
    """
    def new_network_definition(*args, **kwargs):
        # Accept the image batch either as a kwarg or as the first
        # positional argument.
        if ('image_batch_tensor' in kwargs):
            image_batch_tensor = kwargs['image_batch_tensor']
        else:
            image_batch_tensor = args[0]
            args = args[1:]
        input_image_shape = tf.shape(image_batch_tensor)
        image_height_width = input_image_shape[1:3]
        # Round H/W to the nearest multiple of the network stride.
        image_height_width_float = tf.to_float(image_height_width)
        image_height_width_multiple = (tf.round((image_height_width_float / multiple)) * multiple)
        image_height_width_multiple = tf.to_int32(image_height_width_multiple)
        resized_images_batch = tf.image.resize_images(image_batch_tensor, image_height_width_multiple)
        kwargs['image_batch_tensor'] = resized_images_batch
        all_outputs = network_definition(*args, **kwargs)
        all_outputs = list(all_outputs)
        upsampled_logits_batch = all_outputs[0]
        # Per-pixel class prediction, then back to the original resolution.
        pred = tf.argmax(upsampled_logits_batch, dimension=3)
        temp_pred = tf.expand_dims(pred, 3)
        original_size_predictions = tf.image.resize_nearest_neighbor(images=temp_pred, size=image_height_width)
        all_outputs[0] = original_size_predictions
        return all_outputs
    return new_network_definition
def batch_examples(example, batch_size, max_length, mantissa_bits, shard_multiplier=1, length_multiplier=1, constant=False, num_threads=4, drop_long_sequences=True):
    """Bucket *example* by sequence length and batch within each bucket.

    Bucket boundaries grow roughly exponentially (spacing controlled by
    *mantissa_bits*).  With constant=False the per-bucket batch size
    shrinks for longer sequences so each batch carries a comparable
    token budget; with constant=True every bucket uses the same batch
    size.  Examples longer than max_length are dropped when
    *drop_long_sequences* is set, otherwise kept.
    """
    with tf.name_scope('batch_examples'):
        max_length = (max_length or batch_size)
        min_length = 8
        mantissa_bits = mantissa_bits
        # Boundaries spaced so each bucket spans a similar relative range.
        x = min_length
        boundaries = []
        while (x < max_length):
            boundaries.append(x)
            x += (2 ** max(0, (int(math.log(x, 2)) - mantissa_bits)))
        if (not constant):
            # Token-count batching: fewer long sequences per batch.
            batch_sizes = [max(1, (batch_size // length)) for length in (boundaries + [max_length])]
            batch_sizes = [(b * shard_multiplier) for b in batch_sizes]
            bucket_capacities = [(2 * b) for b in batch_sizes]
        else:
            batch_sizes = (batch_size * shard_multiplier)
            bucket_capacities = [(2 * n) for n in (boundaries + [max_length])]
        max_length *= length_multiplier
        boundaries = [(boundary * length_multiplier) for boundary in boundaries]
        # A huge cap effectively disables dropping of long sequences.
        max_length = (max_length if drop_long_sequences else (10 ** 9))
        # The longest per-feature sequence length decides the bucket.
        max_example_length = 0
        for v in example.values():
            if (v.shape.ndims > 0):
                seq_length = tf.shape(v)[0]
                max_example_length = tf.maximum(max_example_length, seq_length)
        (_, outputs) = tf.contrib.training.bucket_by_sequence_length(max_example_length, example, batch_sizes, [(b + 1) for b in boundaries], num_threads=num_threads, capacity=2, bucket_capacities=bucket_capacities, dynamic_pad=True, keep_input=(max_example_length <= max_length))
        return outputs
def dump(args, s):
    """Unlock the target with a 7-byte hex code and dump flash pages
    0xE00-0xFFF (inclusive) to *args.output* as raw bytes.

    Logs a fatal message and returns early on a malformed code or a
    failed unlock.
    """
    s.adapter.set_tclk(0)
    s.adapter.set_sclk(127)
    try:
        # BUG FIX: str.decode('hex') is Python-2 only; bytes.fromhex is
        # the Python-3 equivalent (ValueError on bad hex, TypeError on a
        # non-str code).
        code = bytes.fromhex(args.code)
    except (TypeError, ValueError):
        logging.fatal('Code must be in hexadecimal format.')
        return
    if (len(code) != 7):
        logging.fatal('Code must be 7 bytes long.')
        return
    s.unlock(code)
    status = s.unlock_status()
    if (status != serialio.UNLOCK_SUCCESSFUL):
        logging.fatal('Target did not unlock.')
        return
    logging.info('Target unlocked.')
    start = 3584
    end = 4095
    # BUG FIX: open in binary mode — read_page returns raw bytes, and text
    # mode would corrupt them (newline translation / encoding errors).
    with open(args.output, 'wb') as f:
        logging.info('Writing pages {:x}-{:x} to {}...'.format(start, end, args.output))
        for page in range(start, (end + 1)):
            logging.debug('Dumping {:x}00-{:x}ff...'.format(page, page))
            data = s.read_page(page)
            f.write(data)
class RBF(Kernel):
    """RBF (squared-exponential) kernel with a median-heuristic bandwidth,
    returning both the kernel matrix and its gradient term as used by
    SVGD-style updates."""

    def __call__(self, X):
        # Pairwise squared Euclidean distances via the expansion
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
        XY = X.dot(X.T)
        x2 = pt.sum((X ** 2), axis=1).dimshuffle(0, 'x')
        X2e = pt.repeat(x2, X.shape[0], axis=1)
        H = ((X2e + X2e.T) - (2.0 * XY))
        # Median of the flattened distance matrix (even/odd length cases).
        V = pt.sort(H.flatten())
        length = V.shape[0]
        m = pt.switch(pt.eq((length % 2), 0), pt.mean(V[((length // 2) - 1):((length // 2) + 1)]), V[(length // 2)])
        # Median-heuristic bandwidth, scaled by log(n + 1).
        h = ((0.5 * m) / pt.log((floatX(H.shape[0]) + floatX(1))))
        Kxy = pt.exp((((- H) / h) / 2.0))
        # Analytic gradient of the kernel sum with respect to X.
        dxkxy = (- pt.dot(Kxy, X))
        sumkxy = pt.sum(Kxy, axis=(- 1), keepdims=True)
        dxkxy = (pt.add(dxkxy, pt.mul(X, sumkxy)) / h)
        return (Kxy, dxkxy)
# NOTE(review): this registers SquareShader as the renderer for
# Square/SquareMaterial; it looks like a decorator (e.g.
# '@register_wgpu_render_function(Square, SquareMaterial)') whose '@'
# prefix and attribute chain were lost in extraction — confirm upstream.
_wgpu_render_function(Square, SquareMaterial)
class SquareShader(WorldObjectShader):
    """Minimal demo shader: a hard-coded 4-vertex colored quad drawn as a
    triangle strip, using only the stdinfo uniform binding."""

    def get_bindings(self, wobject, shared):
        # Single binding: the standard-info uniform at (group 0, slot 0).
        binding = Binding('u_stdinfo', 'buffer/uniform', shared.uniform_buffer)
        self.define_binding(0, 0, binding)
        return {0: {0: binding}}

    def get_pipeline_info(self, wobject, shared):
        # Triangle strip, no culling.
        return {'primitive_topology': 'triangle-strip', 'cull_mode': 0}

    def get_render_info(self, wobject, shared):
        # 4 vertices, 1 instance; render_mask 3 = all render passes.
        return {'indices': (4, 1), 'render_mask': 3}

    def get_code(self):
        """Concatenate the common WGSL chunks with this shader's stages."""
        return (((self.code_definitions() + self.code_common()) + self.code_vertex()) + self.code_fragment())

    def code_vertex(self):
        # WGSL vertex stage: positions/colors indexed by vertex_index.
        return '\n \n fn vs_main((vertex_index) index: u32) -> Varyings {\n var positions = array<vec3<f32>, 4>(\n vec3<f32>(-1.0, -1.0, 0.5), vec3<f32>(-1.0, 1.0, 1.5), vec3<f32>(1.0, -1.0, -0.5), vec3<f32>(1.0, 1.0, 0.5)\n );\n var colors = array<vec3<f32>, 4>(\n vec3<f32>(0.0, 1.0, 0.0), vec3<f32>(0.0, 0.5, 0.5), vec3<f32>(0.0, 0.5, 0.5), vec3<f32>(0.0, 0.0, 1.0)\n );\n\n var varyings: Varyings;\n varyings.position = vec4<f32>(positions[index], 1.0);\n varyings.color = vec4<f32>(colors[index], 1.0);\n return varyings;\n }\n '

    def code_fragment(self):
        # WGSL fragment stage: pass the interpolated color through.
        return '\n \n fn fs_main(varyings: Varyings) -> FragmentOutput {\n var out: FragmentOutput;\n out.color = varyings.color;\n return out;\n }\n '
def kill(pid: int, signal: int, timeout=1000, dword1=wintypes.DWORD(1)):
    """Deliver *signal* to process group *pid* on Windows by writing one
    byte to the target's 'pgsignal_<pid>' named pipe.

    Raises OSError(EINVAL) for invalid pid/signal and OSError(ESRCH)
    when the pipe call fails or echoes something other than the signal
    byte.  NOTE(review): *dword1* is a default-evaluated ctypes DWORD
    shared across calls — presumably intentional as a constant buffer
    size; confirm.
    """
    if (pid <= 0):
        raise OSError(errno.EINVAL, 'process group not supported')
    if ((signal < 0) or (signal >= PG_SIGNAL_COUNT)):
        raise OSError(errno.EINVAL, 'unsupported signal number')
    # One-byte in/out buffers; the pipe server echoes the signal back on
    # successful delivery.
    inbuffer = pointer(wintypes.BYTE(signal))
    outbuffer = pointer(wintypes.BYTE(0))
    outbytes = pointer(wintypes.DWORD(0))
    pidpipe = (b'\\\\.\\pipe\\pgsignal_' + str(pid).encode('ascii'))
    timeout = wintypes.DWORD(timeout)
    r = CallNamedPipeA(pidpipe, inbuffer, dword1, outbuffer, dword1, outbytes, timeout)
    if r:
        if (outbuffer.contents.value == signal):
            if (outbytes.contents.value == 1):
                return
    raise OSError(errno.ESRCH, 'unexpected output from CallNamedPipeA')
class _FCNHead(nn.Module):
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d):
super(_FCNHead, self).__init__()
inter_channels = (in_channels // 4)
self.block = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), norm_layer(inter_channels), nn.ReLU(inplace=True), nn.Dropout(0.1), nn.Conv2d(inter_channels, channels, 1))
def forward(self, x):
return self.block(x) |
class Migration(migrations.Migration):
    """Add a nullable self-referential 'questionset' FK so question sets
    can be nested inside other question sets."""
    dependencies = [('questions', '0062_meta')]
    operations = [migrations.AddField(model_name='questionset', name='questionset', field=models.ForeignKey(blank=True, default=None, help_text='The question set this question set belongs to.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questionsets', to='questions.QuestionSet', verbose_name='Question set'))]
class Local():
    """Static namespace of RPaths used by the tests: read-only source
    fixtures under old_test_dir and writable targets under abs_test_dir."""

    def get_src_local_rp(extension):
        # Class-body helper: fixture path under old_test_dir.
        return rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, os.fsencode(extension)))

    def get_tgt_local_rp(extension):
        # Class-body helper: output path under abs_test_dir.
        return rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, os.fsencode(extension)))
    # Source fixture trees.
    vftrp = get_src_local_rp('various_file_types')
    emptyrp = get_src_local_rp('empty')
    inc1rp = get_src_local_rp('increment1')
    inc2rp = get_src_local_rp('increment2')
    inc3rp = get_src_local_rp('increment3')
    inc4rp = get_src_local_rp('increment4')
    backup1rp = get_src_local_rp('restoretest')
    backup2rp = get_src_local_rp('restoretest2')
    backup3rp = get_src_local_rp('restoretest3')
    backup4rp = get_src_local_rp('restoretest4')
    backup5rp = get_src_local_rp('restoretest5')
    # Backup/restore targets.
    rpout = get_tgt_local_rp('output')
    rpout_inc = get_tgt_local_rp('output_inc')
    rpout1 = get_tgt_local_rp('restoretarget1')
    rpout2 = get_tgt_local_rp('restoretarget2')
    rpout3 = get_tgt_local_rp('restoretarget3')
    rpout4 = get_tgt_local_rp('restoretarget4')
    # Individual file fixtures and their targets.
    vft_in = get_src_local_rp('increment2/various_file_types')
    vft_out = get_tgt_local_rp('vft_out')
    vft_recover = get_tgt_local_rp('vft2_out')
    timbar_in = get_src_local_rp('increment1/timbar.pyc')
    timbar_out = get_tgt_local_rp('timbar.pyc')
    wininc2 = get_tgt_local_rp('win-increment2')
    wininc3 = get_tgt_local_rp('win-increment3')
# NOTE(review): registers send_last_logs as a private admin-only
# '/get_last_logs' handler; looks like a decorator (e.g.
# '@Client.on_message(...)') whose '@' prefix was lost in extraction.
_message(((pyrogram.filters.command(commands='get_last_logs') & pyrogram.filters.private) & tools.is_admin))
def send_last_logs(bot: AutoPoster, message: Message):
    """Reply with the last N lines (default 15) of the newest log file."""
    # Newest log file = lexicographically last entry in logs_path.
    logs = sorted(list(bot.logs_path.iterdir()))[(- 1)]
    try:
        lines = int(message.command[1])
    except (ValueError, IndexError):
        lines = 15
    if logs:
        with logs.open() as f:
            last_logs = ''.join(f.readlines()[(- lines):])
        # NOTE(review): the literal prefixes below (' {} :' and ' .') look
        # like non-ASCII text stripped by a bad encoding pass — left
        # untouched here; confirm against the original source.
        last_logs = (' {} :\n\n'.format(str(lines)) + last_logs)
        for msg in split(last_logs):
            message.reply(msg, parse_mode=ParseMode.DISABLED)
    else:
        message.reply(' .')
def get_invalid_tag_usage(applier_list, include_tags, exclude_tags):
    """Report content entries whose tags match BOTH the include and the
    exclude list — a contradictory invocation.

    *include_tags* / *exclude_tags* are comma-separated strings.  Returns
    a list of dicts with the offending applier object, content name, and
    the tags matched on both sides; empty when either string is blank
    (no overlap is possible then).
    """
    if not include_tags.strip() or not exclude_tags.strip():
        return []
    include_set = {tag.strip() for tag in include_tags.split(',')}
    exclude_set = {tag.strip() for tag in exclude_tags.split(',')}
    conflicts = []
    for applier in applier_list[:]:
        for content in applier.get('content', [])[:]:
            if 'tags' not in content:
                continue
            tags = set(content['tags'])
            included = list(include_set & tags)
            excluded = list(exclude_set & tags)
            if included and excluded:
                conflicts.append({
                    'object': applier.get('object', '[unknown]'),
                    'name': content.get('name', '[unknown]'),
                    'invoked_tags': included + excluded,
                })
    return conflicts
def write_job_parameters(params: namedtuple) -> None:
    """Dump every field of the *params* namedtuple as ``name;value`` rows
    to ``<job_dir>params.csv``.

    ``params.job_dir`` must end with a path separator, since the file
    name is appended by plain string concatenation.
    """
    dict_path = (params.job_dir + 'params.csv')
    # BUG FIX: newline='' is required by the csv module; without it the
    # writer's '\r\n' terminator gets doubled on Windows (blank rows).
    with open(dict_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=';')
        # zip pairs each field name with its value directly — clearer
        # than enumerate() plus positional indexing into the namedtuple.
        for field_name, field_value in zip(params._fields, params):
            writer.writerow([field_name, field_value])
def test_env_all_operations():
    """env step: 'get' copies $ENV values into context, 'set' formats and
    exports context values, 'unset' removes env vars — all in one run."""
    os.environ['ARB_GET_ME1'] = 'arb value from $ENV ARB_GET_ME1'
    os.environ['ARB_GET_ME2'] = 'arb value from $ENV ARB_GET_ME2'
    os.environ['ARB_DELETE_ME1'] = 'arb value from $ENV ARB_DELETE_ME1'
    os.environ['ARB_DELETE_ME2'] = 'arb value from $ENV ARB_DELETE_ME2'
    context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'env': {'get': {'key2': 'ARB_GET_ME1', 'key4': 'ARB_GET_ME2'}, 'set': {'ARB_SET_ME1': 'value 4', 'ARB_SET_ME2': 'go go {key2} end end'}, 'unset': ['ARB_DELETE_ME1', 'ARB_DELETE_ME2']}})
    pypyr.steps.env.run_step(context)
    # 'get' runs before 'set', so {key2} formats to the value fetched
    # from ARB_GET_ME1 above, not the original 'value2'.
    assert (context['key1'] == 'value1')
    assert (context['key2'] == 'arb value from $ENV ARB_GET_ME1')
    assert (context['key3'] == 'value3')
    assert (context['key4'] == 'arb value from $ENV ARB_GET_ME2')
    assert (os.environ['ARB_SET_ME1'] == 'value 4')
    assert (os.environ['ARB_SET_ME2'] == 'go go arb value from $ENV ARB_GET_ME1 end end')
    assert ('ARB_DELETE_ME1' not in os.environ)
    assert ('ARB_DELETE_ME2' not in os.environ)
    # Clean up so later tests see a pristine environment.
    del os.environ['ARB_GET_ME1']
    del os.environ['ARB_GET_ME2']
    del os.environ['ARB_SET_ME1']
    del os.environ['ARB_SET_ME2']
class TestMaskedLanguageModel(unittest.TestCase):
    """Smoke tests: train a tiny legacy masked LM, and reuse such a model
    as a pretrained checkpoint for a toy translation model."""

    def test_legacy_masked_lm(self):
        # Silence training chatter; everything lives in temp dirs.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_legacy_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, 'masked_lm')

    def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
        """Pretrain an MLM, then fine-tune a tiny translation model from
        that checkpoint (optionally learned positions / encoder-only init)."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, arch='masked_lm', extra_args=(('--encoder-learned-pos',) if learned_pos_emb else ()))
                # The translation run references data_dir's checkpoint, so it
                # must stay inside the outer context.
                with tempfile.TemporaryDirectory('test_mlm_translation') as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(translation_dir, extra_flags=['--joined-dictionary'])
                    train_translation_model(translation_dir, arch='transformer_from_pretrained_xlm', extra_flags=((['--decoder-layers', '1', '--decoder-embed-dim', '32', '--decoder-attention-heads', '1', '--decoder-ffn-embed-dim', '32', '--encoder-layers', '1', '--encoder-embed-dim', '32', '--encoder-attention-heads', '1', '--encoder-ffn-embed-dim', '32', '--pretrained-xlm-checkpoint', '{}/checkpoint_last.pt'.format(data_dir), '--activation-fn', 'gelu', '--max-source-positions', '500', '--max-target-positions', '500'] + (['--encoder-learned-pos', '--decoder-learned-pos'] if learned_pos_emb else [])) + (['--init-encoder-only'] if encoder_only else [])), task='translation_from_pretrained_xlm')

    def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(True, False)

    def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(False, False)

    def test_pretrained_masked_lm_for_translation_encoder_only(self):
        self._test_pretrained_masked_lm_for_translation(True, True)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional HAVING clause starting at *start_idx*.

    Returns ``(next_idx, conditions)``.  When the current token is not
    'having' (or the index is past the end), the index is returned
    unchanged with an empty condition list.
    """
    if start_idx >= len(toks) or toks[start_idx] != 'having':
        return (start_idx, [])
    # Skip the 'having' keyword, then delegate to the condition parser.
    return parse_condition(toks, start_idx + 1, tables_with_alias, schema, default_tables)
def simulate_trotter(qubits: Sequence[cirq.Qid], hamiltonian: Hamiltonian, time: float, n_steps: int=1, order: int=0, algorithm: Optional[TrotterAlgorithm]=None, control_qubit: Optional[cirq.Qid]=None, omit_final_swaps: bool=False) -> cirq.OP_TREE:
    """Yield circuit operations simulating exp(-i*H*t) via a Trotter
    product formula of the given *order*, split into *n_steps* steps.

    With a *control_qubit*, the controlled evolution is produced.  When
    *algorithm* is None, one is chosen based on the Hamiltonian type.
    Raises ValueError for negative order and TypeError when the chosen
    algorithm does not support the Hamiltonian's type.
    """
    if (order < 0):
        raise ValueError('The order of the Trotter formula must be at least 0.')
    if (algorithm is None):
        algorithm = _select_trotter_algorithm(hamiltonian)
    if (not isinstance(hamiltonian, tuple(algorithm.supported_types))):
        raise TypeError('The input Hamiltonian was a {} but the chosen Trotter step algorithm only supports Hamiltonians of type {}'.format(type(hamiltonian).__name__, {cls.__name__ for cls in algorithm.supported_types}))
    trotter_step = _select_trotter_step(hamiltonian, order, algorithm, controlled=(control_qubit is not None))
    # Basis-change / preparation operations before the first step.
    (yield trotter_step.prepare(qubits, control_qubit))
    step_time = (time / n_steps)
    for _ in range(n_steps):
        (yield _perform_trotter_step(qubits, step_time, order, trotter_step, control_qubit))
        # Steps may permute the qubit register; keep our view in sync.
        (qubits, control_qubit) = trotter_step.step_qubit_permutation(qubits, control_qubit)
    (yield trotter_step.finish(qubits, n_steps, control_qubit, omit_final_swaps))
def PolynomialPlot():
    """ReactPy component: a plot of a polynomial with editable coefficients
    stored in component state (starting as the zero polynomial)."""
    (coefficients, set_coefficients) = reactpy.hooks.use_state([0])
    # Sample the polynomial at 50 evenly spaced points of [-1, 1].
    x = list(linspace((- 1), 1, 50))
    y = [polynomial(value, coefficients) for value in x]
    return reactpy.html.div(plot(f'{len(coefficients)} Term Polynomial', x, y), ExpandableNumberInputs(coefficients, set_coefficients))
def romfs_validation(line: QtWidgets.QLineEdit):
    """Return True when the text in *line* is NOT a valid extracted RomFS
    directory (not a directory, or any expected toc/pkg file missing)."""
    if is_directory_validator(line):
        # Not even a directory: definitely invalid.
        return True
    root = Path(line.text())
    # Files that every properly extracted RomFS must contain.
    required = [
        root.joinpath('system', 'files.toc'),
        root.joinpath('packs', 'system', 'system.pkg'),
        root.joinpath('packs', 'maps', 's010_cave', 's010_cave.pkg'),
        root.joinpath('packs', 'maps', 's020_magma', 's020_magma.pkg'),
    ]
    return not all(candidate.is_file() for candidate in required)
class SmartCopyAndPaste(object):
    """Mixin for a Qt text-edit widget giving indentation-aware
    copy/cut/paste: when a multi-line selection begins in pure leading
    whitespace, the selection (or pasted text) is adjusted so blocks
    stay aligned.

    NOTE(review): __setCursorPositionAndAnchor takes no self and
    __ensureCursorBeforeAnchor takes cls — these look like a
    @staticmethod and a @classmethod whose decorators were lost in
    extraction; confirm against the original project.
    """

    def __setCursorPositionAndAnchor(cursor, position, anchor):
        # Rebuild the selection: place the anchor first, then extend to
        # the position keeping the anchor.
        cursor.setPosition(anchor)
        cursor.setPosition(position, cursor.KeepAnchor)

    def __ensureCursorBeforeAnchor(cls, cursor):
        """Normalize *cursor*'s selection orientation; return whether the
        anchor originally preceded the position."""
        start = cursor.selectionStart()
        end = cursor.selectionEnd()
        anchorBeforeCursor = (cursor.anchor() < cursor.position())
        cls.__setCursorPositionAndAnchor(cursor, start, end)
        return anchorBeforeCursor

    def copy(self):
        """Copy, extending the selection start to the line start when only
        whitespace precedes a multi-line selection."""
        cursor = self.textCursor()
        start = cursor.selectionStart()
        end = cursor.selectionEnd()
        anchorBeforeCursor = self.__ensureCursorBeforeAnchor(cursor)
        block = cursor.block()
        if (end > (block.position() + block.length())):
            # Selection spans more than one block.
            textBeforeSelection = block.text()[:cursor.positionInBlock()]
            if (len(textBeforeSelection.strip()) == 0):
                # Only indentation precedes the selection: include it.
                start = block.position()
        # Restore the user's original cursor/anchor orientation.
        if anchorBeforeCursor:
            self.__setCursorPositionAndAnchor(cursor, end, start)
        else:
            self.__setCursorPositionAndAnchor(cursor, start, end)
        self.setTextCursor(cursor)
        super().copy()

    def cut(self):
        """Cut using the smart copy, then delete the original selection."""
        if ((self.textInteractionFlags() & QtCore.Qt.TextEditable) and self.textCursor().hasSelection()):
            cursor = self.textCursor()
            # copy() may move the visible cursor; remember and restore it
            # so removeSelectedText deletes the original selection.
            self.copy()
            self.setTextCursor(cursor)
            cursor.removeSelectedText()

    def paste(self):
        self._paste(keepSelection=False)

    def pasteAndSelect(self):
        """Paste and leave the inserted text selected."""
        self._paste(keepSelection=True)

    def _paste(self, keepSelection):
        """Paste; when the inserted text spans multiple lines and the line
        before the insertion point held only whitespace, strip that
        duplicate leading indentation."""
        cursor = self.textCursor()
        self.__ensureCursorBeforeAnchor(cursor)
        # Keep the cursor at the insertion start while pasting so the
        # inserted range is bracketed by (cursor, post-paste position).
        cursor.setKeepPositionOnInsert(True)
        super().paste()
        block = cursor.block()
        if (cursor.selectionEnd() > (block.position() + block.length())):
            if (len(block.text()[:cursor.positionInBlock()].strip()) == 0):
                # Remove the now-redundant whitespace before the insertion.
                cursor2 = QtGui.QTextCursor(cursor)
                cursor2.setPosition(cursor2.position())
                cursor2.movePosition(cursor2.StartOfBlock, cursor2.KeepAnchor)
                cursor2.removeSelectedText()
        if keepSelection:
            cursor.setKeepPositionOnInsert(False)
            self.setTextCursor(cursor)
def sa_conv_unit(x):
    """Self-attention gating unit: gates *x* element-wise by a sigmoid
    mask computed from a 1x1 conv + batch norm of itself."""
    with tf.variable_scope(None, 'sa_conv_unit'):
        shape = x.get_shape().as_list()
        # 1x1 conv keeping the channel count; no bias/activation since
        # batch norm and a sigmoid follow.
        y = slim.conv2d(x, shape[(- 1)], kernel_size=1, stride=1, biases_initializer=None, activation_fn=None)
        y = slim.batch_norm(y, activation_fn=None, fused=False)
        y = tf.nn.sigmoid(y)
        # Element-wise gating of the input by the attention mask.
        x = tf.multiply(x, y)
        return x
class TorchProfiler(HookBase):
    """Trainer hook that runs torch.profiler on selected iterations and
    saves the trace either for TensorBoard or as a chrome-trace JSON."""

    def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
        # enable_predicate(trainer) -> bool: decides per step whether to profile.
        self._enable_predicate = enable_predicate
        self._activities = activities
        self._output_dir = output_dir
        self._save_tensorboard = save_tensorboard

    def before_step(self):
        """Start a profiler for this step when the predicate says so;
        otherwise mark the hook inactive for after_step."""
        if self._enable_predicate(self.trainer):
            if self._save_tensorboard:
                on_trace_ready = torch.profiler.tensorboard_trace_handler(os.path.join(self._output_dir, 'log', 'profiler-tensorboard-iter{}'.format(self.trainer.iter)), f'worker{comm.get_rank()}')
            else:
                on_trace_ready = None
            self._profiler = torch.profiler.profile(activities=self._activities, on_trace_ready=on_trace_ready, record_shapes=True, profile_memory=True, with_stack=True, with_flops=True)
            self._profiler.__enter__()
        else:
            self._profiler = None

    def after_step(self):
        """Stop the profiler and, unless TensorBoard handled the trace,
        export a chrome trace (staging through a temp file for
        non-local '://' output paths)."""
        if (self._profiler is None):
            return
        self._profiler.__exit__(None, None, None)
        if (not self._save_tensorboard):
            PathManager.mkdirs(self._output_dir)
            out_file = os.path.join(self._output_dir, 'profiler-trace-iter{}.json'.format(self.trainer.iter))
            if ('://' not in out_file):
                self._profiler.export_chrome_trace(out_file)
            else:
                # Remote path: export locally, then copy via PathManager.
                with tempfile.TemporaryDirectory(prefix='detectron2_profiler') as d:
                    tmp_file = os.path.join(d, 'tmp.json')
                    self._profiler.export_chrome_trace(tmp_file)
                    with open(tmp_file) as f:
                        content = f.read()
                    with PathManager.open(out_file, 'w') as f:
                        f.write(content)
class AlexNet(nn.Module):
    """Compact AlexNet variant sized for 32x32 inputs (e.g. CIFAR):
    five conv blocks with three 2x2 max-pools, then a single linear
    classifier over the flattened 256-dim feature vector."""

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Linear(256, num_classes)

    def forward(self, x):
        """Extract features, flatten per sample, and classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
# NOTE(review): the leading '.parametrize' looks like a pytest decorator
# ('@pytest.mark.parametrize') whose prefix was lost in extraction —
# confirm against the original test module.
.parametrize('tp', [ClassVar, InitVar, *cond_list(HAS_TYPE_GUARD, (lambda : [typing.TypeGuard])), *cond_list(HAS_TYPED_DICT_REQUIRED, (lambda : [typing.Required, typing.NotRequired]))])
def test_var_tag(tp):
    """A bare (unsubscribed) special form must be rejected; the
    subscripted form normalizes with its argument normalized."""
    pytest.raises(NotSubscribedError, (lambda : normalize_type(tp)))
    assert_normalize(tp[int], tp, [nt_zero(int)])
# NOTE(review): '.slow' and '_figures_equal()' look like decorators
# (e.g. '@pytest.mark.slow' and '@check_figures_equal()') whose prefixes
# were lost in extraction — confirm against the original test module.
.slow
_figures_equal()
def test_DecisionMatrixPlotter_barh(decision_matrix, fig_test, fig_ref):
    """plotter.barh must render the same figure as a pandas barh of the
    matrix with criteria columns renamed to '<criterion> <symbol>'."""
    dm = decision_matrix(seed=42, min_alternatives=3, max_alternatives=3, min_criteria=3, max_criteria=3)
    plotter = plot.DecisionMatrixPlotter(dm=dm)
    test_ax = fig_test.subplots()
    plotter.barh(ax=test_ax)
    # Build the expected reference figure by hand.
    df = dm.matrix
    df.columns = [f'{c} {o.to_symbol()}' for (c, o) in zip(dm.criteria, dm.objectives)]
    df.columns.name = 'Criteria'
    exp_ax = fig_ref.subplots()
    df.plot.barh(ax=exp_ax)
# NOTE(review): registers the class below with a pipeline registry; looks
# like a decorator (e.g. '@PIPELINES.register_module()') whose '@' prefix
# and attribute chain were lost in extraction — confirm upstream.
_module()
class HandGenerateRelDepthTarget():
    """Pipeline step: convert a relative root depth annotation into a
    discretized heatmap target plus its validity weight."""

    def __init__(self):
        pass

    def __call__(self, results):
        """Add 'target' and 'target_weight' float32 arrays to *results*.

        The relative depth is mapped linearly from the configured bound
        onto [0, D] heatmap units; the weight is zeroed when the mapped
        value falls outside that range or the annotation is invalid.
        """
        cfg = results['ann_info']
        depth_resolution = cfg['heatmap_size_root']
        depth_bound = cfg['root_depth_bound']
        rel_depth = results['rel_root_depth']
        valid = results['rel_root_valid']
        mapped = (rel_depth / depth_bound + 0.5) * depth_resolution
        in_range = (mapped >= 0) * (mapped <= depth_resolution)
        weight = valid * in_range
        results['target'] = mapped * np.ones(1, dtype=np.float32)
        results['target_weight'] = weight * np.ones(1, dtype=np.float32)
        return results
def prime1_hint_text():
    """Return hint rows for Metroid Prime: (display name, pickup
    category, broad category) of a sample artifact pickup."""
    from randovania.games.prime1.generator.pickup_pool import artifacts
    database = default_database.resource_database_for(RandovaniaGame.METROID_PRIME)
    sample = artifacts.create_artifact(0, 0, database)
    return [('Artifact', sample.pickup_category, sample.broad_category)]
class RejectSponsorshipApplicationUseCase(BaseUseCaseWithNotifications):
    """Reject a sponsorship application, then notify the PSF and the sponsor."""
    # Notification objects dispatched by the base class's notify().
    notifications = [notifications.RejectedSponsorshipNotificationToPSF(), notifications.RejectedSponsorshipNotificationToSponsors()]

    def execute(self, sponsorship, request=None):
        """Mark *sponsorship* rejected, persist it, send the configured
        notifications, and return the updated object."""
        sponsorship.reject()
        sponsorship.save()
        self.notify(request=request, sponsorship=sponsorship)
        return sponsorship
class Project(gui.Container):
    """The editor's open project: the root container of the widget tree
    being edited, plus the logic to load it from / save it back to a
    python source file.

    ``repr_widget_for_editor`` and ``export_widget_for_app_template`` walk
    the widget tree and generate python source through the ``prototypes``
    string templates; listener registrations are gathered first and emitted
    by ``check_pending_listeners``.
    """
    # st_mtime of the project file at the last load()/save(); compared in
    # shouldUpdate() to detect edits made outside the editor.
    lastUpdateTime = 0

    def __init__(self, **kwargs):
        super(Project, self).__init__(**kwargs)
        self.variable_name = 'App'
        self.style.update({'position': 'relative', 'overflow': 'auto', 'background-color': 'rgb(250,248,240)', 'background-image': "url('/editor_resources:background.png')"})
        # the Project itself is emitted as a newly declared class in generated code
        self.attr_editor_newclass = True

    def shouldUpdate(self, filePathName):
        """Return True if *filePathName* changed on disk since the last load/save."""
        if (os.stat(filePathName).st_mtime > self.lastUpdateTime):
            return True
        return False

    def load(self, ifile, configuration):
        """Import the project file *ifile*, bind the App subclass's custom
        methods onto this Project instance and return the widget tree built
        by its ``construct_ui``; returns None if no App subclass is found.
        """
        self.lastUpdateTime = os.stat(ifile).st_mtime
        self.ifile = ifile
        # load_source presumably imports the file as a module -- TODO confirm helper semantics
        _module = load_source(self.ifile)
        configuration.configDict = _module.configuration
        clsmembers = inspect.getmembers(_module, inspect.isclass)
        app_init_fnc = None
        for (name, value) in clsmembers:
            if (issubclass(value, App) and (name != 'App')):
                app_init_fnc = value
        if (app_init_fnc == None):
            return None
        # re-bind the user's own methods (listeners etc.) onto this Project,
        # skipping the framework entry points
        members_list = app_init_fnc.__dict__.values()
        for m in members_list:
            if (inspect.isfunction(m) and (m.__name__ not in ['__init__', 'main', 'idle', 'construct_ui'])):
                import types
                setattr(self, m.__name__, types.MethodType(m, self))
                print(m.__name__)
        root_widget = app_init_fnc.construct_ui(self)
        return root_widget

    def check_pending_listeners(self, widget, widgetVarName, force=False):
        """Emit listener-registration source for all pending events that
        involve *widget* (or all remaining events when *force* is True),
        marking each as done.  Returns the generated code fragment.
        """
        code_nested_listener = ''
        for event in self.pending_listener_registration:
            if (force or (hasattr(event['eventsource'], 'path_to_this_widget') and hasattr(event['eventlistener'], 'path_to_this_widget'))):
                if ((force or ((widget.variable_name in event['eventsource'].path_to_this_widget) and (widget.variable_name in event['eventlistener'].path_to_this_widget))) and (event['done'] == False)):
                    event['done'] = True
                    sourcename = 'self'
                    # make both paths relative to *widget* by stripping its prefix
                    source_filtered_path = event['eventsource'].path_to_this_widget[:]
                    listener_filtered_path = event['eventlistener'].path_to_this_widget[:]
                    for v in widget.path_to_this_widget:
                        source_filtered_path.remove(v)
                        listener_filtered_path.remove(v)
                    if ((len(source_filtered_path) == 0) and (event['eventsource'].attr_editor_newclass == False)):
                        sourcename = event['eventsource'].variable_name
                    if (force or ((self.children['root'] == widget) and (not (widget.attr_editor_newclass == True)))):
                        sourcename = self.children['root'].variable_name
                        if (self.children['root'].variable_name in source_filtered_path):
                            source_filtered_path.remove(self.children['root'].variable_name)
                    if (len(source_filtered_path) > 0):
                        # build a children['a'].children['b'] access chain
                        sourcename = ((("%s.children['" + "'].children['".join(source_filtered_path)) + "']") % sourcename)
                    listenername = 'self'
                    if (force or ((self.children['root'] == widget) and (not (widget.attr_editor_newclass == True)))):
                        if (event['eventlistener'] != self):
                            listenername = self.children['root'].variable_name
                    if ((len(listener_filtered_path) == 0) and (event['eventlistener'].attr_editor_newclass == False)):
                        listenername = event['eventlistener'].variable_name
                    if (len(listener_filtered_path) > 0):
                        listenername = ((("%s.children['" + "'].children['".join(listener_filtered_path)) + "']") % listenername)
                    code_nested_listener += (prototypes.proto_set_listener % {'sourcename': sourcename, 'register_function': event['setoneventfuncname'], 'listenername': listenername, 'listener_function': event['listenerfuncname']})
                    if (not (event['eventlistener'].identifier in self.code_declared_classes)):
                        self.code_declared_classes[event['eventlistener'].identifier] = ''
                    if (event['eventlistener'].attr_editor_newclass == True):
                        if (not event['skip_function_definition']):
                            self.code_declared_classes[event['eventlistener'].identifier] += event['listenerClassFunction']
        return code_nested_listener

    def repr_widget_for_editor(self, widget, first_node=False):
        """Recursively generate editor-loadable source for *widget* and its
        children.  When *first_node* is True the per-save bookkeeping
        structures are (re)initialised first.
        """
        if first_node:
            self.code_declared_classes = {}
            self.pending_listener_registration = list()
            self.known_project_children = [self]
            self.pending_signals_to_connect = list()
            self.path_to_this_widget = []
            self.prepare_path_to_this_widget(self.children['root'])
        self.known_project_children.append(widget)
        widget.path_to_this_widget.append(widget.variable_name)
        print(widget.variable_name)
        code_nested = ''
        if (not hasattr(widget, 'attributes')):
            return ''
        widgetVarName = widget.variable_name
        # widgets flagged attr_editor_newclass become their own CLASS<name> subclass
        classname = (('CLASS' + widgetVarName) if widget.attr_editor_newclass else widget.attr_class)
        code_nested = (prototypes.proto_widget_allocation % {'varname': widgetVarName, 'classname': classname})
        # emit property assignments for all editor-exposed properties
        for (x, y) in inspect.getmembers(widget.__class__):
            if ((type(y) == property) and (not (getattr(widget, x) is None))):
                if hasattr(y.fget, 'editor_attributes'):
                    _value = getattr(widget, x)
                    if ((type(_value) == type('')) or (type(_value) == type(u''))):
                        _value = ('"%s"' % _value)
                    code_nested += (prototypes.proto_property_setup % {'varname': widgetVarName, 'property': x, 'value': _value})
        # collect event listener registrations; actual code is emitted later
        # by check_pending_listeners once both endpoints have been visited
        for (setOnEventListenerFuncname, setOnEventListenerFunc) in inspect.getmembers(widget):
            if issubclass(type(setOnEventListenerFunc), gui.ClassEventConnector):
                if ((not (setOnEventListenerFunc.callback is None)) and hasattr(setOnEventListenerFunc.event_method_bound, '_event_info')):
                    listenerFunction = setOnEventListenerFunc.callback
                    if issubclass(type(listenerFunction), gui.ClassEventConnector):
                        listenerFunction = listenerFunction.event_method_bound
                    listenerPrototype = setOnEventListenerFunc.event_method_bound._event_info['prototype']
                    listener = listenerFunction.__self__
                    listenerFunctionName = listenerFunction.__name__
                    listenerClassFunction = (prototypes.proto_code_function % {'funcname': listenerFunctionName, 'parameters': listenerPrototype})
                    if (hasattr(listener, listenerFunctionName) and (listenerFunction.__code__ != editor_widgets.fakeListenerFunc.__code__)):
                        listenerClassFunction = inspect.getsource(listenerFunction)
                    # avoid emitting the same listener function body twice
                    skip = False
                    for pending in self.pending_listener_registration:
                        if (pending['eventlistener'] == listener):
                            if (pending['listenerfuncname'] == listenerFunctionName):
                                skip = True
                                break
                    self.pending_listener_registration.append({'done': False, 'eventsource': widget, 'eventlistener': listener, 'setoneventfuncname': setOnEventListenerFuncname, 'listenerfuncname': listenerFunctionName, 'listenerClassFunction': listenerClassFunction, 'skip_function_definition': skip})
        if widget.attr_editor_newclass:
            # inside the generated class, children attach to 'self'
            widgetVarName = 'self'
        children_code_nested = ''
        for child_key in widget.children.keys():
            child = widget.children[child_key]
            if (type(child) == str):
                continue
            if (not issubclass(child.__class__, gui.Widget)):
                continue
            if (child.variable_name is None):
                continue
            child.path_to_this_widget = widget.path_to_this_widget[:]
            children_code_nested += self.repr_widget_for_editor(child)
            children_code_nested += (prototypes.proto_layout_append % {'parentname': widgetVarName, 'varname': ("%s,'%s'" % (child.variable_name, child.variable_name))})
        children_code_nested += self.check_pending_listeners(widget, widgetVarName)
        if widget.attr_editor_newclass:
            if (not (widget.identifier in self.code_declared_classes)):
                self.code_declared_classes[widget.identifier] = ''
            self.code_declared_classes[widget.identifier] = ((prototypes.proto_code_class % {'classname': classname, 'superclassname': widget.attr_class, 'nested_code': children_code_nested}) + self.code_declared_classes[widget.identifier])
        else:
            code_nested = (code_nested + children_code_nested)
        return code_nested

    def export_widget_for_app_template(self, widget, first_node=False):
        """Like repr_widget_for_editor, but emits the stand-alone app
        template form for the top-level node (proto_export_app_template).
        """
        if first_node:
            self.code_declared_classes = {}
            self.pending_listener_registration = list()
            self.known_project_children = [self]
            self.pending_signals_to_connect = list()
            self.path_to_this_widget = []
            self.prepare_path_to_this_widget(self.children['root'])
        self.known_project_children.append(widget)
        widget.path_to_this_widget.append(widget.variable_name)
        code_nested = ''
        if (not hasattr(widget, 'attributes')):
            return ''
        widgetVarName = widget.variable_name
        classname = (('CLASS' + widgetVarName) if widget.attr_editor_newclass else widget.__class__.__name__)
        if (not first_node):
            code_nested = (prototypes.proto_widget_allocation % {'varname': widgetVarName, 'classname': classname})
        for (x, y) in inspect.getmembers(widget.__class__):
            if ((type(y) == property) and (not (getattr(widget, x) is None))):
                if hasattr(y.fget, 'editor_attributes'):
                    _value = getattr(widget, x)
                    if (type(_value) == str):
                        _value = ('"%s"' % _value)
                    code_nested += (prototypes.proto_property_setup % {'varname': ('self' if first_node else widgetVarName), 'property': x, 'value': _value})
                else:
                    pass
        # collect event listener registrations, same scheme as repr_widget_for_editor
        for (setOnEventListenerFuncname, setOnEventListenerFunc) in inspect.getmembers(widget):
            if issubclass(type(setOnEventListenerFunc), gui.ClassEventConnector):
                if ((not (setOnEventListenerFunc.callback is None)) and hasattr(setOnEventListenerFunc.event_method_bound, '_event_info')):
                    listenerFunction = setOnEventListenerFunc.callback
                    if issubclass(type(listenerFunction), gui.ClassEventConnector):
                        listenerFunction = listenerFunction.event_method_bound
                    listenerPrototype = setOnEventListenerFunc.event_method_bound._event_info['prototype']
                    listener = listenerFunction.__self__
                    listenerFunctionName = listenerFunction.__name__
                    listenerClassFunction = (prototypes.proto_code_function % {'funcname': listenerFunctionName, 'parameters': listenerPrototype})
                    if (hasattr(listener, listenerFunctionName) and (listenerFunction.__code__ != editor_widgets.fakeListenerFunc.__code__)):
                        listenerClassFunction = inspect.getsource(listenerFunction)
                    skip = False
                    for pending in self.pending_listener_registration:
                        if (pending['eventlistener'] == listener):
                            if (pending['listenerfuncname'] == listenerFunctionName):
                                skip = True
                                break
                    self.pending_listener_registration.append({'done': False, 'eventsource': widget, 'eventlistener': listener, 'setoneventfuncname': setOnEventListenerFuncname, 'listenerfuncname': listenerFunctionName, 'listenerClassFunction': listenerClassFunction, 'skip_function_definition': skip})
        if (widget.attr_editor_newclass or first_node):
            widgetVarName = 'self'
        children_code_nested = ''
        for child_key in widget.children.keys():
            child = widget.children[child_key]
            if (type(child) == str):
                continue
            if (not issubclass(child.__class__, gui.Widget)):
                continue
            if (child.variable_name is None):
                continue
            child.path_to_this_widget = widget.path_to_this_widget[:]
            children_code_nested += self.repr_widget_for_editor(child)
            children_code_nested += (prototypes.proto_layout_append % {'parentname': widgetVarName, 'varname': ("%s,'%s'" % (child.variable_name, child.variable_name))})
        events_registration = self.check_pending_listeners(widget, widgetVarName)
        if (widget.attr_editor_newclass or first_node):
            if (not (widget.identifier in self.code_declared_classes)):
                self.code_declared_classes[widget.identifier] = ''
            if first_node:
                if (len(events_registration) < 1):
                    events_registration = 'pass'
                self.code_declared_classes[widget.identifier] = ((prototypes.proto_export_app_template % {'classname': classname, 'superclassname': widget.attr_class, 'nested_code': (code_nested + children_code_nested), 'events_registration': events_registration}) + self.code_declared_classes[widget.identifier])
                code_nested = ''
            else:
                children_code_nested += events_registration
                self.code_declared_classes[widget.identifier] = ((prototypes.proto_code_class % {'classname': classname, 'superclassname': widget.attr_class, 'nested_code': children_code_nested}) + self.code_declared_classes[widget.identifier])
        else:
            code_nested = (code_nested + children_code_nested)
        return code_nested

    def prepare_path_to_this_widget(self, node):
        """Reset ``path_to_this_widget`` on *node* and every widget below it."""
        node.path_to_this_widget = []
        for child in node.children.values():
            if (type(child) == str):
                continue
            if (not issubclass(child.__class__, gui.Widget)):
                continue
            if (child.variable_name is None):
                continue
            self.prepare_path_to_this_widget(child)

    def save(self, save_path_filename, configuration):
        """Generate the complete program source for the current widget tree
        and, if *save_path_filename* is given, write it out and refresh
        lastUpdateTime.
        """
        compiled_code = ''
        code_classes = ''
        ret = self.repr_widget_for_editor(self.children['root'], True)
        # force-flush any listener registrations not yet emitted
        code_nested = (ret + self.check_pending_listeners(self, 'self', True))
        main_code_class = (prototypes.proto_code_main_class % {'classname': configuration.configDict[configuration.KEY_PRJ_NAME], 'config_resourcepath': configuration.configDict[configuration.KEY_RESOURCEPATH], 'code_nested': code_nested, 'mainwidgetname': self.children['root'].variable_name})
        if (self.identifier in self.code_declared_classes.keys()):
            main_code_class += self.code_declared_classes[self.identifier]
            del self.code_declared_classes[self.identifier]
        for key in self.code_declared_classes.keys():
            code_class = self.code_declared_classes[key]
            code_listener_setting = ''
            code_classes += code_class
        # import the modules of every widget class used by the project
        modules_to_import = []
        for w in self.known_project_children:
            if ((not (w.__module__ in modules_to_import)) and (w.__module__ != '__main__') and (w.__module__ != 'project')):
                modules_to_import.append(w.__module__)
        code_classes += main_code_class
        compiled_code = (prototypes.proto_code_program % {'imports': '\n'.join([(('from ' + modulename) + ' import *') for modulename in modules_to_import]), 'code_classes': code_classes, 'classname': configuration.configDict[configuration.KEY_PRJ_NAME], 'configuration': configuration.configDict})
        print(compiled_code)
        if (save_path_filename != None):
            f = open(save_path_filename, 'w')
            f.write(compiled_code)
            f.close()
            self.lastUpdateTime = os.stat(save_path_filename).st_mtime
def test_vsite_reg(methanol, vs, tmpdir):
    """Fitting methanol virtual sites with regularisation and frozen site
    angles should yield two sites, each ~0.29 from the oxygen atom."""
    with tmpdir.as_cwd():
        vs.freeze_site_angles = True
        vs.regularisation_epsilon = 0.1
        vs.run(molecule=methanol)
        assert methanol.extra_sites.n_sites == 2
        site_positions = []
        oxygen_position = None
        xyz_path = os.path.join(methanol.name, 'xyz_with_extra_point_charges.xyz')
        with open(xyz_path) as xyz_file:
            for record in xyz_file:
                coords = record.split()[1:4]
                if record.startswith('X'):
                    site_positions.append(np.array([float(value) for value in coords]))
                elif record.startswith('O'):
                    oxygen_position = np.array([float(value) for value in coords])
        for position in site_positions:
            assert np.linalg.norm(oxygen_position - position) == pytest.approx(0.29, abs=0.01)
def test_proj_debug_logging(capsys):
    """PROJ diagnostics should appear on stderr at the verbosity selected
    by the PROJ_DEBUG environment variable."""
    with proj_logging_env():
        with pytest.warns(FutureWarning):
            transformer = Transformer.from_proj('+init=epsg:4326', '+init=epsg:27700')
        transformer.transform(100000, 100000)
        captured = capsys.readouterr()
        debug_level = os.environ.get('PROJ_DEBUG')
        if debug_level == '3':
            # most verbose level: both trace and debug lines expected
            assert 'PROJ_TRACE' in captured.err
            assert 'PROJ_DEBUG' in captured.err
        elif debug_level == '2':
            assert 'PROJ_TRACE' not in captured.err
            assert 'PROJ_DEBUG' in captured.err
        else:
            assert 'PROJ_ERROR' in captured.err
def test_load_hotp_vectors():
    """load_nist_vectors must parse RFC 4226 HOTP vectors, lower-casing the
    keys and keeping values as bytes (empty SECRET fields included)."""
    # Inline copy of the RFC 4226 Appendix D test-vector file format.
    vector_data = textwrap.dedent('\n # HOTP Test Vectors\n # RFC 4226 Appendix D\n\n COUNT = 0\n COUNTER = 0\n INTERMEDIATE = cc93cf18508d94934c64b65d8ba7667fb7cde4b0\n TRUNCATED = 4c93cf18\n HOTP = 755224\n SECRET = \n\n COUNT = 1\n COUNTER = 1\n INTERMEDIATE = 75a48a19d4cbe100644e8ac1397eea747a2d33ab\n TRUNCATED = 41397eea\n HOTP = 287082\n SECRET = \n\n\n COUNT = 2\n COUNTER = 2\n INTERMEDIATE = 0bacb7fa082fefbc1c5e70416ff44\n TRUNCATED = 82fef30\n HOTP = 359152\n SECRET = \n\n\n COUNT = 3\n COUNTER = 3\n INTERMEDIATE = 66c28227d03a2d5529262ff016a1e6ef76557ece\n TRUNCATED = 66ef7655\n HOTP = 969429\n SECRET = \n ').splitlines()
    # COUNT acts as the record separator and is dropped from the output dicts.
    assert (load_nist_vectors(vector_data) == [{'counter': b'0', 'intermediate': b'cc93cf18508d94934c64b65d8ba7667fb7cde4b0', 'truncated': b'4c93cf18', 'hotp': b'755224', 'secret': b''}, {'counter': b'1', 'intermediate': b'75a48a19d4cbe100644e8ac1397eea747a2d33ab', 'truncated': b'41397eea', 'hotp': b'287082', 'secret': b''}, {'counter': b'2', 'intermediate': b'0bacb7fa082fefbc1c5e70416ff44', 'truncated': b'82fef30', 'hotp': b'359152', 'secret': b''}, {'counter': b'3', 'intermediate': b'66c28227d03a2d5529262ff016a1e6ef76557ece', 'truncated': b'66ef7655', 'hotp': b'969429', 'secret': b''}])
def test_fails_rst_no_content(tmp_path, capsys, caplog):
    """An RST long_description that renders to no content must fail the check
    with the 'No content rendered' error logged."""
    # README.rst contains only a title, so rendering produces no body content.
    sdist = build_sdist(tmp_path, {'setup.cfg': '\n [metadata]\n name = test-package\n version = 0.0.1\n long_description = file:README.rst\n long_description_content_type = text/x-rst\n ', 'README.rst': '\n test-package\n \n '})
    assert check.check([sdist])
    assert (capsys.readouterr().out == f'''Checking {sdist}: FAILED
''')
    assert (caplog.record_tuples == [('twine.commands.check', logging.ERROR, '`long_description` has syntax errors in markup and would not be rendered on PyPI.\nNo content rendered from RST source.')])
class RubyRoleTest(ProvyTestCase):
    """Unit tests for RubyRole provisioning."""

    def setUp(self):
        super(RubyRoleTest, self).setUp()
        self.role = RubyRole(prov=None, context={})

    # NOTE(review): no ``test_`` prefix -- presumably picked up by the
    # project's nose-style runner; confirm this test is actually executed.
    def installs_necessary_packages_to_provision(self):
        """provision() must update aptitude, install ruby<version>-full and
        register the interpreter via update-alternatives (with sudo)."""
        with self.using_stub(AptitudeRole) as aptitude, self.execute_mock() as execute:
            self.role.provision()
            update_alternatives_command = UPDATE_ALTERNATIVES_COMMAND.format(version=self.role.version, priority=self.role.priority)
            aptitude.ensure_up_to_date.assert_called_once_with()
            aptitude.ensure_package_installed.assert_called_once_with('ruby{version}-full'.format(version=self.role.version))
            execute.assert_called_once_with(update_alternatives_command, sudo=True)
def transforms_imagenet_train(img_size=224, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, interpolation='random', use_prefetcher=False, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, re_prob=0.0, re_mode='const', re_count=1, re_num_splits=0, separate=False, force_color_jitter=False):
    """Build the ImageNet training augmentation pipeline.

    Three stages: primary (random resized crop + optional flips), secondary
    (an auto-augment policy or colour jitter) and final (tensor conversion +
    normalisation, or ToNumpy when a GPU prefetcher handles that, plus
    optional random erasing).

    ``separate=True`` returns the three stages as individual
    ``transforms.Compose`` objects instead of a single composed pipeline.
    """
    # RandomResizedCrop defaults
    scale = tuple((scale or (0.08, 1.0)))
    ratio = tuple((ratio or ((3.0 / 4.0), (4.0 / 3.0))))
    primary_tfl = [RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)]
    if (hflip > 0.0):
        primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
    if (vflip > 0.0):
        primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
    secondary_tfl = []
    disable_color_jitter = False
    if auto_augment:
        assert isinstance(auto_augment, str)
        # colour jitter is normally disabled when an auto-augment policy is
        # active, unless forced on or the policy name contains '3a'
        disable_color_jitter = (not (force_color_jitter or ('3a' in auto_augment)))
        if isinstance(img_size, (tuple, list)):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        aa_params = dict(translate_const=int((img_size_min * 0.45)), img_mean=tuple([min(255, round((255 * x))) for x in mean]))
        if (interpolation and (interpolation != 'random')):
            aa_params['interpolation'] = str_to_pil_interp(interpolation)
        # dispatch on the policy-string prefix: rand-* / augmix-* / original
        if auto_augment.startswith('rand'):
            secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
        elif auto_augment.startswith('augmix'):
            aa_params['translate_pct'] = 0.3
            secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]
        else:
            secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]
    if ((color_jitter is not None) and (not disable_color_jitter)):
        if isinstance(color_jitter, (list, tuple)):
            # brightness/contrast/saturation (+ optional hue)
            assert (len(color_jitter) in (3, 4))
        else:
            # a single float applies to brightness, contrast and saturation
            color_jitter = ((float(color_jitter),) * 3)
        secondary_tfl += [transforms.ColorJitter(*color_jitter)]
    final_tfl = []
    if use_prefetcher:
        # the prefetcher normalises on GPU; hand over raw numpy data here
        final_tfl += [ToNumpy()]
    else:
        final_tfl += [transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))]
    if (re_prob > 0.0):
        final_tfl.append(RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu'))
    if separate:
        return (transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl))
    else:
        return transforms.Compose(((primary_tfl + secondary_tfl) + final_tfl))
def format_for_slack(total_results, results, scheduled: bool, title: str):
    """Build a Slack Block Kit payload summarising test results.

    When there are failures, a per-suite breakdown is appended; when all
    tests pass on a non-scheduled run, only per-suite timings are listed.
    Returns the ``{'blocks': [...]}`` dict expected by the Slack API.
    """
    print(total_results, results)  # debug trace left in for CI logs
    header = {'type': 'header', 'text': {'type': 'plain_text', 'text': title, 'emoji': True}}
    if (total_results['failed'] > 0):
        total = {'type': 'section', 'fields': [{'type': 'mrkdwn', 'text': f'''*Failures:*
{total_results['failed']} failures.'''}, {'type': 'mrkdwn', 'text': f'''*Passed:*
{total_results['success']} tests passed.'''}]}
    else:
        total = {'type': 'section', 'fields': [{'type': 'mrkdwn', 'text': '\n All tests passed.'}]}
    blocks = [header, total]
    if (total_results['failed'] > 0):
        # one header + summary section per suite when anything failed
        for (key, result) in results.items():
            print(key, result)
            blocks.append({'type': 'header', 'text': {'type': 'plain_text', 'text': key, 'emoji': True}})
            blocks.append({'type': 'section', 'fields': [{'type': 'mrkdwn', 'text': f'''*Results:*
{result['failed']} failed, {result['success']} passed.'''}, {'type': 'mrkdwn', 'text': f'''*Time spent:*
{result['time_spent']}'''}]})
    elif (not scheduled):
        # manual all-green run: list suite timings only
        for (key, result) in results.items():
            blocks.append({'type': 'section', 'fields': [{'type': 'mrkdwn', 'text': f'''*{key}*
{result['time_spent']}.'''}]})
    # NOTE(review): footer text has no URL placeholder -- looks truncated; confirm
    footer = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': f"< on GitHub>"}}
    blocks.append(footer)
    blocks = {'blocks': blocks}
    return blocks
class Importable(t.Trafaret):
    """Trafaret resolving a ``package.module:target`` string to the object
    it names; every failure mode is reported through ``self._failure``."""

    def check_and_return(self, value):
        # Guard clauses: must be a string in `module:attribute` notation.
        if not isinstance(value, str):
            self._failure('value should be a string', value=value)
        if ':' not in value:
            self._failure('import notation must be in format: `package.module:target`', value=value)
        module_path, attribute_name = value.split(':', 1)
        try:
            module = importlib.import_module(module_path)
        except ImportError as exc:
            self._failure(str(exc), value=value)
        else:
            try:
                return getattr(module, attribute_name)
            except AttributeError as exc:
                self._failure(str(exc), value=value)

    def __repr__(self):
        return '<Importable>'
class DatatableDataFactory(factory.Factory):
    """Factory producing the raw dict payload of a datatable response."""

    class Meta():
        model = dict

    # Column metadata: name + type as reported by the API.
    columns = [{six.u('name'): six.u('per_end_date'), six.u('type'): six.u('Date')}, {six.u('name'): six.u('ticker'), six.u('type'): six.u('String')}, {six.u('name'): six.u('tot_oper_exp'), six.u('type'): six.u('BigDecimal(11,4)')}]
    # NOTE(review): rows 2 and 4 are missing the ticker column --
    # presumably deliberate malformed fixtures for error handling; confirm.
    data = [['2015-07-11', 'AAPL', 456.9], ['2015-07-13', 433.3], ['2015-07-14', 'AAPL', 419.1], ['2015-07-15', 476.5]]
def versions_to_display_for_releases(current_version: StrictVersion, last_changelog_version: StrictVersion, releases: list[dict]) -> tuple[(dict[(str, str)], list[str], (VersionDescription | None))]:
    """Split GitHub *releases* into changelog material and an update candidate.

    Returns a tuple of (all change-log texts keyed by tag name, change logs
    newer than the last one shown to the user, and the newest version above
    *current_version* -- or None when already up to date).
    """
    all_change_logs = {}
    new_change_logs = []
    displayed_new_version = False
    version_to_display = None
    for release in releases:
        version = get_version_for_release(release)
        strict_version = version.as_strict_version
        if (strict_version > current_version):
            # first release above the installed version is offered as the update
            if (not displayed_new_version):
                version_to_display = version
                displayed_new_version = True
        else:
            log = f'''{version.change_log.formatted_date}
## {version.tag_name}
{version.change_log.patch_notes}'''
            all_change_logs[version.tag_name] = log
            # anything the user has not seen yet goes into the "new" list
            if (strict_version > last_changelog_version):
                if (MAJOR_ENTRY in log):
                    # condense to the major entries only
                    log = '## {} - Major Changes\n---\n\n{}\n\n---\nFor more details, check the Change Log tab.'.format(version.tag_name, _get_major_entries(log))
                new_change_logs.append(log)
    return (all_change_logs, new_change_logs, version_to_display)
def t2star_circuit_execution() -> Tuple[(qiskit.result.Result, np.array, List[int], float, float)]:
    """Run T2* characterisation circuits on a noisy qasm simulator.

    Builds t2star circuits over increasing identity-gate counts, attaches a
    thermal-relaxation noise model (T2 = 10, applied to 'id' gates) and
    returns ``(backend_result, xdata, qubits, t2_value, omega)``.
    """
    # denser sampling for short delays, sparser for long ones
    num_of_gates = np.append(np.linspace(10, 150, 10).astype(int), np.linspace(160, 450, 5).astype(int))
    gate_time = 0.1
    qubits = [0]
    t2_value = 10
    # T1 = inf so only pure dephasing contributes
    error = thermal_relaxation_error(np.inf, t2_value, gate_time, 0.5)
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(error, 'id')
    backend = qiskit.Aer.get_backend('qasm_simulator')
    shots = 200
    (circs, xdata, omega) = t2star_circuits(num_of_gates, gate_time, qubits, 5)
    # optimization_level=0 keeps the identity gates from being optimised away
    qobj = qiskit.assemble(qiskit.transpile(circs, backend=backend, optimization_level=0), backend=backend, shots=shots, seed_simulator=SEED, noise_model=noise_model, max_parallel_experiments=0)
    backend_result = backend.run(qobj).result()
    return (backend_result, xdata, qubits, t2_value, omega)
# NOTE(review): the line below looks like a decompiler-mangled decorator,
# e.g. ``@persist(persist=eval(os.getenv('PERSISTENT')))`` -- as written it
# is not valid python; confirm against the original source.
(persist=eval(os.getenv('PERSISTENT')))
def compute_similarity_matrix_keywords(model_path, keywords=[], all_model_vectors=False, return_unk_sim=False):
    """Compute the pairwise cosine-similarity matrix of keyword embeddings.

    Returns ``(keywords, word_embs, sim_matrix)``; with *return_unk_sim*
    also the similarity of each word to a mean "unknown" embedding and that
    embedding itself.  (The ``keywords=[]`` mutable default is rebound
    immediately and never mutated, so it is harmless here.)
    """
    (keywords, word_embs) = get_word_embeddings(model_path, keywords, all_model_vectors=all_model_vectors, return_words=True)
    word_embs = np.array(word_embs)
    sim_matrix = cosine_similarity(word_embs, word_embs)
    if return_unk_sim:
        # "unknown" embedding = mean of all word vectors
        unk_emb = np.mean([word_embs[i] for i in range(word_embs.shape[0])], axis=0).reshape(1, (- 1))
        sim_with_unk = cosine_similarity(unk_emb, word_embs)
        return (keywords, word_embs, sim_matrix, sim_with_unk, unk_emb.reshape((- 1)))
    else:
        return (keywords, word_embs, sim_matrix)
class Migration(migrations.Migration):
    """Redefine the choice set of ``Value.value_type`` in the projects app."""
    dependencies = [('projects', '0050_value_set_prefix')]
    operations = [migrations.AlterField(model_name='value', name='value_type', field=models.CharField(choices=[('text', 'Text'), ('url', 'URL'), ('integer', 'Integer'), ('float', 'Float'), ('boolean', 'Boolean'), ('datetime', 'Datetime'), ('email', 'Email'), ('phone', 'Phone'), ('option', 'Option'), ('file', 'File')], default='text', help_text='Type of this value.', max_length=8, verbose_name='Value type'))]
def project_by_tangent_iteration(space: 'RiemannianSpace', pt_a: 'Point', pt_b: 'Point', pt_c: 'Point', *, tol=1e-06, max_iterations=100) -> Tuple[('Point', OptimResult)]:
    """Project *pt_c* onto the geodesic through *pt_a* and *pt_b*.

    Iteratively slides a point along the a->b geodesic until the log-map of
    pt_c at the current point has (numerically) no component along the a->b
    direction.  Returns ``(point, OptimResult)``.
    """
    dist_ab = space.length(pt_a, pt_b)
    if (dist_ab < tol):
        # a and b (nearly) coincide: the geodesic is degenerate
        return (midpoint(space, pt_a, pt_b), OptimResult.ILL_POSED)
    projected_lengths = []
    # proj starts at a; tangent_ab_norm is the unit tangent of a->b; t is arc length
    (proj, tangent_ab_norm, t) = (pt_a.clone(), (space.log_map(pt_a, pt_b) / dist_ab), 0.0)
    for itr in range(max_iterations):
        tangent_pc = space.log_map(proj, pt_c)
        # parallel-transport back to a so it can be compared with the a->b direction
        tangent_pc_at_a = space.levi_civita(proj, pt_a, tangent_pc)
        length_pc_along_ab = space.inner_product(pt_a, tangent_pc_at_a, tangent_ab_norm)
        if torch.isnan(length_pc_along_ab):
            print('WTF: NaN')  # debug trace left by the author
        if ((len(projected_lengths) >= 2) and ((np.abs(projected_lengths[(- 1)]) - np.abs(projected_lengths[(- 2)])) > 0)):
            print('WTF: growing')  # residual is expected to shrink each step
        projected_lengths.append(length_pc_along_ab.item())
        if (torch.abs(length_pc_along_ab) < tol):
            return (proj, OptimResult.CONVERGED)
        t = (t + length_pc_along_ab)
        proj = space.exp_map(pt_a, (t * tangent_ab_norm))
    # NOTE(review): returns pt_a rather than the last proj on non-convergence
    # -- confirm this is intended.
    return (pt_a, OptimResult.MAX_STEPS_REACHED)
def read_sac_zpk(filename=None, file=None, string=None, get_comments=False):
    """Read a SAC pole-zero file and return ``(zeros, poles, constant)``.

    Exactly one of *filename* (path), *file* (binary file object) or
    *string* (raw bytes) must be given.  The file contains ``ZEROS n``,
    ``POLES n`` and ``CONSTANT c`` section headers followed by
    ``real imag`` pairs; sections declaring more entries than listed are
    padded with ``0+0j``.  Lines starting with ``*`` or ``#`` are comments
    and are also returned when *get_comments* is true.

    :raises SacPoleZeroError: on malformed lines, inconsistent counts,
        missing data or non-finite values.
    """
    if (filename is not None):
        f = open(filename, 'rb')
    elif (file is not None):
        f = file
    elif (string is not None):
        f = BytesIO(string)
    sects = ('ZEROS', 'POLES', 'CONSTANT')
    sectdata = {'ZEROS': [], 'POLES': []}
    npoles = 0
    nzeros = 0
    constant = 1.0
    atsect = None
    comments = []
    for (iline, line) in enumerate(f):
        line = str(line.decode('ascii'))
        toks = line.split()
        if (len(toks) == 0):
            continue
        if (toks[0][0] in '*#'):
            comments.append(line)
            continue
        if (len(toks) != 2):
            f.close()
            raise SacPoleZeroError(('Expected 2 tokens in line %i of file %s' % ((iline + 1), filename)))
        if toks[0].startswith('*'):
            # defensive; '*'-prefixed lines were already consumed as comments above
            continue
        lsect = toks[0].upper()
        if (lsect in sects):
            # new section header: remember it and reset its data
            atsect = lsect
            sectdata[atsect] = []
        if (lsect.upper() == 'ZEROS'):
            nzeros = int(toks[1])
        elif (toks[0].upper() == 'POLES'):
            npoles = int(toks[1])
        elif (toks[0].upper() == 'CONSTANT'):
            constant = float(toks[1])
        elif atsect:
            # data line: a 'real imag' pair belonging to the current section
            sectdata[atsect].append(complex(float(toks[0]), float(toks[1])))
    if (f != file):
        # only close handles opened by this function, not a caller-supplied one
        f.close()
    poles = sectdata['POLES']
    zeros = sectdata['ZEROS']
    npoles_ = len(poles)
    nzeros_ = len(zeros)
    if (npoles_ > npoles):
        raise SacPoleZeroError(('Expected %i poles but found %i in pole-zero file "%s"' % (npoles, npoles_, filename)))
    if (nzeros_ > nzeros):
        raise SacPoleZeroError(('Expected %i zeros but found %i in pole-zero file "%s"' % (nzeros, nzeros_, filename)))
    if (npoles_ < npoles):
        poles.extend(([complex(0.0)] * (npoles - npoles_)))
    # Bugfix: pad missing zeros against the declared ZERO count.  The
    # previous code compared nzeros_ against npoles, so declared-but-unlisted
    # zeros were silently dropped whenever nzeros_ >= npoles.
    if (nzeros_ < nzeros):
        zeros.extend(([complex(0.0)] * (nzeros - nzeros_)))
    if ((len(poles) == 0) and (len(zeros) == 0)):
        raise SacPoleZeroError(('No poles and zeros found in file "%s"' % filename))
    if (not num.all(num.isfinite(poles))):
        raise SacPoleZeroError(('Not finite pole(s) found in pole-zero file "%s"' % filename))
    if (not num.all(num.isfinite(zeros))):
        raise SacPoleZeroError(('Not finite zero(s) found in pole-zero file "%s"' % filename))
    if (not num.isfinite(constant)):
        # 'Ivalid' (sic) kept verbatim: callers/tests may match this message
        raise SacPoleZeroError(('Ivalid constant (%g) found in pole-zero file "%s"' % (constant, filename)))
    if get_comments:
        return (zeros, poles, constant, comments)
    else:
        return (zeros, poles, constant)
def ticket_id_to_user_hashid(ticket_id: strawberry.ID, conference_code: str) -> Optional[str]:
    """Resolve a hashid-encoded Pretix ticket id to the hashid of the user
    whose e-mail matches the ticket's attendee; None when either the order
    position or the user cannot be found."""
    conference = Conference.objects.filter(code=conference_code).first()
    position = pretix.get_order_position(conference, decode_hashid(ticket_id))
    if not position:
        return None
    attendee = User.objects.filter(email=position['attendee_email']).first()
    if not attendee:
        return None
    return encode_hashid(int(attendee.id), salt=settings.USER_ID_HASH_SALT, min_length=6)
def help():
    """Print every available Aaia module with its description, invoking each
    module's own help() in turn.  (Intentionally shadows builtins.help: this
    is the CLI's help entry point.)"""
    print('\nAaia\n\navailable modules:')
    package_dir = os.path.dirname(__file__)
    for _importer, module_name, _is_pkg in pkgutil.iter_modules([package_dir]):
        if module_name == 'main':
            continue
        qualified_name = __package__ + '.' + module_name
        library = importlib.import_module(qualified_name)
        print(module_name + ' : ' + library.__description__)
        library.help()
        # drop the module again so repeated calls re-import it fresh
        del sys.modules[qualified_name]
def _infer_decorator_callchain(node):
    """Detect decorator chains that produce classmethod/staticmethod.

    Infers the result of calling *node* (a decorator factory) and returns
    'classmethod' or 'staticmethod' when that result is such a descriptor
    (or a function decorated with one); otherwise None.
    """
    if (not isinstance(node, FunctionDef)):
        return None
    if (not node.parent):
        return None
    try:
        result = next(node.infer_call_result(node.parent), None)
    except InferenceError:
        return None
    if isinstance(result, bases.Instance):
        # look through instances to the class they proxy
        result = result._proxied
    if isinstance(result, ClassDef):
        if result.is_subtype_of('builtins.classmethod'):
            return 'classmethod'
        if result.is_subtype_of('builtins.staticmethod'):
            return 'staticmethod'
    if isinstance(result, FunctionDef):
        if (not result.decorators):
            return None
        # the returned function may itself carry a builtin descriptor decorator,
        # either as a bare name or as a `builtins.<name>` attribute access
        for decorator in result.decorators.nodes:
            if isinstance(decorator, node_classes.Name):
                if (decorator.name in BUILTIN_DESCRIPTORS):
                    return decorator.name
            if (isinstance(decorator, node_classes.Attribute) and isinstance(decorator.expr, node_classes.Name) and (decorator.expr.name == 'builtins') and (decorator.attrname in BUILTIN_DESCRIPTORS)):
                return decorator.attrname
    return None
def test_corrected_cphase_ops_throws() -> None:
    """The corrected-cphase decomposition must reject parameter sets it
    cannot realise for the requested angle."""
    (a, b) = cirq.LineQubit.range(2)
    with pytest.raises(GateDecompositionError):
        _corrected_cphase_ops(qubits=(a, b), angle=(np.pi / 13), parameters=ParticleConservingParameters(theta=(np.pi / 4), delta=0, chi=0, gamma=0, phi=(np.pi / 24)))
def test_concordance_cebrian2009localizacion():
    """Concordance matrix must reproduce the Cebrian (2009) localisation
    example (column-normalised matrix, third criterion minimised)."""
    matrix = scale_by_sum([[6, 5, 28, 5, 5], [4, 2, 25, 10, 9], [5, 7, 35, 9, 6], [6, 1, 27, 6, 7], [6, 8, 30, 7, 9], [5, 6, 26, 4, 8]], axis=0)
    # 1 = maximise, -1 = minimise
    objectives = [1, 1, (- 1), 1, 1]
    weights = [0.25, 0.25, 0.1, 0.2, 0.2]
    # nan on the diagonal: an alternative is not compared with itself
    expected = [[np.nan, 0.5, 0.35, 0.5, 0.35, 0.45], [0.5, np.nan, 0.5, 0.75, 0.5, 0.5], [0.65, 0.5, np.nan, 0.45, 0.2, 0.7], [0.75, 0.25, 0.55, np.nan, 0.35, 0.45], [0.9, 0.7, 0.8, 0.9, np.nan, 0.9], [0.55, 0.5, 0.55, 0.55, 0.1, np.nan]]
    result = concordance(matrix, objectives, weights)
    assert np.allclose(result, expected, atol=0.001, equal_nan=True)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1, preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Recursively copy the directory tree *src* under *dst*, creating *dst*
    if needed, and return the list of files that were (or, in dry-run mode,
    would have been) copied or linked.

    Mode/timestamp preservation, symlink handling, update-only copying and
    dry-run semantics mirror ``distutils.file_util.copy_file``.
    """
    from distutils.file_util import copy_file

    if not dry_run and not os.path.isdir(src):
        raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
    try:
        entries = os.listdir(src)
    except OSError as exc:
        # in dry-run mode a missing source just yields an empty result
        if not dry_run:
            raise DistutilsFileError("error listing files in '{}': {}".format(src, exc.strerror))
        entries = []

    if not dry_run:
        mkpath(dst, verbose=verbose)

    created = []
    for entry in entries:
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if entry.startswith('.nfs'):
            # skip NFS rename artefacts
            continue
        if preserve_symlinks and os.path.islink(src_path):
            link_target = os.readlink(src_path)
            if verbose >= 1:
                log.info('linking %s -> %s', dst_path, link_target)
            if not dry_run:
                os.symlink(link_target, dst_path)
            created.append(dst_path)
        elif os.path.isdir(src_path):
            # recurse into subdirectories, accumulating their outputs
            created.extend(copy_tree(src_path, dst_path, preserve_mode, preserve_times, preserve_symlinks, update, verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_path, dst_path, preserve_mode, preserve_times, update, verbose=verbose, dry_run=dry_run)
            created.append(dst_path)
    return created
class BiDictTests(unittest.TestCase):
    """Tests for the bidirectional dictionary BiDict."""

    def setUp(self):
        self.bidict = BiDict()

    def testStartEmpty(self):
        """A fresh BiDict has no forward or backward entries."""
        self.assertEqual(len(self.bidict), 0)
        self.assertEqual(len(self.bidict.forward), 0)
        self.assertEqual(len(self.bidict.backward), 0)

    def testLength(self):
        """len() tracks additions and deletions."""
        self.assertEqual(len(self.bidict), 0)
        self.bidict.Add('from', 'to')
        self.assertEqual(len(self.bidict), 1)
        del self.bidict['from']
        self.assertEqual(len(self.bidict), 0)

    def testDeletion(self):
        """Deleting a missing key raises; deleting by forward key works."""
        self.assertRaises(KeyError, operator.delitem, self.bidict, 'missing')
        self.bidict.Add('missing', 'present')
        del self.bidict['missing']

    def testBackwardDeletion(self):
        """Deleting by the backward (value) key removes the forward mapping too."""
        self.assertRaises(KeyError, operator.delitem, self.bidict, 'missing')
        self.bidict.Add('missing', 'present')
        del self.bidict['present']
        self.assertEqual(self.bidict.HasForward('missing'), False)

    def testForwardAccess(self):
        """HasForward/GetForward look up by the original key."""
        self.bidict.Add('shake', 'vanilla')
        self.bidict.Add('pie', 'custard')
        self.assertEqual(self.bidict.HasForward('shake'), True)
        self.assertEqual(self.bidict.GetForward('shake'), 'vanilla')
        self.assertEqual(self.bidict.HasForward('pie'), True)
        self.assertEqual(self.bidict.GetForward('pie'), 'custard')
        self.assertEqual(self.bidict.HasForward('missing'), False)
        self.assertRaises(KeyError, self.bidict.GetForward, 'missing')

    def testBackwardAccess(self):
        """HasBackward/GetBackward look up by the mapped value."""
        self.bidict.Add('shake', 'vanilla')
        self.bidict.Add('pie', 'custard')
        self.assertEqual(self.bidict.HasBackward('vanilla'), True)
        self.assertEqual(self.bidict.GetBackward('vanilla'), 'shake')
        self.assertEqual(self.bidict.HasBackward('missing'), False)
        self.assertRaises(KeyError, self.bidict.GetBackward, 'missing')

    def testItemAccessor(self):
        """Subscript access behaves like the forward lookup."""
        self.bidict.Add('shake', 'vanilla')
        self.bidict.Add('pie', 'custard')
        self.assertRaises(KeyError, operator.getitem, self.bidict, 'missing')
        self.assertEqual(self.bidict['shake'], 'vanilla')
        self.assertEqual(self.bidict['pie'], 'custard')
def get_create_inputs(dataset_name: str, is_train: bool, epochs: int):
    """Return a zero-argument factory that builds the input pipeline for a dataset.

    Args:
        dataset_name: one of 'mnist', 'fashion_mnist', 'smallNORB', 'cifar10',
            'cifa100' (legacy spelling, kept for backward compatibility) or
            'cifar100'.
        is_train: whether to build the training (vs. evaluation) pipeline.
        epochs: number of epochs; only used by the smallNORB pipeline.

    Returns:
        A callable of no arguments; the underlying create_inputs_* function is
        only invoked when the returned factory is called.

    Raises:
        KeyError: for an unknown dataset name.
    """
    options = {
        'mnist': (lambda: create_inputs_mnist(is_train)),
        'fashion_mnist': (lambda: create_inputs_mnist(is_train)),
        'smallNORB': (lambda: create_inputs_norb(is_train, epochs)),
        'cifar10': (lambda: create_inputs_cifar10(is_train)),
        'cifa100': (lambda: create_inputs_cifa100(is_train)),
    }
    # Accept the correctly spelled name too; the misspelled 'cifa100' key is
    # kept so existing callers keep working.
    options['cifar100'] = options['cifa100']
    return options[dataset_name]
_module()
class ViPNAS_ResNet(BaseBackbone):
    """ViPNAS ResNet backbone with searched widths/depths/kernels/groups/attention.

    The list-valued arguments (``wid``, ``expan``, ``dep``, ``ks``, ``group``,
    ``att``) hold one entry for the stem followed by one per stage, which is why
    most of them start with ``None``.
    """

    # Maps supported depth -> residual block class.
    arch_settings = {50: ViPNAS_Bottleneck}

    def __init__(self, depth, in_channels=3, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(3,), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, with_cp=False, zero_init_residual=True, wid=[48, 80, 160, 304, 608], expan=[None, 1, 1, 1, 1], dep=[None, 4, 6, 7, 3], ks=[7, 3, 5, 5, 5], group=[None, 16, 16, 16, 16], att=[None, True, False, True, True]):
        # Deep-copy so the shared default dict is never mutated downstream.
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        self.depth = depth
        # NOTE(review): dep[0] is None in the default config and stem_channels is
        # not read below (the stem is built from wid[0]) — confirm intent.
        self.stem_channels = dep[0]
        self.num_stages = num_stages
        assert (1 <= num_stages <= 4)
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.zero_init_residual = zero_init_residual
        self.block = self.arch_settings[depth]
        # Per-stage block counts come from the searched depth list.
        self.stage_blocks = dep[1:(1 + num_stages)]
        self._make_stem_layer(in_channels, wid[0], ks[0])
        self.res_layers = []
        _in_channels = wid[0]
        for (i, num_blocks) in enumerate(self.stage_blocks):
            expansion = get_expansion(self.block, expan[(i + 1)])
            _out_channels = (wid[(i + 1)] * expansion)
            stride = strides[i]
            dilation = dilations[i]
            res_layer = self.make_res_layer(block=self.block, num_blocks=num_blocks, in_channels=_in_channels, out_channels=_out_channels, expansion=expansion, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, kernel_size=ks[(i + 1)], groups=group[(i + 1)], attention=att[(i + 1)])
            _in_channels = _out_channels
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        self.feat_dim = res_layer[(- 1)].out_channels

    def make_res_layer(self, **kwargs):
        """Build one residual stage; overridable hook for subclasses."""
        return ViPNAS_ResLayer(**kwargs)

    @property
    def norm1(self):
        """The stem's normalization module (built in ``_make_stem_layer``).

        Restored as a property: ``_freeze_stages`` and ``forward`` access
        ``self.norm1`` as an attribute (``self.norm1.eval()``, ``self.norm1(x)``),
        which would fail if this were a plain method.
        """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels, kernel_size):
        """Build either a 3-conv deep stem or a single conv + BN + ReLU stem."""
        if self.deep_stem:
            self.stem = nn.Sequential(ConvModule(in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=True), ConvModule((stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=True), ConvModule((stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=True))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=kernel_size, stride=2, padding=(kernel_size // 2), bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Put frozen stages in eval mode and stop their gradients."""
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Default init when no pretrained weights: normal convs, unit BN."""
        super().init_weights(pretrained)
        if (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.normal_(m.weight, std=0.001)
                    for (name, _) in m.named_parameters():
                        if (name in ['bias']):
                            nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the stem and stages; return one tensor or a tuple per out_indices."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        return tuple(outs)

    def train(self, mode=True):
        """Keep frozen stages frozen and, if norm_eval, all BN layers in eval."""
        super().train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
class PrepareNuSuperPositionState(Bloq):
    """Prepare the $2^{-\\mu}|\\mu\\rangle|\\nu\\rangle$ superposition state.

    Attributes:
        num_bits_p: number of bits per momentum component register.
        adjoint: whether this is the adjoint of the preparation.
    """
    num_bits_p: int
    adjoint: bool = False

    # BUG FIX: the original had a bare `_property` line here (a NameError at
    # class-creation time); it was evidently a stripped `@property` decorator.
    @property
    def signature(self) -> Signature:
        """One mu register and a 3-component nu register of num_bits_p + 1 bits each."""
        return Signature([Register('mu', self.num_bits_p), Register('nu', (self.num_bits_p + 1), shape=(3,))])

    def short_name(self) -> str:
        return 'PREP $2^{-\\mu}|\\mu\\rangle|\\nu\\rangle$'

    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        # Toffoli count 3*(n_p - 1) for the controlled-hadamard cascade.
        return {(Toffoli(), (3 * (self.num_bits_p - 1)))}
class ConditionOverSampleDataset(ConditionCaptionDataset):
    """Caption dataset that oversamples captions with a low condition score.

    Keys whose condition score falls below ``threshold`` are repeated ``times``
    times in ``self.keys``; all others appear once.
    """

    def __init__(self, features: Dict, transforms: Dict, caption: str, vocabulary: str, condition: str, load_into_mem: bool=False, threshold: float=0.9, times: int=4):
        super().__init__(features, transforms, caption, vocabulary, condition, load_into_mem=load_into_mem)
        oversampled = []
        for info in self.caption_info:
            aid = info['audio_id']
            for cap in info['captions']:
                cid = cap['cap_id']
                score = self.key_to_condition[f'{aid}_{cid}']
                # Low-condition captions are duplicated to rebalance sampling.
                repeats = times if score < threshold else 1
                oversampled.extend([(aid, cid)] * repeats)
        self.keys = oversampled
def add_BAU_constraints(n, config):
    """Add business-as-usual per-carrier capacity bounds to network *n*.

    Reads ``BAU_mincapacities`` / ``BAU_maxcapacities`` from the electricity
    config (defaulting to 0 / inf for every extendable carrier) and constrains
    the summed ``p_nom`` of each carrier between them.
    """
    ext_c = n.generators.query('p_nom_extendable').carrier.unique()
    elec_cfg = config['electricity']
    mincaps = pd.Series(elec_cfg.get('BAU_mincapacities', {key: 0 for key in ext_c}))
    maxcaps = pd.Series(elec_cfg.get('BAU_maxcapacities', {key: np.inf for key in ext_c}))
    # The per-carrier p_nom sum is identical for both bounds; the original
    # rebuilt this linexpr/groupby twice — compute it once.
    lhs = linexpr((1, get_var(n, 'Generator', 'p_nom'))).groupby(n.generators.carrier).apply(join_exprs)
    define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps')
    define_constraints(n, lhs, '<=', maxcaps[lhs.index], 'Carrier', 'bau_maxcaps')
def add_exec_opts(parser) -> None:
    """Register test-execution options on an argparse parser."""
    parser.add_argument('--timeout', help='maximum number of seconds to allow for all tests to run. This does not include time taken to build the tests.', type=int, default=300, metavar='timeout')
    # BUG FIX: this help text was a copy-paste of --timeout's help.
    parser.add_argument('filter_glob', help='glob filter selecting which tests to run, e.g. "suite.test_*"; runs all tests when empty.', type=str, nargs='?', default='', metavar='filter_glob')
    # Typo fix: "Maybe be repeated" -> "May be repeated".
    parser.add_argument('--kernel_args', help='Kernel command-line parameters. May be repeated', action='append')
def test_int_loader_provider(strict_coercion, debug_trail):
    """int loader: exact ints always load; str/None handling depends on strict_coercion."""
    loader = Retort(strict_coercion=strict_coercion, debug_trail=debug_trail).get_loader(int)
    assert loader(100) == 100
    if not strict_coercion:
        # Lenient mode: numeric strings coerce, garbage raises value errors.
        raises_exc(TypeLoadError(Union[(int, float, str)], None), lambda: loader(None))
        raises_exc(ValueLoadError('Bad string format', 'foo'), lambda: loader('foo'))
        assert loader('100') == 100
    else:
        # Strict mode: anything that is not already an int is rejected.
        for bad_input in (None, 'foo', '100'):
            raises_exc(TypeLoadError(int, bad_input), lambda bad_input=bad_input: loader(bad_input))
class Tconfig(TestCase):
    """Tests for config init/quit behaviour with an unreadable config file."""

    def setUp(self):
        config.init()

    def tearDown(self):
        config.quit()

    def test_init_garbage_file(self):
        """Initializing from a corrupt file keeps defaults and preserves the
        garbage in a '<name>.not-valid' backup."""
        config.quit()
        garbage = b'\xf1=\xab\xac'
        fd, filename = mkstemp()
        os.close(fd)
        with open(filename, 'wb') as handle:
            handle.write(garbage)
        config.init(filename)
        self.assertTrue(config.options('player'))
        backup = filename + '.not-valid'
        self.assertTrue(os.path.exists(backup))
        with open(backup, 'rb') as handle:
            self.assertEqual(handle.read(), garbage)
        os.remove(filename)
        os.remove(backup)
def test_add_should_not_select_prereleases(app: PoetryTestApplication, repo: TestRepository, tester: CommandTester) -> None:
    """`poetry add` must pick the latest stable release and never a pre-release."""
    for version in ('3.13', '4.2b2'):
        repo.add_package(get_package('pyyaml', version))
    tester.execute('pyyaml')
    expected = (
        'Using version ^3.13 for pyyaml\n\n'
        'Updating dependencies\n'
        'Resolving dependencies...\n\n'
        'Package operations: 1 install, 0 updates, 0 removals\n\n'
        ' - Installing pyyaml (3.13)\n\n'
        'Writing lock file\n'
    )
    assert tester.io.fetch_output() == expected
    assert isinstance(tester.command, InstallerCommand)
    assert tester.command.installer.executor.installations_count == 1
    # The stable version constraint must also land in pyproject.toml.
    pyproject: dict[(str, Any)] = app.poetry.file.read()
    dependencies = pyproject['tool']['poetry']['dependencies']
    assert 'pyyaml' in dependencies
    assert dependencies['pyyaml'] == '^3.13'
(Executable)
class ExecutableStub(Executable):
    """Test double for Executable: records invocations and optionally prints
    canned stdout instead of running anything."""

    def __init__(self, name='fake executable', stdout=''):
        """Create a stub named *name*; *stdout* is echoed on each execute()."""
        super().__init__(name)
        # One MonitoredInvocation per execute() call, in call order.
        self.calls = []
        self.stdout = stdout

    def __repr__(self):
        return ("StubExecutable('%s')" % self.name)

    def times_called(self):
        """Number of times execute() has been invoked on this stub."""
        return len(self.calls)

    def execute(self, method, commandline_arguments, *args, **kwargs):
        """Record the invocation; emit the canned stdout instead of executing."""
        self.calls.append(MonitoredInvocation(method, commandline_arguments, args, kwargs))
        if self.stdout:
            # Honour a caller-supplied stdout stream, defaulting to the real one.
            out = kwargs.get('stdout', sys.stdout)
            print(self.stdout, file=out)

    def inserted_as_shim(self):
        """Monkeypatch Executable.which/.execute so any Executable whose name
        matches this stub is intercepted, then yield the stub.

        NOTE(review): this is a generator used with a ``with`` block via
        ``replaced`` below — it was presumably decorated (e.g. with
        ``@contextmanager``) in the original source; confirm at the call sites.
        """
        executable = self
        saved_which = Executable.which

        def stub_which(self, program):
            # Resolve normally, but report the bare program name when it maps
            # to this stub so lookups appear to succeed.
            full_path_to_executable = saved_which(self, program)
            if (program in [full_path_to_executable, executable.name]):
                return program
            else:
                return full_path_to_executable
        saved_execute = Executable.execute

        def stub_execute(self, method, commandline_arguments, *args, **kwargs):
            # Divert only executables with the stub's name; all others run the
            # original implementation.
            if (executable.name == self.name):
                return executable.execute(method, commandline_arguments, *args, **kwargs)
            else:
                return saved_execute(self, method, commandline_arguments, *args, **kwargs)
        # Preserve the replaced methods' names for introspection/debugging.
        stub_which.__name__ = 'which'
        stub_execute.__name__ = 'execute'
        with replaced(Executable.which, stub_which, Executable), replaced(Executable.execute, stub_execute, Executable):
            (yield executable)
class Config(object):
    """Static training configuration for the DTN model, plus GPU selection."""

    GPU_USAGE = 1
    LOG_DIR = './log/DTN'
    IMAGE_SIZE = 256
    MAP_SIZE = 64
    TRU_PARAMETERS = {'alpha': 0.001, 'beta': 0.01, 'mu_update_rate': 0.001}
    STEPS_PER_EPOCH = 1000
    MAX_EPOCH = 40
    NUM_EPOCHS_PER_DECAY = 12.0
    BATCH_SIZE = 32
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.999

    def __init__(self):
        # Pin TensorFlow to the configured GPU with on-demand memory growth.
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if not gpus:
            return
        try:
            tf.config.experimental.set_memory_growth(gpus[self.GPU_USAGE], True)
            tf.config.experimental.set_visible_devices(gpus[self.GPU_USAGE], 'GPU')
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPU')
        except RuntimeError as e:
            # Device configuration must happen before GPUs are initialized.
            print(e)

    def display(self):
        """Print every non-callable, non-dunder configuration attribute."""
        print('\nConfigurations:')
        for attr_name in dir(self):
            value = getattr(self, attr_name)
            if attr_name.startswith('__') or callable(value):
                continue
            print('{:30} {}'.format(attr_name, value))
        print('\n')
def set_object_material_text(object_id: int, text: str, material_index: int=0, material_size: int=OBJECT_MATERIAL_SIZE_256x128, font_face: str='Arial', font_size: int=24, bold: bool=True, font_color: int=0xFFFFFFFF, back_color: int=0, text_alignment: int=0) -> bool:
    """Replace an object's material with drawn text (SetObjectMaterialText wrapper).

    BUG FIX: the original read ``font_color: int=,`` — a SyntaxError with the
    default value missing. 0xFFFFFFFF (opaque white, SA-MP's documented default
    for fontcolor) is restored here — TODO confirm against the wrapped native.
    """
    return SetObjectMaterialText(object_id, text, material_index, material_size, font_face, font_size, bold, font_color, back_color, text_alignment)
def sa_perindopril_rings() -> GoalDirectedBenchmark:
    """Perindopril-rings benchmark whose objective is biased by synthetic accessibility."""
    base = perindopril_rings()
    # Wrap the base objective so the SA score modifies its value.
    objective = ScoringFunctionSAWrapper(base.objective, SAScoreModifier())
    spec = uniform_specification(1, 10, 100)
    return GoalDirectedBenchmark(name='SA_perindopril', objective=objective, contribution_specification=spec)
class Configs():
    """Collects and pretty-prints the contents of faceswap config files."""

    def __init__(self):
        # Config files live in a 'config' directory beside the entry script.
        self.config_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'config')
        self.configs = self.get_configs()

    def get_configs(self):
        """Return formatted text for every .ini / .faceswap file in config_dir."""
        config_files = [os.path.join(self.config_dir, cfile) for cfile in os.listdir(self.config_dir) if ((os.path.basename(cfile) == '.faceswap') or (os.path.splitext(cfile)[1] == '.ini'))]
        return self.parse_configs(config_files)

    def parse_configs(self, config_files):
        """Concatenate the formatted dump of each config file."""
        formatted = ''
        for cfile in config_files:
            fname = os.path.basename(cfile)
            ext = os.path.splitext(cfile)[1]
            formatted += '\n {} \n'.format(fname)
            if (ext == '.ini'):
                formatted += self.parse_ini(cfile)
            elif (fname == '.faceswap'):
                formatted += self.parse_json(cfile)
        return formatted

    def parse_ini(self, config_file):
        """Format an .ini file, skipping comments and blank lines."""
        formatted = ''
        with open(config_file, 'r') as cfile:
            for line in cfile.readlines():
                line = line.strip()
                if (line.startswith('#') or (not line)):
                    continue
                item = line.split('=')
                if (len(item) == 1):
                    # Section header (no '=' present).
                    formatted += '\n{}\n'.format(item[0].strip())
                else:
                    formatted += self.format_text(item[0], item[1])
        return formatted

    def parse_json(self, config_file):
        """Format a .faceswap JSON file, keys sorted alphabetically."""
        formatted = ''
        with open(config_file, 'r') as cfile:
            conf_dict = json.load(cfile)
            for key in sorted(conf_dict.keys()):
                formatted += self.format_text(key, conf_dict[key])
        return formatted

    # BUG FIX: this was defined without `self` yet invoked as
    # self.format_text(...), which passed three arguments to a two-parameter
    # function and raised TypeError. Restored as a staticmethod.
    @staticmethod
    def format_text(key, val):
        """One 'key:<padding>value' line, key padded to a 25-character column."""
        return '{0: <25} {1}\n'.format((key.strip() + ':'), val.strip())
def test_assign_pickup_to_starting_items(empty_patches, state_game_data, generic_pickup_category, default_generator_params):
    """Assigning a pickup as a starting item grants its progression and zeroes the lock item."""
    db = state_game_data.resource_database
    start_node = state_game_data.region_list.node_by_identifier(empty_patches.game.starting_location)
    initial_state = state.State(ResourceCollection(), (), 99, start_node, empty_patches, None, state_game_data)
    ammo = db.get_item('Ammo')
    lock_item = db.item[0]
    pickup = PickupEntry('A', 2, generic_pickup_category, generic_pickup_category, progression=((ammo, 5),), generator_params=default_generator_params, extra_resources=(), unlocks_resource=True, resource_lock=ResourceLock(ammo, ammo, lock_item))
    result = initial_state.assign_pickup_to_starting_items(pickup)
    # Both the patches and the live state must reflect 5 ammo and an unlocked lock.
    assert result.patches.starting_equipment == [pickup]
    assert result.patches.starting_resources() == ResourceCollection.from_dict(db, {ammo: 5, lock_item: 0})
    assert result.resources == ResourceCollection.from_dict(db, {ammo: 5, lock_item: 0})
def Euler2Rotation(phi, theta, psi):
    """Rotation matrix from Euler angles (roll=phi, pitch=theta, yaw=psi).

    Composes the three elementary rotations as R = R_yaw @ R_pitch @ R_roll
    and returns a 3x3 numpy array.
    """
    c_phi, s_phi = np.cos(phi), np.sin(phi)
    c_theta, s_theta = np.cos(theta), np.sin(theta)
    c_psi, s_psi = np.cos(psi), np.sin(psi)
    R_roll = np.array([[1, 0, 0], [0, c_phi, (- s_phi)], [0, s_phi, c_phi]])
    R_pitch = np.array([[c_theta, 0, s_theta], [0, 1, 0], [(- s_theta), 0, c_theta]])
    R_yaw = np.array([[c_psi, (- s_psi), 0], [s_psi, c_psi, 0], [0, 0, 1]])
    # BUG FIX: the original line read `R = ((R_yaw R_pitch) R_roll)` — the
    # matrix-multiplication operators were lost, making it a SyntaxError.
    return R_yaw @ R_pitch @ R_roll
class ProgressMeter(object):
    """Formats and prints a '[batch/total]' progress line from a set of meters."""

    def __init__(self, num_batches: int, meters: List[AverageMeter], prefix: str=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch: int) -> None:
        """Print prefix, the formatted batch counter, then every meter, tab-joined."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches: int) -> str:
        """Build '[{:Nd}/total]' where N is the digit count of num_batches."""
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
from unittest.mock import patch  # required by the restored decorators below

# NOTE(review): the two bare expressions that preceded this test were almost
# certainly mock decorators whose '@' prefix was lost in transit; without them
# pytest cannot inject mock_invoke_step/mock_get_module. Restored here —
# confirm against sibling tests in this module.
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_lower_false(mock_invoke_step, mock_get_module):
    """A string 'false' for `run` must be treated as falsy: the step is skipped."""
    step = Step({'name': 'step1', 'run': 'false'})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call('step1 not running because run is False.')
    mock_invoke_step.assert_not_called()
    # Skipping the step must leave the context untouched.
    assert (len(context) == original_len)
class MissingSpaceInDoctestChecker(BaseChecker):
    """Pylint checker flagging docstring doctests written as '>>>code' with no
    space between the prompt and the code."""

    name = 'missing_space_in_doctest'
    msgs = {'E9973': ('Space missing after >>> in the docstring of "%s."', 'missing-space-in-doctest', 'Used when a doctest is missing a space before the code to be executed')}
    # NOTE(review): the three bare `_required_for_messages(...)` statements below
    # look like stripped decorators (e.g. pylint's message-gating decorator) that
    # lost their '@' prefix — as written they call an undefined name at class
    # creation; confirm against the original checker source.
    _required_for_messages('missing-space-in-doctest')

    def visit_functiondef(self, node: nodes.FunctionDef) -> None:
        # Function docstrings are checked like any other.
        self._check_docstring(node)
    _required_for_messages('missing-space-in-doctest')

    def visit_classdef(self, node: nodes.ClassDef) -> None:
        # Class docstrings are checked like any other.
        self._check_docstring(node)
    _required_for_messages('missing-space-in-doctest')

    def visit_module(self, node: nodes.Module) -> None:
        # Module docstrings are checked like any other.
        self._check_docstring(node)

    def _check_docstring(self, node: nodes.NodeNG) -> None:
        """Emit one message per docstring line containing a malformed doctest."""
        if (node.doc_node is not None):
            docstring = (node.doc_node.value or '')
            # Docstring body starts one line below the def/class line.
            start_line = (node.lineno + 1)
            lines = docstring.split('\n')
            for (line_no, line) in enumerate(lines):
                if self._has_invalid_doctest(line):
                    self.add_message('missing-space-in-doctest', node=node, args=node.name, line=(line_no + start_line))

    def _has_invalid_doctest(self, doc: str) -> Union[(bool, Optional[Match[str]])]:
        """True/match when the line is a bare '>>>' or '>>>' glued to code."""
        start_index = doc.find(DOCTEST)
        contains_doctest = (start_index != (- 1))
        # A line that is exactly '>>>' (3 chars) is also invalid.
        if (contains_doctest and (len(doc) == 3)):
            return True
        # '>>>' immediately followed by a word character means the space is missing.
        match = re.match('\\s*>>>\\w', doc)
        return match
def _smoketest(ctx: Context, debug: bool, eth_client: EthClient, report_path: Optional[str]) -> None:
    """Run the Raiden smoketest end to end, writing a full report file.

    Sets up a throwaway environment, starts a Raiden node, runs the smoketest
    steps, and appends every stage's output to the report. Exits the process
    with status 1 on failure.
    """
    from raiden.tests.utils.smoketest import run_smoketest, setup_smoketest, step_printer
    # Captures the node's stdout so it can be appended to the report at the end.
    raiden_stdout = StringIO()
    assert ctx.parent, MYPY_ANNOTATION
    environment_type = ctx.parent.params['environment_type']
    debug_logfile = ctx.parent.params['debug_logfile']
    if (report_path is None):
        report_file = mktemp(suffix='.log')
    else:
        report_file = report_path
    click.secho(f'Report file: {report_file}', fg='yellow')
    configure_logging(logger_level_config={'': 'DEBUG'}, log_file=report_file, disable_debug_logfile=(not debug_logfile))

    def append_report(subject: str, data: Optional[AnyStr]=None) -> None:
        # Appends a '==== SUBJECT ====' banner plus optional payload to the report.
        with open(report_file, 'a', encoding='UTF-8') as handler:
            handler.write(f"{f' {subject.upper()} ':=^80}{os.linesep}")
            if (data is not None):
                write_data: str
                if isinstance(data, bytes):
                    write_data = data.decode()
                else:
                    write_data = data
                handler.writelines([(write_data + os.linesep)])
    append_report('Raiden version', json.dumps(get_system_spec()))
    append_report('Raiden log')
    free_port_generator = get_free_port()
    try:
        with step_printer(step_count=8, stdout=sys.stdout) as print_step:
            with setup_smoketest(eth_client=eth_client, print_step=print_step, free_port_generator=free_port_generator, debug=debug, stdout=raiden_stdout, append_report=append_report) as setup:
                args = setup.args
                port = next(free_port_generator)
                args['api_address'] = f'localhost:{port}'
                args['environment_type'] = environment_type
                args['one_to_n_contract_address'] = ('0x' + ('1' * 40))
                args['routing_mode'] = RoutingMode.PFS
                # Fee options are click multiple-options, hence empty tuples.
                args['flat_fee'] = ()
                args['proportional_fee'] = ()
                args['proportional_imbalance_fee'] = ()
                # NOTE(review): the next line appears to be a decorator that lost
                # its '@<command-factory>' prefix (keyword args alone are a
                # SyntaxError); _setup_raiden_config is later invoked with click
                # BaseCommand.main()-style arguments (args/default_map/
                # standalone_mode), so it was presumably a click command
                # decorator — recover it from the original source.
                (invoke_without_command=True, use_option_parsers=False)
                def _setup_raiden_config(**kwargs: Any) -> None:
                    raiden_config = setup_raiden_config(**kwargs)
                    args['config'] = raiden_config
                    args.update(kwargs)
                    return
                call_args: List[str] = []
                _setup_raiden_config(args=call_args, default_map=args.copy(), standalone_mode=False)
                run_smoketest(print_step=print_step, setup=setup)
        append_report('Raiden Node stdout', raiden_stdout.getvalue())
    except:
        # Broad on purpose: any failure must be reported and mark the run failed.
        if debug:
            import pdb
            pdb.post_mortem()
        error = traceback.format_exc()
        append_report('Smoketest execution error', error)
        print_step('Smoketest execution error', error=True)
        success = False
    else:
        print_step('Smoketest successful')
        success = True
    if (not success):
        sys.exit(1)
class Proplist():
    """Thin wrapper around a PulseAudio ``pa_proplist``.

    Entries are set with str keys; str values are UTF-8 encoded, bytes values
    stored raw. ``delete()`` frees the underlying C object.
    """

    def __init__(self, ini_data: Optional[Dict[(str, Union[(bytes, str)])]]=None) -> None:
        self._pl = pa.pa_proplist_new()
        if (not self._pl):
            raise PulseAudioException(0, 'Failed creating proplist.')
        if (ini_data is not None):
            # BUG FIX: iterating a dict yields keys only; .items() yields the
            # (key, value) pairs the loop unpacks.
            for (k, v) in ini_data.items():
                self[k] = v

    def __setitem__(self, k, v):
        """Set entry *k*: bytes stored verbatim, str values UTF-8 encoded."""
        if isinstance(v, bytes):
            r = pa.pa_proplist_set(self._pl, k.encode('utf-8'), v, len(v))
        else:
            r = pa.pa_proplist_sets(self._pl, k.encode('utf-8'), v.encode('utf-8'))
        if (r != 0):
            raise PulseAudioException(0, 'Error setting proplist entry.')

    def __delitem__(self, k):
        # BUG FIX: pa_proplist_unset takes (proplist, key); the original passed
        # only the raw key, omitting both the handle and the UTF-8 encoding
        # used everywhere else in this class.
        if (pa.pa_proplist_unset(self._pl, k.encode('utf-8')) != 0):
            raise PulseAudioException(0, 'Error unsetting proplist entry.')

    def delete(self) -> None:
        """Free the underlying proplist; the wrapper is unusable afterwards."""
        pa.pa_proplist_free(self._pl)
        self._pl = None
# NOTE(review): the original decorator line began with a bare `.supported(...)`
# (a SyntaxError); restored to the pytest mark used throughout this test suite.
@pytest.mark.supported(only_if=(lambda backend: backend.hash_supported(hashes.SHA3_224())), skip_message='Does not support SHA3_224')
class TestSHA3224():
    """NIST SHA3-224 test-vector suite; skipped when the backend lacks SHA3-224."""
    test_sha3_224 = generate_hash_test(load_hash_vectors, os.path.join('hashes', 'SHA3'), ['SHA3_224LongMsg.rsp', 'SHA3_224ShortMsg.rsp'], hashes.SHA3_224())
class TestHRPTGetCalibratedBT(TestHRPTWithPatchedCalibratorAndFile):
    """Brightness-temperature calibration checks for HRPT channel 4."""

    def _get_channel_4_bt(self):
        """Load channel 4 calibrated to brightness temperature."""
        return self._get_dataset(make_dataid(name='4', calibration='brightness_temperature'))

    def test_calibrated_bt_values(self):
        """The patched calibrator must yield the expected constant BT."""
        channel = self._get_channel_4_bt()
        np.testing.assert_allclose(channel.values, 38.43)
def get_filenames():
    """Yield each .py file under src/, skipping __pycache__ and two exempt files,
    printing a 'pyflakes on:' line per yielded path."""
    exempt = {os.path.join('src', 'header.py'), os.path.join('src', 'git', 'long_header.py')}
    for dirpath, _dirnames, filenames in os.walk('src'):
        if dirpath.endswith('__pycache__'):
            continue
        for basename in filenames:
            if not basename.endswith('.py'):
                continue
            path = os.path.join(dirpath, basename)
            if path in exempt:
                continue
            print('pyflakes on:', path)
            yield path
def get_devices(display=None):
    """Return a HIDRawDevice for every openable /dev/hidraw* node."""
    dev_root = '/dev'
    found = {}
    for entry in os.listdir(dev_root):
        if not entry.startswith('hidraw'):
            continue
        node = os.path.join(dev_root, entry)
        try:
            found[node] = HIDRawDevice(display, node)
        except OSError:
            # Node exists but cannot be opened (permissions, races) — skip it.
            continue
    return list(found.values())
def test_upload_generic_package_file(tmp_path, project, resp_upload_generic_package):
    """Uploading a file object from disk yields a GenericPackage."""
    target = tmp_path / file_name
    target.write_text(file_content, encoding='utf-8')
    uploaded = project.generic_packages.upload(
        package_name=package_name,
        package_version=package_version,
        file_name=file_name,
        data=target.open(mode='rb'),
    )
    assert isinstance(uploaded, GenericPackage)
class RunID(namedtuple('RunID', 'python compat bench timestamp')):
    """Identifier for a benchmark run: python version, compat flag, optional
    bench and timestamp. Empty bench/timestamp are normalized to None."""

    def __new__(cls, python, compat, bench, timestamp):
        # Normalize falsy bench -> None and timestamp -> int or None.
        self = super().__new__(cls, python, compat, (bench or None), (int(timestamp) if timestamp else None))
        return self

    def __str__(self):
        if (not self.timestamp):
            return self.name
        return f'{self.name}-{self.timestamp}'

    # BUG FIX: __str__ reads `self.name` as an attribute; without @property it
    # returned the bound method, making str(run_id) raise. The decorator was
    # evidently lost from the original.
    @property
    def name(self):
        """Lazily-computed, cached run name ('<python>-compat-<compat>[-bm-<bench>]')."""
        try:
            return self._name
        except AttributeError:
            name = f'{self.python}-compat-{self.compat}'
            if self.bench:
                name = f'{name}-bm-{self.bench.name}'
            # Caching works because this subclass (unlike the namedtuple base)
            # has an instance __dict__.
            self._name = name
            return self._name
class JsonFormatter(logging.Formatter):
    """logging.Formatter that serializes records to JSON.

    Fields named in the format string (and reserved) stay at the top level of
    the emitted object; all other extra fields are nested under ``prefix_key``.
    """

    def __init__(self, *args, **kwargs):
        """Accepts logging.Formatter args plus: json_default, json_encoder,
        json_serializer, default_extra (merged into every record) and
        prefix_key (nesting key for extras; falsy disables nesting)."""
        self.json_default = kwargs.pop('json_default', _json_default)
        self.json_encoder = kwargs.pop('json_encoder', None)
        self.json_serializer = kwargs.pop('json_serializer', json.dumps)
        self.default_values = kwargs.pop('default_extra', {})
        self.prefix_key = kwargs.pop('prefix_key', 'data')
        logging.Formatter.__init__(self, *args, **kwargs)
        # Fields named in the format string, plus logging's reserved attrs,
        # are never copied as free-form extras.
        self._fmt_parameters = self._parse_format_string()
        self._skip_fields = set(self._fmt_parameters)
        self._skip_fields.update(RESERVED_ATTRS)

    def _parse_format_string(self):
        """Return the %(name)s field names referenced by the format string."""
        standard_formatters = LOG_FORMAT_REGEXP
        return standard_formatters.findall(self._fmt)

    def add_fields(self, log_record, record, message_dict):
        """Populate *log_record* from the record's fields, the message dict,
        and the configured defaults; extras go under prefix_key if set."""
        target = log_record
        if self.prefix_key:
            log_record[self.prefix_key] = {}
            target = log_record[self.prefix_key]
        for (field, value) in record.__dict__.items():
            # Format-string + reserved fields stay top-level; other non-reserved
            # fields are treated as extras and nested.
            if ((field in self._fmt_parameters) and (field in RESERVED_ATTRS)):
                log_record[field] = value
            elif (field not in RESERVED_ATTRS):
                target[field] = value
        target.update(message_dict)
        target.update(self.default_values)

    def format(self, record):
        """Render *record* as a JSON string."""
        message_dict = {}
        if isinstance(record.msg, dict):
            # Dict messages are merged wholesale; an explicit 'message' key
            # becomes record.message, otherwise message stays None.
            message_dict = record.msg
            record.message = None
            if ('message' in message_dict):
                record.message = message_dict.pop('message', '')
        else:
            record.message = record.getMessage()
        if ('asctime' in self._fmt_parameters):
            record.asctime = self.formatTime(record, self.datefmt)
        if (record.exc_info and (not message_dict.get('exc_info'))):
            # Caller-provided exc_info in a dict message takes precedence.
            message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2]))
        log_record = {}
        self.add_fields(log_record, record, message_dict)
        return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder)
def generate_ann(root_path, split, image_infos, preserve_vertical, format):
    """Crop word images from annotated pages and write a recognition label file.

    Crops each annotation's bbox into crops/<split>/ (or ignores/<split>/ for
    filtered vertical crops during training) and writes one label line per kept
    crop in txt or jsonl format.
    """
    dst_image_root = osp.join(root_path, 'crops', split)
    ignore_image_root = osp.join(root_path, 'ignores', split)
    if (split == 'training'):
        dst_label_file = osp.join(root_path, f'train_label.{format}')
    elif (split == 'val'):
        dst_label_file = osp.join(root_path, f'val_label.{format}')
    elif (split == 'test'):
        dst_label_file = osp.join(root_path, f'test_label.{format}')
    else:
        raise NotImplementedError
    mmcv.mkdir_or_exist(dst_image_root)
    mmcv.mkdir_or_exist(ignore_image_root)
    lines = []
    for image_info in image_infos:
        # Per-image crop counter, used to build unique crop filenames.
        index = 1
        src_img_path = osp.join(root_path, 'imgs', image_info['file_name'])
        image = mmcv.imread(src_img_path)
        src_img_root = image_info['file_name'].split('.')[0]
        for anno in image_info['anno_info']:
            word = anno['word']
            # Strip the left-to-right embedding control character.
            word = word.strip('\u202a')
            # NOTE(review): these replace() targets are empty strings — the
            # characters being removed were evidently lost from the source
            # (replace('', '') is a no-op); recover them from the original.
            word = word.replace('', '').replace('', '')
            dst_img = crop_img(image, anno['bbox'], 0, 0)
            (h, w, _) = dst_img.shape
            dst_img_name = f'{src_img_root}_{index}.png'
            index += 1
            # NOTE(review): `'' in word` is always True for any string, so as
            # written every annotation is skipped — the two membership tests
            # clearly referenced specific (now-lost) characters; confirm and
            # restore them.
            if ((min(dst_img.shape) == 0) or ('' in word) or ('' in word) or (len(word) == 0)):
                continue
            # During training, drop tall (vertical) crops unless preserved;
            # they are still written out under ignores/ for inspection.
            if (((not preserve_vertical) and ((h / w) > 2)) and (split == 'training')):
                dst_img_path = osp.join(ignore_image_root, dst_img_name)
            else:
                dst_img_path = osp.join(dst_image_root, dst_img_name)
                mmcv.imwrite(dst_img, dst_img_path)
                if (format == 'txt'):
                    lines.append(f'{osp.basename(dst_image_root)}/{dst_img_name} {word}')
                elif (format == 'jsonl'):
                    lines.append(json.dumps({'filename': f'{osp.basename(dst_image_root)}/{dst_img_name}', 'text': word}, ensure_ascii=False))
                else:
                    raise NotImplementedError
    list_to_file(dst_label_file, lines)
def train(ps_device):
    """Build the multi-tower XLNet pretraining graph and run the training loop.

    Splits each input batch across FLAGS.num_core_per_host GPU towers, averages
    their losses/gradients, and periodically logs metrics and saves checkpoints.
    Memory states (mems) are threaded between session runs via feed_dict.
    """
    (train_input_fn, record_info_dict) = data_utils.get_input_fn(tfrecord_dir=FLAGS.record_info_dir, split='train', bsz_per_host=FLAGS.train_batch_size, seq_len=FLAGS.seq_len, reuse_len=FLAGS.reuse_len, bi_data=FLAGS.bi_data, num_hosts=1, num_core_per_host=1, perm_size=FLAGS.perm_size, mask_alpha=FLAGS.mask_alpha, mask_beta=FLAGS.mask_beta, uncased=FLAGS.uncased, num_passes=FLAGS.num_passes, use_bfloat16=FLAGS.use_bfloat16, num_predict=FLAGS.num_predict)
    tf.logging.info('num of batches {}'.format(record_info_dict['num_batch']))
    bsz_per_core = (FLAGS.train_batch_size // FLAGS.num_core_per_host)
    params = {'batch_size': FLAGS.train_batch_size}
    train_set = train_input_fn(params)
    example = train_set.make_one_shot_iterator().get_next()
    # Shard each feature tensor along the batch axis, one shard per GPU tower.
    if (FLAGS.num_core_per_host > 1):
        examples = [{} for _ in range(FLAGS.num_core_per_host)]
        for key in example.keys():
            vals = tf.split(example[key], FLAGS.num_core_per_host, 0)
            for device_id in range(FLAGS.num_core_per_host):
                examples[device_id][key] = vals[device_id]
    else:
        examples = [example]
    # Build one replica of the model graph per core, sharing variables.
    (tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars) = ([], [], [], [])
    for i in range(FLAGS.num_core_per_host):
        # Reuse variables on all towers after the first.
        reuse = (True if (i > 0) else None)
        with tf.device(assign_to_gpu(i, ps_device)), tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
            mems_i = {}
            if FLAGS.mem_len:
                mems_i['mems'] = create_mems_tf(bsz_per_core)
            (loss_i, new_mems_i, grads_and_vars_i) = single_core_graph(is_training=True, features=examples[i], mems=mems_i)
            tower_mems.append(mems_i)
            tower_losses.append(loss_i)
            tower_new_mems.append(new_mems_i)
            tower_grads_and_vars.append(grads_and_vars_i)
    # Average loss and gradients across towers (no-op for a single tower).
    if (len(tower_losses) > 1):
        loss = (tf.add_n(tower_losses) / len(tower_losses))
        grads_and_vars = average_grads_and_vars(tower_grads_and_vars)
    else:
        loss = tower_losses[0]
        grads_and_vars = tower_grads_and_vars[0]
    (train_op, learning_rate, gnorm) = model_utils.get_train_op(FLAGS, None, grads_and_vars=grads_and_vars)
    global_step = tf.train.get_global_step()
    # Initial numpy memory states, fed in on the first session run.
    tower_mems_np = []
    for i in range(FLAGS.num_core_per_host):
        mems_i_np = {}
        for key in tower_mems[i].keys():
            mems_i_np[key] = initialize_mems_np(bsz_per_core)
        tower_mems_np.append(mems_i_np)
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(allow_growth=True)
    model_utils.init_from_checkpoint(FLAGS, global_vars=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]
        (total_loss, prev_step) = (0.0, (- 1))
        while True:
            # Feed the previous step's memory outputs back in as inputs.
            feed_dict = {}
            for i in range(FLAGS.num_core_per_host):
                for key in tower_mems_np[i].keys():
                    for (m, m_np) in zip(tower_mems[i][key], tower_mems_np[i][key]):
                        feed_dict[m] = m_np
            fetched = sess.run(fetches, feed_dict=feed_dict)
            (loss_np, tower_mems_np, curr_step) = fetched[:3]
            total_loss += loss_np
            # Periodic metric logging: mean loss since last report, plus
            # gradient norm and learning rate (fetched[-3], fetched[-2]).
            if ((curr_step > 0) and ((curr_step % FLAGS.iterations) == 0)):
                curr_loss = (total_loss / (curr_step - prev_step))
                tf.logging.info('[{}] | gnorm {:.2f} lr {:8.6f} | loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}'.format(curr_step, fetched[(- 3)], fetched[(- 2)], curr_loss, math.exp(curr_loss), (curr_loss / math.log(2))))
                (total_loss, prev_step) = (0.0, curr_step)
            # Periodic checkpointing.
            if ((curr_step > 0) and ((curr_step % FLAGS.save_steps) == 0)):
                save_path = os.path.join(FLAGS.model_dir, 'model.ckpt')
                saver.save(sess, save_path)
                tf.logging.info('Model saved in path: {}'.format(save_path))
            if (curr_step >= FLAGS.train_steps):
                break
class BlazeEventsLoader(implements(PipelineLoader)):
    __doc__ = __doc__.format(SID_FIELD_NAME=SID_FIELD_NAME, TS_FIELD_NAME=TS_FIELD_NAME, EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME)

    def __init__(self, expr, next_value_columns, previous_value_columns, resources=None, odo_kwargs=None):
        # A non-tabular expression cannot back an events loader.
        dshape = expr.dshape
        if not istabular(dshape):
            raise ValueError('expression dshape must be tabular, got: %s' % dshape)
        # Restrict the expression to just the columns the events loader needs.
        needed_columns = list(required_event_fields(next_value_columns, previous_value_columns))
        self._expr = bind_expression_to_resources(expr[needed_columns], resources)
        self._next_value_columns = next_value_columns
        self._previous_value_columns = previous_value_columns
        self._odo_kwargs = {} if odo_kwargs is None else odo_kwargs

    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        """Materialize the blaze expression and delegate to a concrete EventsLoader."""
        cutoffs = domain.data_query_cutoff_for_sessions(dates)
        raw = load_raw_data(sids, cutoffs, self._expr, self._odo_kwargs)
        delegate = EventsLoader(events=raw, next_value_columns=self._next_value_columns, previous_value_columns=self._previous_value_columns)
        return delegate.load_adjusted_array(domain, columns, dates, sids, mask)
class MatchResult(object):
    """Records pattern -> (op, tensor) bindings produced during graph matching."""

    def __init__(self):
        self._pattern_to_op_tensor = {}
        self._name_to_pattern = {}

    def add(self, pattern, op, tensor):
        """Bind *pattern* to (op, tensor); a named pattern's name must be unique."""
        self._pattern_to_op_tensor[pattern] = (op, tensor)
        name = pattern.name
        if name is None:
            return
        if name in self._name_to_pattern:
            raise ValueError('Name %s is already bound to another pattern' % name)
        self._name_to_pattern[name] = pattern

    def _to_pattern(self, pattern_or_name):
        """Resolve a Pattern or registered name to a Pattern (None if unknown name)."""
        if isinstance(pattern_or_name, Pattern):
            return pattern_or_name
        if isinstance(pattern_or_name, str):
            return self._name_to_pattern.get(pattern_or_name)
        raise ValueError('pattern_or_name has type %s. Expect Pattern or str.' % type(pattern_or_name))

    def _get_op_tensor(self, pattern_or_name):
        """Return the (op, tensor) pair for a pattern/name, or None if unbound."""
        pattern = self._to_pattern(pattern_or_name)
        if pattern is None:
            return None
        return self._pattern_to_op_tensor.get(pattern)

    def get_op(self, pattern_or_name):
        """Matched op for a pattern/name, or None."""
        binding = self._get_op_tensor(pattern_or_name)
        return binding[0] if binding else None

    def get_tensor(self, pattern_or_name):
        """Matched tensor for a pattern/name, or None."""
        binding = self._get_op_tensor(pattern_or_name)
        return binding[1] if binding else None

    def merge_from(self, other_match_result):
        """Absorb every binding from another MatchResult (theirs win on clash)."""
        self._pattern_to_op_tensor.update(other_match_result._pattern_to_op_tensor)
        self._name_to_pattern.update(other_match_result._name_to_pattern)
class Arithmetic(Task):
    """Evaluation task over the EleutherAI/arithmetic dataset.

    Only a validation split is exposed; scoring keys off whether the gold
    completion is the greedy continuation (the boolean returned by
    ``rf.loglikelihood``).
    """

    VERSION = 0
    DATASET_PATH = 'EleutherAI/arithmetic'

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        # No training split is provided for this task.
        return NotImplemented

    def validation_docs(self):
        return self.dataset['validation']

    def test_docs(self):
        # No test split is provided for this task.
        return NotImplemented

    def doc_to_text(self, doc):
        return doc['context']

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc['context']

    def doc_to_target(self, doc):
        return doc['completion']

    def construct_requests(self, doc, ctx):
        # The raw loglikelihood is discarded; only the greedy-match flag matters.
        _, is_prediction = rf.loglikelihood(ctx, doc['completion'])
        return is_prediction

    def process_results(self, doc, results):
        (is_prediction,) = results
        return {'acc': is_prediction}

    def aggregation(self):
        return {'acc': mean}

    def higher_is_better(self):
        return {'acc': True}
def test_get_routed_grid_model_circuit():
    # Build a deterministic 2x3 grid-model problem (seeded RNG) and route it
    # onto a matching 2x3 rectangle of GridQubits with two (gamma, beta) layers.
    problem = _random_grid_model(2, 3, np.random.RandomState(0))
    qubits = cirq.GridQubit.rect(2, 3)
    circuit = get_routed_hardware_grid_circuit(problem_graph=problem.graph, qubits=qubits, coordinates=problem.coordinates, gammas=[(np.pi / 2), (np.pi / 4)], betas=[(np.pi / 2), (np.pi / 4)])
    # Pin the exact routed structure via a transposed text diagram (columns are
    # qubits, time flows downward). The literal's whitespace is significant.
    cirq.testing.assert_has_diagram(circuit, '\n (0, 0) (0, 1) (0, 2) (1, 0) (1, 1) (1, 2)\n \n H H H H H H\n \n ZZZZ ZZZZ \n \n ZZZZ ZZZZ\n \n \n ZZZZ \n ZZZZ \n ZZZZ \n \n \n \n \n X X X X X X\n \n ZZZZ^0.5 ZZZZ^0.5 \n \n ZZZZ^-0.5 ZZZZ^0.5\n \n \n ZZZZ^-0.5 \n ZZZZ^0.5 \n ZZZZ^0.5 \n \n \n \n \n X^0.5 X^0.5 X^0.5 X^0.5 X^0.5 X^0.5\n \n', transpose=True)
def test_tensordot_of_proxied_cupy_arrays():
    """tensordot over proxied CuPy arrays must equal tensordot over the raw arrays."""
    cupy = pytest.importorskip('cupy')
    base = cupy.arange(9).reshape((3, 3))
    lhs = proxy_object.asproxy(base.copy())
    rhs = proxy_object.asproxy(base.copy())
    via_proxy = dask.array.tensordot(lhs, rhs).flatten()
    direct = dask.array.tensordot(base.copy(), base.copy()).flatten()
    assert all(via_proxy == direct)
def test_target_task_view():
    """transfer_tasks_view must report a target task's role, amount and payment id."""
    secret = factories.make_secret()
    signed_transfer = factories.create(factories.LockedTransferSignedStateProperties(secret=secret))
    mediator_address = factories.make_address()
    channel_state = factories.create(
        factories.NettingChannelStateProperties(
            partner_state=factories.NettingChannelEndStateProperties(
                address=mediator_address, balance=TokenAmount(100)
            )
        )
    )
    target_state = TargetTransferState(
        from_hop=HopState(
            channel_identifier=channel_state.canonical_identifier.channel_identifier,
            node_address=mediator_address,
        ),
        transfer=signed_transfer,
        secret=secret,
    )
    task = TargetTask(
        canonical_identifier=channel_state.canonical_identifier,
        target_state=target_state,
    )
    view = transfer_tasks_view({signed_transfer.lock.secrethash: cast(TransferTask, task)})

    assert len(view) == 1
    entry = view[0]
    assert entry.get('role') == 'target'
    assert entry.get('locked_amount') == str(signed_transfer.balance_proof.locked_amount)
    assert entry.get('payment_identifier') == str(signed_transfer.payment_identifier)
class Fog(VersionBase):
    """Fog environment description (the OpenSCENARIO ``Fog`` element).

    Attributes:
        visual_range: visibility range of the fog, written to the
            ``visualRange`` XML attribute.
        bounding_box (BoundingBox, optional): region where the fog applies.
    """

    def __init__(self, visual_range, bounding_box=None):
        """Create a Fog.

        Args:
            visual_range: visibility range of the fog.
            bounding_box (BoundingBox, optional): region the fog applies to.

        Raises:
            TypeError: if ``bounding_box`` is given but is not a BoundingBox.
        """
        self.visual_range = visual_range
        if bounding_box is not None and not isinstance(bounding_box, BoundingBox):
            raise TypeError('bounding_box not of type BoundingBox')
        self.bounding_box = bounding_box

    def __eq__(self, other):
        if isinstance(other, Fog):
            return (
                self.get_attributes() == other.get_attributes()
                and self.bounding_box == other.bounding_box
            )
        # BUGFIX: previously fell through and implicitly returned None for
        # non-Fog operands; equality should always yield a bool.
        return False

    @staticmethod
    def parse(element):
        """Parse a Fog from its XML element.

        Args:
            element (xml.etree.ElementTree.Element): a <Fog> element.

        Returns:
            Fog: the parsed fog.
        """
        # Marked @staticmethod: the method takes no self, and this also makes
        # instance.parse(element) behave the same as Fog.parse(element).
        visual_range = element.attrib['visualRange']
        bounding_box = None
        # Element truthiness is False for childless elements, so an explicit
        # None comparison is required here; use ``is not None`` rather than !=.
        if element.find('BoundingBox') is not None:
            bounding_box = BoundingBox.parse(element.find('BoundingBox'))
        return Fog(visual_range, bounding_box)

    def get_attributes(self):
        """Return the XML attributes of the Fog as a dict."""
        return {'visualRange': str(self.visual_range)}

    def get_element(self):
        """Return the ElementTree representation of the Fog."""
        element = ET.Element('Fog', attrib=self.get_attributes())
        if self.bounding_box is not None:
            element.append(self.bounding_box.get_element())
        return element
def get_val_dataloader(args, val_data, vocab):
    """Featurize the validation split and wrap it in a sequential DataLoader.

    Returns:
        (DataLoader, features): the loader and the featurized examples.
    """
    print('processing val data')
    features, vocab = convert_example_to_feature(args, val_data, vocab, get_vocab=False)
    dataset = Dataset(features)
    loader = data.DataLoader(
        dataset,
        sampler=data.SequentialSampler(dataset),
        batch_size=args.batch_size,
        collate_fn=batchify_data,
    )
    return loader, features
def main():
    """Two-step training entry point.

    Builds train/test/meta loaders, constructs the network (``cot``), its EMA
    copy (``cot_ema``) and the meta weighting network (``vnet``), then trains
    for ``num_epochs`` epochs, evaluating and checkpointing the best model.

    Relies on module-level globals: args, train_data/test_data/meta_data,
    batch_size, learning_rate, weight_decay, N_CLASSES, num_epochs, logfile,
    and the helpers adjust_learning_rate/print_lr/train_CE/train/evaluate/
    save_checkpoint — presumably defined elsewhere in this file; verify.
    """
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    test_loader = DataLoader(test_data, batch_size=16, shuffle=False, num_workers=4, pin_memory=True)
    meta_loader = DataLoader(meta_data, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    step = args.step
    print('===> About training in a two-step process! ===')
    if (step == 1):
        print('===> Step 1 ...')
        # Step 1: ImageNet-pretrained backbone; only the classifier head
        # (cot.module.fc) is optimized.
        cot = NET(n_classes=N_CLASSES, pretrained=True, use_two_step=True)
        cot = nn.DataParallel(cot).cuda()
        # NOTE(review): ``.params()`` rather than PyTorch's ``.parameters()`` —
        # presumably NET is a meta-module exposing params(); confirm against
        # the NET definition.
        optimizer = optim.Adam(cot.module.fc.params(), lr=learning_rate, weight_decay=weight_decay)
        # EMA copy of the network, initialized from the same weights.
        cot_ema = NET(n_classes=N_CLASSES, pretrained=True, use_two_step=True)
        cot_ema = nn.DataParallel(cot_ema).cuda()
        cot_ema.load_state_dict(cot.state_dict())
        # flag is forwarded to train(); True in step 1, False in step 2.
        flag = True
        # Meta weighting network and its own optimizer (fixed lr/decay).
        vnet = ACVNet(1, 100, 100, 1, 3).cuda()
        vnet = nn.DataParallel(vnet)
        optimizer_vnet = torch.optim.Adam(vnet.module.params(), 0.001, weight_decay=0.0001)
    elif (step == 2):
        print('===> Step 2 ...')
        # Step 2: no pretrained init here — weights are loaded below from the
        # step-1 checkpoint; the full network is optimized.
        cot = NET(n_classes=N_CLASSES, pretrained=False, use_two_step=True)
        cot = nn.DataParallel(cot).cuda()
        optimizer = optim.Adam(cot.module.params(), lr=learning_rate, weight_decay=weight_decay)
        cot_ema = NET(n_classes=N_CLASSES, pretrained=False, use_two_step=True)
        cot_ema = nn.DataParallel(cot_ema).cuda()
        cot_ema.load_state_dict(cot.state_dict())
        vnet = ACVNet(1, 100, 100, 1, 3).cuda()
        vnet = nn.DataParallel(vnet)
        optimizer_vnet = torch.optim.Adam(vnet.module.params(), 0.001, weight_decay=0.0001)
        flag = False
    else:
        raise AssertionError('Wrong step argument')
    print('---> no checkpoint loaded <---')
    if (step == 2):
        # Resume step 2 from the best step-1 checkpoint.
        cot.load_state_dict(torch.load('model/aircraft_net_step1_vgg16_best_epoch.pth'))
    start_epoch = 0
    best_accuracy = 0.0
    best_epoch = None
    print('')
    with open(logfile, 'a') as f:
        f.write('------ Step: {} ...\n'.format(step))
    for epoch in range(start_epoch, num_epochs):
        epoch_start_time = time.time()
        cot.train()
        adjust_learning_rate(optimizer, epoch)
        if (epoch < 40):
            # Warm-up phase: plain cross-entropy training for the first 40 epochs.
            train_acc = train_CE(train_loader, cot, cot_ema, optimizer, epoch)
        else:
            # Afterwards: meta-learning phase using the meta loader and vnet.
            meta_lr = print_lr(optimizer, epoch)
            train_acc = train(train_loader, meta_loader, cot, cot_ema, vnet, optimizer, optimizer_vnet, epoch, meta_lr, flag)
        test_acc = evaluate(test_loader, cot)
        test_acc_ema = evaluate(test_loader, cot_ema)
        # Track the best of either the raw or the EMA model.
        if (test_acc_ema > best_accuracy):
            best_accuracy = test_acc_ema
            best_epoch = (epoch + 1)
            torch.save(cot.state_dict(), 'model/aircraft_net_ema_step{}_vgg16_best_epoch.pth'.format(step))
            torch.save(vnet.state_dict(), 'model/aircraft_vnet_step{}_vgg16_best_epoch.pth'.format(step))
        # NOTE(review): this compares against best_accuracy possibly just
        # updated by the EMA branch above, so a raw-model checkpoint is only
        # written when test_acc also beats test_acc_ema this epoch — confirm
        # that is intended.
        if (test_acc > best_accuracy):
            best_accuracy = test_acc
            best_epoch = (epoch + 1)
            torch.save(cot.state_dict(), 'model/aircraft_net_step{}_vgg16_best_epoch.pth'.format(step))
            torch.save(vnet.state_dict(), 'model/aircraft_vnet_step{}_vgg16_best_epoch.pth'.format(step))
        epoch_end_time = time.time()
        # Always checkpoint the latest state (independent of best-model saves).
        save_checkpoint({'epoch': (epoch + 1), 'cot_state_dict': cot.state_dict(), 'optimizer': optimizer.state_dict(), 'best_epoch': best_epoch, 'best_accuracy': best_accuracy, 'step': step})
        print('------\nEpoch: [{:03d}/{:03d}]\tTrain Accuracy: [{:6.2f}]\tTest Accuracy: [{:6.2f}]\tEpoch Runtime: [{:6.2f}]\n------'.format((epoch + 1), num_epochs, train_acc, test_acc, (epoch_end_time - epoch_start_time)))
        # Second line reports the EMA model's test accuracy.
        print('------\nEpoch: [{:03d}/{:03d}]\tTrain Accuracy: [{:6.2f}]\tTest Accuracy: [{:6.2f}]\tEpoch Runtime: [{:6.2f}]\n------'.format((epoch + 1), num_epochs, train_acc, test_acc_ema, (epoch_end_time - epoch_start_time)))
        with open(logfile, 'a') as f:
            output = 'Epoch: [{:03d}/{:03d}]\tTrain Accuracy: [{:6.2f}]\tTest Accuracy: [{:6.2f}]\tEpoch Runtime: [{:6.2f}]'.format((epoch + 1), num_epochs, train_acc, test_acc, (epoch_end_time - epoch_start_time))
            f.write((output + '\n'))
    print('******\nBest Accuracy: [{0:6.2f}], at Epoch [{1:03d}]; \n******'.format(best_accuracy, best_epoch))
    with open(logfile, 'a') as f:
        output = '******\nBest Accuracy: [{0:6.2f}], at Epoch [{1:03d}]; \n******'.format(best_accuracy, best_epoch)
        f.write((output + '\n'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.