code stringlengths 281 23.7M |
|---|
def assert_string_arrays_equal(expected: list[str], actual: list[str], msg: str) -> None:
    """Fail the current pytest test when *actual* (after clean-up) differs from *expected*.

    On mismatch a coloured line diff of both lists is written to stderr, an
    alignment hint is printed for the first differing line when it is long
    enough to warrant one, and pytest.fail() is raised with *msg*.
    """
    actual = clean_up(actual)
    if expected == actual:
        return
    expected_ranges, actual_ranges = diff_ranges(expected, actual)
    # ANSI colour codes are not emitted on Windows consoles.
    use_colour = sys.platform != 'win32'
    sys.stderr.write('Expected:\n')
    render_diff_range(expected_ranges, expected, colour='\x1b[31m' if use_colour else None)
    sys.stderr.write('Actual:\n')
    render_diff_range(actual_ranges, actual, colour='\x1b[32m' if use_colour else None)
    sys.stderr.write('\n')
    # Index of the first differing line; past-the-end when one list is a
    # strict prefix of the other.
    first_diff = max(len(expected), len(actual))
    for index, (exp_line, act_line) in enumerate(zip(expected, actual)):
        if exp_line != act_line:
            first_diff = index
            break
    if 0 <= first_diff < len(actual):
        long_enough = (len(expected[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT
                       or len(actual[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT)
        if long_enough:
            show_align_message(expected[first_diff], actual[first_diff])
    sys.stderr.write('Update the test output using --update-data -n0 (you can additionally use the -k selector to update only specific tests)\n')
    pytest.fail(msg, pytrace=False)
class CacheIndexableTest(unittest.TestCase):
    """Exercise rorpiter.CacheIndexable's windowed index lookups."""

    def get_iter(self):
        """Yield 100 IndexedTuples, recording each in self.d keyed by its index."""
        for n in range(100):
            entry = rorpiter.IndexedTuple((n,), list(range(n)))
            self.d[(n,)] = entry
            yield entry

    def testCaching(self):
        """get() serves items inside the cache window and rejects expired ones."""
        self.d = {}
        cached = rorpiter.CacheIndexable(self.get_iter(), 3)
        for _ in range(3):
            next(cached)
        self.assertEqual(cached.get((1,)), self.d[(1,)])
        # Not yet produced by the underlying iterator.
        self.assertIsNone(cached.get((3,)))
        for _ in range(3):
            next(cached)
        self.assertEqual(cached.get((3,)), self.d[(3,)])
        self.assertEqual(cached.get((4,)), self.d[(4,)])
        # Unknown index within the window yields None.
        self.assertIsNone(cached.get((3, 5)))
        # Index (1,) has fallen out of the 3-element window.
        self.assertRaises(AssertionError, cached.get, (1,))

    def testEqual(self):
        """Wrapping an iterator in CacheIndexable must not alter its items."""
        self.d = {}
        originals = list(self.get_iter())
        replayed = list(rorpiter.CacheIndexable(iter(originals), 10))
        self.assertEqual(originals, replayed)
class Migration(migrations.Migration):
    # Introduces a SectionPage through-model so a page can belong to several
    # sections with an explicit ordering, rewires Page.section to a temporary
    # nullable FK, adds the Section.pages M2M through SectionPage, runs the
    # data migration (run_data_migration, defined elsewhere in this module),
    # and finally drops the old Page.section / Page.order columns.

    dependencies = [('questions', '0084_catalog_sections')]
    operations = [migrations.CreateModel(name='SectionPage', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('order', models.IntegerField(default=0)), ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='section_pages', to='questions.section')), ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='page_sections', to='questions.page'))], options={'ordering': ('section', 'order')}), migrations.AlterField(model_name='page', name='section', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='questions.Section')), migrations.AddField(model_name='section', name='pages', field=models.ManyToManyField(blank=True, help_text='The pages of this section.', related_name='sections', through='questions.SectionPage', to='questions.Page', verbose_name='Pages')), migrations.RunPython(run_data_migration), migrations.RemoveField(model_name='page', name='section'), migrations.RemoveField(model_name='page', name='order')]
def get_result_statistics(results, opts, num_gpus=None, include_failed_instances_in_duration=False, as_percentage_of=None):
    """Aggregate per-instance solver results.

    Each entry of *results* is a (cost, tour, duration) triple; a tour of None
    marks a failed instance. Returns (mean_cost, total_duration, num_results,
    failed_indices).
    """
    ensure_backward_compatibility(opts)
    successful = [entry for entry in results if entry[1] is not None]
    failed = [idx for idx, (_cost, tour, _dur) in enumerate(results) if tour is None]
    costs, tours, durations = zip(*successful)
    if include_failed_instances_in_duration:
        # Count time spent on failed instances as well.
        durations = tuple(entry[2] for entry in results)
    (avg_serial_duration, avg_parallel_duration, total_duration_parallel,
     total_duration_single_device, effective_batch_size) = get_durations(
        durations, opts.batch_size, opts.system_info['used_num_processes'],
        opts.system_info['used_device_count'])
    if num_gpus is None:
        total_duration = total_duration_parallel
    else:
        # Normalize to a fixed device count when requested.
        total_duration = total_duration_single_device * num_gpus
    mean_cost = np.mean(costs)
    if as_percentage_of is not None:
        # Express the mean cost as a percentage gap to the reference value.
        mean_cost = (mean_cost / as_percentage_of - 1) * 100
    return (mean_cost, total_duration, len(results), failed)
def test_ipopt_solver_options():
    """Check IPOPT option defaults, every scalar setter, and the grouped setters."""
    solver = Solver.IPOPT()
    assert solver.type == SolverType.IPOPT
    assert solver.show_online_optim is False
    assert solver.show_options is None
    # Default option values straight out of the constructor.
    defaults = [
        ('tol', 1e-06),
        ('dual_inf_tol', 1.0),
        ('constr_viol_tol', 0.0001),
        ('compl_inf_tol', 0.0001),
        ('acceptable_tol', 1e-06),
        ('acceptable_dual_inf_tol', 0.0),
        ('acceptable_constr_viol_tol', 0.01),
        ('acceptable_compl_inf_tol', 0.01),
        ('max_iter', 1000),
        ('hessian_approximation', 'exact'),
        ('limited_memory_max_history', 50),
        ('linear_solver', 'mumps'),
        ('nlp_scaling_method', 'gradient-based'),
        ('mu_init', 0.1),
        ('warm_start_init_point', 'no'),
        ('warm_start_mult_bound_push', 0.001),
        ('warm_start_slack_bound_push', 0.001),
        ('warm_start_bound_push', 0.001),
        ('warm_start_slack_bound_frac', 0.001),
        ('warm_start_bound_frac', 0.001),
        ('bound_push', 0.01),
        ('bound_frac', 0.01),
        ('print_level', 5),
    ]
    for attr, expected in defaults:
        assert getattr(solver, attr) == expected
    assert solver.c_compile is False
    # Each scalar setter must be reflected by its matching attribute.
    setter_cases = [
        ('set_linear_solver', 'linear_solver', 'ma57'),
        ('set_tol', 'tol', 2),
        ('set_dual_inf_tol', 'dual_inf_tol', 3),
        ('set_constr_viol_tol', 'constr_viol_tol', 4),
        ('set_compl_inf_tol', 'compl_inf_tol', 5),
        ('set_acceptable_tol', 'acceptable_tol', 6),
        ('set_acceptable_dual_inf_tol', 'acceptable_dual_inf_tol', 7),
        ('set_acceptable_constr_viol_tol', 'acceptable_constr_viol_tol', 8),
        ('set_acceptable_compl_inf_tol', 'acceptable_compl_inf_tol', 9),
        ('set_maximum_iterations', 'max_iter', 10),
        ('set_hessian_approximation', 'hessian_approximation', 'hello bioptim'),
        ('set_nlp_scaling_method', 'nlp_scaling_method', 'how are you?'),
        ('set_limited_memory_max_history', 'limited_memory_max_history', 11),
        ('set_mu_init', 'mu_init', 12),
        ('set_warm_start_init_point', 'warm_start_init_point', 'super!'),
        ('set_warm_start_mult_bound_push', 'warm_start_mult_bound_push', 13),
        ('set_warm_start_slack_bound_push', 'warm_start_slack_bound_push', 14),
        ('set_warm_start_bound_push', 'warm_start_bound_push', 15),
        ('set_warm_start_slack_bound_frac', 'warm_start_slack_bound_frac', 16),
        ('set_warm_start_bound_frac', 'warm_start_bound_frac', 17),
        ('set_bound_push', 'bound_push', 18),
        ('set_bound_frac', 'bound_frac', 19),
        ('set_print_level', 'print_level', 20),
    ]
    for setter, attr, value in setter_cases:
        getattr(solver, setter)(value)
        assert getattr(solver, attr) == value
    solver.set_c_compile(True)
    assert solver.c_compile is True
    # Grouped setters fan one value out to several related options.
    solver.set_convergence_tolerance(21)
    for attr in ('tol', 'acceptable_tol', 'compl_inf_tol', 'acceptable_compl_inf_tol'):
        assert getattr(solver, attr) == 21
    solver.set_constraint_tolerance(22)
    for attr in ('constr_viol_tol', 'acceptable_constr_viol_tol'):
        assert getattr(solver, attr) == 22
    solver.set_warm_start_options(42)
    assert solver.warm_start_init_point == 'yes'
    for attr in ('mu_init', 'warm_start_mult_bound_push', 'warm_start_slack_bound_push',
                 'warm_start_bound_push', 'warm_start_slack_bound_frac', 'warm_start_bound_frac'):
        assert getattr(solver, attr) == 42
    solver.set_initialization_options(44)
    assert solver.bound_push == 44
    assert solver.bound_frac == 44
    # Unsafe options are stashed under a "private" underscore-prefixed key.
    solver.set_option_unsafe(666, 'mysterious option')
    assert solver.__dict__['_mysterious option'] == 666
    # as_dict() merges the common options and hides non-IPOPT bookkeeping keys.
    fake_solver = FakeSolver(options_common={'ipopt.casino_gain': 777})
    solver_dict = solver.as_dict(fake_solver)
    assert solver_dict['ipopt.casino_gain'] == 777
    assert solver_dict['ipopt.tol'] == 21
    for hidden in ('_c_compile', 'type', 'show_online_optim', 'show_options'):
        assert hidden not in solver_dict
    solver.set_nlp_scaling_method('gradient-fiesta')
    assert solver.nlp_scaling_method == 'gradient-fiesta'
def format_time_brief(seconds: Union[(int, float)]) -> str:
    """Render a duration as a short two-unit string, e.g. '42s', '1m 05s', '2d 03h'.

    The input is rounded to the nearest whole second first.
    """
    total = int(np.rint(seconds))
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if total < minute:
        return '{0}s'.format(total)
    if total < hour:
        return '{0}m {1:02}s'.format(total // minute, total % minute)
    if total < day:
        return '{0}h {1:02}m'.format(total // hour, (total // minute) % 60)
    return '{0}d {1:02}h'.format(total // day, (total // hour) % 24)
class Output:
    """Base class for console output channels.

    Handles verbosity filtering and message formatting; concrete subclasses
    implement _write() and section().
    """

    def __init__(self, verbosity: Verbosity = Verbosity.NORMAL, decorated: bool = False, formatter: (Formatter | None) = None) -> None:
        self._verbosity: Verbosity = verbosity
        self._formatter = formatter or Formatter()
        self._formatter.decorated(decorated)
        self._section_outputs: list[SectionOutput] = []

    def formatter(self) -> Formatter:
        """Return the formatter used to render messages."""
        return self._formatter

    def verbosity(self) -> Verbosity:
        """Return the current verbosity level."""
        return self._verbosity

    def set_formatter(self, formatter: Formatter) -> None:
        """Replace the message formatter."""
        self._formatter = formatter

    def is_decorated(self) -> bool:
        """Whether the formatter emits decorations (colours/styles)."""
        return self._formatter.is_decorated()

    def decorated(self, decorated: bool = True) -> None:
        """Enable or disable output decoration."""
        self._formatter.decorated(decorated)

    def supports_utf8(self) -> bool:
        """Whether this output can safely emit UTF-8 (always True here)."""
        return True

    def set_verbosity(self, verbosity: Verbosity) -> None:
        """Set the verbosity level used to filter messages."""
        self._verbosity = verbosity

    def is_quiet(self) -> bool:
        return self._verbosity is Verbosity.QUIET

    def is_verbose(self) -> bool:
        return self._verbosity.value >= Verbosity.VERBOSE.value

    def is_very_verbose(self) -> bool:
        return self._verbosity.value >= Verbosity.VERY_VERBOSE.value

    def is_debug(self) -> bool:
        return self._verbosity is Verbosity.DEBUG

    def write_line(self, messages: (str | Iterable[str]), verbosity: Verbosity = Verbosity.NORMAL, type: Type = Type.NORMAL) -> None:
        """Write one or more messages, each followed by a newline."""
        self.write(messages, new_line=True, verbosity=verbosity, type=type)

    def write(self, messages: (str | Iterable[str]), new_line: bool = False, verbosity: Verbosity = Verbosity.NORMAL, type: Type = Type.NORMAL) -> None:
        """Format and write messages, skipping those above the current verbosity."""
        if isinstance(messages, str):
            messages = [messages]
        # BUG FIX: compare against the stored level. `verbosity` is a plain
        # method on this class, so the original `self.verbosity.value` accessed
        # `.value` on a bound method and raised AttributeError on every write.
        if verbosity.value > self._verbosity.value:
            return
        for message in messages:
            if type is Type.NORMAL:
                message = self._formatter.format(message)
            elif type is Type.PLAIN:
                # Format, then strip the markup for plain-text output.
                message = strip_tags(self._formatter.format(message))
            self._write(message, new_line=new_line)

    def flush(self) -> None:
        """Flush buffered output; no-op in the base class."""
        pass

    def remove_format(self, text: str) -> str:
        """Return *text* with formatting markup removed."""
        # BUG FIX: `formatter` is a method, not a property; the original
        # `self.formatter.remove_format` looked up an attribute on the bound
        # method object and raised AttributeError.
        return self._formatter.remove_format(text)

    def section(self) -> SectionOutput:
        """Create a new output section; must be provided by subclasses."""
        raise NotImplementedError

    def _write(self, message: str, new_line: bool = False) -> None:
        """Low-level write hook implemented by subclasses."""
        raise NotImplementedError
def strncat(state, dst, src, num):
    """Emulated strncat(3): append at most *num* bytes of *src* to *dst*.

    Returns *dst*, like the C function. The copy includes a terminating NUL.
    """
    # Length of the existing string in *dst*, so the append starts at its end.
    # BUG FIX: the original searched *src* here, which made dlength the source
    # length and placed the appended bytes at the wrong offset.
    dlength, last = state.mem_search(dst, [BZERO])
    # Concretize the destination length and pin it with a constraint.
    dlength = state.evalcon(dlength).as_long()
    length, last = state.mem_search(src, [BZERO])
    # Copy min(num, strlen(src)) bytes ...
    length = z3.If(num < length, num, length)
    # ... plus the terminating NUL byte.
    state.mem_move(dst + dlength, src, length + ONE)
    return dst
def create_toplevel_linklet_vars(forms_ls, linklet):
    """Collect every identifier bound by a top-level (define-values ...) form.

    Returns a dict mapping each defined identifier to a LinkletDefinedVar
    owned by *linklet*. Raises SchemeException on a duplicate binding.
    """
    toplevels = {}
    for form in forms_ls:
        # Unwrap correlated (syntax-annotated) forms first.
        if isinstance(form, W_Correlated):
            form = form.get_obj()
        if not (isinstance(form, values.W_List) and form.car() is mksym('define-values')):
            continue
        ids = form.cdr().car()
        ids_ls, _ids_len = to_rpython_list(ids, unwrap_correlated=True)
        for ident in ids_ls:
            if ident in toplevels:
                raise SchemeException('duplicate binding name : %s' % ident.tostring())
            toplevels[ident] = interp.LinkletDefinedVar(ident, defining_linklet=linklet)
    return toplevels
def create_hparams(FLAGS):
    """Build a tf.contrib.training.HParams object from a (flattened) FLAGS dict.

    Every known hyper-parameter is taken from FLAGS when present, otherwise
    its default below is used. `logger` is always initialized to None.
    """
    FLAGS = flat_config(FLAGS)
    # (key, default) table replacing the original wall of inline conditionals.
    defaults = {
        'train_file': None, 'eval_file': None, 'test_file': None, 'infer_file': None,
        'FEATURE_COUNT': None, 'FIELD_COUNT': None, 'data_format': None,
        'PAIR_NUM': None, 'DNN_FIELD_NUM': None,
        'n_user': None, 'n_item': None, 'n_user_attr': None, 'n_item_attr': None,
        'dim': None, 'layer_sizes': None, 'cross_layer_sizes': None, 'cross_layers': None,
        'activation': None, 'cross_activation': 'identity', 'dropout': None,
        'attention_layer_sizes': None, 'attention_activation': None,
        'model_type': None, 'method': None, 'load_model_name': None, 'mu': None,
        'init_method': 'tnormal', 'init_value': 0.01,
        'embed_l2': 0.0, 'embed_l1': 0.0, 'layer_l2': 0.0, 'layer_l1': 0.0,
        'cross_l2': 0.0, 'cross_l1': 0.0,
        'learning_rate': 0.001, 'loss': None, 'optimizer': 'adam',
        'epochs': 10, 'batch_size': 1, 'log': 'log',
        'show_step': 1, 'save_epoch': 5, 'metrics': None,
    }
    kwargs = {key: (FLAGS[key] if key in FLAGS else default)
              for key, default in defaults.items()}
    kwargs['logger'] = None
    return tf.contrib.training.HParams(**kwargs)
def preprocess_pairwise_data(samples: List[ContextualizedExample], tokenizer: PreTrainedTokenizer, max_seq_length=64, disable_tqdm=False):
    """Tokenize entity pairs into a pairwise dataset.

    Each sample contributes six sentences (left context, entity, right
    context for both entities); the tokenizer output is re-grouped so the
    result is a list of (features_a, features_b, label) triples.
    """
    raw_sentences = []
    for sample in samples:
        # Exactly two contextualized entities per pairwise sample.
        first, second = sample.entities
        raw_sentences += [first.left_context, first.entity, first.right_context]
        raw_sentences += [second.left_context, second.entity, second.right_context]
    tokenizer_output = tokenizer(raw_sentences, truncation='do_not_truncate')
    parsed = parse_tokenizer_output(tokenizer_output, max_seq_length, disable_tqdm=disable_tqdm)
    # parsed holds two entries per sample, in order: pair them back up.
    paired = []
    for idx, sample in enumerate(samples):
        paired.append((parsed[2 * idx], parsed[2 * idx + 1], sample.label))
    return paired
class ESILState:
    """One symbolic-execution state.

    Bundles a z3 solver with symbolic registers, memory, filesystem and OS
    models, plus the ESIL evaluation context used while stepping instructions
    through radare2 (via R2API).
    """

    def __init__(self, r2api: R2API, **kwargs):
        self.kwargs = kwargs
        self.r2api = r2api
        self.pure_symbolic = kwargs.get('sym', False)
        self.pcode = kwargs.get('pcode', False)
        self.check_perms = kwargs.get('check', False)
        # Solver flavor: Optimize supports maximize()/minimize() (used by
        # evaluate_register); SimpleSolver is the faster default.
        if kwargs.get('optimize', False):
            self.solver = z3.Optimize()
        elif kwargs.get('simple', True):
            self.solver = z3.SimpleSolver()
        else:
            self.solver = z3.Solver()
        timeout = kwargs.get('timeout', None)
        if timeout is not None:
            self.solver.set('timeout', timeout)
        self.solver.push()
        # Cached model from the last successful check (see evaluate_register).
        self.model = None
        self.current_instruction = None
        # ESIL evaluation context: cur/old comparison values, word size, stack.
        self.esil = {'cur': 0, 'old': 0, 'stack': [], 'size': 64, 'type': 1}
        self.stack = self.esil['stack']
        self.max_len = kwargs.get('max_len', 4096)
        self.pid = kwargs.get('pid', 1337)
        self.fork_mode = kwargs.get('fork_mode', 'parent')
        self.sleep = kwargs.get('sleep', False)
        self.info = self.r2api.get_info()
        self.debug = kwargs.get('debug', False)
        self.trace = kwargs.get('trace', False)
        self.events = kwargs.get('events', {})
        self.memory: ESILMemory = None
        self.registers: ESILRegisters = None
        self.proc: ESILProcess = None
        self.fs: ESILFilesystem = None
        self.os: ESILOS = None
        self.condition = None
        self.steps = 0
        # BUG FIX: this line was truncated to "self.distance =" (a syntax
        # error). 0 is used as a neutral starting distance; confirm against
        # upstream if the state manager expects a different sentinel value.
        self.distance = 0
        self.target = None
        self.exit = None
        if 'info' in self.info:
            self.bits = self.info['info']['bits']
            self.endian = self.info['info']['endian']
        else:
            # No binary info available from r2: fall back to sane defaults.
            self.bits = 64
            self.endian = 'little'
        if kwargs.get('init', True):
            self.proc = ESILProcess(r2api, **kwargs)
            self.init_state()

    def init_state(self):
        """Initialize registers, memory, filesystem, OS and stdio FILE pointers."""
        self.init_registers()
        self.init_memory()
        self.init_filesystem()
        self.init_os()
        flags = self.r2api.get_flags()
        # Map the binary's stdin/stdout/stderr FILE objects to fresh
        # allocations holding the corresponding fd numbers.
        stdfds = {'obj.stdin': 0, 'obj.stdout': 1, 'obj.stderr': 2}
        for stdfd in stdfds:
            if stdfd in flags:
                addr = flags[stdfd]['offset']
                new_addr = self.memory.alloc()
                self.memory[new_addr] = stdfds[stdfd]
                self.memory[addr] = new_addr

    def init_memory(self):
        """Create the symbolic memory model and wire it to this state's solver."""
        max_eval = self.kwargs.get('max_eval', 32)
        self.memory = ESILMemory(self.r2api, self.info, max_eval, self.pure_symbolic, self.check_perms)
        self.memory.solver = self.solver
        self.memory.max_len = self.max_len
        self.memory.init_memory()

    def init_registers(self):
        """Create the register file from r2's register profile and values."""
        self.register_info = self.r2api.get_register_info()
        self.aliases = {}
        registers = self.register_info['reg_info']
        aliases = self.register_info['alias_info']
        register_values = self.r2api.get_all_registers()
        for alias in aliases:
            self.aliases[alias['role_str']] = alias
        for register in registers:
            register['value'] = register_values[register['name']]
        self.registers = ESILRegisters(registers, self.aliases, sym=self.pure_symbolic)
        self.registers.init_registers()

    def init_filesystem(self):
        """Create the symbolic filesystem model."""
        self.fs = ESILFilesystem(self.r2api, **self.kwargs)

    def init_os(self):
        """Create the OS (syscall) model."""
        self.os = ESILOS(self.r2api, **self.kwargs)

    def dump_file(self, f):
        """Return the contents of file/fd *f* packed into a bitvector (or None if empty)."""
        data = self.fs.content(f)
        if len(data) > 0:
            return self.memory.pack_bv(data)

    def dump_stdin(self):
        return self.dump_file(0)

    def dump_stdout(self):
        return self.dump_file(1)

    def dump_stderr(self):
        return self.dump_file(2)

    def write_stdin(self, data):
        """Append *data* to the stdin stream (fd 0)."""
        self.fs.add({0: data})

    def set_symbolic_register(self, name: str, var: str = None):
        """Replace register *name* with a fresh symbol (named *var*, default the register name)."""
        if var is None:
            var = name
        size = self.registers[name].size()
        self.registers[name] = z3.BitVec(var, size)

    def check_addr(self, bv, mode='r', length=None, data=None):
        """Fire registered Sym{Read,Write,Exec,Free} event hooks for a symbolic address.

        Concrete addresses (ints or constant bitvectors, possibly after
        simplification) are ignored. Always returns None.
        """
        if isinstance(bv, int):
            return
        elif z3.is_bv_value(bv):
            return
        bv = z3.simplify(bv)
        if z3.is_bv_value(bv):
            return
        elif z3.is_bv(bv):
            mode_to_event = {'r': ESILSolveEvent.SymRead, 'w': ESILSolveEvent.SymWrite, 'x': ESILSolveEvent.SymExec, 'f': ESILSolveEvent.SymFree}
            event = mode_to_event[mode]
            if event in self.events:
                # Normalize hook arguments to bitvectors.
                if isinstance(length, int):
                    length = BV(length, self.bits)
                if isinstance(data, list):
                    data = self.memory.pack_bv(data)
                ctx = EventContext(bv, length, data)
                for hook in self.events[event]:
                    hook(self, ctx)

    def mem_read(self, addr, length):
        self.check_addr(addr, 'r', length)
        return self.memory.read(addr, length)

    def mem_write(self, addr, data):
        self.check_addr(addr, 'w', data=data)
        return self.memory.write(addr, data)

    def mem_read_bv(self, addr, length):
        self.check_addr(addr, 'r', length)
        return self.memory.read_bv(addr, length)

    def mem_cond_read(self, addr, length):
        self.check_addr(addr, 'r', length)
        return self.memory.cond_read(addr, length)

    def mem_write_bv(self, addr, val, length):
        self.check_addr(addr, 'w', length, val)
        return self.memory.write_bv(addr, val, length)

    def mem_copy(self, dst, data, length):
        self.check_addr(dst, 'w', length, data)
        return self.memory.copy(dst, data, length)

    def mem_memcopy(self, src, dst, length):
        # BUG FIX: check_addr() always returns None; the original assigned its
        # result back to src/dst, clobbering the real addresses before the copy.
        self.check_addr(src, 'r', length)
        self.check_addr(dst, 'w', length)
        return self.memory.memcopy(src, dst, length)

    def mem_compare(self, src, dst, length=None):
        self.check_addr(src, 'r', length)
        self.check_addr(dst, 'w', length)
        return self.memory.compare(src, dst, length)

    def mem_move(self, src, dst, length):
        self.check_addr(src, 'r', length)
        self.check_addr(dst, 'w', length)
        return self.memory.move(src, dst, length)

    def mem_alloc(self, length=128):
        return self.memory.alloc(length)

    def mem_free(self, addr):
        self.check_addr(addr, 'f')
        return self.memory.free(addr)

    def mem_search(self, addr, needle, length=None, reverse=None):
        self.check_addr(addr, 'r')
        return self.memory.search(addr, needle, length, reverse)

    def constrain(self, *constraints):
        """Add constraints to the solver."""
        self.solver.add(*constraints)

    def constrain_bytes(self, bv, regex: Union[(str, bytes)]):
        """Constrain each byte of *bv* to match *regex* (a char class) or exact bytes.

        A bytes value pins each byte exactly; a string is treated as a regex
        character class, with support for ranges (a-z) and negation ([^...]).
        """
        if type(regex) == bytes:
            # Exact byte match, little-endian byte order within the bitvector.
            for i in range(len(regex)):
                self.constrain(z3.Extract(7 + i * 8, i * 8, bv) == regex[i])
            return
        all_bytes = ''.join([chr(x) for x in range(256)])
        if z3.is_bv(bv):
            # Split a single bitvector into its component bytes.
            bv = [z3.Extract(b * 8 + 7, b * 8, bv) for b in range(int(bv.size() / 8))]
        opts = []
        new_regex = regex[:]
        negate = False
        if len(regex) > 2 and regex[:2] == '[^':
            negate = True
            new_regex = new_regex.replace('[^', '[')
        # Extract explicit ranges like a-z so they become interval constraints.
        dashes = [i for (i, c) in enumerate(regex) if c == '-']
        for d in dashes:
            if regex[d - 1] != '\\' and len(regex) > d:
                x = ord(regex[d - 1])
                y = ord(regex[d + 1])
                opts.append([x, y])
                new_regex = new_regex.replace(regex[d - 1:d + 2], '')
        vals = []
        if new_regex != '[]':
            # Remaining literal characters matched against every possible byte.
            vals = [ord(x) for x in re.findall(new_regex, all_bytes, re.DOTALL)]
        for b in bv:
            or_vals = []
            for val in vals:
                or_vals.append(b == val)
            for opt in opts:
                or_vals.append(z3.And(b >= opt[0], b <= opt[1]))
            if negate:
                self.constrain(z3.Not(z3.Or(*or_vals)))
            else:
                self.constrain(z3.Or(*or_vals))

    def constrain_register(self, name: str, val):
        """Constrain register *name* to equal *val*."""
        reg = self.registers[name]
        self.constrain(reg == val)

    def evaluate_register(self, name: str, eval_type: str = 'eval'):
        """Evaluate a register under the current constraints.

        eval_type 'max'/'min' uses the Optimize solver's objectives (only
        meaningful when the state was created with optimize=True).
        """
        val = self.registers[name]
        if eval_type == 'max':
            self.solver.maximize(val)
        elif eval_type == 'min':
            self.solver.minimize(val)
        if self.model is None:
            sat = self.solver.check()
            if sat == z3.sat:
                self.model = self.solver.model()
            else:
                raise ESILUnsatException('state has unsatisfiable constraints')
        value = self.model.eval(val, True)
        return value

    def evaluate(self, val: z3.BitVecRef) -> int:
        """Return one satisfying value for *val* (as a z3 value, model-completed)."""
        if self.is_sat():
            model = self.solver.model()
            return model.eval(val, True)
        else:
            raise ESILUnsatException('state has unsatisfiable constraints')

    def evalcon(self, val: z3.BitVecRef):
        """Evaluate *val* and pin it to that value with a constraint."""
        eval_val = self.evaluate(val)
        self.constrain(val == eval_val)
        return eval_val

    def eval_max(self, sym, n: int = 64):
        """Enumerate up to *n* distinct satisfying values of *sym* (solver state restored)."""
        solutions = []
        self.solver.push()
        while len(solutions) < n:
            if self.solver.check() == z3.sat:
                m = self.solver.model()
                sol = m.eval(sym, True)
                solutions.append(sol)
                # Exclude this solution to force a different one next round.
                self.solver.add(sym != sol)
            else:
                break
        self.solver.pop()
        return solutions

    def symbol(self, name: str, length: int, cons=None) -> z3.BitVecRef:
        """Create a fresh *length*-byte symbol, optionally constrained by *cons* (see constrain_bytes)."""
        bv = z3.BitVec(name, length * 8)
        if cons is not None:
            self.constrain_bytes(bv, cons)
        return bv

    def evaluate_buffer(self, bv: z3.BitVecRef) -> bytes:
        """Evaluate *bv* and unpack it into little-endian bytes."""
        buf = self.evaluate(bv)
        val = buf.as_long()
        length = bv.size() // 8
        return bytes([(val >> (8 * i)) & 255 for i in range(length)])

    def evaluate_string(self, bv: z3.BitVecRef) -> str:
        """Evaluate *bv* as a NUL-terminated string."""
        b = self.evaluate_buffer(bv)
        if b'\x00' in b:
            b = b[:b.index(b'\x00')]
        return b.decode()

    def symbolic_string(self, addr, length=None):
        """Read a (possibly symbolic) string at *addr*; returns (data_bv, length_bv)."""
        ret_len, last = self.mem_search(addr, [BZERO], length)
        data = self.mem_read_bv(addr, last)
        return (data, ret_len)

    def concrete_string(self, addr, length=None):
        """Read, concretize (pinning the value) and decode the string at *addr*."""
        ret_len, last = self.memory.search(addr, [BZERO], length)
        sym_str = self.memory.read_bv(addr, last)
        self.evalcon(sym_str)
        return self.evaluate_string(sym_str)

    def step(self) -> List:
        """Execute the instruction at PC; returns the list of successor states."""
        pc = self.registers['PC'].as_long()
        instr = self.r2api.disass(pc)
        self.current_instruction = instr
        new_states = self.proc.execute_instruction(self, instr)
        return new_states

    def is_sat(self) -> bool:
        """Whether the current constraint set is satisfiable."""
        return self.solver.check() == z3.sat

    def apply(self):
        """Concretize registers and memory and write them back into radare2."""
        for reg in self.registers._registers:
            if not self.registers._registers[reg]['sub']:
                register = self.registers[reg]
                value = self.evaluate(register)
                # Pin the chosen value so later evaluations stay consistent.
                self.constrain(register == value)
                self.r2api.set_reg_value(reg, value.as_long())
        for addr in self.memory._memory:
            value_bv = self.evaluate(self.memory[addr])
            self.constrain(self.memory[addr] == value_bv)
            value = self.evaluate_buffer(self.memory[addr])
            length = self.memory[addr].size() // 8
            self.r2api.write(addr, value, length)

    def clone(self) -> 'ESILState':
        """Deep-ish copy of this state (shared ESILProcess, copied constraints)."""
        # Suppress re-initialization: the clone copies state instead.
        self.kwargs['init'] = False
        clone = self.__class__(self.r2api, **self.kwargs)
        clone.constrain(*self.solver.assertions())
        clone.proc = self.proc
        clone.pid = self.pid
        clone.fork_mode = self.fork_mode
        clone.steps = self.steps
        clone.distance = self.distance
        clone.esil = self.esil.copy()
        clone.registers = self.registers.clone()
        clone.memory = self.memory.clone()
        clone.fs = self.fs.clone()
        # The cloned memory must answer to the clone's own solver.
        clone.memory.solver = clone.solver
        return clone
class PageIterator(Iterator[_T]):
    """Iterates over pages of a paged DynamoDB operation (query/scan).

    Each iteration calls *operation* with `exclusive_start_key` set to the
    previous page's `LastEvaluatedKey`, optionally throttling through a
    RateLimiter fed by the consumed capacity reported per page.
    """

    def __init__(self, operation: Callable, args: Any, kwargs: Dict[(str, Any)], rate_limit: Optional[float] = None) -> None:
        self._operation = operation
        self._args = args
        self._kwargs = kwargs
        self._last_evaluated_key = kwargs.get('exclusive_start_key')
        self._is_last_page = False
        self._total_scanned_count = 0
        self._rate_limiter = None
        if rate_limit:
            self._rate_limiter = RateLimiter(rate_limit)

    def __iter__(self) -> Iterator[_T]:
        return self

    def __next__(self) -> _T:
        if self._is_last_page:
            raise StopIteration()
        self._kwargs['exclusive_start_key'] = self._last_evaluated_key
        if self._rate_limiter:
            self._rate_limiter.acquire()
            # Ask DynamoDB to report consumed capacity so we can throttle.
            self._kwargs['return_consumed_capacity'] = TOTAL
        page = self._operation(*self._args, **self._kwargs)
        self._last_evaluated_key = page.get(LAST_EVALUATED_KEY)
        # A missing LastEvaluatedKey means this was the final page.
        self._is_last_page = self._last_evaluated_key is None
        self._total_scanned_count += page[SCANNED_COUNT]
        if self._rate_limiter:
            consumed_capacity = page.get(CONSUMED_CAPACITY, {}).get(CAPACITY_UNITS, 0)
            self._rate_limiter.consume(consumed_capacity)
        return page

    def next(self) -> _T:
        """Python-2-style alias for __next__."""
        return self.__next__()

    @property
    def key_names(self) -> Iterable[str]:
        # If the current page has a last_evaluated_key, use it to get the key attributes
        if self._last_evaluated_key:
            return self._last_evaluated_key.keys()
        # Otherwise, query the table schema (and the index in use, if any).
        table_meta = self._operation.__self__.get_meta_table()
        return table_meta.get_key_names(self._kwargs.get('index_name'))

    @property
    def page_size(self) -> Optional[int]:
        # BUG FIX: the property decorators were lost here — a bare
        # "_size.setter" line raised NameError at class-definition time and the
        # setter def shadowed the getter. Restored @property/@page_size.setter.
        return self._kwargs.get('limit')

    @page_size.setter
    def page_size(self, page_size: int) -> None:
        self._kwargs['limit'] = page_size

    @property
    def last_evaluated_key(self) -> Optional[Dict[(str, Dict[(str, Any)])]]:
        """LastEvaluatedKey of the most recent page, or None after the final page."""
        return self._last_evaluated_key

    @property
    def total_scanned_count(self) -> int:
        """Sum of ScannedCount across all pages fetched so far."""
        return self._total_scanned_count
def run_test(case, m):
    """Translate m's single update block to Verilog and compare with case.REF_UPBLK."""
    m.elaborate()
    m.apply(BehavioralRTLIRGenPass(m))
    m.apply(BehavioralRTLIRTypeCheckPass(m))
    # Reserved Verilog identifiers must be mangled by the visitor.
    visitor = BehavioralRTLIRToVVisitorL2(lambda x: x in verilog_reserved)
    rtlir_upblks = m.get_metadata(BehavioralRTLIRGenPass.rtlir_upblks)
    update_blocks = m.get_update_blocks()
    assert len(update_blocks) == 1
    for blk in update_blocks:
        generated = '\n'.join(visitor.enter(blk, rtlir_upblks[blk]))
        assert generated + '\n' == case.REF_UPBLK
def test_validation_error(capsys):
    """CLI exits non-zero and reports validation errors for a v2 spec checked as 3.0.0."""
    spec_path = './tests/integration/data/v2.0/petstore.yaml'
    with pytest.raises(SystemExit):
        main(['--schema', '3.0.0', spec_path])
    out, err = capsys.readouterr()
    assert not err
    # All error details go to stdout.
    expected_fragments = (
        './tests/integration/data/v2.0/petstore.yaml: Validation Error:',
        'Failed validating',
        "'openapi' is a required property",
    )
    for fragment in expected_fragments:
        assert fragment in out
class AverageMeter:
    """Tracks running means of named scalar metrics."""

    def __init__(self, *keys):
        # Each key maps to [running_sum, sample_count].
        self.__data = {k: [0.0, 0] for k in keys}

    def add(self, dict):
        """Accumulate one observation per key from *dict* (parameter name kept for API compatibility)."""
        for key, value in dict.items():
            entry = self.__data[key]
            entry[0] += value
            entry[1] += 1

    def get(self, *keys):
        """Mean for a single key, or a tuple of means for several keys."""
        means = tuple(self.__data[k][0] / self.__data[k][1] for k in keys)
        return means[0] if len(keys) == 1 else means

    def pop(self, key=None):
        """Reset one key and return its mean; with no key, reset every key (returns None)."""
        if key is None:
            for k in self.__data:
                self.__data[k] = [0.0, 0]
            return None
        mean = self.get(key)
        self.__data[key] = [0.0, 0]
        return mean
class BERTweetMetrics():
    """Bundle of ignite metrics (loss/accuracy/precision/recall/F1) for BERTweet training."""

    def __init__(self, multiclass=True, weight=None, **kwargs):
        # multiclass selects single-label mode; the ignite metrics run in
        # multilabel mode when it is False, and update() then applies
        # sigmoid+round to the raw predictions.
        self.multiclass = multiclass
        # Only whether a weight tensor was supplied is stored; the tensor
        # itself is captured by the weighted-F1 expression below.
        self.weight = (weight is not None)
        self.metrics = {}
        self.metrics['loss'] = Average()
        self.metrics['accuracy'] = Accuracy(is_multilabel=(not multiclass))
        self.metrics['precision'] = Precision(average=False, is_multilabel=(not multiclass))
        self.metrics['recall'] = Recall(average=False, is_multilabel=(not multiclass))
        # Per-class F1 composed lazily from precision/recall; the 1e-20
        # epsilon avoids division by zero when both are 0.
        F1 = (((self.metrics['precision'] * self.metrics['recall']) * 2) / ((self.metrics['precision'] + self.metrics['recall']) + 1e-20))
        self.metrics['f1'] = MetricsLambda((lambda t: torch.mean(t).item()), F1)
        if (self.multiclass and self.weight):
            # Class-weighted F1, with weights normalized to sum to 1.
            F1 = (F1 * (weight / weight.sum()))
            self.metrics['weighted-f1'] = MetricsLambda((lambda t: torch.mean(t).item()), F1)

    def reset(self):
        """Reset every underlying ignite metric."""
        for key in self.metrics:
            self.metrics[key].reset()

    def update(self, loss, y_pred, y_true):
        """Feed one batch (loss tensor, predictions, targets) to the metrics."""
        self.metrics['loss'].update(loss[0].item())
        if (not self.multiclass):
            # Multilabel: turn logits into hard 0/1 predictions.
            y_pred = torch.sigmoid(y_pred)
            y_pred = torch.round(y_pred)
        y_pred = y_pred.cpu()
        y_true = y_true.cpu()
        for key in self.metrics:
            if (key == 'loss'):
                # Loss was already updated from the loss tensor above.
                continue
            self.metrics[key].update((y_pred, y_true))

    def compute(self, **kwarg):
        """Return a dict of scalar metric values (precision/recall are class-averaged)."""
        result = {}
        result['loss'] = self.metrics['loss'].compute().item()
        result['accuracy'] = self.metrics['accuracy'].compute()
        result['precision'] = self.metrics['precision'].compute().mean().item()
        result['recall'] = self.metrics['recall'].compute().mean().item()
        result['f1'] = self.metrics['f1'].compute()
        if ('weighted-f1' in self.metrics):
            result['weighted-f1'] = self.metrics['weighted-f1'].compute()
        return result

    def log_tensorboard(self, writer, step, results=None, loss=None, train=True):
        """Write all metric values to TensorBoard under train/val tags; returns the results dict."""
        results = (self.compute() if (results is None) else results)
        mode_str = ('train' if train else 'val')
        # An explicit batch loss, when given, overrides the averaged loss.
        writer.add_scalar(('Loss/' + mode_str), (results['loss'] if (loss is None) else loss[0].item()), step)
        writer.add_scalar(('Accuracy/' + mode_str), results['accuracy'], step)
        writer.add_scalar(('Precision/' + mode_str), results['precision'], step)
        writer.add_scalar(('Recall/' + mode_str), results['recall'], step)
        writer.add_scalar(('F1/' + mode_str), results['f1'], step)
        if ('weighted-f1' in results):
            writer.add_scalar(('Weighted-F1/' + mode_str), results['weighted-f1'], step)
        return results
# BUG FIX: the decorator line was truncated to ".skipif(...)" (a syntax
# error); restored the standard "@pytest.mark." prefix.
@pytest.mark.skipif((not pytensor.config.cxx), reason='G++ not available, so we need to skip this test.')
def test_local_mul_s_d():
    """The local_mul_s_d rewrite must remove every MulSD node for all sparse formats."""
    mode = get_default_mode()
    mode = mode.including('specialize', 'local_mul_s_d')
    for sp_format in sparse.sparse_formats:
        inputs = [getattr(pytensor.sparse, sp_format + '_matrix')(), matrix()]
        f = pytensor.function(inputs, sparse.mul_s_d(*inputs), mode=mode)
        # After the rewrite no MulSD op may remain in the compiled graph.
        assert not any(isinstance(node.op, sparse.MulSD) for node in f.maker.fgraph.toposort())
class UpBlock(BaseModule):
    """1x1 conv -> 3x3 conv -> 2x transposed-conv upsampling block."""

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        assert isinstance(in_channels, int)
        assert isinstance(out_channels, int)
        # Channel mixing at 1x1, then a 3x3 projection to out_channels.
        self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        # kernel=4 / stride=2 / pad=1 exactly doubles the spatial resolution.
        self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        """Return the upsampled feature map (no activation after the deconv)."""
        hidden = F.relu(self.conv1x1(x))
        hidden = F.relu(self.conv3x3(hidden))
        return self.deconv(hidden)
class AttrVI_ATTR_USB_MAX_INTR_SIZE(RangeAttribute):
    """Declarative descriptor for the VISA attribute VI_ATTR_USB_MAX_INTR_SIZE
    (exposed in Python as ``maximum_interrupt_size``)."""

    # Applies to USB INSTR and USB RAW resources only.
    resources = [(constants.InterfaceType.usb, 'INSTR'), (constants.InterfaceType.usb, 'RAW')]
    py_name = 'maximum_interrupt_size'
    visa_name = 'VI_ATTR_USB_MAX_INTR_SIZE'
    visa_type = 'ViUInt16'
    default = NotAvailable
    # Readable, writable, and local to the session.
    (read, write, local) = (True, True, True)
    # Full ViUInt16 range; no enumerated value set.
    (min_value, max_value, values) = (0, 65535, None)
def test_hello_ini_setting(testdir):
    """An ini value ``HELLO`` must be readable via ``config.getini`` in a fixture.

    Bugfix: the generated test module was missing the ``@pytest.fixture``
    decorator on ``hello`` (only a stray blank line remained), so ``hello``
    was never registered as a fixture and the inner test could not receive
    it; the embedded sources were also re-indented into valid Python.
    """
    testdir.makeini('\n        [pytest]\n        HELLO = world\n    ')
    testdir.makepyfile("\n        import pytest\n\n        @pytest.fixture\n        def hello(request):\n            return request.config.getini('HELLO')\n\n        def test_hello_world(hello):\n            assert hello == 'world'\n    ")
    result = testdir.runpytest('-v')
    # The inner test must pass and the whole run must exit cleanly.
    result.stdout.fnmatch_lines(['*::test_hello_world PASSED*'])
    assert (result.ret == 0)
def get_embedding_names_by_table(tables: "Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]") -> List[List[str]]:
    """Compute the embedding output names for each table.

    A feature name that appears in more than one table is ambiguous, so it is
    disambiguated as ``"<feature_name>@<table_name>"``; a feature unique to a
    single table keeps its plain feature name.

    Bugfix: the separator was ``''`` (plain concatenation, e.g. ``f2t1``);
    restored the ``'@'`` separator so shared names are unambiguous.
    The parameter annotation is a forward reference so the function can be
    imported without the config classes in scope.

    Args:
        tables: embedding table configs exposing ``feature_names`` and ``name``.

    Returns:
        One list of embedding names per table, in input order.
    """
    # First pass: mark feature names that occur in more than one table.
    shared_feature: Dict[str, bool] = {}
    for embedding_config in tables:
        for feature_name in embedding_config.feature_names:
            if feature_name not in shared_feature:
                shared_feature[feature_name] = False
            else:
                shared_feature[feature_name] = True
    # Second pass: qualify shared names with their table name.
    embedding_names_by_table: List[List[str]] = []
    for embedding_config in tables:
        embedding_names: List[str] = []
        for feature_name in embedding_config.feature_names:
            if shared_feature[feature_name]:
                embedding_names.append(feature_name + '@' + embedding_config.name)
            else:
                embedding_names.append(feature_name)
        embedding_names_by_table.append(embedding_names)
    return embedding_names_by_table
class StandUpExecutor(ActionExecutor):
    """Executor for the stand-up action: a sitting or lying character stands."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        # Generator: yields the resulting state on success; yields nothing
        # (after recording an error on `info`) when the precondition fails.
        info.set_current_line(script[0])
        char_node = _get_character_node(state, char_index)
        if ((State.SITTING in char_node.states) or (State.LYING in char_node.states)):
            # Work on a copy so the original character node is untouched
            # unless the change is actually applied below.
            new_char_node = char_node.copy()
            new_char_node.states.discard(State.SITTING)
            new_char_node.states.discard(State.LYING)
            if modify:
                (yield state.change_state([ChangeNode(new_char_node)], in_place=in_place))
            else:
                # modify=False: only check feasibility, yield unchanged state.
                (yield state)
        else:
            info.error('{} is not sitting', char_node)
_node(_caller_prototype_tags, _prototype_tag_select)
def node_prototype_tags(caller):
    """Menu node for viewing/editing a prototype's `prototype_tags` field.

    Builds the node text from the caller's current tag values, wires a
    'remove' list action, and returns the ``(text, options)`` pair expected
    by the menu framework (``text`` itself is a ``(body, helptext)`` tuple).
    """
    text = '\n    |cPrototype-Tags|n can be used to classify and find prototypes in listings Tag names are not\n    case-sensitive and can have not have a custom category.\n\n    {current}\n    '.format(current=_get_current_value(caller, 'prototype_tags', formatter=(lambda lst: ', '.join((tg for tg in lst))), only_inherit=True))
    _set_actioninfo(caller, _format_list_actions('remove', prefix='|w<text>|n|W to add Tag. Other Action:|n '))
    helptext = "\n    Using prototype-tags is a good way to organize and group large numbers of prototypes by\n    genre, type etc. Under the hood, prototypes' tags will all be stored with the category\n    '{tagmetacategory}'.\n    ".format(tagmetacategory=protlib._PROTOTYPE_TAG_META_CATEGORY)
    text = (text, helptext)
    # Navigation options plus a catch-all that routes free text to the
    # tag-specific action handler.
    options = _wizard_options('prototype_tags', 'prototype_desc', 'prototype_locks')
    options.append({'key': '_default', 'goto': _prototype_tags_actions})
    return (text, options)
def read_srml(filename, map_variables=True):
    """Read a University of Oregon SRML tab-separated data file.

    The file layout alternates data columns with their quality-flag columns;
    each flag column is renamed ``<data_column>_flag`` and data values whose
    flag equals 99 are replaced with NaN.

    Bugfix/compat: uses ``np.nan`` instead of the ``np.NaN`` alias, which
    was removed in NumPy 2.0.

    Parameters
    ----------
    filename : str or path-like
        Path of the tab-delimited file to read.
    map_variables : bool, default True
        Rename columns via the module-level ``_map_columns`` mapping.

    Returns
    -------
    pandas.DataFrame
        Indexed by ``_format_index``; data columns paired with ``*_flag``
        columns.
    """
    tsv_data = pd.read_csv(filename, delimiter='\t')
    data = _format_index(tsv_data)
    # Drop the first two (raw date/time) columns consumed by _format_index.
    data = data[data.columns[2:]]
    if map_variables:
        data = data.rename(columns=_map_columns)
    columns = data.columns
    # Every odd column is the QC flag of the data column directly before it.
    flag_label_map = {flag: columns[columns.get_loc(flag) - 1] + '_flag' for flag in columns[1::2]}
    data = data.rename(columns=flag_label_map)
    for col in columns[::2]:
        # Flag value 99 marks missing data.
        missing = (data[col + '_flag'] == 99)
        data[col] = data[col].where(~missing, np.nan)
    return data
class ChannelAFG(ChannelBase):
    """One output channel of an AFG-style signal generator.

    Exposes SCPI-backed properties for impedance, waveform shape, frequency,
    phase and the various voltage levels. The range-validated ("dynamic")
    properties get their bounds from the instrument itself at construction
    time (see ``calculate_voltage_range``), since the instrument's MIN/MAX
    limits depend on other settings such as amplitude and impedance.
    """

    def __init__(self, instrument, id):
        super().__init__(instrument, id)
        # Query the instrument's current MIN/MAX limits and use them as the
        # validator bounds of the dynamic properties below.
        self.calculate_voltage_range()
        self.frequency_values = [self.frequency_min, self.frequency_max]
        self.phase_values = [self.phase_min, self.phase_max]

    load_impedance = Instrument.control('OUTPut{ch}:IMPedance?', 'OUTPut{ch}:IMPedance %d', 'This property sets the output load impedance for the specified\n        channel. The specified value is used for amplitude, offset, and\n        high/low level settings. You can set the impedance to any value from\n        1 to 1 M. The default value is 50 .', validator=strict_range, values=[1, 1000000])
    output_impedance = Instrument.control('OUTPut{ch}:LOW:IMPedance?', 'OUTPut{ch}:LOW:IMPedance %d', 'This property sets the instrument output impedance, the possible\n        values are: 5 Ohm or 50 Ohm (default).', validator=strict_discrete_set, values={5: 1, 50: 0}, map_values=True)
    shape = Instrument.control('SOURce{ch}:FUNCtion:SHAPe?', 'SOURce{ch}:FUNCtion:SHAPe %s', 'This property sets or queries the shape of the carrier waveform.\n        Allowed choices depends on the choosen modality, please refer on\n        instrument manual. When you set this property with a different value,\n        if the instrument is running it will be stopped.\n        Can be set to: SIN<USOID>, SQU<ARE>, PULS<E>, RAMP, PRN<OISE>, DC,\n        SINC, GAUS<SIAN>, LOR<ENTZ>, ERIS<E>, EDEC<AY>, HAV<ERSINE>, ARBB,\n        EFIL<E>, DOUBLEPUL<SE>', validator=strict_discrete_set, values=['SINUSOID', 'SIN', 'SQUARE', 'SQU', 'PULSE', 'PULS', 'RAMP', 'PRNOISE', 'PRN', 'DC', 'SINC', 'GAUSSIAN', 'GAUS', 'LORENTZ', 'LOR', 'ERISE', 'ERIS', 'EDECAY', 'EDEC', 'HAVERSINE', 'HAV', 'ARBB', 'EFILE', 'EFIL', 'DOUBLEPULSE', 'DOUBLEPUL'])
    # SCPI command templates used by the (presumably inherited) delay property.
    delay_get_command = 'SOURce{ch}:INITDELay?'
    delay_set_command = 'SOURce{ch}:INITDELay %s'
    delay_max_get_command = 'SOURce{ch}:INITDELay? MAXimum'
    delay_min_get_command = 'SOURce{ch}:INITDELay? MINimum'
    frequency = Instrument.control('SOURce{ch}:FREQuency?', 'SOURce{ch}:FREQuency %s', 'This property sets or queries the frequency of the output waveform.\n        This command is available when the Run Mode is set to any setting other\n        than Sweep. The output frequency range setting depends on the type of\n        output waveform. If you change the type of output waveform, it may\n        change the output frequency because changing waveform types affects the\n        setting range of the output frequency. The output frequency range\n        setting depends also on the amplitude parameter.', validator=strict_range, dynamic=True)
    frequency_max = Instrument.measurement('SOURce{ch}:FREQuency? MAXimum', 'This property queries the maximum frequency that can be set to the\n        output waveform.')
    frequency_min = Instrument.measurement('SOURce{ch}:FREQuency? MINimum', 'This property queries the minimum frequency that can be set to the\n        output waveform.')
    phase = Instrument.control('SOURce{ch}:PHASe:ADJust?', 'SOURce{ch}:PHASe:ADJust %s', 'This property sets or queries the phase of the output waveform for\n        the specified channel. The value is in degrees.', validator=strict_range, dynamic=True)
    phase_max = Instrument.measurement('SOURce{ch}:PHASe:ADJust? MAXimum', 'This property queries the maximum phase that can be set to the\n        output waveform.')
    phase_min = Instrument.measurement('SOURce{ch}:PHASe:ADJust? MINimum', 'This property queries the minimum phase that can be set to the\n        output waveform.')
    voltage_unit = Instrument.control('OUTPut{ch}:VOLTage:UNIT?', 'OUTPut{ch}:VOLTage:UNIT %s', 'This property sets or queries the units of output amplitude, the\n        possible choices are: VPP, VRMS, DBM. This command does not affect the\n        offset, high level, or low level of output.', validator=strict_discrete_set, values=['VPP', 'VRMS', 'DBM'])
    voltage_low = Instrument.control('SOURce{ch}:VOLTage:LEVel:IMMediate:LOW?', 'SOURce{ch}:VOLTage:LEVel:IMMediate:LOW %s', 'This property sets or queries the low level of the waveform. The\n        low level could be limited by noise level to not exceed the maximum\n        amplitude. If the carrier is Noise or DC level, this command and this\n        query cause an error.', validator=strict_range, dynamic=True)
    voltage_low_max = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:LOW? MAXimum', 'This property queries the maximum low voltage level that can be set\n        to the output waveform.')
    voltage_low_min = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:LOW? MINimum', 'This property queries the minimum low voltage level that can be set\n        to the output waveform.')
    voltage_high = Instrument.control('SOURce{ch}:VOLTage:LEVel:IMMediate:HIGH?', 'SOURce{ch}:VOLTage:LEVel:IMMediate:HIGH %s', 'This property sets or queries the high level of the waveform. The\n        high level could be limited by noise level to not exceed the maximum\n        amplitude. If the carrier is Noise or DC level, this command and this\n        query cause an error.', validator=strict_range, dynamic=True)
    voltage_high_max = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:HIGH? MAXimum', 'This property queries the maximum high voltage level that can be set\n        to the output waveform.')
    voltage_high_min = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:HIGH? MINimum', 'This property queries the minimum high voltage level that can be set\n        to the output waveform.')
    voltage_amplitude = Instrument.control('SOURce{ch}:VOLTage:LEVel:IMMediate:AMPLitude?', 'SOURce{ch}:VOLTage:LEVel:IMMediate:AMPLitude %s', 'This property sets or queries the output amplitude for the specified\n        channel. The measurement unit of amplitude depends on the selection\n        operated using the voltage_unit property. If the carrier is Noise the\n        amplitude is Vpk instead of Vpp. If the carrier is DC level this\n        command causes an error. The range of the amplitude setting could be\n        limited by the frequency and offset parameter of the carrier waveform.\n        ', validator=strict_range, dynamic=True)
    voltage_amplitude_max = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:AMPLitude? MAXimum', 'This property queries the maximum amplitude voltage level that can\n        be set to the output waveform.', get_process=(lambda value: float(value.replace('VPP', ''))))
    voltage_amplitude_min = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:AMPLitude? MINimum', 'This property queries the minimum amplitude voltage level that can\n        be set to the output waveform.', get_process=(lambda value: float(value.replace('VPP', ''))))
    voltage_offset = Instrument.control('SOURce{ch}:VOLTage:LEVel:IMMediate:OFFSet?', 'SOURce{ch}:VOLTage:LEVel:IMMediate:OFFSet %s', 'This property sets or queries the offset level for the specified\n        channel. The offset range setting depends on the amplitude parameter.\n        ', validator=strict_range, dynamic=True)
    voltage_offset_max = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:OFFSet? MAXimum', 'This property queries the maximum offset voltage level that can be\n        set to the output waveform.')
    voltage_offset_min = Instrument.measurement('SOURce{ch}:VOLTage:LEVel:IMMediate:OFFSet? MINimum', 'This property queries the minimum offset voltage level that can be\n        set to the output waveform.')
    baseline_offset = Instrument.control('SOURce{ch}:VOLTage:BASELINE:OFFSET?', 'SOURce{ch}:VOLTage:BASELINE:OFFSET %s', 'This property sets or queries the offset level for the specified\n        channel. The offset range setting depends on the amplitude parameter.\n        ', validator=strict_range, dynamic=True)
    baseline_offset_max = Instrument.measurement('SOURce{ch}:VOLTage:BASELINE:OFFSET? MAXimum', 'This property queries the maximum offset voltage level that can be\n        set to the output waveform.')
    baseline_offset_min = Instrument.measurement('SOURce{ch}:VOLTage:BASELINE:OFFSET? MINimum', 'This property queries the minimum offset voltage level that can be\n        set to the output waveform.')

    def calculate_voltage_range(self):
        """Refresh the validator bounds of the voltage-related dynamic
        properties from the instrument's current MIN/MAX limits.

        Each assignment queries the instrument (the ``*_min``/``*_max``
        measurements above), so this should be re-run after changes that
        affect the limits (e.g. impedance or amplitude settings).
        """
        self.voltage_low_values = [self.voltage_low_min, self.voltage_low_max]
        self.voltage_high_values = [self.voltage_high_min, self.voltage_high_max]
        self.voltage_amplitude_values = [self.voltage_amplitude_min, self.voltage_amplitude_max]
        self.voltage_offset_values = [self.voltage_offset_min, self.voltage_offset_max]
        self.baseline_offset_values = [self.baseline_offset_min, self.baseline_offset_max]
class PlainVerticalTable(PrettyTable):
    """PrettyTable variant that renders every cell on its own line.

    Each row's values are printed one per line, with a blank line after
    each row; no borders, headers or alignment are emitted.
    """

    def get_string(self, **kwargs: (str | list[str])) -> str:
        options = self._get_options(kwargs)
        parts: list[str] = []
        for row in self._get_rows(options):
            parts.extend('{}\n'.format(value) for value in row)
            parts.append('\n')  # blank separator line after each row
        return ''.join(parts)
def _ensure_unique_nodes_unique_edges(graph_dict):
nodes = graph_dict['nodes']
edges = graph_dict['edges']
new_nodes = {node['id']: node for node in nodes}
new_nodes = list(new_nodes.values())
new_edges = {'{}.{}.{}'.format(edge['from_id'], edge['relation_type'], edge['to_id']): edge for edge in edges}
new_edges = list(new_edges.values())
return {'nodes': new_nodes, 'edges': new_edges} |
class BatchBase(futures.FutureBase):
    """Base class for batched operations.

    A batch accumulates work items and is itself a future: computing the
    batch flushes it (subclasses implement ``_flush``), after which every
    item must have had its value or error set.

    Bugfix: ``flush`` read ``_debug.options.KEEP_DEPENDENCIES`` — a
    NameError, since every other debug flag in this class is read from
    ``_debug_options`` (see the DUMP_FLUSH_BATCH/DUMP_STACK checks).
    """

    def __init__(self):
        futures.FutureBase.__init__(self)
        self.items = []  # work items queued on this batch

    def is_flushed(self):
        """A batch counts as flushed once its future value is computed."""
        return self.is_computed()

    def is_cancelled(self):
        """Computed with an error means the batch was cancelled."""
        return (self.is_computed() and (self.error() is not None))

    def is_empty(self):
        return (len(self.items) == 0)

    def get_priority(self):
        """Priority key; the secondary component grows with batch size."""
        return (0, len(self.items))

    def flush(self):
        """Flush the batch, computing it and assigning results to all items.

        Raises:
            BatchingError: if the batch was already flushed or cancelled.
        """
        if self.is_computed():
            raise BatchingError('Batch is already flushed or cancelled.')
        if _debug_options.DUMP_FLUSH_BATCH:
            debug.write(': -> batch flush:')
            self.dump(4)
            if _debug_options.DUMP_STACK:
                debug.dump_stack()
        try:
            # error() forces the underlying future to compute (presumably --
            # this triggers _compute/_flush; confirm against FutureBase).
            self.error()
            # Drop item references unless debugging asks to keep them.
            # (Was `_debug.options.KEEP_DEPENDENCIES`: fixed NameError.)
            if (not _debug_options.KEEP_DEPENDENCIES):
                self.items.clear()
        finally:
            if _debug_options.DUMP_FLUSH_BATCH:
                debug.write((': <- batch flushed: %s' % debug.str(self)))

    def cancel(self, error=None):
        """Cancel the batch by setting an error; no-op if already computed."""
        if self.is_computed():
            return
        if (error is None):
            error = BatchCancelledError()
        self.set_error(error)

    def _compute(self):
        self._try_switch_active_batch()
        try:
            self._flush()
            self.set_value(None)
        except BaseException as error:
            # cancel() may already have computed the batch; don't overwrite.
            if (not self.is_computed()):
                self.set_error(error)

    def _computed(self):
        self._try_switch_active_batch()
        error = self.error()
        cancelled = (error is not None)
        if cancelled:
            self._cancel()
        # Every item must be resolved by the flush; propagate the batch error
        # (on cancel) or flag a subclass bug otherwise.
        for item in self.items:
            if (not item.is_computed()):
                item.set_error((error if cancelled else AssertionError("Value of this item wasn't set on batch flush.")))
        futures.FutureBase._computed(self)

    def _flush(self):
        """Subclass hook: perform the batched operation on ``self.items``."""
        raise NotImplementedError()

    def _cancel(self):
        """Subclass hook: extra cleanup on cancellation (default: nothing)."""
        pass

    def _try_switch_active_batch(self):
        """Subclass hook: replace this batch as the active one."""
        raise NotImplementedError()

    def __str__(self):
        return ('%s (%s, %i items)' % (core_inspection.get_full_name(type(self)), ('cancelled' if self.is_cancelled() else ('flushed' if self.is_flushed() else 'pending')), len(self.items)))

    def dump(self, indent=0):
        """Write a human-readable description of the batch to the debug log."""
        debug.write(debug.str(self), indent)
        debug.write(('Priority: %s' % debug.repr(self.get_priority())), (indent + 1))
        if self.items:
            debug.write('Items:', (indent + 1))
            for item in self.items:
                item.dump((indent + 2))
        else:
            debug.write('No items.', (indent + 1))

    def to_str(self):
        return str(self)

    def dump_perf_stats(self, time_taken):
        """Record flush timing for this batch and its items in the profiler."""
        self._total_time = time_taken
        profiler.append({'time_taken': time_taken, 'name': self.to_str(), 'dependencies': [(i.to_str(), i._total_time) for i in self.items]})
class RailsRoleTest(ProvyTestCase):
    """Unit tests for RailsRole provisioning, site management and restart.

    NOTE(review): the test methods lack the ``test_`` prefix; they were
    presumably decorated (e.g. nose's ``@istest``) in the original source
    and the decorators were stripped in this extraction — confirm before
    relying on discovery.
    """

    def setUp(self):
        super(RailsRoleTest, self).setUp()
        self.role = RailsRole(prov=None, context={'owner': 'some-owner'})
        self.supervisor_role = SupervisorRole(prov=None, context=self.role.context)

    def installs_necessary_packages_to_provision(self):
        # Fresh host: /etc/nginx does not exist, so passenger must build it.
        methods_to_mock = ('execute', 'register_template_loader', 'remote_exists_dir', 'update_file', 'change_path_mode', 'ensure_dir')
        with self.using_stub(AptitudeRole) as aptitude, self.using_stub(GemRole) as gem, self.mock_role_methods(*methods_to_mock):
            self.role.remote_exists_dir.return_value = False
            self.role.provision()
            self.role.register_template_loader.assert_called_once_with('provy.more.debian.web')
            self.assertEqual(aptitude.ensure_package_installed.mock_calls, [call(package) for package in PACKAGES_TO_INSTALL])
            self.assertEqual(gem.ensure_package_installed.mock_calls, [call('bundler'), call('passenger')])
            self.role.remote_exists_dir.assert_called_once_with('/etc/nginx')
            self.assertEqual(self.role.ensure_dir.mock_calls, [call('/var/log/nginx', sudo=True), call('/etc/nginx/sites-available', sudo=True), call('/etc/nginx/sites-enabled', sudo=True), call('/etc/nginx/conf.d', sudo=True)])
            self.role.execute.assert_called_once_with('passenger-install-nginx-module --auto --auto-download --prefix=/etc/nginx', sudo=True, stdout=False)
            self.assertEqual(self.role.update_file.mock_calls, [call('rails.nginx.conf.template', '/etc/nginx/conf/nginx.conf', sudo=True), call('rails.nginx.init.template', '/etc/init.d/nginx', sudo=True)])
            self.role.change_path_mode.assert_called_once_with('/etc/init.d/nginx', 755)

    def provisions_even_if_nginx_already_exists(self):
        # Existing /etc/nginx: the log dir creation step is skipped.
        methods_to_mock = ('register_template_loader', 'remote_exists_dir', 'update_file', 'change_path_mode', 'ensure_dir')
        with self.using_stub(AptitudeRole), self.using_stub(GemRole), self.mock_role_methods(*methods_to_mock):
            self.role.remote_exists_dir.return_value = True
            self.role.provision()
            self.assertEqual(self.role.ensure_dir.mock_calls, [call('/etc/nginx/sites-available', sudo=True), call('/etc/nginx/sites-enabled', sudo=True), call('/etc/nginx/conf.d', sudo=True)])

    def restarts_on_cleanup_if_must_be_restarted(self):
        with patch('provy.more.debian.RailsRole.restart') as restart:
            self.role.ensure_restart()
            self.role.cleanup()
            self.assertTrue(restart.called)

    def doesnt_restart_on_cleanup_if_doesnt_need_to_be_restarted(self):
        with patch('provy.more.debian.RailsRole.restart') as restart:
            self.role.cleanup()
            self.assertFalse(restart.called)

    def ensures_site_is_disabled(self):
        # Delegates to NginxRole.
        site = 'some-site'
        with self.using_stub(NginxRole) as nginx:
            self.role.ensure_site_disabled(site)
            nginx.ensure_site_disabled.assert_called_once_with(site)

    def ensures_site_is_enabled(self):
        # Delegates to NginxRole.
        site = 'some-site'
        with self.using_stub(NginxRole) as nginx:
            self.role.ensure_site_enabled(site)
            nginx.ensure_site_enabled.assert_called_once_with(site)

    def ensures_site_is_created_and_restarted(self):
        # update_file returning True means the config changed -> restart queued.
        owner = self.role.context['owner']
        site = 'some-site'
        host = 'some-host'
        path = 'some-path'
        options = {'foo': 'bar'}
        expected_options = {'foo': 'bar', 'host': host, 'path': path}
        with self.mock_role_methods('update_file', 'ensure_restart', 'execute'):
            self.role.update_file.return_value = True
            self.role.create_site(site, host, path, options=options)
            self.role.update_file.assert_called_once_with('rails-nginx.template', '/etc/nginx/sites-available/some-site', options=expected_options, sudo=True)
            self.role.ensure_restart.assert_called_once_with()
            self.role.execute.assert_called_once_with('cd some-path && bundle install --without development test --deployment', stdout=True, user=owner)

    def ensures_site_is_created_without_restart_when_already_existant(self):
        # update_file returning False means nothing changed -> no restart.
        owner = self.role.context['owner']
        site = 'some-site'
        host = 'some-host'
        path = 'some-path'
        options = {'foo': 'bar'}
        expected_options = {'foo': 'bar', 'host': host, 'path': path}
        with self.mock_role_methods('update_file', 'ensure_restart', 'execute'):
            self.role.update_file.return_value = False
            self.role.create_site(site, host, path, options=options)
            self.role.update_file.assert_called_once_with('rails-nginx.template', '/etc/nginx/sites-available/some-site', options=expected_options, sudo=True)
            self.assertFalse(self.role.ensure_restart.called)
            self.role.execute.assert_called_once_with('cd some-path && bundle install --without development test --deployment', stdout=True, user=owner)

    def restarts_nginx(self):
        with self.using_stub(NginxRole) as nginx:
            self.role.restart()
            nginx.restart.assert_called_once_with()
_config
def test_ratiotile_alternative_calculation(manager):
    """Exercise RatioTile's alternative layout mode with up to 12 windows.

    Switches layout twice, then adds windows one at a time and checks the
    reported (x, y, w, h) geometry list at selected window counts against
    hard-coded expectations for an 800x600 screen.
    """
    manager.c.next_layout()
    manager.c.next_layout()
    for i in range(12):
        manager.test_window(str(i))
        print(manager.c.layout.info()['layout_info'])
        if (i == 0):
            # Single window fills the screen.
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 800, 600)])
        elif (i == 4):
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 400, 200), (0, 200, 400, 200), (0, 400, 400, 200), (400, 0, 400, 300), (400, 300, 400, 300)])
        elif (i == 5):
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 400, 200), (0, 200, 400, 200), (0, 400, 400, 200), (400, 0, 400, 200), (400, 200, 400, 200), (400, 400, 400, 200)])
        elif (i == 9):
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 266, 150), (0, 150, 266, 150), (0, 300, 266, 150), (0, 450, 266, 150), (266, 0, 267, 200), (266, 200, 267, 200), (266, 400, 267, 200), (533, 0, 267, 200), (533, 200, 267, 200), (533, 400, 267, 200)])
        elif (i == 10):
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 266, 150), (0, 150, 266, 150), (0, 300, 266, 150), (0, 450, 266, 150), (266, 0, 267, 150), (266, 150, 267, 150), (266, 300, 267, 150), (266, 450, 267, 150), (533, 0, 267, 200), (533, 200, 267, 200), (533, 400, 267, 200)])
        elif (i == 11):
            assert (manager.c.layout.info()['layout_info'] == [(0, 0, 266, 150), (0, 150, 266, 150), (0, 300, 266, 150), (0, 450, 266, 150), (266, 0, 267, 150), (266, 150, 267, 150), (266, 300, 267, 150), (266, 450, 267, 150), (533, 0, 267, 150), (533, 150, 267, 150), (533, 300, 267, 150), (533, 450, 267, 150)])
def plt_fig(test_img, scores, img_scores, gts, threshold, cls_threshold, save_dir, class_name):
    """Save per-image anomaly-visualization figures (input/GT/segmentation/verdict).

    For each image, renders the input, ground-truth mask, a heat-map overlay of
    the anomaly scores with a morphologically-opened binary mask boundary, and
    a text panel with the ok/nok classification, then writes the figure to
    ``save_dir/<class_name>_<i>.png``.

    NOTE(review): ``mask = scores[i]`` aliases the input array, so the
    thresholding below mutates the caller's ``scores`` in place — confirm this
    is intended (a ``.copy()`` would avoid the side effect).
    """
    num = len(scores)
    # Shared color normalization; vmax is pulled halfway toward vmin to
    # increase heat-map contrast.
    vmax = (scores.max() * 255.0)
    vmin = (scores.min() * 255.0)
    vmax = ((vmax * 0.5) + (vmin * 0.5))
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    for i in range(num):
        img = test_img[i]
        img = denormalization(img)
        gt = gts[i].squeeze()
        heat_map = (scores[i] * 255)
        mask = scores[i]
        # Binarize at the segmentation threshold (assumes threshold < 1,
        # otherwise the 1s written first would be zeroed by the second step).
        mask[(mask > threshold)] = 1
        mask[(mask <= threshold)] = 0
        # Morphological opening with a disk kernel removes small speckles.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        (fig_img, ax_img) = plt.subplots(1, 4, figsize=(9, 3), gridspec_kw={'width_ratios': [4, 4, 4, 3]})
        fig_img.subplots_adjust(wspace=0.05, hspace=0)
        for ax_i in ax_img:
            ax_i.axes.xaxis.set_visible(False)
            ax_i.axes.yaxis.set_visible(False)
        ax_img[0].imshow(img)
        ax_img[0].title.set_text('Input image')
        ax_img[1].imshow(gt, cmap='gray')
        ax_img[1].title.set_text('GroundTruth')
        ax_img[2].imshow(heat_map, cmap='jet', norm=norm, interpolation='none')
        ax_img[2].imshow(vis_img, cmap='gray', alpha=0.7, interpolation='none')
        ax_img[2].title.set_text('Segmentation')
        # Fourth panel: black background with the textual classification result.
        black_mask = np.zeros((int(mask.shape[0]), int(((3 * mask.shape[1]) / 4))))
        ax_img[3].imshow(black_mask, cmap='gray')
        ax = plt.gca()
        if (img_scores[i] > cls_threshold):
            cls_result = 'nok'
        else:
            cls_result = 'ok'
        ax.text(0.05, 0.89, 'Detected anomalies', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.79, '', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.72, 'Results', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.67, '', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.59, "'{}'".format(cls_result), verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='r', family='sans-serif'))
        ax.text(0.05, 0.47, 'Anomaly scores: {:.2f}'.format(img_scores[i]), verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.37, '', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.3, 'Thresholds', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.25, '', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.17, 'Classification: {:.2f}'.format(cls_threshold), verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax.text(0.05, 0.07, 'Segementation: {:.2f}'.format(threshold), verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontdict=dict(fontsize=8, color='w', family='sans-serif'))
        ax_img[3].title.set_text('Classification')
        fig_img.savefig(os.path.join(save_dir, (class_name + '_{}'.format(i))), dpi=300, bbox_inches='tight')
        plt.close()
class Blur(nn.Module):
    """Fixed (non-trainable) depthwise blur built from a 1-D filter prototype.

    The 2-D kernel is the outer product of ``sfilter`` with itself,
    normalized to sum to one and replicated once per input channel; the
    forward pass pads with :class:`SamePad` and applies a grouped conv2d.
    """

    def __init__(self, in_filters, sfilter=(1, 1), pad_mode='replicate', **kwargs):
        super(Blur, self).__init__()
        self.pad = SamePad(len(sfilter), pad_mode=pad_mode)
        self.filter_proto = torch.tensor(sfilter, dtype=torch.float, requires_grad=False)
        # Outer product -> 2-D kernel, normalized, one copy per channel.
        kernel2d = torch.einsum('i, j -> i j', self.filter_proto, self.filter_proto)
        kernel2d = kernel2d / torch.sum(kernel2d)
        kernel2d = kernel2d.repeat([in_filters, 1, 1, 1])
        # Registered as a frozen Parameter so it moves with the module.
        self.filter = torch.nn.Parameter(kernel2d, requires_grad=False)

    def forward(self, x):
        padded = self.pad(x)
        # groups == channel count -> depthwise convolution.
        return F.conv2d(padded, self.filter, groups=padded.size()[1])

    def extra_repr(self):
        return ('pad=%s, filter_proto=%s' % (self.pad, self.filter_proto.tolist()))
def test_bmn():
    """Smoke-test the BMN localizer in train and inference modes.

    Builds the model from a config, runs a forward pass with ground-truth
    boxes (expecting a loss dict) and an inference pass with video metadata;
    uses CUDA tensors when a GPU is available, CPU tensors otherwise.
    """
    model_cfg = dict(type='BMN', temporal_dim=100, boundary_ratio=0.5, num_samples=32, num_samples_per_bin=3, feat_dim=400, soft_nms_alpha=0.4, soft_nms_low_threshold=0.5, soft_nms_high_threshold=0.9, post_process_top_k=100)
    if torch.cuda.is_available():
        localizer_bmn = build_localizer(model_cfg).cuda()
        raw_feature = torch.rand(8, 400, 100).cuda()
        gt_bbox = np.array(([[[0.1, 0.3], [0.375, 0.625]]] * 8))
        losses = localizer_bmn(raw_feature, gt_bbox)
        assert isinstance(losses, dict)
        # Inference path: no gradients, no loss, metadata required.
        video_meta = [dict(video_name='v_test', duration_second=100, duration_frame=960, feature_frame=960)]
        with torch.no_grad():
            one_raw_feature = torch.rand(1, 400, 100).cuda()
            localizer_bmn(one_raw_feature, gt_bbox=None, video_meta=video_meta, return_loss=False)
    else:
        localizer_bmn = build_localizer(model_cfg)
        raw_feature = torch.rand(8, 400, 100)
        gt_bbox = torch.Tensor(([[[0.1, 0.3], [0.375, 0.625]]] * 8))
        losses = localizer_bmn(raw_feature, gt_bbox)
        assert isinstance(losses, dict)
        video_meta = [dict(video_name='v_test', duration_second=100, duration_frame=960, feature_frame=960)]
        with torch.no_grad():
            one_raw_feature = torch.rand(1, 400, 100)
            localizer_bmn(one_raw_feature, gt_bbox=None, video_meta=video_meta, return_loss=False)
class VariableDeclarations(VersionBase):
    """Container for OpenSCENARIO VariableDeclaration elements (OSC >= 1.2).

    Bugfixes:
    - ``get_element`` constructed ``OpenSCENARIOVersionError`` without
      raising it, so the version check was a no-op; now raised.
    - ``parse`` takes the XML element (not ``self``); restored the
      ``@staticmethod`` decorator.
    - ``__eq__`` implicitly returned ``None`` in the negative cases; it now
      returns ``False`` explicitly.
    """

    def __init__(self):
        # Ordered list of Variable instances.
        self.variables = []

    @staticmethod
    def parse(element):
        """Build a VariableDeclarations from a VariableDeclarations XML element.

        Args:
            element: an xml.etree Element containing VariableDeclaration children.

        Returns:
            VariableDeclarations populated with the parsed variables.
        """
        variable_declarations = VariableDeclarations()
        for declaration in element.findall('VariableDeclaration'):
            variable_declarations.add_variable(Variable.parse(declaration))
        return variable_declarations

    def __eq__(self, other):
        if isinstance(other, VariableDeclarations):
            return self.variables == other.variables
        return False

    def add_variable(self, variable):
        """Append a Variable; returns self to allow chaining.

        Raises:
            TypeError: if ``variable`` is not a Variable.
        """
        if not isinstance(variable, Variable):
            raise TypeError('variable input is not of type Variable')
        self.variables.append(variable)
        return self

    def get_element(self):
        """Build the VariableDeclarations XML element.

        Raises:
            OpenSCENARIOVersionError: if the targeted OSC version is < 1.2.
        """
        if self.version_minor < 2:
            raise OpenSCENARIOVersionError('Variables were introduced in OSC 1.2')
        element = ET.Element('VariableDeclarations')
        for variable in self.variables:
            element.append(variable.get_element())
        return element
class ArrayList(Array2D):
    """2-D array of doubles backed by one ``array('d')`` row per line.

    Indexable either with an ``(x, y)`` tuple (single cell) or a plain row
    index (whole row).
    """

    def __init__(self, w, h, data=None):
        self.width = w
        self.height = h
        # h rows, each a length-w array of zeros (array * w repeats elements).
        self.data = [array('d', [0]) * w for _row in range(h)]
        if data is not None:
            self.setup(data)

    def __getitem__(self, idx):
        if not isinstance(idx, tuple):
            return self.data[idx]
        x, y = idx
        return self.data[y][x]

    def __setitem__(self, idx, val):
        if not isinstance(idx, tuple):
            self.data[idx] = val
        else:
            x, y = idx
            self.data[y][x] = val

    def copy_data_from(self, other):
        # Row-wise slice assignment copies values without rebinding the rows.
        for dst_row, src_row in zip(self.data, other.data):
            dst_row[:] = src_row
class PLBartTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for PLBART.

    Sequences are wrapped with ``prefix_tokens``/``suffix_tokens``, which are
    refreshed from the current source language (``set_src_lang_special_tokens``
    is invoked in ``__init__``; its definition lies outside this excerpt).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    # Special-token ids placed before/after every encoded sequence.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', language_codes='base', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[(str, Any)]]=None, additional_special_tokens=None, **kwargs):
        """Load the SentencePiece model and build the fairseq-aligned vocab maps.

        The fairseq vocabulary reserves ids 0-3 for <s>/<pad>/</s>/<unk> and
        shifts the SentencePiece ids by ``fairseq_offset``; language-code
        tokens are appended after the SentencePiece vocab, and for the 'base'
        code set a <mask> token follows the language codes.
        """
        # Mask token behaves like a normal word: strip the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, language_codes=language_codes, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        self.language_codes = language_codes
        fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
        # ids 0-3 are fixed fairseq specials; spm ids are shifted by this offset.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        # Language codes come right after the (shifted) SentencePiece vocab.
        self.lang_code_to_id = {code: ((self.sp_model_size + i) + self.fairseq_offset) for (i, code) in enumerate(fairseq_language_codes)}
        self.id_to_lang_code = {v: k for (k, v) in self.lang_code_to_id.items()}
        if (self.language_codes == 'base'):
            # <mask> sits after the last language code in the 'base' layout.
            self.fairseq_tokens_to_ids['<mask>'] = ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if (additional_special_tokens is not None):
            # Only add tokens not already present, preserving order.
            self._additional_special_tokens.extend([t for t in additional_special_tokens if (t not in self._additional_special_tokens)])
        if (self.language_codes == 'base'):
            # 'base': src_lang may legitimately be None.
            self._src_lang = src_lang
            self.cur_lang_code_id = (self.lang_code_to_id[self._src_lang] if (self._src_lang is not None) else self._src_lang)
        else:
            # 'multi': default to English when unspecified.
            self._src_lang = (src_lang if (src_lang is not None) else 'en_XX')
            self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
if (not hasattr(self, 'sp_model_kwargs')):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def vocab_size(self):
if (self.language_codes == 'base'):
return (((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset) + 1)
else:
return ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
def src_lang(self) -> str:
return self._src_lang
_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
prefix_ones = ([1] * len(self.prefix_tokens))
suffix_ones = ([1] * len(self.suffix_tokens))
if (token_ids_1 is None):
return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
    """Tokenize `raw_inputs` for translation and attach the forced BOS id
    that steers generation toward `tgt_lang`."""
    if src_lang is None or tgt_lang is None:
        raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
    # The src_lang property also refreshes the special tokens.
    self.src_lang = src_lang
    model_inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
    model_inputs['forced_bos_token_id'] = self.convert_tokens_to_ids(tgt_lang)
    return model_inputs
def get_vocab(self):
    """Return the full token -> id mapping: base vocabulary first, then any
    added tokens (which may override base entries)."""
    vocab = {}
    for token_id in range(self.vocab_size):
        vocab[self.convert_ids_to_tokens(token_id)] = token_id
    vocab.update(self.added_tokens_encoder)
    return vocab
def _tokenize(self, text: str) -> List[str]:
    # Delegate to SentencePiece, asking for string pieces rather than ids.
    return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
    """Map a token string to its id, preferring the fairseq special-token
    table over the SentencePiece vocabulary."""
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    piece_id = self.sp_model.PieceToId(token)
    # SentencePiece returns 0 for unknown pieces; map that to the unk id.
    if not piece_id:
        return self.unk_token_id
    return piece_id + self.fairseq_offset
def _convert_id_to_token(self, index):
    """Map an id back to its token, checking the fairseq table first and
    falling back to the offset-adjusted SentencePiece vocabulary."""
    special = self.fairseq_ids_to_tokens.get(index)
    if special is not None:
        return special
    return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
    """Join SentencePiece tokens back into text (the sentencepiece
    underline marker becomes a space)."""
    joined = ''.join(tokens)
    return joined.replace(SPIECE_UNDERLINE, ' ').strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Write the sentencepiece vocab file into ``save_directory``.

    Copies the original model file when it exists on disk; otherwise dumps
    the serialized proto held in memory. Returns a 1-tuple with the output
    path, or None (after logging an error) when ``save_directory`` is not a
    directory.
    """
    if (not os.path.isdir(save_directory)):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory')
        return
    out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
    # Copy the original vocab file unless it already is the destination.
    if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
        copyfile(self.vocab_file, out_vocab_file)
    elif (not os.path.isfile(self.vocab_file)):
        # No source file on disk (e.g. tokenizer was loaded from a
        # serialized proto): write the in-memory model out.
        with open(out_vocab_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='python', **kwargs) -> BatchEncoding:
    # Record the language pair (assigning src_lang refreshes the special
    # tokens via its property setter), then delegate batching to the base
    # class. NOTE: the default tgt_lang is 'python' — this tokenizer is
    # presumably used for text-to-code generation.
    self.src_lang = src_lang
    self.tgt_lang = tgt_lang
    return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
    # Hook called by the base tokenizer before encoding source text.
    return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
    # Hook called by the base tokenizer before encoding target text.
    return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
    """Configure special tokens for the given source language: no prefix,
    suffix of [eos, lang_code] (just [eos] when no language code applies)."""
    self.cur_lang_code = None if src_lang is None else self.lang_code_to_id[src_lang]
    self.prefix_tokens = []
    suffix = [self.eos_token_id]
    if self.cur_lang_code is not None:
        suffix.append(self.cur_lang_code)
    self.suffix_tokens = suffix
def set_tgt_lang_special_tokens(self, lang: str) -> None:
    """Configure special tokens for the given target language: no prefix,
    suffix of [eos, lang_code] (just [eos] when no language code applies)."""
    self.cur_lang_code = None if lang is None else self.lang_code_to_id[lang]
    self.prefix_tokens = []
    suffix = [self.eos_token_id]
    if self.cur_lang_code is not None:
        suffix.append(self.cur_lang_code)
    self.suffix_tokens = suffix
class MultiTextureSprite(pyglet.sprite.AdvancedSprite):
    # Sprite whose group binds several named textures at once (e.g. for
    # multi-texture shaders). All textures are presumably the same kind; the
    # first entry of `imgs` decides which default shader is used.

    def __init__(self, imgs: Mapping[(str, pyglet.image.Texture)], x: float=0, y: float=0, z: float=0, blend_src: int=pyglet.gl.GL_SRC_ALPHA, blend_dest: int=pyglet.gl.GL_ONE_MINUS_SRC_ALPHA, batch: Optional[pyglet.graphics.Batch]=None, group: Optional[MultiTextureSpriteGroup]=None, subpixel: bool=False, program: pyglet.graphics.shader.ShaderProgram=None) -> None:
        """Create the sprite.

        :param imgs: mapping of texture-unit name -> texture; the first value
            is used as this sprite's primary texture.
        :param group: parent group; a MultiTextureSpriteGroup wrapping `imgs`
            is always created beneath it.
        """
        # NOTE: attribute assignments mirror pyglet's Sprite.__init__; the
        # order matters because _create_vertex_list() reads them.
        self._x = x
        self._y = y
        self._z = z
        self._texture = list(imgs.values())[0]
        if isinstance(self._texture, pyglet.image.TextureArrayRegion):
            self._program = (program or pyglet.sprite.get_default_array_shader())
        else:
            self._program = (program or pyglet.sprite.get_default_shader())
        self._batch = (batch or pyglet.graphics.get_default_batch())
        self._group = MultiTextureSpriteGroup(imgs, blend_src, blend_dest, self.program, group)
        self._subpixel = subpixel
        self._create_vertex_list()
class MyModel(ClassyModel):
    """Minimal classification model: global average pooling over the spatial
    dimensions followed by a single linear classifier."""

    def __init__(self, num_classes):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Input is presumably a 3-channel image tensor — confirm with callers.
        num_channels = 3
        self.fc = nn.Linear(num_channels, num_classes)

    def forward(self, x):
        pooled = self.avgpool(x)
        flattened = pooled.reshape(pooled.size(0), -1)
        return self.fc(flattened)
class ViewPseudoFactory(ViewFactory):
    # Stand-in ViewFactory for a bookmark: it never matches a real view and
    # always resolves URLs to the bookmark's own href.

    def __init__(self, bookmark):
        # Dummy path spec: this factory is never matched against a request.
        super().__init__(RegexPath('/', '/', {}), '')
        self.bookmark = bookmark

    def matches_view(self, view):
        # A pseudo factory corresponds to no actual view.
        return False

    def get_absolute_url(self, user_interface, **arguments):
        return self.bookmark.href.as_network_absolute()
class LeNetContainer(nn.Module):
    """LeNet-style CNN: two conv/pool/ReLU stages followed by two fully
    connected layers.

    `input_dim` must equal the flattened feature size after the second
    pooling stage for the intended input resolution.
    """

    def __init__(self, num_filters, kernel_size, input_dim, hidden_dims, output_dim=10):
        super(LeNetContainer, self).__init__()
        # Single-channel input; both convs use stride 1.
        self.conv1 = nn.Conv2d(1, num_filters[0], kernel_size, 1)
        self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size, 1)
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], output_dim)

    def forward(self, x):
        # Stage 1: conv -> 2x2 max-pool -> ReLU.
        out = F.relu(F.max_pool2d(self.conv1(x), 2, 2))
        # Stage 2: conv -> 2x2 max-pool -> ReLU.
        out = F.relu(F.max_pool2d(self.conv2(out), 2, 2))
        # Flatten everything but the batch dimension, then classify.
        out = out.view(out.size(0), -1)
        return self.fc2(self.fc1(out))
def getClusterLabelWithDisMatrix(dis_matrix, display_dis_matrix=False):
    """Cluster items from a precomputed distance matrix with complete-linkage
    agglomerative clustering and return the per-item labels.

    :param dis_matrix: square precomputed pairwise-distance matrix.
    :param display_dis_matrix: when True, show a heatmap of the matrix first.
    :returns: array of cluster labels, one per row of `dis_matrix`.
    """
    # Cluster count and linkage are fixed by the experiment setup.
    n_clusters = 7
    linkage = 'complete'
    if display_dis_matrix:
        sns.heatmap(dis_matrix)
        plt.show()
    # NOTE(review): `affinity` was renamed `metric` in newer scikit-learn —
    # confirm the pinned sklearn version still accepts `affinity`.
    estimator = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage, affinity='precomputed')
    estimator.fit(dis_matrix)
    label_pred = estimator.labels_
    return label_pred
class Generator(nn.Module):
    """Label-conditioned encoder/decoder image generator with residual blocks.

    forward(x, label_feature) returns (adv_x, mixed_feature): the generated
    image (Tanh-bounded) and the label-mixed input features.
    """

    def __init__(self):
        super(Generator, self).__init__()
        self.label_encoder = LabelEncoder()
        curr_dim = 64
        # Encoder: 7x7 stem over the 6-channel mixed input, two stride-2
        # downsampling convs, then three residual blocks.
        image_encoder = [nn.Conv2d(6, curr_dim, kernel_size=7, stride=1, padding=3, bias=True), nn.InstanceNorm2d(curr_dim), nn.ReLU(inplace=True)]
        for i in range(2):
            image_encoder += [nn.Conv2d(curr_dim, (curr_dim * 2), kernel_size=4, stride=2, padding=1, bias=True), nn.InstanceNorm2d((curr_dim * 2)), nn.ReLU(inplace=True)]
            curr_dim = (curr_dim * 2)
        for i in range(3):
            image_encoder += [ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, net_mode='t')]
        self.image_encoder = nn.Sequential(*image_encoder)
        # Decoder mirrors the encoder: three residual blocks, then two
        # stride-2 transposed convs back to the input resolution.
        decoder = []
        for i in range(3):
            decoder += [ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, net_mode='t')]
        for i in range(2):
            decoder += [nn.ConvTranspose2d(curr_dim, (curr_dim // 2), kernel_size=4, stride=2, padding=1, bias=False), nn.InstanceNorm2d((curr_dim // 2)), nn.ReLU(inplace=True)]
            curr_dim = (curr_dim // 2)
        # Final head: decoded features concatenated with the input image
        # (hence +3 channels) projected to a 3-channel Tanh image.
        self.residual = nn.Sequential(nn.Conv2d((curr_dim + 3), 3, kernel_size=3, stride=1, padding=1, bias=False), nn.Tanh())
        self.decoder = nn.Sequential(*decoder)

    def forward(self, x, label_feature):
        # Mix the label information into the image before encoding.
        mixed_feature = self.label_encoder(x, label_feature)
        encode = self.image_encoder(mixed_feature)
        decode = self.decoder(encode)
        # Skip connection: condition the output head on the original input.
        decode_x = torch.cat([decode, x], dim=1)
        adv_x = self.residual(decode_x)
        return (adv_x, mixed_feature)
def load_checkpoint(meta_path: str, file_name_prefix: str) -> QuantizationSimModel:
    """Restore a QuantizationSimModel checkpoint.

    Loads the TF session from `<meta_path>/<file_name_prefix>.meta`,
    unpickles the saved quantsim configuration, and re-attaches the new
    session to it.
    """
    new_sess = utils.graph_saver.load_model_from_meta(meta_path=str((((meta_path + '/') + file_name_prefix) + '.meta')))
    new_quant_sim = load_data_from_pickle_file((meta_path + '/orig_quantsim_config'))
    new_quant_sim.session = new_sess
    # Re-point the quantizer wrappers at tensors in the freshly loaded graph.
    update_tensor_quantizer_references(new_sess, new_quant_sim._param_quantizers)
    update_tensor_quantizer_references(new_sess, new_quant_sim._activation_quantizers)
    return new_quant_sim
def _get_localzone(_root: str='/') -> datetime.tzinfo:
    """Detect the system's local timezone (Unix).

    Lookup order: the TZ environment variable, the /etc/localtime symlink
    target, /etc/timezone, distro clock config files, and finally the
    localtime TZif file itself. `_root` exists so tests can point the
    search at a fake filesystem root.

    :raises LookupError: when no timezone configuration can be found.
    """
    tzenv = os.environ.get('TZ')
    if tzenv:
        return _tz_from_env(tzenv)
    # /etc/localtime is commonly a symlink into the zoneinfo database;
    # derive the zone name from the link target.
    try:
        link_dst = os.readlink('/etc/localtime')
    except OSError:
        pass
    else:
        pos = link_dst.find('/zoneinfo/')
        if (pos >= 0):
            zone_name = link_dst[(pos + 10):]
            tzinfo = _get_tzinfo(zone_name)
            if (tzinfo is not None):
                return tzinfo
    # Debian-style /etc/timezone: plain text zone name.
    tzpath = os.path.join(_root, 'etc/timezone')
    if os.path.exists(tzpath):
        with open(tzpath, 'rb') as tzfile:
            data = tzfile.read()
            # Skip if the file is actually binary TZif data rather than a
            # zone name (happens on some misconfigured systems).
            if (data[:5] != b'TZif2'):
                etctz = data.strip().decode()
                # Drop trailing junk and comments after the zone name.
                if (' ' in etctz):
                    (etctz, dummy) = etctz.split(' ', 1)
                if ('#' in etctz):
                    (etctz, dummy) = etctz.split('#', 1)
                return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
    # RedHat/Gentoo-style clock config: ZONE="Region/City".
    timezone_re = re.compile('\\s*(TIME)?ZONE\\s*=\\s*"(?P<etctz>.+)"')
    for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
        tzpath = os.path.join(_root, filename)
        if (not os.path.exists(tzpath)):
            continue
        with open(tzpath) as tzfile:
            for line in tzfile:
                match = timezone_re.match(line)
                if (match is not None):
                    etctz = match.group('etctz')
                    return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
    # Last resort: parse the TZif file directly (no zone name available).
    for filename in ('etc/localtime', 'usr/local/etc/localtime'):
        tzpath = os.path.join(_root, filename)
        if (not os.path.exists(tzpath)):
            continue
        return _get_tzinfo_from_file(tzpath)
    raise LookupError('Can not find any timezone configuration')
def build_model(images, model_name, training, override_params=None, model_dir=None, fine_tuning=False):
    """Build an EfficientNet graph for `images` and return (logits, endpoints).

    :param images: input image tensor.
    :param model_name: model variant name used to look up block/global params.
    :param training: True when building the training graph.
    :param override_params: optional dict of global-param overrides.
    :param model_dir: if set, the resolved params are written to
        `model_dir/model_params.txt` (once).
    :param fine_tuning: treat the model as frozen-stats fine-tuning.
    """
    assert isinstance(images, tf.Tensor)
    # For eval/fine-tuning, use the plain BatchNormalization implementation
    # (no TPU cross-replica averaging).
    if ((not training) or fine_tuning):
        if (not override_params):
            override_params = {}
        override_params['batch_norm'] = utils.BatchNormalization
    (blocks_args, global_params) = get_model_params(model_name, override_params)
    # Also force the resolved global params to the same batch-norm class.
    if ((not training) or fine_tuning):
        global_params = global_params._replace(batch_norm=utils.BatchNormalization)
    if model_dir:
        # Record the resolved parameters next to the checkpoints, once.
        param_file = os.path.join(model_dir, 'model_params.txt')
        if (not tf.gfile.Exists(param_file)):
            if (not tf.gfile.Exists(model_dir)):
                tf.gfile.MakeDirs(model_dir)
            with tf.gfile.GFile(param_file, 'w') as f:
                tf.logging.info(('writing to %s' % param_file))
                f.write(('model_name= %s\n\n' % model_name))
                f.write(('global_params= %s\n\n' % str(global_params)))
                f.write(('blocks_args= %s\n\n' % str(blocks_args)))
    with tf.variable_scope(model_name):
        model = efficientnet_model.Model(blocks_args, global_params)
        logits = model(images, training=training)
    logits = tf.identity(logits, 'logits')
    return (logits, model.endpoints)
class LoggerDepthProjection():
    """Periodically logs predicted vs. target depth as a colored point cloud
    mesh (every `step_size` steps, skipping step 0)."""

    def __init__(self, step_size, name):
        super(LoggerDepthProjection, self).__init__()
        self.step_size = step_size
        self.name = name
        # Render the cloud with small points.
        self.config = {'material': {'cls': 'PointsMaterial', 'size': 0.03}}

    def tick(self, logger, step, ray_origins, ray_directions, output_depth, target_depth):
        """Project depths to points and add them to the logger when due."""
        # Guard clause: only fire on positive multiples of step_size.
        if step <= 0 or step % self.step_size != 0:
            return
        verts, cols, _ = get_point_clouds(ray_origins, ray_directions, output_depth, target_depth)
        logger.add_mesh(self.name, vertices=verts.unsqueeze(0), colors=cols.unsqueeze(0), global_step=step // self.step_size, config_dict=self.config)
class TimeParameteriseModel(TimeCreateExpression):
    # asv-style benchmark: measures the cost of parameterising a pybamm
    # particle-diffusion model and its geometry.
    r: pybamm.SpatialVariable
    geometry: dict

    def setup(self):
        # Deterministic setup: seed, then build the symbolic model once.
        set_random_seed()
        TimeCreateExpression.time_create_expression(self)

    def time_parameterise(self):
        # The timed body: substitute parameter values into the model and
        # geometry for a spherical negative-particle domain.
        param = pybamm.ParameterValues({'Particle radius [m]': 1e-05, 'Diffusion coefficient [m2.s-1]': 3.9e-14, 'Interfacial current density [A.m-2]': 1.4, 'Faraday constant [C.mol-1]': 96485, 'Initial concentration [mol.m-3]': 25000.0})
        self.r = pybamm.SpatialVariable('r', domain=['negative particle'], coord_sys='spherical polar')
        self.geometry = {'negative particle': {self.r: {'min': pybamm.Scalar(0), 'max': self.R}}}
        param.process_model(self.model)
        param.process_geometry(self.geometry)
def process_switch_inform(tokens, switch_pointer):
    """Follow the pointer chain from index 0 and emit quoted token spans at
    the chain's discontinuities.

    Returns '' when fewer than two gaps exist, two quoted spans when exactly
    four gaps exist, otherwise one quoted span.
    """
    # Walk the chain starting at 0; a pointer value of 0 terminates it.
    chain = [0]
    while switch_pointer[chain[-1]] != 0:
        chain.append(switch_pointer[chain[-1]])
    # Chain positions where consecutive indices are not adjacent.
    gaps = [pos for pos in range(1, len(chain)) if chain[pos] - chain[pos - 1] != 1]
    if len(gaps) < 2:
        return ''
    if len(gaps) == 4:
        first_span = ''.join(tokens[gaps[0]:gaps[1]])
        second_span = ''.join(tokens[gaps[2]:gaps[3]])
        return '"{}""{}"'.format(first_span, second_span)
    return '"{}"'.format(''.join(tokens[gaps[0]:gaps[1]]))
# Restored the mangled decorator stack (the lines were bare `.timeout(...)` /
# `.skipif(...)` attribute accesses, which is a syntax error).
@pytest.mark.timeout(60)
@pytest.mark.skipif(not with_distributed, reason='dask.distributed is not installed')
@pytest.mark.skipif(OPERATING_SYSTEM == 'Windows', reason='XXX: seems to always fail')
@pytest.mark.skipif(OPERATING_SYSTEM == 'Darwin', reason='XXX: intermittently fails')
@pytest.mark.skipif(OPERATING_SYSTEM == 'Linux', reason='XXX: intermittently fails')
def test_distributed_executor():
    """A BlockingRunner should reach its point goal using a dask.distributed
    Client as the executor."""
    from distributed import Client
    learner = Learner1D(linear, (-1, 1))
    client = Client(n_workers=1)
    BlockingRunner(learner, npoints_goal=10, executor=client)
    client.shutdown()
    assert learner.npoints > 0
class TupleSelector(object):
    """Wraps a peewee `.tuples()` query so result rows can be looked up by
    field/function instead of by position."""

    class _TupleWrapper(object):
        """A single result row addressable by field."""

        def __init__(self, data, fields):
            self._data = data
            self._fields = fields

        def get(self, field):
            return self._data[self._fields.index(TupleSelector.tuple_reference_key(field))]

    @classmethod
    def tuple_reference_key(cls, field):
        """Return a stable string key identifying `field` within a row.

        Restored the missing @classmethod: this is invoked as
        ``TupleSelector.tuple_reference_key(field)`` (one argument), so
        ``cls`` must be bound implicitly.
        """
        if isinstance(field, Function):
            # Functions are keyed by name plus the keys of their arguments.
            return (field.name + ','.join([cls.tuple_reference_key(arg) for arg in field.arguments]))
        if isinstance(field, Field):
            return ((field.name + ':') + field.model.__name__)
        raise Exception(('Unknown field type %s in TupleSelector' % field._node_type))

    def __init__(self, query, fields):
        self._query = query.select(*fields).tuples()
        self._fields = [TupleSelector.tuple_reference_key(field) for field in fields]

    def __iter__(self):
        return self._build_iterator()

    def _build_iterator(self):
        # Lazily wrap each raw tuple so callers can use .get(field).
        for tuple_data in self._query:
            (yield TupleSelector._TupleWrapper(tuple_data, self._fields))
def fixDelex(filename, data, data2, idx, idx_acts):
    """Correct delexicalised domain placeholders in one dialogue turn.

    Looks up the system dialogue-act annotation for this turn in `data2` and
    rewrites placeholders such as 'restaurant_name' to the acted-upon domain
    (attraction/hotel/restaurant). Returns `data` (mutated in place), or
    unchanged when no annotation exists for this turn.
    """
    # Bug fix: the original used filename.strip('.json'), which strips the
    # *characters* '.', 'j', 's', 'o', 'n' from both ends and corrupts names
    # like 'son.json'; remove the suffix instead.
    dialogue_key = filename[:-len('.json')] if filename.endswith('.json') else filename
    try:
        turn = data2[dialogue_key][str(idx_acts)]
    except (KeyError, TypeError):
        # Bug fix: bare `except:` narrowed to the lookup failures it guards.
        return data
    if (not isinstance(turn, str)):
        for (k, act) in turn.items():
            if ('Attraction' in k):
                if ('restaurant_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('restaurant', 'attraction')
                if ('hotel_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('hotel', 'attraction')
            if ('Hotel' in k):
                if ('attraction_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('attraction', 'hotel')
                if ('restaurant_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('restaurant', 'hotel')
            if ('Restaurant' in k):
                if ('attraction_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('attraction', 'restaurant')
                if ('hotel_' in data['log'][idx]['text']):
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace('hotel', 'restaurant')
    return data
class Conv3DSimple(nn.Conv3d):
    """Plain 3x3x3 3D convolution (bias-free), as used in video ResNets."""

    def __init__(self, in_planes, out_planes, midplanes=None, stride=1, padding=1):
        # `midplanes` is unused here; it is kept for signature compatibility
        # with the factored (2+1)D convolution variants that need it.
        super(Conv3DSimple, self).__init__(in_channels=in_planes, out_channels=out_planes, kernel_size=(3, 3, 3), stride=stride, padding=padding, bias=False)

    @staticmethod
    def get_downsample_stride(stride):
        # Restored the missing @staticmethod: the function takes no `self`,
        # and instance calls would otherwise pass the instance as `stride`.
        return (stride, stride, stride)
def test_detect_clearsky_arrays(detect_clearsky_data):
    # When given plain ndarrays (plus explicit times), detect_clearsky should
    # return an ndarray mask matching the fixture's expected clear flags.
    (expected, cs) = detect_clearsky_data
    clear_samples = clearsky.detect_clearsky(expected['GHI'].values, cs['ghi'].values, times=cs.index, window_length=10)
    assert isinstance(clear_samples, np.ndarray)
    assert (clear_samples == expected['Clear or not'].values).all()
class InitCatalogTestCase(unittest.TestCase):
    """Tests for the ``init_catalog`` frontend command: creating PO catalogs
    from a POT file for various locales, plural forms, and wrapping modes.

    The per-test ``@freeze_time`` decorators below were mangled into bare
    ``_time('1994-11-11')`` expression statements; restored so the
    PO-Revision-Date header is deterministic.
    """

    def setUp(self):
        # Run from the test data directory so the relative project paths
        # used by the tests resolve.
        self.olddir = os.getcwd()
        os.chdir(data_dir)
        self.dist = Distribution(TEST_PROJECT_DISTRIBUTION_DATA)
        self.cmd = frontend.InitCatalog(self.dist)
        self.cmd.initialize_options()

    def tearDown(self):
        # Remove any locale directories a test created, then restore cwd.
        for dirname in ['en_US', 'ja_JP', 'lv_LV']:
            locale_dir = os.path.join(i18n_dir, dirname)
            if os.path.isdir(locale_dir):
                shutil.rmtree(locale_dir)
        os.chdir(self.olddir)

    def test_no_input_file(self):
        self.cmd.locale = 'en_US'
        self.cmd.output_file = 'dummy'
        with pytest.raises(OptionError):
            self.cmd.finalize_options()

    def test_no_locale(self):
        self.cmd.input_file = 'dummy'
        self.cmd.output_file = 'dummy'
        with pytest.raises(OptionError):
            self.cmd.finalize_options()

    @freeze_time('1994-11-11')
    def test_with_output_dir(self):
        self.cmd.input_file = 'project/i18n/messages.pot'
        self.cmd.locale = 'en_US'
        self.cmd.output_dir = 'project/i18n'
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('en_US')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='en')
        expected_content = f'''# English (United States) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: en_US
"
"Language-Team: en_US <>
"
"Plural-Forms: nplurals=2; plural=(n != 1);
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)

    @freeze_time('1994-11-11')
    def test_keeps_catalog_non_fuzzy(self):
        self.cmd.input_file = 'project/i18n/messages_non_fuzzy.pot'
        self.cmd.locale = 'en_US'
        self.cmd.output_dir = 'project/i18n'
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('en_US')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='en')
        expected_content = f'''# English (United States) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: en_US
"
"Language-Team: en_US <>
"
"Plural-Forms: nplurals=2; plural=(n != 1);
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)

    @freeze_time('1994-11-11')
    def test_correct_init_more_than_2_plurals(self):
        self.cmd.input_file = 'project/i18n/messages.pot'
        self.cmd.locale = 'lv_LV'
        self.cmd.output_dir = 'project/i18n'
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('lv_LV')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='en')
        expected_content = f'''# Latvian (Latvia) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: lv_LV
"
"Language-Team: lv_LV <>
"
"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 :"
" 2);
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)

    @freeze_time('1994-11-11')
    def test_correct_init_singular_plural_forms(self):
        self.cmd.input_file = 'project/i18n/messages.pot'
        self.cmd.locale = 'ja_JP'
        self.cmd.output_dir = 'project/i18n'
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('ja_JP')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='ja_JP')
        expected_content = f'''# Japanese (Japan) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: ja_JP
"
"Language-Team: ja_JP <>
"
"Plural-Forms: nplurals=1; plural=0;
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)

    @freeze_time('1994-11-11')
    def test_supports_no_wrap(self):
        self.cmd.input_file = 'project/i18n/long_messages.pot'
        self.cmd.locale = 'en_US'
        self.cmd.output_dir = 'project/i18n'
        # Build a POT containing one very long msgid to exercise wrapping.
        long_message = (('"' + ('xxxxx ' * 15)) + '"')
        with open('project/i18n/messages.pot', 'rb') as f:
            pot_contents = f.read().decode('latin-1')
        pot_with_very_long_line = pot_contents.replace('"bar"', long_message)
        with open(self.cmd.input_file, 'wb') as f:
            f.write(pot_with_very_long_line.encode('latin-1'))
        self.cmd.no_wrap = True
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('en_US')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='en_US')
        expected_content = f'''# English (United States) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: en_US
"
"Language-Team: en_US <>
"
"Plural-Forms: nplurals=2; plural=(n != 1);
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid {long_message}
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)

    @freeze_time('1994-11-11')
    def test_supports_width(self):
        self.cmd.input_file = 'project/i18n/long_messages.pot'
        self.cmd.locale = 'en_US'
        self.cmd.output_dir = 'project/i18n'
        # With width=120 the long msgid still fits on one line unwrapped.
        long_message = (('"' + ('xxxxx ' * 15)) + '"')
        with open('project/i18n/messages.pot', 'rb') as f:
            pot_contents = f.read().decode('latin-1')
        pot_with_very_long_line = pot_contents.replace('"bar"', long_message)
        with open(self.cmd.input_file, 'wb') as f:
            f.write(pot_with_very_long_line.encode('latin-1'))
        self.cmd.width = 120
        self.cmd.finalize_options()
        self.cmd.run()
        po_file = _po_file('en_US')
        assert os.path.isfile(po_file)
        date = format_datetime(datetime(1994, 11, 11, 0, 0), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale='en_US')
        expected_content = f'''# English (United States) translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1
"
"Report-Msgid-Bugs-To: bugs.
"
"POT-Creation-Date: 2007-04-01 15:30+0200
"
"PO-Revision-Date: {date}
"
"Last-Translator: FULL NAME <>
"
"Language: en_US
"
"Language-Team: en_US <>
"
"Plural-Forms: nplurals=2; plural=(n != 1);
"
"MIME-Version: 1.0
"
"Content-Type: text/plain; charset=utf-8
"
"Content-Transfer-Encoding: 8bit
"
"Generated-By: Babel {VERSION}
"
#. This will be a translator coment,
#. that will include several lines
#: project/file1.py:8
msgid {long_message}
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
'''
        with open(po_file) as f:
            actual_content = f.read()
        assert (expected_content == actual_content)
def extract_feature_from_samples(generator, inception, truncation, truncation_latent, batch_size, n_sample, device, info_print=False):
    """Sample `n_sample` images from `generator` and return their Inception
    features (on CPU), e.g. for FID computation.

    The last batch absorbs the remainder so exactly `n_sample` samples are
    produced across `n_sample // batch_size` batches.
    """
    with torch.no_grad():
        generator.eval()
        inception.eval()
        n_batch = (n_sample // batch_size)
        # Size of the final batch: remainder plus one regular batch.
        resid = (n_sample - ((n_batch - 1) * batch_size))
        batch_sizes = (([batch_size] * (n_batch - 1)) + [resid])
        features = []
        for (idx, batch) in enumerate(batch_sizes):
            if info_print:
                print(('Processing Batch: ' + str(idx)))
            # 512-dim latents — presumably the StyleGAN latent size.
            latent = torch.randn(batch, 512, device=device)
            img = generator([latent], truncation=truncation, truncation_latent=truncation_latent)
            feat = inception(img)[0].view(img.shape[0], (- 1))
            # Move to CPU immediately to keep GPU memory bounded.
            features.append(feat.to('cpu'))
        features = torch.cat(features, 0)
        return features
def test_filter_languages():
    # Filtering keeps only the requested languages (result order follows the
    # source list, not the filter order) and rejects unknown codes.
    filtered_langs = dictcli.filter_languages(langs(), ['af-ZA'])
    assert (filtered_langs == [afrikaans()])
    filtered_langs = dictcli.filter_languages(langs(), ['pl-PL', 'en-US'])
    assert (filtered_langs == [english(), polish()])
    with pytest.raises(dictcli.InvalidLanguageError):
        dictcli.filter_languages(langs(), ['pl-PL', 'en-GB'])
class W_BytePRegexp(W_AnyRegexp):
    # Racket byte-pregexp value (the #px#"..." literal form).

    def tostring(self):
        # Printed representation: #px# followed by the escaped source bytes.
        from pypy.objspace.std.bytesobject import string_escape_encode
        out_encoded = string_escape_encode(self.source, '"')
        return ('#px#%s' % out_encoded)

    def obj_name(self):
        # The name of a byte regexp is its source as a Racket bytes value.
        return values.W_Bytes.from_string(self.source)
class BaseAgent(object):
    """Base class for navigation agents: runs rollouts over an environment
    and accumulates trajectories keyed by instruction id."""

    def __init__(self, env):
        self.env = env
        self.results = {}

    def get_results(self, detailed_output=False):
        """Return accumulated results as a list of
        {'instr_id', 'trajectory'[, 'details']} dicts."""
        output = []
        for (k, v) in self.results.items():
            output.append({'instr_id': k, 'trajectory': v['path']})
            if detailed_output:
                output[-1]['details'] = v['details']
        return output

    def rollout(self, **args):
        """Run one batch of episodes; subclasses must implement."""
        raise NotImplementedError

    @staticmethod
    def get_agent(name):
        # Restored the missing @staticmethod: the function takes no
        # self/cls, and instance calls would otherwise pass the instance
        # as `name`. Resolves '<name>Agent' in this module's namespace.
        return globals()[(name + 'Agent')]

    def test(self, iters=None, **kwargs):
        """Evaluate the agent, filling self.results.

        With `iters` set, runs exactly that many rollout batches (epoch
        shuffled); otherwise iterates in order until an instruction id
        repeats, i.e. one full pass over the data.
        """
        self.env.reset_epoch(shuffle=(iters is not None))
        self.losses = []
        self.results = {}
        looped = False
        self.loss = 0
        if (iters is not None):
            for i in range(iters):
                for traj in self.rollout(**kwargs):
                    self.loss = 0
                    self.results[traj['instr_id']] = traj
        else:
            while True:
                for traj in self.rollout(**kwargs):
                    if (traj['instr_id'] in self.results):
                        looped = True
                    else:
                        self.loss = 0
                        self.results[traj['instr_id']] = traj
                if looped:
                    break

    def test_viz(self, iters=None, **kwargs):
        """Same as test(), but uses rollout_viz() for the full-pass branch.

        NOTE(review): the finite-`iters` branch still calls rollout(), not
        rollout_viz() — confirm this asymmetry is intentional.
        """
        self.env.reset_epoch(shuffle=(iters is not None))
        self.losses = []
        self.results = {}
        looped = False
        self.loss = 0
        if (iters is not None):
            for i in range(iters):
                for traj in self.rollout(**kwargs):
                    self.loss = 0
                    self.results[traj['instr_id']] = traj
        else:
            while True:
                for traj in self.rollout_viz(**kwargs):
                    if (traj['instr_id'] in self.results):
                        looped = True
                    else:
                        self.loss = 0
                        self.results[traj['instr_id']] = traj
                if looped:
                    break
def mn_encode(message):
    """Encode a hex string into mnemonic words, three words per 8 hex chars
    (Electrum-style), using the module-level `wordlist` of size `n`."""
    assert len(message) % 8 == 0
    words = []
    for chunk_start in range(0, len(message), 8):
        value = int(message[chunk_start:chunk_start + 8], 16)
        first = value % n
        second = (value // n + first) % n
        third = (value // n // n + second) % n
        words.extend([wordlist[first], wordlist[second], wordlist[third]])
    return words
class TestSpatialSVD():
    # Tests for spatial-SVD model compression with greedy ratio selection.

    def test_spatial_svd_compression(self):
        model = get_model()
        # Scripted eval scores: consumed in order as the greedy selector
        # probes candidate compression ratios per layer.
        eval_callback = MagicMock()
        eval_callback.side_effect = [0.4, 0.6, 0.6, 0.5, 0.4, 0.6, 0.6, 0.5, 0.4, 0.6]
        # Target ratio 0.5, probing 4 candidate ratios per layer.
        greedy_params = GreedySelectionParameters(0.5, 4)
        auto_params = SpatialSvdParameters.AutoModeParams(greedy_params)
        svd_params = SpatialSvdParameters(input_op_names=model.inputs, output_op_names=model.outputs, mode=SpatialSvdParameters.Mode.auto, params=auto_params)
        scheme = aimet_common_defs.CompressionScheme.spatial_svd
        cost_metric = aimet_common_defs.CostMetric.mac
        (compressed_model, stats) = ModelCompressor.compress_model(model=model, eval_callback=eval_callback, eval_iterations=10, compress_scheme=scheme, cost_metric=cost_metric, parameters=svd_params)
        # The recorded eval-score dictionary must reflect the scripted
        # side_effect values, per layer and candidate ratio.
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv1'][0.25] == 0.4)
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv1'][0.5] == 0.6)
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv1'][0.75] == 0.6)
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv2'][0.25] == 0.5)
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv2'][0.5] == 0.4)
        assert (stats.compression_ratio_selection_stats.eval_scores_dictionary['conv2'][0.75] == 0.6)
def prime_decode_image(prime_encoded_image):
    """Decode a prime-product-encoded image into per-structure binary masks.

    Each structure is assumed encoded by multiplying its voxels by a distinct
    prime (in generation order); a voxel divisible by a prime therefore
    belongs to that structure. Iteration stops at the first prime with no
    matching voxels.
    """
    prime_generator = generate_primes()
    structure_list = []
    num_nonzero_voxels = 1
    for prime in prime_generator:
        # NOTE(review): debugging print left in — consider removing.
        print(prime)
        # Binary mask of voxels exactly divisible by this prime.
        s_img = sitk.Equal(sitk.Modulus(prime_encoded_image, prime), 0)
        num_nonzero_voxels = sitk.GetArrayViewFromImage(s_img).sum()
        if (num_nonzero_voxels > 0):
            structure_list.append(s_img)
        else:
            break
    return structure_list
def test_RandomVariable_bcast_specify_shape():
    # The output static shape should combine the specify_shape'd size with
    # parameter broadcasting: known-1 entries stay 1, symbolic stay None.
    rv = RandomVariable('normal', 0, [0, 0], config.floatX, inplace=True)
    s1 = pt.as_tensor(1, dtype=np.int64)
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    # Wrap s3 in an Assert so it is symbolic but constrained.
    s3 = Assert('testing')(s3, eq(s1, 1))
    size = specify_shape(pt.as_tensor([s1, s3, s2, s2, s1]), (5,))
    mu = tensor(dtype=config.floatX, shape=(None, None, 1))
    mu.tag.test_value = np.random.normal(size=(2, 2, 1)).astype(config.floatX)
    std = tensor(dtype=config.floatX, shape=(None, 1, 1))
    std.tag.test_value = np.ones((2, 1, 1)).astype(config.floatX)
    res = rv(mu, std, size=size)
    assert (res.type.shape == (1, None, None, None, 1))
class OrderSplitLoader(torch.utils.data.IterableDataset):
    """Iterable dataset for a sentence-ordering objective.

    Each example marks one sentence with '*...*'. With probability 0.5 the
    marked sentence is re-inserted at a wrong position (label 0); otherwise
    it is marked in place (label 1).
    """

    def __init__(self, contents, summaries, tokenizer_model, append_mask_token=False, **kwargs):
        super(OrderSplitLoader).__init__()
        if append_mask_token:
            raise NotImplementedError
        self.contents = contents
        self.tokenizer_model = tokenizer_model
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_model)
        # Cursor into `contents`; advanced by num_workers when sharded.
        self.pos = 0
        self.eval = (kwargs['eval'] if ('eval' in kwargs) else False)

    def __len__(self):
        return len(self.contents)

    def __iter__(self):
        return self

    def __next__(self):
        # Worker sharding: each DataLoader worker starts at its own id and
        # strides by the worker count, so workers cover disjoint indices.
        increment = 1
        worker_info = torch.utils.data.get_worker_info()
        if (worker_info is not None):
            increment = worker_info.num_workers
            worker_id = worker_info.id
            if (self.pos == 0):
                self.pos = worker_id
        valid = False
        while (not valid):
            if (self.pos >= len(self.contents)):
                raise StopIteration
            # Deterministic sampling during evaluation.
            if self.eval:
                random.seed(self.pos)
            content = self.contents[self.pos].split('\t')
            if (len(content) < 2):
                # Fall back to sentence splitting; skip one-sentence items.
                content = split_paragraphs(self.contents[self.pos], mode='sentence')
                if (len(content) < 2):
                    self.pos += increment
                    continue
            # Pick the sentence to mark.
            idx0 = random.choice(range(len(content)))
            content0 = content[idx0]
            example = {}
            copy_content = [s.strip() for s in content]
            if (random.random() < 0.5):
                # Negative example: remove the sentence and re-insert the
                # marked copy at a position other than its original index.
                copy_content = [s for (i, s) in enumerate(content) if (i != idx0)]
                insert_idx = random.choice(range((len(copy_content) + 1)))
                while (insert_idx == idx0):
                    insert_idx = random.choice(range((len(copy_content) + 1)))
                copy_content.insert(insert_idx, (('*' + content0) + '*'))
                example['prefix'] = self.tokenizer.encode('\n\n'.join(copy_content))
                example['labels'] = np.array([0])
            else:
                # Positive example: mark the sentence in its original place.
                copy_content[idx0] = (('*' + content0) + '*')
                example['prefix'] = self.tokenizer.encode('\n\n'.join(copy_content))
                example['labels'] = np.array([1])
            valid = True
            self.pos += increment
        return [example]
def test_remove_by_full_path_to_python(tmp_path: Path, manager: EnvManager, poetry: Poetry, config: Config, mocker: MockerFixture, venv_name: str) -> None:
    # Removing by the full path to a venv's python interpreter should remove
    # exactly that venv (the 3.6 one here) and leave siblings alone.
    config.merge({'virtualenvs': {'path': str(tmp_path)}})
    (tmp_path / f'{venv_name}-py3.7').mkdir()
    (tmp_path / f'{venv_name}-py3.6').mkdir()
    # The mocked interpreter reports version 3.6.6.
    mocker.patch('subprocess.check_output', side_effect=check_output_wrapper(Version.parse('3.6.6')))
    expected_venv_path = (tmp_path / f'{venv_name}-py3.6')
    python_path = ((expected_venv_path / 'bin') / 'python')
    venv = manager.remove(str(python_path))
    assert (venv.path == expected_venv_path)
    assert (not expected_venv_path.exists())
def test_reusing_nonce_from_a_mined_transaction_raises(deploy_client: JSONRPCClient) -> None:
    """A second client that shares the key (and thus the nonce counter) of
    an already-mined transaction must fail with EthereumNonceTooLow."""
    contract_proxy, _ = deploy_rpc_test_contract(deploy_client, 'RpcTest')
    # Same web3 endpoint and same private key => same starting nonce.
    client_invalid_nonce = JSONRPCClient(deploy_client.web3, deploy_client.privkey)
    estimated_transaction = deploy_client.estimate_gas(contract_proxy, 'ret', {})
    assert estimated_transaction, 'ret always succed, gas estimation should have succeed.'
    # Mine the first transaction so its nonce is consumed on-chain.
    deploy_client.poll_transaction(deploy_client.transact(estimated_transaction))
    with pytest.raises(EthereumNonceTooLow):
        proxy_invalid = client_invalid_nonce.new_contract_proxy(abi=contract_proxy.abi, contract_address=contract_proxy.address)
        estimated_transaction_invalid = deploy_client.estimate_gas(proxy_invalid, 'ret_str', {})
        assert estimated_transaction_invalid, 'ret_str always succed, gas estimation should have succeed.'
        client_invalid_nonce.transact(estimated_transaction_invalid)
def emissivity(ndvi_image: np.ndarray, landsat_band_4: np.ndarray=None, emissivity_method: str='avdan'):
    """Compute the band-10/band-11 emissivity images from NDVI.

    :param ndvi_image: NDVI image.
    :param landsat_band_4: red band; required by the 'xiaolei' method,
        optional otherwise.
    :param emissivity_method: name of the emissivity algorithm to run.
    :returns: tuple ``(emissivity_10, emissivity_11)``.
    :raises ValueError: if 'xiaolei' is requested without the red band.
    :raises InputShapesNotEqual: if the supplied images differ in shape.
    """
    # BUGFIX: check for a missing band *before* touching its attributes.
    # The original compared ndvi_image.shape == landsat_band_4.shape first,
    # which raised AttributeError when landsat_band_4 was None (its default),
    # making the documented ValueError path unreachable.
    if (emissivity_method == 'xiaolei') and (landsat_band_4 is None):
        raise ValueError(f'The red band (landsat_band_4) has to be provided if {emissivity_method} is to be used')
    if (landsat_band_4 is not None) and (ndvi_image.shape != landsat_band_4.shape):
        raise InputShapesNotEqual(f'Shapes of input images should be equal: {ndvi_image.shape}, {landsat_band_4.shape}')
    (emissivity_10, emissivity_11) = Runner(algorithms=emissivity_algorithms)(emissivity_method, ndvi=ndvi_image, red_band=landsat_band_4)
    return (emissivity_10, emissivity_11)
def example_generator(data_path, single_pass):
    """Yield Example protos from the length-prefixed record files matching
    ``data_path``.

    Each record is an 8-byte little struct 'q' length followed by that many
    bytes of serialized proto. Files are visited in sorted order and the
    generator stops after one pass when ``single_pass`` is True; otherwise
    files are shuffled and iterated forever.
    """
    while True:
        filelist = glob.glob(data_path)
        assert filelist, ('Error: Empty filelist at %s' % data_path)
        if single_pass:
            filelist.sort()
        else:
            random.shuffle(filelist)
        for path in filelist:
            # BUGFIX: the original opened each file and never closed it,
            # leaking one file handle per data shard.
            with open(path, 'rb') as reader:
                while True:
                    len_bytes = reader.read(8)
                    if not len_bytes:
                        break
                    str_len = struct.unpack('q', len_bytes)[0]
                    example_str = struct.unpack(('%ds' % str_len), reader.read(str_len))[0]
                    yield example_pb2.Example.FromString(example_str)
        if single_pass:
            break
class InceptionA(nn.Module):
    """Inception-v3 'A' block: four parallel branches (1x1, 5x5,
    double-3x3, pooled 1x1) concatenated along the channel dimension."""

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        # Branch 1: plain 1x1 convolution.
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        # Branch 2: 1x1 reduction followed by a padded 5x5 convolution.
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        # Branch 3: 1x1 reduction followed by two stacked 3x3 convolutions.
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        # Branch 4: 3x3 average pooling followed by a 1x1 projection.
        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        """Run all four branches on ``x`` and concatenate on channels."""
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        out_pool = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
def unpack_cmk_args(args, name):
    """Validate and unpack the (key, get-proc, set-proc) arguments of a
    continuation-mark-key primitive.

    :param args: positional argument list, possibly followed by structure
        properties (consumed by unpack_properties).
    :param name: primitive name used to prefix error messages.
    :returns: (key, get, set, prop_keys, prop_vals).
    :raises SchemeException: on wrong arity or badly-typed arguments.
    """
    (args, prop_keys, prop_vals) = unpack_properties(args, name)
    if len(args) != 3:
        # Fixed garbled message: previously "not give three required arguments".
        raise SchemeException(name + ': expected three arguments (key, get-proc, set-proc)')
    # Renamed locals: the originals shadowed the builtins `get` and `set`.
    (key, get_proc, set_proc) = args
    if not isinstance(key, values.W_ContinuationMarkKey):
        raise SchemeException(name + ': supplied key is not a continuation-mark-key')
    if not get_proc.iscallable():
        raise SchemeException(name + ': supplied get-proc is not callable')
    if not set_proc.iscallable():
        raise SchemeException(name + ': supplied set-proc is not callable')
    return (key, get_proc, set_proc, prop_keys, prop_vals)
def virtual_scane_one_model(model_dir, worker_id):
    """Virtually scan one OBJ model into a merged, deduplicated point cloud.

    Converts model.obj to a temporary PLY, runs the external virtual-scanner
    executable over generated camera poses, merges the resulting PCD files,
    and writes <model_basename>_clean.ply to OUTPUT_DATA_PATH. Returns None
    in every case; progress and failures are reported via print.

    Per-worker temp paths (tmp<worker_id>) keep parallel workers from
    clobbering each other's scratch files.
    """
    print(('Scanning ' + model_dir))
    tmp_model_name = (('tmp' + str(worker_id)) + '.ply')
    TMP_DATA_PATH = ('./tmp' + str(worker_id))
    TMP_PLY_POINTCLOUD_PATH = (('./tmp' + str(worker_id)) + '.ply_output')
    if (not os.path.exists(TMP_DATA_PATH)):
        os.makedirs(TMP_DATA_PATH)
    clean_dir(TMP_PLY_POINTCLOUD_PATH)
    (cam_view_points, cam_target_points) = generate_camera_view_target_points()
    model_filename = os.path.join(model_dir, 'model.obj')
    if (not os.path.exists(model_filename)):
        print(('File not found: %s' % model_filename))
        return
    model_basename = os.path.basename(model_dir)
    # Skip models already scanned in a previous run.
    prev_clean_output_filename = os.path.join(PREV_OUTPUT_DATA_PATH, (model_basename + '_clean.ply'))
    if os.path.exists(prev_clean_output_filename):
        print('Previously scanned, skip.', prev_clean_output_filename)
        return
    # Recenter the mesh so camera poses are relative to the bounding-box center.
    ply_tmp_name = os.path.join(TMP_DATA_PATH, tmp_model_name)
    mesh_util.convert_obj2ply(model_filename, ply_tmp_name, recenter=True, center_mode='box_center')
    # Shell out to the scanner binary with the camera poses on the command line.
    cmd_str = ((((EXE_VIRTUAL_SCANNER + ' ') + ply_tmp_name) + ' ') + CMD_POSTFIX.format(','.join((str(e) for e in cam_view_points)), ','.join((str(e) for e in cam_target_points))))
    os.system(cmd_str)
    # Merge the per-view scans into one point set.
    all_xyz = []
    pcd_files = glob.glob((TMP_PLY_POINTCLOUD_PATH + '/*.pcd'))
    for pf in pcd_files:
        xyz = pc_util.read_pcd(pf)
        all_xyz.extend(xyz)
    all_points = np.array(all_xyz)
    print('Collecte #points:', all_points.shape)
    if (all_points.shape[0] < 2048):
        print('Failed to scan sufficient points! Move to next model.')
        return
    all_points = pc_util.remove_duplicated_points(all_points)
    print(('Total points after merge: %d' % all_points.shape[0]))
    clean_output_filename = os.path.join(OUTPUT_DATA_PATH, (model_basename + '_clean.ply'))
    pc_util.write_ply(all_points, clean_output_filename)
    print(('Save point cloud to ' + clean_output_filename))
    return
# NOTE(review): the bare `_test` below looks like the remnant of a stripped
# decorator (likely `@keras_test`) — restore from the upstream source.
_test
def test_model_custom_target_tensors():
    """Exercise Model.compile/train_on_batch with explicit target tensors,
    both as a list and as a dict keyed by output-layer name."""
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    y = K.placeholder([10, 4], name='y')
    y1 = K.placeholder([10, 3], name='y1')
    y2 = K.placeholder([7, 5], name='y2')
    model = Model([a, b], [a_2, b_2])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1.0, 0.5]
    # Three target tensors for a two-output model must be rejected.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors=[y, y1, y2])
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors=[y, y1])
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np], {y: np.random.random((10, 4)), y1: np.random.random((10, 3))})
    # A dict keyed by a name that is not an output layer must be rejected.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors={'does_not_exist': y2})
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None, target_tensors={'dense_1': y, 'dropout': y1})
    out = model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np], {y: np.random.random((10, 4)), y1: np.random.random((10, 3))})
    # Backend-native placeholders are also accepted on TensorFlow.
    if (K.backend() == 'tensorflow'):
        import tensorflow as tf
        pl_target_a = tf.placeholder('float32', shape=(None, 4))
        model.compile(optimizer='rmsprop', loss='mse', target_tensors={'dense_1': pl_target_a})
        model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])
class TestMapNotify(EndianTest):
    """Round-trip pack/unpack tests for the X11 MapNotify event."""

    def setUp(self):
        # NOTE(review): the values for the 'event' and 'window' keys appear
        # to have been stripped (the line is not syntactically valid as-is);
        # they are presumably window-object literals — restore from the
        # upstream Xlib test source.
        self.evt_args_0 = {'event': , 'override': 1, 'sequence_number': 6027, 'type': 244, 'window': }
        # Reference wire encoding for the args above.
        self.evt_bin_0 = b'\xf4\x00\x17\x8b(C\x19! O\x9b\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    def testPack0(self):
        # Packing the arg dict must reproduce the reference bytes.
        bin = event.MapNotify._fields.to_binary(*(), **self.evt_args_0)
        self.assertBinaryEqual(bin, self.evt_bin_0)

    def testUnpack0(self):
        # Parsing the reference bytes must reproduce the arg dict exactly,
        # with no trailing bytes left over.
        (args, remain) = event.MapNotify._fields.parse_binary(self.evt_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.evt_args_0)
class StackOverflowDupQuestions(AbsTaskReranking):
    """MTEB reranking task: Stack Overflow duplicate-question detection."""

    def description(self):
        # Task metadata consumed by the MTEB harness.
        # NOTE(review): the 'reference' URL was truncated during extraction,
        # leaving the dict syntactically broken — restore the link from the
        # upstream task definition.
        return {'name': 'StackOverflowDupQuestions', 'hf_hub_name': 'mteb/stackoverflowdupquestions-reranking', 'description': 'Stack Overflow Duplicate Questions Task for questions with the tags Java, JavaScript and Python', 'reference': ' 'type': 'Reranking', 'category': 's2s', 'eval_splits': ['test', 'validation'], 'eval_langs': ['en'], 'main_score': 'map', 'revision': 'e185fbe320cfc5848eb6114e1ef5ec69'}
class TestStochasticTMLE():
    """Tests for zEpid's StochasticTMLE estimator.

    NOTE(review): df, cf and simple_df are consumed as pytest fixtures by
    the test methods below, but carry no @pytest.fixture decorators here —
    they appear to have been stripped during extraction; confirm against
    the upstream test file.
    """

    def df(self):
        # Sample data with binary outcome 'dead' kept and splines added.
        df = ze.load_sample_data(False)
        df[['cd4_rs1', 'cd4_rs2']] = ze.spline(df, 'cd40', n_knots=3, term=2, restricted=True)
        df[['age_rs1', 'age_rs2']] = ze.spline(df, 'age0', n_knots=3, term=2, restricted=True)
        return df.drop(columns=['cd4_wk45']).dropna()

    def cf(self):
        # Sample data with continuous outcome 'cd4_wk45' kept.
        df = ze.load_sample_data(False)
        df[['cd4_rs1', 'cd4_rs2']] = ze.spline(df, 'cd40', n_knots=3, term=2, restricted=True)
        df[['age_rs1', 'age_rs2']] = ze.spline(df, 'age0', n_knots=3, term=2, restricted=True)
        return df.drop(columns=['dead']).dropna()

    def simple_df(self):
        # Tiny hand-checkable frame for exact comparisons against SAS output.
        expected = pd.DataFrame([[1, 1, 1, 1, 1], [0, 0, 0, (- 1), 2], [0, 1, 0, 5, 1], [0, 0, 1, 0, 0], [1, 0, 0, 0, 1], [1, 0, 1, 0, 0], [0, 1, 0, 10, 1], [0, 0, 0, (- 5), 0], [1, 1, 0, (- 5), 2]], columns=['W', 'A', 'Y', 'C', 'S'], index=[1, 2, 3, 4, 5, 6, 7, 8, 9])
        return expected

    def test_error_continuous_exp(self, df):
        # Continuous exposures are not supported.
        with pytest.raises(ValueError):
            StochasticTMLE(df=df, exposure='cd40', outcome='dead')

    def test_error_fit(self, df):
        # fit() must fail until BOTH exposure and outcome models are set.
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        with pytest.raises(ValueError):
            stmle.fit(p=0.5)
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        with pytest.raises(ValueError):
            stmle.fit(p=0.5)
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        with pytest.raises(ValueError):
            stmle.fit(p=0.5)

    def test_error_p_oob(self, df):
        # Treatment probability p must lie in [0, 1].
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        with pytest.raises(ValueError):
            stmle.fit(p=1.1)
        with pytest.raises(ValueError):
            stmle.fit(p=(- 0.1))

    def test_error_p_cond_len(self, df):
        # Lengths of p and conditional must match.
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        with pytest.raises(ValueError):
            stmle.fit(p=[0.1], conditional=["df['male']==1", "df['male']==0"])
        with pytest.raises(ValueError):
            stmle.fit(p=[0.1, 0.3], conditional=["df['male']==1"])

    def test_error_summary(self, df):
        # summary() before fit() is an error.
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        with pytest.raises(ValueError):
            stmle.summary()

    def test_warn_missing_data(self):
        # Rows with missing exposure/outcome trigger a warning...
        df = pd.DataFrame()
        df['A'] = [1, 1, 0, 0, np.nan]
        df['Y'] = [np.nan, 0, 1, 0, 1]
        with pytest.warns(UserWarning):
            StochasticTMLE(df=df, exposure='A', outcome='Y')

    def test_drop_missing_data(self):
        # ...and are dropped from the working data set.
        df = pd.DataFrame()
        df['A'] = [1, 1, 0, 0, np.nan]
        df['Y'] = [np.nan, 0, 1, 0, 1]
        stmle = StochasticTMLE(df=df, exposure='A', outcome='Y')
        assert (stmle.df.shape[0] == 3)

    def test_continuous_processing(self):
        # Continuous outcomes are detected and bounded into (0, 1).
        a_list = [0, 1, 1, 0, 1, 1, 0, 0]
        y_list = [1, (- 1), 5, 0, 0, 0, 10, (- 5)]
        df = pd.DataFrame()
        df['A'] = a_list
        df['Y'] = y_list
        stmle = StochasticTMLE(df=df, exposure='A', outcome='Y', continuous_bound=0.0001)
        assert (stmle._continuous_outcome is True)
        assert (stmle._continuous_min == (- 5))
        assert (stmle._continuous_max == 10)
        assert (stmle._cb == 0.0001)
        y_bound = [(2 / 5), (4 / 15), (2 / 3), (1 / 3), (1 / 3), (1 / 3), 0.9999, 0.0001]
        pdt.assert_series_equal(pd.Series(y_bound), stmle.df['Y'], check_dtype=False, check_names=False)

    def test_marginal_vector_length_stoch(self, df):
        # One marginal estimate per Monte Carlo sample.
        stmle = StochasticTMLE(df=df, exposure='art', outcome='dead')
        stmle.exposure_model('male')
        stmle.outcome_model('art + male + age0')
        stmle.fit(p=0.4, samples=7)
        assert (len(stmle.marginals_vector) == 7)

    def test_qmodel_params(self, simple_df):
        # Logistic Q-model coefficients/predictions match SAS references.
        sas_params = [(- 1.0699), (- 0.9525), 1.5462]
        sas_preds = [0.3831332, 0.2554221, 0.1168668, 0.2554221, 0.6168668, 0.6168668, 0.1168668, 0.2554221, 0.3831332]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='Y')
        stmle.outcome_model('A + W')
        est_params = stmle._outcome_model.params
        est_preds = stmle._Qinit_
        npt.assert_allclose(sas_params, est_params, atol=0.0001)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_qmodel_params2(self, simple_df):
        # Normal-distribution Q-model against SAS references.
        # NOTE(review): the trailing bare `0.` entries look like truncated
        # decimals — verify against the upstream test file.
        sas_params = [0.3876, 0.3409, (- 0.203), (- 0.0883)]
        sas_preds = [0.437265, 0.210957, 0.6402345, 0.3876202, 0.0963188, 0.1846502, 0.6402345, 0., 0.]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='C')
        stmle.outcome_model('A + W + S', continuous_distribution='normal')
        est_params = stmle._outcome_model.params
        est_preds = stmle._Qinit_
        npt.assert_allclose(sas_params, est_params, atol=0.0001)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_qmodel_params3(self, simple_df):
        # Poisson-distribution Q-model against SAS references.
        # NOTE(review): same bare `0.` concern as above.
        sas_params = [(- 1.0478), 0.9371, (- 0.5321), (- 0.2733)]
        sas_preds = [0.4000579, 0.2030253, 0.6811115, 0.3507092, 0.1567304, 0., 0.6811115, 0.3507092, 0.3043857]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='C')
        stmle.outcome_model('A + W + S', continuous_distribution='Poisson')
        est_params = stmle._outcome_model.params
        est_preds = stmle._Qinit_
        npt.assert_allclose(sas_params, est_params, atol=0.0001)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_gmodel_params(self, simple_df):
        # Inverse of the treatment-model denominator matches SAS weights.
        sas_preds = [2.0, 1., 2.5, 1., 2, 2, 2.5, 1., 2]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='C')
        stmle.exposure_model('W')
        est_preds = (1 / stmle._denominator_)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_compare_tmle_binary(self, df):
        # p=1 minus p=0 stochastic estimates reproduce the TMLE risk difference.
        stmle = StochasticTMLE(df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.fit(p=1.0, samples=1)
        all_treat = stmle.marginal_outcome
        stmle.fit(p=0.0, samples=1)
        non_treat = stmle.marginal_outcome
        tmle = TMLE(df, exposure='art', outcome='dead')
        tmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        tmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        tmle.fit()
        expected = tmle.risk_difference
        npt.assert_allclose(expected, (all_treat - non_treat), atol=0.0001)

    def test_compare_tmle_continuous(self, cf):
        # Same comparison for a (log-transformed) continuous outcome.
        cf['cd4_wk45'] = np.log(cf['cd4_wk45'])
        stmle = StochasticTMLE(cf, exposure='art', outcome='cd4_wk45')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.fit(p=1.0, samples=1)
        all_treat = stmle.marginal_outcome
        stmle.fit(p=0.0, samples=1)
        non_treat = stmle.marginal_outcome
        tmle = TMLE(cf, exposure='art', outcome='cd4_wk45')
        tmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        tmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        tmle.fit()
        expected = tmle.average_treatment_effect
        npt.assert_allclose(expected, (all_treat - non_treat), atol=0.001)

    def test_qmodel_bound(self, simple_df):
        # Q predictions are truncated to the requested [0.2, 0.6] bounds.
        sas_params = [(- 1.0699), (- 0.9525), 1.5462]
        sas_preds = [0.3831332, 0.2554221, 0.2, 0.2554221, 0.6, 0.6, 0.2, 0.2554221, 0.3831332]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='Y')
        stmle.outcome_model('A + W', bound=[0.2, 0.6])
        est_params = stmle._outcome_model.params
        est_preds = stmle._Qinit_
        npt.assert_allclose(sas_params, est_params, atol=0.0001)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_gmodel_bound(self, simple_df):
        # Propensity scores are truncated to the requested [0.45, 0.55] bounds.
        sas_preds = [2, (1 / 0.55), (1 / 0.45), (1 / 0.55), 2, 2, (1 / 0.45), (1 / 0.55), 2]
        stmle = StochasticTMLE(df=simple_df, exposure='A', outcome='C')
        stmle.exposure_model('W', bound=[0.45, 0.55])
        est_preds = (1 / stmle._denominator_)
        npt.assert_allclose(sas_preds, est_preds, atol=1e-06)

    def test_calculate_epsilon1(self, df):
        # Targeting-step epsilon for the binary outcome.
        # NOTE(review): `(- 0.)` reference values look like truncated
        # decimals (e.g. -0.0xxx) — verify against the upstream test file.
        stmle = StochasticTMLE(df, exposure='art', outcome='dead')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + dvl0 + cd40 + cd4_rs1 + cd4_rs2')
        stmle.fit(p=0.15, samples=1)
        npt.assert_allclose((- 0.), stmle.epsilon, atol=1e-06)
        stmle.fit(p=0.4, samples=1)
        npt.assert_allclose((- 0.), stmle.epsilon, atol=1e-06)

    def test_calculate_epsilon2(self, cf):
        # Targeting-step epsilon for the continuous outcome.
        # NOTE(review): same truncated-literal concern as above.
        stmle = StochasticTMLE(cf, exposure='art', outcome='cd4_wk45')
        stmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
        stmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + dvl0 + cd40 + cd4_rs1 + cd4_rs2')
        stmle.fit(p=0.15, samples=1)
        npt.assert_allclose((- 0.), stmle.epsilon, atol=1e-06)
        stmle.fit(p=0.4, samples=1)
        npt.assert_allclose((- 0.), stmle.epsilon, atol=1e-06)

    def test_machine_learning_runs(self, df):
        # Smoke test: sklearn models are accepted for both nuisance models.
        log = LogisticRegression(penalty='l1', solver='liblinear', random_state=201)
        tmle = StochasticTMLE(df, exposure='art', outcome='dead')
        tmle.exposure_model('male + age0 + cd40 + cd4_rs1 + cd4_rs2 + dvl0 + male:dvl0', custom_model=log)
        tmle.outcome_model('art + male + age0 + dvl0 + cd40', custom_model=log)
        tmle.fit(p=0.4, samples=20)
@dataclass
class Tracker:
    """Records the leaf modules executed by one forward pass of ``module``.

    BUGFIX: the field()/annotation layout requires the @dataclass decorator,
    which was missing — without it ``field(default_factory=...)`` objects
    are left as plain class attributes and ``Tracker(module)`` fails.
    """
    module: nn.Module
    # Leaf modules in execution order.
    traced: List[nn.Module] = field(default_factory=list)
    # Hook handles, removed again after each __call__.
    handles: list = field(default_factory=list)
    # Maps qualified module name -> traced module, in execution order.
    name2module: Dict[str, nn.Module] = field(default_factory=OrderedDict)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str):
        # Trace only leaves (no submodules) plus Conv2d/BatchNorm2d, which
        # are recorded even when wrapped.
        has_not_submodules = ((len(list(m.modules())) == 1) or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d))
        if has_not_submodules:
            self.traced.append(m)
            self.name2module[name] = m

    def __call__(self, x: Tensor):
        """Run one forward pass under tracing hooks; returns self."""
        for (name, m) in self.module.named_modules():
            self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name)))
        self.module(x)
        # Always detach the hooks so tracking is a one-shot operation.
        [x.remove() for x in self.handles]
        return self

    def parametrized(self):
        """Return only the traced modules that own learnable state."""
        return {k: v for (k, v) in self.name2module.items() if (len(list(v.state_dict().keys())) > 0)}
def make_iterable_unstructure_fn(cl: Any, converter: BaseConverter, unstructure_to: Any=None) -> IterableUnstructureFn:
    """Generate an unstructure function for the iterable type ``cl``.

    The generated function unstructures each element with the handler
    dispatched for the element type (or the generic converter.unstructure
    for bare/TypeVar element types) and pours the results into
    ``unstructure_to`` (default: ``cl`` itself).
    """
    handler = converter.unstructure
    fn_name = 'unstructure_iterable'
    type_args = getattr(cl, '__args__', None)
    if type_args not in (None, ()):
        contained_type = type_args[0]
        # Only concrete element types get a specialized handler.
        if not isinstance(contained_type, TypeVar):
            handler = converter._unstructure_func.dispatch(contained_type)
    namespace = {'__cattr_seq_cl': unstructure_to or cl, '__cattr_u': handler}
    source = '\n'.join([
        f'def {fn_name}(iterable):',
        '    res = __cattr_seq_cl(__cattr_u(i) for i in iterable)',
        '    return res',
    ])
    eval(compile(source, '', 'exec'), namespace)
    return namespace[fn_name]
def alu_prediction(A, B, op, error=False):
    """Golden model for the tinyalu: compute the expected result of ``op``.

    :param A: first operand.
    :param B: second operand.
    :param op: member of the Ops enumeration (ADD / AND / XOR / MUL).
    :param error: when True, inject an off-by-one error into the result
        (used to verify that the scoreboard catches mismatches).
    :returns: predicted ALU output.
    :raises ValueError: for an Ops member with no defined operation.
        (Previously an unknown op fell through all branches and raised
        UnboundLocalError on ``result``.)
    """
    assert isinstance(op, Ops), 'The tinyalu op must be of type Ops'
    if op == Ops.ADD:
        result = A + B
    elif op == Ops.AND:
        result = A & B
    elif op == Ops.XOR:
        result = A ^ B
    elif op == Ops.MUL:
        result = A * B
    else:
        raise ValueError(f'Unsupported tinyalu op: {op}')
    if error:
        result = result + 1
    return result
class BaseRequiredImgAsset(BaseRequiredAsset):
    """Abstract model describing an image-asset requirement with pixel-size bounds."""
    ASSET_CLASS = ImgAsset  # concrete asset model this requirement validates
    min_width = models.PositiveIntegerField()   # minimum allowed width, pixels
    max_width = models.PositiveIntegerField()   # maximum allowed width, pixels
    min_height = models.PositiveIntegerField()  # minimum allowed height, pixels
    max_height = models.PositiveIntegerField()  # maximum allowed height, pixels

    class Meta(BaseRequiredAsset.Meta):
        # Abstract: concrete subclasses supply their own table.
        abstract = True
def test_read_commandline_bad_cmd(dataframe):
    """read_commandline rejects non-string commands, failing commands, and
    commands that produce no tabular output."""
    csv_path = f'{tempfile.gettempdir()}/dataframe.csv'
    dataframe.to_csv(csv_path)
    # A non-string command is a type error.
    with pytest.raises(TypeError):
        janitor.io.read_commandline(6)
    # A command that exits non-zero surfaces as CalledProcessError.
    with pytest.raises(CalledProcessError):
        janitor.io.read_commandline('bad command')
    # A command with no output: 'cat' with no file on POSIX (empty data),
    # 'type' on Windows (non-zero exit).
    if sys.platform in ['win32']:
        cmd, expected_error = 'type', CalledProcessError
    else:
        cmd, expected_error = 'cat', pd.errors.EmptyDataError
    with pytest.raises(expected_error):
        janitor.io.read_commandline(cmd)
    os.unlink(csv_path)
def test_sia_uses_ces_distances(s):
    """The SIA value must follow the configured CES distance measure."""
    for ces_distance, expected_phi in (('EMD', 2.3125), ('SUM_SMALL_PHI', 1.083333)):
        with config.override(REPERTOIRE_DISTANCE='EMD', CES_DISTANCE=ces_distance):
            sia = compute.subsystem.sia(s)
            assert sia.phi == expected_phi
def test_life_list(requests_mock):
    """observations.life_list parses the taxonomy endpoint into a LifeList
    with per-taxon observation counts."""
    requests_mock.get(f'{API_V1}/observations/taxonomy', json=j_life_list_2, status_code=200)
    results = iNatClient().observations.life_list(taxon_id=52775)
    assert isinstance(results, LifeList)
    assert len(results) == 31
    bombus = results[8]
    assert (bombus.id, bombus.name, bombus.count) == (52775, 'Bombus', 4)
    assert bombus.descendant_obs_count == results.get_count(52775) == 154
def get_stanford_models():
    """Download the Stanford CoreNLP jars needed by SPICE, if not present.

    Fetches the CoreNLP zip, extracts the base and models jars into
    SPICEDIR/SPICELIB, and removes the temporary download and directory.
    """
    jar_name = os.path.join(SPICEDIR, SPICELIB, '{}.jar'.format(JAR))
    if (not os.path.exists(jar_name)):
        print('Downloading {} for SPICE ...'.format(JAR))
        # NOTE(review): the download URL was truncated during extraction,
        # leaving an unterminated string — restore the CoreNLP release URL
        # from the upstream script.
        url = '
        (zip_file, headers) = urlretrieve(url, reporthook=print_progress)
        print()
        print('Extracting {} ...'.format(JAR))
        file_name = os.path.join(CORENLP, JAR)
        # Zip members always use forward slashes, hence the manual join.
        zip_file_name = '/'.join([CORENLP, JAR])
        target_name = os.path.join(SPICEDIR, SPICELIB, JAR)
        # Pull both the base jar and the models jar out of the archive.
        for filef in ['{}.jar', '{}-models.jar']:
            ZipFile(zip_file).extract(filef.format(zip_file_name), SPICEDIR)
            os.rename(os.path.join(SPICEDIR, filef.format(file_name)), filef.format(target_name))
        os.rmdir(os.path.join(SPICEDIR, CORENLP))
        os.remove(zip_file)
        print('Done.')
class PlanParser(object):
    """Runs three PDDL solvers in parallel and returns the shortest plan."""

    def __init__(self, domain_file_path):
        self.domain = domain_file_path
        self.problem_id = -1
        # One worker per solver configuration (solver ids 3..5 below).
        self.process_pool = multiprocessing.Pool(3)

    def get_plan(self):
        """Solve the current problem_id with all three solvers."""
        parsed_plans = self.process_pool.map(get_plan_async, zip(([self.domain] * 3), ([self.problem_id] * 3), range(3, 6)))
        return self.find_best_plan(parsed_plans)

    def get_plan_from_file(self, domain_path, filepath):
        """Solve an explicit problem file with all three solvers."""
        parsed_plans = self.process_pool.map(get_plan_from_file, zip(([domain_path] * 3), ([filepath] * 3), range(3, 6)))
        return self.find_best_plan(parsed_plans)

    def clean_plan(self, plan):
        """Collapse runs of consecutive GotoLocation actions, keeping only
        the final hop of each navigation chain."""
        if not plan:
            # Guard: the original indexed plan[-1] and crashed on empty plans.
            return []
        cleaned_plan = list()
        for i in range(len(plan) - 1):
            if not ((plan[i]['action'] == 'GotoLocation') and (plan[i + 1]['action'] == 'GotoLocation')):
                cleaned_plan.append(plan[i])
        cleaned_plan.append(plan[len(plan) - 1])
        return cleaned_plan

    def find_best_plan(self, parsed_plans):
        """Pick the shortest non-timed-out plan; when every solver timed
        out, fall back to the first solver's payload (minus its marker)."""
        if all([(parsed_plan[0] == 'timeout') for parsed_plan in parsed_plans]):
            parsed_plan = parsed_plans[0][1:]
        else:
            parsed_plans = [self.clean_plan(parsed_plan) for parsed_plan in parsed_plans if (parsed_plan[0] != 'timeout')]
            parsed_plan = min(parsed_plans, key=len)
        # The original `if constants.DEBUG:` had two byte-identical branches,
        # so the dead conditional was collapsed into one unconditional print.
        print(('plan\n' + '\n'.join([('%03d: %s' % (pp, game_util.get_action_str(pl))) for (pp, pl) in enumerate(parsed_plan)])))
        return parsed_plan
class UdpTransport(BaseTransport):
    """Transport that ships each formatted log line as a UDP datagram."""

    def __init__(self, beaver_config, logger=None):
        super(UdpTransport, self).__init__(beaver_config, logger=logger)
        # One datagram socket reused for every payload.
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        host = beaver_config.get('udp_host')
        port = beaver_config.get('udp_port')
        self._address = (host, port)

    def callback(self, filename, lines, **kwargs):
        """Format and send every line for ``filename`` to the UDP target."""
        timestamp = self.get_timestamp(**kwargs)
        # Drop a truthy caller-supplied timestamp so it is not passed to
        # format() twice (it is already folded into `timestamp`).
        if kwargs.get('timestamp', False):
            kwargs.pop('timestamp')
        for line in lines:
            payload = self.format(filename, line, timestamp, **kwargs)
            self._sock.sendto(payload, self._address)
class BuildMn(BuildMnBase):
    """Command-line front-end for BuildMnBase.

    argv layout: <ARCADIA_ROOT> <archiver> <mninfo> <mnname>
    <mnrankingSuffix> <cppOutput> [CHECK] [PTR] [MULTI] [fml_tool=...]
    """

    def Run(self, argv):
        if len(argv) < 6:
            # BUGFIX: this was Python 2 `print >> sys.stderr, ...` mangled
            # into a no-op tuple expression `((print >> sys.stderr), ...)`,
            # so the usage message was never printed under Python 3.
            print('BuildMn.Run(<ARCADIA_ROOT> <archiver> <mninfo> <mnname> <mnrankingSuffix> <cppOutput> [params...])', file=sys.stderr)
            sys.exit(1)
        self.SrcRoot = argv[0]
        self.archiver = argv[1]
        mninfo = argv[2]
        mnname = argv[3]
        mnrankingSuffix = argv[4]
        mncppPath = argv[5]
        check = False
        ptr = False
        multi = False
        self.fml_unused_tool = ''
        # Optional flags may appear in any order after the six positionals.
        for param in argv[6:]:
            if param == 'CHECK':
                check = True
            elif param == 'PTR':
                ptr = True
            elif param == 'MULTI':
                multi = True
            elif param.startswith('fml_tool='):
                self.fml_unused_tool = get_value(param)
            else:
                # Same Python 2 print mangling fixed here as well.
                print('Unknown param: {0}'.format(param), file=sys.stdout)
        super(BuildMn, self).Run(mninfo, mnname, mnrankingSuffix, mncppPath, check=check, ptr=ptr, multi=multi)
def mock_plugin_installation(mocker):
    """Patch subprocess.run so pip installs are captured by a MagicMock,
    `self python-path` queries answer with this interpreter, and all other
    commands still execute for real. Returns the capturing mock."""
    real_run = subprocess.run
    install_recorder = mocker.MagicMock(returncode=0)
    pip_install_prefix = [sys.executable, '-u', '-m', 'pip', 'install']
    python_path_prefix = [sys.executable, 'self', 'python-path']

    def fake_run(command, **kwargs):
        if isinstance(command, list):
            if command[:5] == pip_install_prefix:
                # Record the call; callers get the mock itself back.
                install_recorder(command, **kwargs)
                return install_recorder
            if command[:3] == python_path_prefix:
                return mocker.MagicMock(returncode=0, stdout=sys.executable.encode())
        # Anything else passes through to the real subprocess.run.
        return real_run(command, **kwargs)

    mocker.patch('subprocess.run', side_effect=fake_run)
    return install_recorder
class EmailBackend(BaseEmailBackend):
    """Django email backend that delivers messages through the Front API.

    Messages are posted either as outbound conversations or (when the
    message object carries ``draft=True``) as shared drafts.
    """

    def __init__(self, token=None, channel=None, sender_name=None, author=None, archive=False, **kwargs):
        super().__init__(**kwargs)
        # API credentials/channel fall back to Django settings.
        self.token = (token or settings.FRONT_TOKEN)
        self.channel = (channel or settings.FRONT_CHANNEL)
        if ((not self.token) or (not self.channel)):
            raise NotImplementedError('For the Front email backend, settings.FRONT_TOKEN and settings.FRONT_CHANNEL must be set.')
        self.sender_name = (sender_name or settings.FRONT_SENDER_NAME)
        self.author = (author or settings.FRONT_AUTHOR)
        self.archive = (archive or settings.FRONT_ARCHIVE)
        # NOTE(review): both endpoint URLs were truncated during extraction,
        # leaving unterminated f-strings — restore the Front API message and
        # draft endpoints (built from self.channel) from the upstream source.
        self.message_url = f'
        self.draft_url = f'
        self.headers = {'Accept': 'application/json', 'Authorization': f'Bearer {self.token}'}

    def send_messages(self, email_messages):
        """Send each message; return how many were accepted by Front."""
        if (not email_messages):
            return 0
        num_sent = 0
        for message in email_messages:
            sent = self._send(message)
            if sent:
                num_sent += 1
        return num_sent

    def _send(self, email_message):
        """POST one message (or draft) to Front; True on success."""
        if (not email_message.recipients()):
            return False
        # Per-message overrides set as attributes on the message object.
        draft = getattr(email_message, 'draft', False)
        archive = getattr(email_message, 'archive', self.archive)
        encoding = (email_message.encoding or settings.DEFAULT_CHARSET)
        recipients = [sanitize_address(addr, encoding) for addr in email_message.recipients()]
        if email_message.attachments:
            # Attachments are silently dropped for now.
            log.warning('Front email backend does not yet implement attachments!')
        payload = {'to': recipients, 'cc': email_message.cc, 'bcc': email_message.bcc, 'sender_name': self.sender_name, 'subject': email_message.subject, 'options': {'archive': archive}, 'body': email_message.body}
        if draft:
            url = self.draft_url
            payload['mode'] = 'shared'
            payload['author_id'] = self.author
            if (not self.author):
                raise NotImplementedError("Can't save a draft message without setting FRONT_AUTHOR.")
            log.debug('Creating Front draft message: %s', email_message.subject)
        else:
            url = self.message_url
            log.debug('Starting Front conversation: %s', email_message.subject)
        response = requests.post(url, json=payload, headers=self.headers)
        if response.ok:
            return True
        if (not self.fail_silently):
            log.error('Failed to send front message: %s, to=%s', response.content, recipients)
            response.raise_for_status()
        return False
class OutageSection(Section):
    """Autodrm/GSE 'OUTAGE' response section: a report period followed by a
    table of individual outages."""
    keyword = b'OUTAGE'
    outages_header = b'NET Sta Chan Aux Start Date Time End Date Time Duration Comment'
    report_period = OutageReportPeriod.T()
    outages = List.T(Outage.T())

    @classmethod
    def read(cls, reader):
        """Parse one OUTAGE section from ``reader`` and return an instance.

        BUGFIX: the method already took ``cls`` and called class-level
        helpers, but the @classmethod decorator was missing. A dead
        ``outages = []`` assignment was also dropped.
        """
        DataType.read(reader)
        report_period = OutageReportPeriod.read(reader)
        outages = list(cls.read_table(reader, cls.outages_header, Outage))
        return cls(report_period=report_period, outages=outages)

    def write(self, writer):
        """Serialize the datatype line, report period and outage table."""
        self.write_datatype(writer)
        self.report_period.write(writer)
        self.write_table(writer, self.outages_header, self.outages)
def get_bit_vector(system):
    """Build an action-availability mask over SYS_ACTION_CARDINALITY system
    actions from the dialogue state in ``system.state``.

    Disallowed actions receive config.small_value instead of 1, so they are
    strongly down-weighted rather than hard-masked. Three masking policies
    are selected by config flags: with_bit_all, with_bit_more and
    with_bit_rep_only.

    NOTE(review): action-index semantics (0 appears to be the initial
    request action, 2/5 result-dependent actions, 3 reservation, 4 always
    suppressed once informing has started) are inferred from usage only —
    confirm against dialog_config before relying on them.
    """
    if config.with_bit_all:
        # Reservation is allowed only when every reservation slot is filled.
        reservable = [len(value) for (entity, value) in system.state['reservation_informed'].items()]
        reservable = np.all(reservable)
        small_value = config.small_value
        if (len(system.state['informed']['name']) > 0):
            # A concrete name has been informed.
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[4] = small_value
            bit_vecs[0] = small_value
            if (len(system.state['results']) == 0):
                # No results yet: suppress result-dependent actions.
                bit_vecs[2] = small_value
                bit_vecs[3] = small_value
                bit_vecs[5] = small_value
            else:
                bit_vecs[2] = 1
                bit_vecs[3] = 1
                bit_vecs[5] = 1
                if (not reservable):
                    bit_vecs[3] = small_value
                else:
                    bit_vecs[3] = 1
                    bit_vecs[5] = small_value
            return bit_vecs
        # No name yet: count the non-name slots already informed.
        informed_so_far = [(len(value) > 0) for (entity, value) in system.state['informed'].items() if (entity != 'name')]
        assert len(informed_so_far)
        if (np.sum(informed_so_far) > 1):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[4] = small_value
            if (len(system.state['results']) == 0):
                bit_vecs[2] = small_value
                bit_vecs[3] = small_value
                bit_vecs[5] = small_value
            else:
                bit_vecs[2] = 1
                bit_vecs[3] = 1
                bit_vecs[5] = 1
                if (not reservable):
                    bit_vecs[3] = small_value
                else:
                    bit_vecs[3] = 1
                    bit_vecs[5] = small_value
            if np.all(informed_so_far):
                # Everything informed: the initial request action is hard-masked.
                bit_vecs[0] = 0
            return bit_vecs
        else:
            # Too little informed: only action 0 is fully available.
            bit_vecs = [1, small_value, small_value, small_value, small_value, small_value]
            return bit_vecs
    elif config.with_bit_more:
        # Looser variant: action 0 stays available and the threshold is > 0.
        reservable = [len(value) for (entity, value) in system.state['reservation_informed'].items()]
        reservable = np.all(reservable)
        small_value = config.small_value
        if (len(system.state['informed']['name']) > 0):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[4] = small_value
            if (len(system.state['results']) == 0):
                bit_vecs[2] = small_value
                bit_vecs[3] = small_value
                bit_vecs[5] = small_value
            else:
                bit_vecs[2] = 1
                bit_vecs[3] = 1
                bit_vecs[5] = 1
                if (not reservable):
                    bit_vecs[3] = small_value
                else:
                    bit_vecs[3] = 1
            return bit_vecs
        informed_so_far = [(len(value) > 0) for (entity, value) in system.state['informed'].items() if (entity != 'name')]
        assert len(informed_so_far)
        if (np.sum(informed_so_far) > 0):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[4] = small_value
            if (len(system.state['results']) == 0):
                bit_vecs[2] = small_value
                bit_vecs[3] = small_value
                bit_vecs[5] = small_value
            else:
                bit_vecs[2] = 1
                bit_vecs[3] = 1
                bit_vecs[5] = 1
                if (not reservable):
                    bit_vecs[3] = small_value
                else:
                    bit_vecs[3] = 1
            return bit_vecs
        else:
            bit_vecs = [1, small_value, small_value, small_value, small_value, small_value]
            return bit_vecs
    elif config.with_bit_rep_only:
        # Repetition-only variant: only suppress repeating the request action.
        reservable = [len(value) for (entity, value) in system.state['reservation_informed'].items()]
        reservable = np.all(reservable)
        small_value = config.small_value
        if (len(system.state['informed']['name']) > 0):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[0] = small_value
            return bit_vecs
        informed_so_far = [(len(value) > 0) for (entity, value) in system.state['informed'].items() if (entity != 'name')]
        if np.all(informed_so_far):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[0] = small_value
        else:
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
        return bit_vecs
        # NOTE(review): everything below is unreachable after the return
        # above — it mirrors the with_bit_all logic and is presumably a
        # leftover; confirm whether the return or this tail is intended.
        assert len(informed_so_far)
        if (np.sum(informed_so_far) > 1):
            bit_vecs = ([1] * dialog_config.SYS_ACTION_CARDINALITY)
            bit_vecs[4] = small_value
            if (len(system.state['results']) == 0):
                bit_vecs[2] = small_value
                bit_vecs[5] = small_value
            else:
                bit_vecs[2] = 1
                bit_vecs[5] = 1
                if (not reservable):
                    bit_vecs[3] = small_value
                else:
                    bit_vecs[3] = 1
                    bit_vecs[5] = small_value
            if np.all(informed_so_far):
                bit_vecs[0] = 0
            return bit_vecs
        else:
            bit_vecs = [1, small_value, small_value, small_value, small_value, small_value]
            return bit_vecs
# NOTE(review): the bare tuple below looks like the remnant of a stripped
# decorator (likely an argument-renaming/deprecation decorator mapping
# positional arg 2 from 'where' to 'filter') — restore from upstream.
(2, 'where', 'filter')
def getItemsByCategory(filter, where=None, eager=None):
    """Fetch all Items whose group belongs to the given category.

    ``filter`` is either a category ID (int) or a category name (str);
    extra SQLAlchemy criteria can be AND-ed in via ``where``.
    """
    if isinstance(filter, int):
        criterion = (Category.ID == filter)
    elif isinstance(filter, str):
        criterion = (Category.name == filter)
    else:
        raise TypeError('Need integer or string as argument')
    criterion = processWhere(criterion, where)
    query = get_gamedata_session().query(Item).options(*processEager(eager))
    return query.join(Item.group, Group.category).filter(criterion).all()
class TableProcessor(object):
    """Pipeline that truncates a table, linearizes it, and joins answers."""

    def __init__(self, table_linearize_func: TableLinearize, table_truncate_funcs: List[TableTruncate], target_delimiter: str=', '):
        self.table_linearize_func = table_linearize_func
        self.table_truncate_funcs = table_truncate_funcs
        self.target_delimiter = target_delimiter

    def process_input(self, table_content: Dict, question: str, answer: List[str]) -> str:
        """Truncate ``table_content`` in place, linearize it, and prepend
        the question to form the model input string."""
        for shrink in self.table_truncate_funcs:
            shrink.truncate_table(table_content, question, answer)
        linear_table = self.table_linearize_func.process_table(table_content)
        return question + ' ' + linear_table

    def process_output(self, answer: List[str]) -> str:
        """Join the answer pieces with the target delimiter; empty answers
        are rejected."""
        joined = self.target_delimiter.join(answer)
        if joined.strip() == '':
            raise Exception('The Answer is EMPTY!')
        return joined
def test_jsonify_behaves():
    """Sanity-check the Jsonify YAML tag: equality, truthiness, reprs, rendering."""
    assert Jsonify.yaml_tag == '!jsonify'
    payload = {'a': 'string here', 'b': 123, 'c': False}
    jsonify = Jsonify(payload)
    # Equal payloads compare equal; a populated wrapper is truthy.
    assert jsonify == Jsonify({'a': 'string here', 'b': 123, 'c': False})
    assert jsonify
    assert str(jsonify) == "{'a': 'string here', 'b': 123, 'c': False}"
    assert repr(jsonify) == "Jsonify({'a': 'string here', 'b': 123, 'c': False})"
    # get_value serialises the payload to JSON regardless of context contents.
    assert jsonify.get_value(Context({'a': 'BBB'})) == '{"a": "string here", "b": 123, "c": false}'
def serialize_key_and_certificates(name: (bytes | None), key: (PKCS12PrivateKeyTypes | None), cert: (x509.Certificate | None), cas: (typing.Iterable[_PKCS12CATypes] | None), encryption_algorithm: serialization.KeySerializationEncryption) -> bytes:
    """Validate the inputs and serialise them into a PKCS#12 blob.

    At least one of key, cert, or cas must be provided; type errors are
    raised eagerly before touching the OpenSSL backend.
    """
    allowed_key_types = (rsa.RSAPrivateKey, dsa.DSAPrivateKey, ec.EllipticCurvePrivateKey, ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)
    if key is not None and not isinstance(key, allowed_key_types):
        raise TypeError('Key must be RSA, DSA, EllipticCurve, ED25519, or ED448 private key, or None.')
    if cert is not None and not isinstance(cert, x509.Certificate):
        raise TypeError('cert must be a certificate or None')
    if cas is not None:
        # Materialise once so the iterable can be both validated and reused.
        cas = list(cas)
        if not all(isinstance(val, (x509.Certificate, PKCS12Certificate)) for val in cas):
            raise TypeError('all values in cas must be certificates')
    if not isinstance(encryption_algorithm, serialization.KeySerializationEncryption):
        raise TypeError('Key encryption algorithm must be a KeySerializationEncryption instance')
    if key is None and cert is None and not cas:
        raise ValueError('You must supply at least one of key, cert, or cas')
    # Imported lazily to avoid pulling in the OpenSSL backend at module load.
    from cryptography.hazmat.backends.openssl.backend import backend
    return backend.serialize_key_and_certificates_to_pkcs12(name, key, cert, cas, encryption_algorithm)
def set_interval(interval):
    """Decorator factory: run the decorated function every ``interval`` seconds.

    Calling the decorated function starts a daemon thread that repeatedly
    invokes the original function until the returned ``threading.Event``
    is set.

    Args:
        interval: delay in seconds between successive invocations.

    Returns:
        A decorator; the wrapped function returns the stop ``Event``.
    """
    import functools

    def decorator(function):
        # functools.wraps preserves the wrapped function's name/docstring
        # (the original wrapper hid them).
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():
                # Event.wait doubles as the sleep; it returns True once
                # the caller sets the event, ending the loop.
                while not stopped.wait(interval):
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # do not block interpreter shutdown
            t.start()
            return stopped
        return wrapper
    return decorator
def main() -> None:
    """Drive a local Raiden stress test: parse the config, build per-node CLI
    arguments, start all nodes, and run the stress-test iterations."""
    # Local imports keep these CLI-only dependencies scoped to the entry point.
    import argparse
    import configparser
    import re
    # Sections named node0, node1, ... each describe one Raiden node.
    NODE_SECTION_RE = re.compile('^node[0-9]+')
    parser = argparse.ArgumentParser()
    parser.add_argument('--nodes-data-dir', default=os.getcwd())
    parser.add_argument('--wait-after-first-sync', default=False, action='store_true')
    parser.add_argument('--profiler-data-directory', default=None)
    parser.add_argument('--interface', default='127.0.0.1')
    parser.add_argument('--iterations', default=5, type=int)
    parser.add_argument('config')
    args = parser.parse_args()
    # Profiling writes to a privileged location, hence the root requirement.
    if ((args.profiler_data_directory is not None) and (os.geteuid() != 0)):
        raise RuntimeError('To enable profiling the script has to be executed with root.')
    config = configparser.ConfigParser()
    config.read(args.config)
    datadir = args.nodes_data_dir
    interface = Host(args.interface)
    port_generator = get_free_port(5000)
    retry_timeout = 1
    nodes_config: List[NodeConfig] = []
    # The token address lives in the config's DEFAULT section.
    token_address = config.defaults()['token-address']
    if (not is_checksum_address(token_address)):
        raise ValueError(f'Invalid token address {token_address}, check it is checksummed.')
    # CLI options shared by every node; per-node options are merged on top.
    defaults = {'--log-config': 'raiden:DEBUG', '--environment-type': 'development', '--datadir': datadir}
    for section in config:
        if NODE_SECTION_RE.match(section):
            node_config = config[section]
            address = node_config['address']
            node = defaults.copy()
            node.update({'--keystore-path': node_config['keystore-path'], '--password-file': node_config['password-file'], '--eth-rpc-endpoint': node_config['eth-rpc-endpoint'], '--network-id': node_config['network-id'], '--address': address})
            # The pathfinding service is optional per node.
            pathfinding_url = node_config.get('pathfinding-service-address')
            if (pathfinding_url is not None):
                node['--pathfinding-service-address'] = pathfinding_url
            # All fees are zeroed so transfers move the full capacity.
            raiden_args = ['raiden', '--accept-disclaimer', '--log-json', '--disable-debug-logfile', '--flat-fee', token_address, '0', '--proportional-fee', token_address, '0', '--proportional-imbalance-fee', token_address, '0']
            # Flatten the option dict into alternating "--flag value" args.
            raiden_args.extend(chain.from_iterable(node.items()))
            address = to_checksum_address(address)
            nodedir = os.path.join(datadir, f'node_{pex(to_canonical_address(address))}')
            nodes_config.append(NodeConfig(raiden_args, interface, address, nodedir))
    # assumes every configured channel has at least this capacity — TODO confirm
    capacity_lower_bound = 1130220
    profiler_data_directory = args.profiler_data_directory
    iterations = args.iterations
    # NOTE(review): --iterations defaults to 5 and argparse never yields None,
    # so the infinite-count branch below is unreachable as written — confirm
    # whether a "run forever" sentinel was intended.
    if (iterations is None):
        iteration_counter: Any = count()
    else:
        iteration_counter = iter(range(iterations))
    # The Janitor tears down all spawned node processes on exit or failure.
    with Janitor() as nursery:
        nodes_running = start_and_wait_for_all_servers(nursery, port_generator, nodes_config, retry_timeout)
        if (nodes_running is None):
            return
        # Optionally pause after the first sync so a human can inspect state.
        if args.wait_after_first_sync:
            nursery.spawn_under_watch(wait_for_user_input).get()
        test_config = StressTestConfiguration(port_generator, retry_timeout, Amount(capacity_lower_bound), token_address, iteration_counter, profiler_data_directory)
        nursery.spawn_under_watch(run_stress_test, nursery, nodes_running, test_config)
        nursery.wait(timeout=None)
def do_autopaginate(parser, token):
    """Parse the autopaginate template tag into an AutoPaginateNode.

    Syntax: {% autopaginate queryset [paginate_by [orphans]] [as name] %}

    Raises:
        template.TemplateSyntaxError: on a dangling "as", a non-integer
            orphans value, or a wrong argument count.
    """
    split = token.split_contents()
    as_index = None
    context_var = None
    # Locate an optional trailing "as <name>" clause.
    for (i, bit) in enumerate(split):
        if (bit == 'as'):
            as_index = i
            break
    if (as_index is not None):
        try:
            context_var = split[(as_index + 1)]
        except IndexError:
            # BUG FIX: the original parenthesisation applied "%" to a string
            # fragment with no conversion specifier, raising TypeError
            # instead of this TemplateSyntaxError.
            raise template.TemplateSyntaxError(
                'Context variable assignment must take the form of '
                '{%% %r object.example_set.all ... as context_var_name %%}' % split[0])
        # Remove "as <name>" so the positional checks below see only args.
        del split[as_index:(as_index + 2)]
    if (len(split) == 2):
        return AutoPaginateNode(split[1])
    elif (len(split) == 3):
        return AutoPaginateNode(split[1], paginate_by=split[2], context_var=context_var)
    elif (len(split) == 4):
        try:
            orphans = int(split[3])
        except ValueError:
            raise template.TemplateSyntaxError(u'Got %s, but expected integer.' % split[3])
        return AutoPaginateNode(split[1], paginate_by=split[2], orphans=orphans, context_var=context_var)
    else:
        # BUG FIX: same misplaced "%" as above; format the whole message.
        raise template.TemplateSyntaxError(
            '%r tag takes one required argument and one optional argument' % split[0])
class TeleporterList(location_list.LocationList):
    """Location list restricted to teleporter-type dock nodes of a game."""

    def nodes_list(cls, game: RandovaniaGame) -> list[NodeIdentifier]:
        """Return sorted identifiers for every teleporter dock node in *game*."""
        game_description = default_database.game_description_for(game)
        teleporter_dock_types = game_description.dock_weakness_database.all_teleporter_dock_types
        region_list = game_description.region_list
        identifiers = []
        for node in region_list.all_nodes:
            # Only dock nodes whose dock type is a teleporter type qualify.
            if isinstance(node, DockNode) and node.dock_type in teleporter_dock_types:
                identifiers.append(region_list.identifier_for_node(node))
        return sorted(identifiers)

    def element_type(cls):
        return NodeIdentifier

    def ensure_has_locations(self, area_locations: list[NodeIdentifier], enabled: bool) -> TeleporterList:
        # Delegates to the parent implementation; kept for the narrowed return type.
        return super().ensure_has_locations(area_locations, enabled)
class IBContract(Contract):
    """Wrapper around the IB API ``Contract`` translating to/from ``SecurityType``.

    NOTE(review): several methods below appear twice under the same name
    (getter/setter pairs) and others take ``cls`` as first parameter — the
    original source very likely used @property/@x.setter/@classmethod
    decorators that were lost in extraction. As written, only the last
    definition of each duplicated name survives; confirm against upstream.
    """
    # Maps the internal SecurityType enum onto IB API secType strings.
    security_type_map = {SecurityType.FUTURE: 'FUT', SecurityType.STOCK: 'STK', SecurityType.INDEX: 'IND', SecurityType.SPREAD: 'BAG', SecurityType.CONTFUT: 'CONTFUT'}

    def __init__(self, symbol: str, security_type: SecurityType, exchange: str, multiplier: Optional[str]='', currency: str='', last_trade_date: Optional[datetime]=None):
        """Create a contract for *symbol* of *security_type* traded on *exchange*."""
        super().__init__()
        self.symbol = symbol
        self.security_type = security_type
        self.exchange = exchange
        self.currency = currency
        self.multiplier = multiplier
        self.last_trade_date = last_trade_date

    def lastTradeDateOrContractMonth(self) -> str:
        # Getter: IB expects YYYYMMDD; empty string when no expiry is set.
        # NOTE(review): presumably an @property whose decorator was stripped.
        return (self.last_trade_date.strftime('%Y%m%d') if self.last_trade_date else '')

    def lastTradeDateOrContractMonth(self, lastTradeDateOrContractMonth: str):
        # Setter counterpart: parse YYYYMMDD back into a datetime (None if empty).
        self.last_trade_date = (datetime.strptime(lastTradeDateOrContractMonth, '%Y%m%d') if (lastTradeDateOrContractMonth != '') else None)

    def secType(self) -> str:
        # Getter: expose the stored enum as IB's secType string.
        return self._map_security_type(self.security_type)

    def secType(self, secType: str):
        # Setter counterpart: accept IB's string and store the matching enum.
        self.security_type = (self._map_secType(secType) if secType else None)

    def from_ib_contract(cls, ib_contract: Contract) -> 'IBContract':
        """Build an IBContract from a plain IB API Contract (likely a @classmethod)."""
        security_type = cls._map_secType(ib_contract.secType)
        ibcontract = IBContract(ib_contract.symbol, security_type, ib_contract.exchange)
        # Copy every remaining attribute verbatim from the source contract.
        for (attribute_name, value) in ib_contract.__dict__.items():
            setattr(ibcontract, attribute_name, value)
        return ibcontract

    def _map_security_type(cls, security_type: SecurityType) -> str:
        """Translate SecurityType -> IB secType string; ValueError when unknown."""
        try:
            return cls.security_type_map[security_type]
        except KeyError:
            raise ValueError(f'Security type {security_type} could not be mapped into a correct Interactive Brokers secType') from None

    def _map_secType(cls, secType: str) -> SecurityType:
        """Translate IB secType string -> SecurityType; ValueError when unknown."""
        # Inverts the class-level map on each call.
        security_type_map = {value: key for (key, value) in cls.security_type_map.items()}
        try:
            return security_type_map[secType]
        except KeyError:
            raise ValueError(f'Security type {secType} could not be mapped into a correct Interactive Brokers secType') from None

    def to_string(self) -> str:
        """Serialise via __str__ (format defined elsewhere; see from_string)."""
        return str(self)

    def from_string(cls, contract_str: str) -> 'IBContract':
        """Parse a "<15 comma-separated fields>combo:<leg>;<leg>;..." string back
        into a contract; a trailing 3-field entry encodes a delta-neutral contract.
        Likely a @classmethod (decorator stripped)."""
        (params, combo_legs) = contract_str.split('combo:')
        ib_contract = Contract()
        # Positional field order must match the serialised format exactly.
        [ib_contract.conId, ib_contract.symbol, ib_contract.secType, ib_contract.lastTradeDateOrContractMonth, ib_contract.strike, ib_contract.right, ib_contract.multiplier, ib_contract.exchange, ib_contract.primaryExchange, ib_contract.currency, ib_contract.localSymbol, ib_contract.tradingClass, ib_contract.includeExpired, ib_contract.secIdType, ib_contract.secId] = params.split(',')
        # Restore non-string field types lost in serialisation.
        ib_contract.conId = int(ib_contract.conId)
        ib_contract.strike = float(ib_contract.strike)
        ib_contract.includeExpired = bool((ib_contract.includeExpired == 'True'))
        combo_legs = combo_legs.split(';')
        combo_legs = [c for c in combo_legs if (len(c) > 0)]
        if (len(combo_legs) > 0):
            # A trailing 3-field entry is the delta-neutral contract, not a leg.
            if (len(combo_legs[(- 1)].split(',')) == 3):
                delta_neutral_contract = combo_legs[(- 1)].split(',')
                combo_legs = combo_legs[:(- 1)]
                ib_contract.deltaNeutralContract = DeltaNeutralContract()
                ib_contract.deltaNeutralContract.conId = int(delta_neutral_contract[0])
                ib_contract.deltaNeutralContract.delta = float(delta_neutral_contract[1])
                ib_contract.deltaNeutralContract.price = float(delta_neutral_contract[2])
        # NOTE(review): comboLegs is set to [] when legs remain, None otherwise —
        # but only when legs remained after the delta-neutral pop; verify intent.
        ib_contract.comboLegs = ([] if (len(combo_legs) > 0) else None)
        if (ib_contract.comboLegs is not None):
            for params in combo_legs:
                params = params.split(',')
                combo_leg = ComboLeg()
                combo_leg.conId = int(params[0])
                combo_leg.ratio = int(params[1])
                combo_leg.action = params[2]
                combo_leg.exchange = params[3]
                combo_leg.openClose = int(params[4])
                combo_leg.shortSaleSlot = int(params[5])
                combo_leg.designatedLocation = params[6]
                combo_leg.exemptCode = int(params[7])
                ib_contract.comboLegs.append(combo_leg)
        return cls.from_ib_contract(ib_contract)

    def __eq__(self, other):
        # NOTE(review): self.secType is referenced without a call; if the
        # @property decorators were indeed stripped, this compares bound
        # methods (identity-like), not values — verify against upstream.
        if (self is other):
            return True
        if (not isinstance(other, IBContract)):
            return False
        return ((self.symbol, self.secType, self.multiplier, self.last_trade_date) == (other.symbol, other.secType, other.multiplier, other.last_trade_date))

    def __hash__(self):
        # Hash omits last_trade_date even though __eq__ includes it; that is
        # legal (equal objects still hash equal) but intentionally asymmetric.
        return hash((self.symbol, self.secType, self.multiplier))
class VSA_Module(nn.Module):
    """Visual self-attention module fusing multi-scale CNN features.

    Projects low-level (192-ch) and high-level (768-ch) feature maps to a
    common channel size, fuses them with a spatial attention map, and uses
    the result to gate the solo feature vector.
    """

    def __init__(self, opt=None):
        """Build the module from an options dict.

        Args:
            opt: nested options with keys ``multiscale.multiscale_input_channel``,
                ``multiscale.multiscale_output_channel`` and ``embed.embed_dim``.
        """
        super(VSA_Module, self).__init__()
        # BUG FIX (idiom): avoid a mutable default argument; an empty dict
        # still raises KeyError below, exactly as the original did.
        opt = {} if opt is None else opt
        channel_size = opt['multiscale']['multiscale_input_channel']
        out_channels = opt['multiscale']['multiscale_output_channel']
        embed_dim = opt['embed']['embed_dim']
        # Project low/high level feature maps to a common channel size.
        self.LF_conv = nn.Conv2d(in_channels=192, out_channels=channel_size, kernel_size=3, stride=4)
        self.HF_conv = nn.Conv2d(in_channels=768, out_channels=channel_size, kernel_size=1, stride=1)
        # 1x1 convs produce the main feature and its attention logits.
        self.conv1x1_1 = nn.Conv2d(in_channels=(channel_size * 2), out_channels=out_channels, kernel_size=1)
        self.conv1x1_2 = nn.Conv2d(in_channels=(channel_size * 2), out_channels=out_channels, kernel_size=1)
        self.solo_attention = nn.Linear(in_features=256, out_features=embed_dim)

    def forward(self, lower_feature, higher_feature, solo_feature):
        """Fuse multi-scale features and return the attention-gated, L2-normalised
        solo feature. Assumes inputs are NCHW tensors — TODO confirm shapes."""
        lower_feature = self.LF_conv(lower_feature)
        higher_feature = self.HF_conv(higher_feature)
        # Concatenate scales along channels, then add the mean high-level
        # response broadcast across all channels.
        concat_feature = torch.cat([lower_feature, higher_feature], dim=1)
        concat_feature = (higher_feature.mean(dim=1, keepdim=True).expand_as(concat_feature) + concat_feature)
        main_feature = self.conv1x1_1(concat_feature)
        # Spatial attention: sigmoid over flattened positions, reshaped back.
        attn_feature = torch.sigmoid(self.conv1x1_2(concat_feature).view(concat_feature.shape[0], 1, (- 1))).view(concat_feature.shape[0], 1, main_feature.shape[2], main_feature.shape[3])
        atted_feature = (main_feature * attn_feature).squeeze(dim=1).view(attn_feature.shape[0], (- 1))
        # Gate the solo feature by per-dimension attention weights.
        solo_att = torch.sigmoid(self.solo_attention(atted_feature))
        solo_feature = (solo_feature * solo_att)
        return l2norm(solo_feature, (- 1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.