code stringlengths 101 5.91M |
|---|
def test_regular_grid_2d_8():
    """Eight grid points on a 20x40 array: stride 10 per axis, offset 5."""
    image = np.zeros((20, 40))
    grid = regular_grid(image.shape, 8)
    expected = [slice(5.0, None, 10.0), slice(5.0, None, 10.0)]
    assert_equal(grid, expected)
    image[grid] = 1
    assert_equal(image.sum(), 8)
class Partition2(nn.Module):
    """Auto-generated pipeline-parallel partition holding T5 encoder blocks 4-5.

    NOTE(review): generated code — layers and tensors are looked up by
    fully-qualified scope names and the whole partition is pinned to one device.
    """
    # Scope names of the layers owned by this partition, in execution order.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]']
    # Scope names of free-standing parameters/buffers owned here (none for this partition).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        # Register owned layers as l_0, l_1, ... in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register owned tensors as p_<i> (parameters) or b_<i> (buffers).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Flat argument layout consumed by unflatten(): three single tensors.
        self.input_structure = [1, 1, 1]
        # Maps local module names back to the original model's scope names.
        self.lookup = {'l_0': 'encoder.4', 'l_1': 'encoder.5'}
        self.to(self.device)

    def forward(self, *args):
        # Args arrive flattened; unpack to (attention_mask, position_bias, hidden states).
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x1, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_1(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        return (t_0,)

    # The remaining overrides delegate to module-level helpers so that state
    # is saved/loaded/reported under the original (pre-partition) names.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def _conditional_combinations() -> list[tuple[(int, int, bool)]]:
    """Enumerate (opcode, arg, flag) triples for conditional opcodes.

    Returns every combination of an opcode in range(90, 166), an arg in
    {0, 1}, and both boolean flag values, skipping SETUP_ASYNC_WITH.
    """
    args = [0, 1]
    conditional_opcodes = range(90, 166)
    combinations: list[tuple[(int, int, bool)]] = []
    for op in conditional_opcodes:
        # Bug fix: compare by value. `is` on integers relies on CPython's
        # small-int caching and is not guaranteed for opcode constants.
        if op == opcodes.SETUP_ASYNC_WITH:
            continue
        for arg in args:
            combinations.append((op, arg, True))
            combinations.append((op, arg, False))
    return combinations
_properties
class Map(object):
    """A parametric scope (map) with its iteration range and schedule metadata.

    Holds symbolic parameters iterating over `range`, plus scheduling options
    (schedule type, unrolling, collapsing, OpenMP/GPU tuning knobs).
    """
    label = Property(dtype=str, desc='Label of the map')
    params = ListProperty(element_type=str, desc='Mapped parameters')
    range = RangeProperty(desc='Ranges of map parameters', default=sbs.Range([]))
    schedule = EnumProperty(dtype=dtypes.ScheduleType, desc='Map schedule', default=dtypes.ScheduleType.Default)
    unroll = Property(dtype=bool, desc='Map unrolling')
    collapse = Property(dtype=int, default=1, desc='How many dimensions to collapse into the parallel range')
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool, desc='Show this node/scope/state as collapsed', default=False)
    instrument = EnumProperty(dtype=dtypes.InstrumentationType, desc='Measure execution statistics with given method', default=dtypes.InstrumentationType.No_Instrumentation)
    omp_num_threads = Property(dtype=int, default=0, desc='Number of OpenMP threads executing the Map', optional=True, optional_condition=(lambda m: (m.schedule in (dtypes.ScheduleType.CPU_Multicore, dtypes.ScheduleType.CPU_Persistent))))
    omp_schedule = EnumProperty(dtype=dtypes.OMPScheduleType, default=dtypes.OMPScheduleType.Default, desc='OpenMP schedule {static, dynamic, guided}', optional=True, optional_condition=(lambda m: (m.schedule in (dtypes.ScheduleType.CPU_Multicore, dtypes.ScheduleType.CPU_Persistent))))
    omp_chunk_size = Property(dtype=int, default=0, desc='OpenMP schedule chunk size', optional=True, optional_condition=(lambda m: (m.schedule in (dtypes.ScheduleType.CPU_Multicore, dtypes.ScheduleType.CPU_Persistent))))
    gpu_block_size = ListProperty(element_type=int, default=None, allow_none=True, desc='GPU kernel block size', optional=True, optional_condition=(lambda m: (m.schedule in dtypes.GPU_SCHEDULES)))
    gpu_launch_bounds = Property(dtype=str, default='0', desc='GPU kernel launch bounds. A value of -1 disables the statement, 0 (default) enables the statement if block size is not symbolic, and any other value (including tuples) sets it explicitly.', optional=True, optional_condition=(lambda m: (m.schedule in dtypes.GPU_SCHEDULES)))

    def __init__(self, label, params, ndrange, schedule=dtypes.ScheduleType.Default, unroll=False, collapse=1, fence_instrumentation=False, debuginfo=None):
        super(Map, self).__init__()
        self.label = label
        self.schedule = schedule
        self.unroll = unroll
        # Bug fix: previously hard-coded to 1, silently discarding the
        # `collapse` constructor argument.
        self.collapse = collapse
        self.params = params
        self.range = ndrange
        self.debuginfo = debuginfo
        self._fence_instrumentation = fence_instrumentation

    def __str__(self):
        return (((self.label + '[') + ', '.join(['{}={}'.format(i, r) for (i, r) in zip(self._params, [sbs.Range.dim_to_string(d) for d in self._range])])) + ']')

    def __repr__(self):
        return (((type(self).__name__ + ' (') + self.__str__()) + ')')

    def validate(self, sdfg, state, node):
        # Map labels must be valid identifiers for code generation.
        if (not dtypes.validate_name(self.label)):
            raise NameError(('Invalid map name "%s"' % self.label))

    def get_param_num(self):
        """Return the number of map iteration parameters."""
        return len(self.params)
def make_batches(lines, args, task, max_positions, encode_fn):
    """Tokenize *lines* via the task's dictionary and yield inference batches."""
    tokens = []
    for src_str in lines:
        encoded = task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False)
        tokens.append(encoded.long())
    lengths = torch.LongTensor([t.numel() for t in tokens])
    dataset = task.build_dataset_for_inference(tokens, lengths)
    iterator = task.get_batch_iterator(dataset=dataset, max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions)
    for batch in iterator.next_epoch_itr(shuffle=False):
        net_input = batch['net_input']
        yield Batch(ids=batch['id'], src_tokens=net_input['src_tokens'], src_lengths=net_input['src_lengths'])
def add_simple_state_to_sdfg(state: SDFGState, top_sdfg: SDFG, state_name: str):
    """Add a new state named *state_name* to *top_sdfg* and register it.

    The first state added for a given SDFG becomes its start state.
    """
    is_first_state = state.last_sdfg_states.get(top_sdfg) is None
    if is_first_state:
        substate = top_sdfg.add_state(state_name, is_start_state=True)
    else:
        substate = top_sdfg.add_state(state_name)
    finish_add_state_to_sdfg(state, top_sdfg, substate)
    return substate
def sequence2frame(accompany_pianoroll, chord_groundtruth):
    """Expand beat-level pianorolls to frame level by repeating each beat."""
    print('augment chord into frame base...')
    frames_per_beat = (Constants.BEAT_RESOLUTION * Constants.BEAT_PER_CHORD)
    acc_frames = []
    truth_frames = []
    for (acc_song, truth_song) in zip(accompany_pianoroll, chord_groundtruth):
        acc_song_frames = []
        truth_song_frames = []
        for (acc_beat, truth_beat) in zip(acc_song, truth_song):
            # Repeat every beat entry once per frame.
            acc_song_frames.extend([acc_beat] * frames_per_beat)
            truth_song_frames.extend([truth_beat] * frames_per_beat)
        acc_frames.append(acc_song_frames)
        truth_frames.append(truth_song_frames)
    accompany_pianoroll_frame = np.asarray(acc_frames).astype(int)
    chord_groundtruth_frame = np.asarray(truth_frames)
    print('accompany_pianoroll frame shape:', accompany_pianoroll_frame.shape)
    print('groundtruth_pianoroll frame shape:', chord_groundtruth_frame.shape)
    return (accompany_pianoroll_frame, chord_groundtruth_frame)
def generate_tp_model(default_config: OpQuantizationConfig, base_config: OpQuantizationConfig, mixed_precision_cfg_list: List[OpQuantizationConfig], name: str) -> TargetPlatformModel:
    """Assemble a TargetPlatformModel describing quantization capabilities.

    Registers operator sets (with per-set quantization options) and fusing
    patterns. All tp.* objects created inside the `with` block attach
    themselves to `generated_tpc`, so the context manager and creation
    order are significant.
    """
    default_configuration_options = tp.QuantizationConfigOptions([default_config])
    generated_tpc = tp.TargetPlatformModel(default_configuration_options, name=name)
    with generated_tpc:
        # Ops kept in float: both weight and activation quantization disabled.
        tp.OperatorsSet('NoQuantization', tp.get_default_quantization_config_options().clone_and_edit(enable_weights_quantization=False, enable_activation_quantization=False))
        # Conv / FullyConnected are the mixed-precision candidates.
        mixed_precision_configuration_options = tp.QuantizationConfigOptions(mixed_precision_cfg_list, base_config=base_config)
        conv = tp.OperatorsSet('Conv', mixed_precision_configuration_options)
        fc = tp.OperatorsSet('FullyConnected', mixed_precision_configuration_options)
        # Sets created without explicit options inherit the model default.
        any_relu = tp.OperatorsSet('AnyReLU')
        add = tp.OperatorsSet('Add')
        sub = tp.OperatorsSet('Sub')
        mul = tp.OperatorsSet('Mul')
        div = tp.OperatorsSet('Div')
        prelu = tp.OperatorsSet('PReLU')
        swish = tp.OperatorsSet('Swish')
        sigmoid = tp.OperatorsSet('Sigmoid')
        tanh = tp.OperatorsSet('Tanh')
        # Activation groups that may fuse with the preceding conv / fc.
        activations_after_conv_to_fuse = tp.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh)
        activations_after_fc_to_fuse = tp.OperatorSetConcat(any_relu, swish, sigmoid)
        any_binary = tp.OperatorSetConcat(add, sub, mul, div)
        tp.Fusing([conv, activations_after_conv_to_fuse])
        tp.Fusing([fc, activations_after_fc_to_fuse])
        tp.Fusing([any_binary, any_relu])
    return generated_tpc
class WarmUpAndCosine(callbacks.Callback):
    """Keras callback: linear LR warm-up followed by cosine decay to min_lr.

    Reads the global step from the weights of a tracker layer named
    *layer_name* and assigns the learning rate directly on the optimizer.
    When *total_steps* is set, training is stopped once it is exceeded.
    """

    def __init__(self, warmup_steps, max_lr, min_lr=0.0, total_steps=None, layer_name='global_step_tracker', **kwargs):
        super().__init__(**kwargs)
        self.max_lr = max_lr
        self.min_lr = min_lr
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self.layer_name = layer_name
        self.lr_span = (self.max_lr - self.min_lr)
        # Linear increment applied per warm-up step.
        self.wup_lr_incr = (self.lr_span / self.warmup_steps)
        if (self.total_steps is not None):
            # Quarter-period cosine: LR reaches min_lr exactly at total_steps.
            self.cosine_w = ((0.5 * np.pi) / (self.total_steps - self.warmup_steps))

    def on_train_batch_begin(self, batch, logs=None):
        (global_step,) = self.model.get_layer(self.layer_name).get_weights()
        if (global_step < self.warmup_steps):
            # Warm-up: linear ramp from min_lr to max_lr.
            cur_lr = (self.min_lr + (self.wup_lr_incr * (global_step + 1)))
            self.model.optimizer.lr.assign(cur_lr)
        elif (self.total_steps is not None):
            if (global_step <= self.total_steps):
                # Cosine decay from max_lr down to min_lr.
                cur_lr = (self.min_lr + (self.lr_span * np.cos((self.cosine_w * (global_step - self.warmup_steps)))))
                self.model.optimizer.lr.assign(cur_lr)
            else:
                self.model.stop_training = True

    def on_epoch_begin(self, epoch, logs=None):
        # Bug fix: the base-class on_batch_begin() is a no-op, so the LR was
        # never refreshed here; call the scheduler hook directly instead.
        self.on_train_batch_begin(0)
        (global_step,) = self.model.get_layer(self.layer_name).get_weights()
        cur_lr = self.model.optimizer.lr.numpy()
        print(f'Current Global Step: {global_step}; Learning Rate: {cur_lr:.7f}', flush=True)
def _split_persona_and_context(text, eval_type='convai2'):
if ('your persona:' not in text):
return (None, text)
elif (eval_type == 'convai2'):
texts = text.split('\n')
return ('\n'.join(texts[:(- 1)]), texts[(- 1)])
elif (eval_type == 'dnli'):
texts = text.split('\n')
last_idx = 0
for (idx, text) in enumerate(texts):
if ('your persona:' in text):
last_idx = idx
persona_texts = texts[:(last_idx + 1)]
context_texts = texts[(last_idx + 1):]
return ('\n'.join(persona_texts), '\n'.join(context_texts)) |
_module('numpy')
def require(a, dtype=None, requirements=None):
    """Return an ndarray of *a* satisfying the requested flag requirements.

    Requirement letters (long aliases accepted): 'C'/'F' contiguity,
    'A' aligned, 'W' writeable, 'O' owndata, 'E' ensure a base-class array.
    """
    possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', 'A': 'A', 'ALIGNED': 'A', 'W': 'W', 'WRITEABLE': 'W', 'O': 'O', 'OWNDATA': 'O', 'E': 'E', 'ENSUREARRAY': 'E'}
    if not requirements:
        return asanyarray(a, dtype=dtype)
    normalized = {possible_flags[flag.upper()] for flag in requirements}
    # 'E' forbids subclasses; it is consumed here rather than checked below.
    if 'E' in normalized:
        normalized.remove('E')
        subok = False
    else:
        subok = True
    order = 'A'
    if normalized.issuperset({'C', 'F'}):
        raise ValueError('Cannot specify both "C" and "F" order')
    elif 'F' in normalized:
        order = 'F'
        normalized.remove('F')
    elif 'C' in normalized:
        order = 'C'
        normalized.remove('C')
    result = array(a, dtype=dtype, order=order, copy=False, subok=subok)
    # One copy is enough to satisfy all remaining flags at once.
    for flag in normalized:
        if not result.flags[flag]:
            result = result.copy(order)
            break
    return result
def PreActResNet164Basic(num_classes=10):
    """Build a 164-layer pre-activation ResNet: 3 stages of 27 basic blocks."""
    stage_depths = [27, 27, 27]
    stage_filters = [16, 32, 64]
    return ResNet(PreActBlock, layers=stage_depths, filters=stage_filters, num_classes=num_classes)
def test_reparameterize_size():
    """Reparameterized samples keep the model's latent dimensionality."""
    mean, logvar = model1.encode(x1.float())
    sample = model1.reparameterize(mean, logvar)
    assert len(sample[0]) == model1.no_latent_features
def vector_to_word(vector):
    """Decode a sequence of integer codes into a string via int2char.

    Uses str.join instead of repeated `+` concatenation, which is quadratic.
    """
    return ''.join(int2char(vec) for vec in vector)
class unit_gcn(nn.Module):
    """Graph-convolution unit over skeleton joints (ST-GCN style).

    Applies one 1x1 convolution per adjacency subset, sums the results, and
    adds a (possibly projected) residual connection.
    """

    def __init__(self, in_channels, out_channels, A, adaptive=True):
        # A: stacked adjacency matrices; A.shape[0] is the number of subsets.
        # Presumably shape (num_subset, V, V) — TODO confirm with caller.
        super(unit_gcn, self).__init__()
        self.out_c = out_channels
        self.in_c = in_channels
        self.num_subset = A.shape[0]
        self.adaptive = adaptive
        if adaptive:
            # Learnable adjacency, initialized from A.
            self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)), requires_grad=True)
        else:
            # Fixed, non-trainable adjacency.
            self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        # One 1x1 convolution per adjacency subset.
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
        # Residual branch: identity, or 1x1 conv + BN when channel counts differ.
        if (in_channels != out_channels):
            self.down = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1), nn.BatchNorm2d(out_channels))
        else:
            self.down = (lambda x: x)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Output BN re-initialized near zero — presumably so the block starts
        # close to its residual path; confirm against bn_init's semantics.
        bn_init(self.bn, 1e-06)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def L2_norm(self, A):
        # L2-normalize along dim 1; the epsilon avoids division by zero.
        A_norm = (torch.norm(A, 2, dim=1, keepdim=True) + 0.0001)
        A = (A / A_norm)
        return A

    def forward(self, x):
        # x: (N, C, T, V) — batch, channels, time, vertices.
        (N, C, T, V) = x.size()
        y = None
        if self.adaptive:
            A = self.PA
            A = self.L2_norm(A)
        else:
            # Move the fixed adjacency onto the input's GPU device.
            A = self.A.cuda(x.get_device())
        for i in range(self.num_subset):
            A1 = A[i]
            # Collapse channel/time so the matmul mixes only the vertex axis.
            A2 = x.view(N, (C * T), V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = ((z + y) if (y is not None) else z)
        y = self.bn(y)
        y += self.down(x)
        y = self.relu(y)
        return y
class bcolors():
    """ANSI terminal escape codes for colored / styled console output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def update_config(cfg, filename, ensure_dir=True):
    """Merge *filename* into *cfg*, derive output paths, and freeze the config.

    Bug fix: the derived paths previously read from the module-level `_C`
    default config instead of the `cfg` object being updated; they now use
    `cfg` consistently, so updating a non-global config works as intended.
    NOTE(review): path derivation is kept under `ensure_dir` as in the
    original flattened source — confirm whether it should be unconditional.
    """
    cfg.defrost()
    cfg.merge_from_file(filename)
    if ensure_dir:
        cfg.working_dir = osp.dirname(osp.abspath(__file__))
        cfg.root_dir = osp.dirname(cfg.working_dir)
        cfg.exp_name = '_'.join(cfg.modules)
        cfg.output_dir = osp.join(cfg.root_dir, 'output', cfg.data.name, cfg.model_name, cfg.exp_name)
        cfg.snapshot_dir = osp.join(cfg.output_dir, 'snapshots')
        cfg.log_dir = osp.join(cfg.output_dir, 'logs')
        cfg.event_dir = osp.join(cfg.output_dir, 'events')
        common.ensure_dir(cfg.output_dir)
        common.ensure_dir(cfg.snapshot_dir)
        common.ensure_dir(cfg.log_dir)
        common.ensure_dir(cfg.event_dir)
    cfg.freeze()
    return cfg
def index(g, self, index):
    """ONNX symbolic for aten::index (advanced indexing).

    Simple cases lower to a single Gather/index_select; general advanced
    indexing is lowered to Transpose/Flatten/Gather/Reshape. Negative index
    values are not supported in the multi-index path (see warning below).
    """
    # ATen fallback export: emit the raw ATen op and let the runtime handle it.
    if (sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK):
        return g.op('ATen', self, index, operator_s='index')
    if sym_help._is_packed_list(index):
        indices = sym_help._unpack_list(index)
    else:
        indices = [index]

    def try_mask_to_index(index):
        # Convert a bool/byte mask to integer indices via nonzero();
        # only valid for 1-D masks (see warning below).
        if ((not sym_help._is_none(index)) and ((index.type().scalarType() == 'Byte') or (index.type().scalarType() == 'Bool'))):
            if (sym_help._export_onnx_opset_version < 9):
                raise RuntimeError('Exporting masked indices are only supported after ONNX opset 9.')
            warnings.warn('Exporting aten::index operator with indices of type Byte. Only 1-D indices are supported. In any other case, this will produce an incorrect ONNX graph.')
            index = squeeze(g, nonzero(g, index), dim=1)
        return index
    indices = [try_mask_to_index(idx) for idx in indices]
    if (len(indices) == 1):
        # Single index tensor: plain gather along dim 0.
        return index_select(g, self, 0, indices[0])
    else:
        # Positions of the non-None (advanced) index tensors.
        adv_idx_indices = [i for (i, idx) in enumerate(indices) if (not sym_help._is_none(idx))]
        if (len(adv_idx_indices) == 0):
            return self
        elif (len(adv_idx_indices) == 1):
            # One advanced index: gather along that dimension.
            return index_select(g, self, adv_idx_indices[0], indices[adv_idx_indices[0]])
        else:
            rank = self.type().dim()
            if (rank is None):
                raise NotImplementedError((('Unsupported aten::index operator of advanced indexing on tensor of unknown rank, ' + 'try turning on shape and type propagate during export: ') + 'torch.onnx._export(..., propagate=True).'))
            warnings.warn((((('Exporting aten::index operator of advanced indexing in opset ' + str(sym_help._export_onnx_opset_version)) + ' is achieved by combination of multiple ONNX operators, ') + 'including Reshape, Transpose, Concat, and Gather. ') + 'If indices include negative values, the exported graph will produce incorrect results.'))
            rank = self.type().dim()
            adv_idx_count = len(adv_idx_indices)
            shape_tensor = _shape_as_tensor(g, self)
            # Per-dimension size tensors, reused for strides and final shapes.
            dim_tensor_list = [g.op('Gather', shape_tensor, g.op('Constant', value_t=torch.LongTensor([dim])), axis_i=0) for dim in range(rank)]
            # Move advanced-indexed dims to the front, then flatten them into one.
            self = g.op('Transpose', self, perm_i=(adv_idx_indices + [i for i in range(rank) if (i not in adv_idx_indices)]))
            self = g.op('Flatten', self, axis_i=adv_idx_count)
            # Fold the multi-dim advanced index into one linear (row-major)
            # index: cum = sum_i idx_i * prod(sizes of later advanced dims).
            cum_adv_index = indices[adv_idx_indices[(- 1)]]
            multiplier = dim_tensor_list[adv_idx_indices[(- 1)]]
            for i in range((adv_idx_count - 2), (- 1), (- 1)):
                adv_index = g.op('Mul', indices[adv_idx_indices[i]], multiplier)
                cum_adv_index = g.op('Add', cum_adv_index, adv_index)
                multiplier = g.op('Mul', multiplier, dim_tensor_list[adv_idx_indices[i]])
            self = index_select(g, self, 0, cum_adv_index)
            cum_adv_index_shape_tensor = _shape_as_tensor(g, cum_adv_index)
            # Contiguous advanced indices: result dims go back into place
            # (NumPy advanced-indexing placement rule).
            if (adv_idx_indices == list(range(adv_idx_indices[0], (adv_idx_indices[(- 1)] + 1)))):
                folded_adv_idx_shape_list = ([g.op('Constant', value_t=torch.LongTensor([(- 1)]))] + [dim_tensor_list[i] for i in range(rank) if (i not in adv_idx_indices)])
                folded_adv_idx_shape = g.op('Concat', *folded_adv_idx_shape_list, axis_i=0)
                self = g.op('Reshape', self, folded_adv_idx_shape)
                # Rotate the folded advanced dim back to its original position.
                adv_idx_permute = ((list(range(1, (adv_idx_indices[0] + 1))) + [0]) + list(range((adv_idx_indices[0] + 1), ((rank - adv_idx_count) + 1))))
                self = g.op('Transpose', self, perm_i=adv_idx_permute)
                final_shape_list = (([dim_tensor_list[i] for i in range(adv_idx_indices[0])] + [cum_adv_index_shape_tensor]) + [dim_tensor_list[i] for i in range(adv_idx_indices[0], rank) if (i not in adv_idx_indices)])
                final_shape = g.op('Concat', *final_shape_list, axis_i=0)
            else:
                # Non-contiguous advanced indices stay at the front.
                final_shape = g.op('Concat', cum_adv_index_shape_tensor, *[dim_tensor_list[i] for i in range(rank) if (i not in adv_idx_indices)], axis_i=0)
            return g.op('Reshape', self, final_shape)
class BinaryMorphology3D():
    """asv benchmark: binary erosion on a 3-D image for several footprints.

    Raising NotImplementedError in setup() tells asv to skip invalid
    parameter combinations.
    """
    # asv parameter grid: image shape, footprint type, radius, decomposition mode.
    param_names = ['shape', 'footprint', 'radius', 'decomposition']
    params = [((128, 128, 128),), ('ball', 'cube', 'octahedron'), (1, 3, 5, 10), (None, 'sequence', 'separable')]

    def setup(self, shape, footprint, radius, decomposition):
        rng = np.random.default_rng(123)
        # Thresholding at -3 keeps nearly every voxel True (mostly-solid mask).
        self.image = (rng.standard_normal(shape) > (- 3))
        fp_func = getattr(morphology, footprint)
        allow_decomp = ('cube', 'octahedron', 'ball')
        allow_separable = ('cube',)
        # NOTE(review): this check duplicates the allow_separable elif below.
        if ((decomposition == 'separable') and (footprint != 'cube')):
            raise NotImplementedError('separable unavailable')
        footprint_kwargs = {}
        if ((decomposition is not None) and (footprint not in allow_decomp)):
            raise NotImplementedError('decomposition unimplemented')
        elif ((decomposition == 'separable') and (footprint not in allow_separable)):
            raise NotImplementedError('separable decomposition unavailable')
        if (footprint in allow_decomp):
            footprint_kwargs['decomposition'] = decomposition
        if (footprint == 'cube'):
            # cube() takes an edge length, not a radius.
            size = ((2 * radius) + 1)
            self.footprint = fp_func(size, **footprint_kwargs)
        elif (footprint in ['ball', 'octahedron']):
            self.footprint = fp_func(radius, **footprint_kwargs)

    def time_erosion(self, shape, footprint, radius, *args):
        # Timed kernel; parameters are consumed by setup().
        morphology.binary_erosion(self.image, self.footprint)
def load_pr_tags():
    """Load PR tag descriptions from prtags.json next to this file.

    Always appends a 'release' entry with an empty description.
    Removes the dead `details = {}` initializer from the original, which was
    unconditionally overwritten by json.load.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    json_path = os.path.join(this_dir, 'prtags.json')
    with open(json_path) as f:
        details = json.load(f)
    details['release'] = ''
    return details
def check_one_program(helper, script, precond, graph_dict, w_graph_list, modify_graph=True, place_other_objects=True, id_mapping=None, **info):
    """Prepare the environment graph for *script* and execute it.

    Returns (message, executable, final_state, graph_state_list, id_mapping,
    info, script).

    Bug fix: `id_mapping` used a mutable default argument ({}), which is
    shared across calls and accumulates state; it now defaults to None and a
    fresh dict is created per call (backward compatible for all callers).
    """
    if id_mapping is None:
        id_mapping = {}
    helper.initialize(graph_dict)
    (script, precond) = modify_objects_unity2script(helper, script, precond)
    if modify_graph:
        # Reset the whole graph, then add any objects the script requires.
        helper.set_to_default_state(graph_dict, None, id_checker=(lambda v: True))
        (id_mapping, first_room, room_mapping) = helper.add_missing_object_from_script(script, precond, graph_dict, id_mapping)
        info = {'room_mapping': room_mapping}
        objects_id_in_script = [v for v in id_mapping.values()]
        helper.set_to_default_state(graph_dict, first_room, id_checker=(lambda v: (v in objects_id_in_script)))
        if place_other_objects:
            # Pad the scene with random distractor objects (ids >= 2000).
            max_node_to_place = (max_nodes - len(graph_dict['nodes']))
            n = random.randint((max_node_to_place - 20), max_node_to_place)
            helper.add_random_objs_graph_dict(graph_dict, n=max(n, 0))
            helper.set_to_default_state(graph_dict, None, id_checker=(lambda v: (v >= 2000)))
            helper.random_change_object_state(id_mapping, graph_dict, id_checker=(lambda v: (v not in objects_id_in_script)))
            helper.check_binary(graph_dict, id_checker=(lambda v: True), verbose=False)
            random_objects_id = helper.random_objects_id
            helper.prepare_from_precondition(precond, id_mapping, graph_dict)
            helper.open_all_doors(graph_dict)
            helper.ensure_light_on(graph_dict, id_checker=(lambda v: (v not in objects_id_in_script)))
            helper.check_binary(graph_dict, id_checker=(lambda v: (v >= random_objects_id)), verbose=False)
            helper.check_binary(graph_dict, id_checker=(lambda v: True), verbose=True)
            assert (len(graph_dict['nodes']) <= max_nodes)
    elif (len(id_mapping) != 0):
        # Graph untouched: only rewrite script ids per the provided mapping.
        helper.modify_script_with_specified_id(script, id_mapping, **info)
    graph = EnvironmentGraph(graph_dict)
    name_equivalence = utils.load_name_equivalence()
    executor = ScriptExecutor(graph, name_equivalence)
    (executable, final_state, graph_state_list) = executor.execute(script, w_graph_list=w_graph_list)
    if executable:
        message = 'Script is executable'
    else:
        message = 'Script is not executable, since {}'.format(executor.info.get_error_string())
    return (message, executable, final_state, graph_state_list, id_mapping, info, script)
def test_after_test_case_execution():
    """The observer stores a clone of the assertion trace on the result."""
    observer = FooObserver()
    result = MagicMock()
    with mock.patch.object(observer._assertion_local_state, 'trace') as trace_mock:
        cloned_trace = object()
        trace_mock.clone.return_value = cloned_trace
        observer.after_test_case_execution_inside_thread(MagicMock(), result)
        assert (result.assertion_trace == cloned_trace)
class Decanomial(Benchmark):
    """Decanomial benchmark function (2-D); global minimum 0 at (2, -3)."""

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        lows = ([(- 10.0)] * self.N)
        highs = ([10.0] * self.N)
        self._bounds = list(zip(lows, highs))
        self.custom_bounds = [(0, 2.5), ((- 2), (- 4))]
        self.global_optimum = [[2.0, (- 3.0)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Quartic term in x[1]: (x1 + 3)^4 expanded.
        val = (((((x[1] ** 4) + (12 * (x[1] ** 3))) + (54 * (x[1] ** 2))) + (108 * x[1])) + 81.0)
        # Tenth-degree polynomial in x[0]; coefficients follow the published
        # benchmark definition.
        val2 = ((((x[0] ** 10.0) - (20 * (x[0] ** 9))) + (180 * (x[0] ** 8))) - (960 * (x[0] ** 7)))
        val2 += (((3360 * (x[0] ** 6)) - (8064 * (x[0] ** 5))) + (13340 * (x[0] ** 4)))
        val2 += (((((- 15360) * (x[0] ** 3)) + (11520 * (x[0] ** 2))) - (5120 * x[0])) + 2624)
        return (0.001 * ((abs(val) + abs(val2)) ** 2.0))
def run_remote(params_path, gpu=False, instance_type='m5.large', ami='ami-00b8b0b2dff90dcab', spot_price=0.5):
    """Launch an EC2 spot instance and run the experiment on it (I/O only).

    Uploads the project zip, installs it under pipenv, writes the run
    command to run.py, and starts it inside a detached tmux session; the
    instance powers itself off when the run finishes.
    """
    command = (COMMAND % params_path)
    if gpu:
        # GPU runs override the AMI/instance type with CUDA-capable ones.
        ami = 'ami-03fd6608775f924b8'
        instance_type = 'g3.4xlarge'
        spot_price = 0.5
        command = (GPU_COMMAND % params_path)
    instance = request_instance(instance_type, ami, spot_price, params_path)
    with create_parasol_zip() as parasol_zip, Connection(instance, user='ubuntu', connect_kwargs={'key_filename': PEM_FILE}) as conn:
        print('Running remote experiment...')
        conn.put(parasol_zip)
        conn.run('mkdir parasol; unzip -o parasol.zip -d parasol; rm parasol.zip', hide='stdout')
        conn.run('PIPENV_YES=1 pipenv run python setup.py develop', hide='stdout')
        conn.run('PIPENV_YES=1 pipenv run pip install deepx --upgrade', hide='stdout')
        conn.run(('echo "%s" > run.py' % command), hide='stdout')
        # xvfb-run provides a virtual display for rendering; `sudo poweroff`
        # terminates the (billed) instance once the experiment ends.
        conn.run('tmux new-session -d -s \'experiment\' "xvfb-run -s \'-screen 0 1400x900x24\' pipenv run python run.py; sudo poweroff"')
def main(args):
    """Entry point: parse args, seed, build envs, and run MEP/adaptive training."""
    parser = get_config()
    all_args = parse_args(args, parser)
    assert (all_args.algorithm_name in ['mep', 'adaptive'])
    if (all_args.cuda and torch.cuda.is_available()):
        print('choose to use gpu...')
        device = torch.device('cuda:0')
        torch.set_num_threads(all_args.n_training_threads)
        if all_args.cuda_deterministic:
            # Trade cudnn autotuning speed for reproducibility.
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
    else:
        print('choose to use cpu...')
        device = torch.device('cpu')
        torch.set_num_threads(all_args.n_training_threads)
    # results/<env>/<layout>/<algorithm>/<experiment>
    run_dir = ((((Path((os.path.split(os.path.dirname(os.path.abspath(__file__)))[0] + '/results')) / all_args.env_name) / all_args.layout_name) / all_args.algorithm_name) / all_args.experiment_name)
    if (not run_dir.exists()):
        os.makedirs(str(run_dir))
    if all_args.use_wandb:
        run = wandb.init(config=all_args, project=all_args.env_name, entity=all_args.wandb_name, notes=socket.gethostname(), name=((((str(all_args.algorithm_name) + '_') + str(all_args.experiment_name)) + '_seed') + str(all_args.seed)), group=all_args.layout_name, dir=str(run_dir), job_type='training', reinit=True, tags=all_args.wandb_tags)
    else:
        # Without wandb, log into an auto-incrementing runN subdirectory.
        if (not run_dir.exists()):
            curr_run = 'run1'
        else:
            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')]
            if (len(exst_run_nums) == 0):
                curr_run = 'run1'
            else:
                curr_run = ('run%i' % (max(exst_run_nums) + 1))
        run_dir = (run_dir / curr_run)
        if (not run_dir.exists()):
            os.makedirs(str(run_dir))
    # NOTE(review): the empty '' between experiment name and user name looks
    # like a missing separator — confirm the intended process-title format.
    setproctitle.setproctitle(((((((str(all_args.algorithm_name) + '-') + str(all_args.env_name)) + '-') + str(all_args.experiment_name)) + '') + str(all_args.user_name)))
    # Seed all RNGs for reproducibility.
    torch.manual_seed(all_args.seed)
    torch.cuda.manual_seed_all(all_args.seed)
    np.random.seed(all_args.seed)
    envs = make_train_env(all_args, run_dir)
    eval_envs = (make_eval_env(all_args, run_dir) if all_args.use_eval else None)
    num_agents = all_args.num_agents
    config = {'all_args': all_args, 'envs': envs, 'eval_envs': eval_envs, 'num_agents': num_agents, 'device': device, 'run_dir': run_dir}
    if all_args.share_policy:
        from hsp.runner.shared.overcooked_runner import OvercookedRunner as Runner
    else:
        from hsp.runner.separated.overcooked_runner import MPERunner as Runner
    runner = Runner(config)
    print('population_yaml_path: ', all_args.population_yaml_path)
    population_config = yaml.load(open(all_args.population_yaml_path), yaml.Loader)
    override_policy_config = {}
    agent_name = all_args.adaptive_agent_name
    # The adaptive agent keeps the runner's policy config except for the
    # args namespace, which is overridden with the values below.
    override_policy_config[agent_name] = (Namespace(use_agent_policy_id=all_args.use_agent_policy_id, predict_other_shaped_info=all_args.predict_other_shaped_info, predict_shaped_info_horizon=all_args.predict_shaped_info_horizon, predict_shaped_info_event_count=all_args.predict_shaped_info_event_count, shaped_info_coef=all_args.shaped_info_coef, policy_group_normalization=all_args.policy_group_normalization, num_v_out=all_args.num_v_out, use_task_v_out=all_args.use_task_v_out, use_policy_vhead=all_args.use_policy_vhead), *runner.policy_config[1:])
    for policy_name in population_config:
        if (policy_name != agent_name):
            # Other population members only keep the action-space entry.
            override_policy_config[policy_name] = (None, None, runner.policy_config[2], None)
    runner.policy.load_population(all_args.population_yaml_path, evaluation=False, override_policy_config=override_policy_config)
    runner.trainer.init_population()
    runner.train_mep()
    # Shut everything down cleanly.
    envs.close()
    if (all_args.use_eval and (eval_envs is not envs)):
        eval_envs.close()
    if all_args.use_wandb:
        run.finish()
    else:
        runner.writter.export_scalars_to_json(str((runner.log_dir + '/summary.json')))
        runner.writter.close()
def configuration(parent_package='', top_path=None):
    """numpy.distutils build entry point for the `polynomial` subpackage."""
    from numpy.distutils.misc_util import Configuration
    pkg_config = Configuration('polynomial', parent_package, top_path)
    # Ship the test suite alongside the package.
    pkg_config.add_data_dir('tests')
    return pkg_config
class AgentEvaluator(object):
def __init__(self, env_params, mdp_fn, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert callable(mdp_fn), 'mdp generating function must be a callable function'
env_params['mlam_params'] = mlam_params
self.mdp_fn = mdp_fn
self.env = OvercookedEnv(self.mdp_fn, **env_params)
self.force_compute = force_compute
def from_mdp_params_infinite(mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert (outer_shape is not None), 'outer_shape needs to be defined for variable mdp'
assert (('num_mdp' in env_params) and np.isinf(env_params['num_mdp'])), 'num_mdp needs to be specified and infinite'
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(mdp_params, outer_shape, mdp_params_schedule_fn)
return AgentEvaluator(env_params, mdp_fn_naive, force_compute, mlam_params, debug)
def from_mdp_params_finite(mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert (outer_shape is not None), 'outer_shape needs to be defined for variable mdp'
assert (('num_mdp' in env_params) and (not np.isinf(env_params['num_mdp']))), 'num_mdp needs to be specified and finite'
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(mdp_params, outer_shape, mdp_params_schedule_fn)
num_mdp = env_params['num_mdp']
assert ((type(num_mdp) == int) and (num_mdp > 0)), ('invalid number of mdp: ' + str(num_mdp))
mdp_lst = [mdp_fn_naive() for _ in range(num_mdp)]
return AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params=env_params, force_compute=force_compute, mlam_params=mlam_params, debug=debug)
def from_mdp(mdp, env_params, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert (type(mdp) == OvercookedGridworld), 'mdp must be a OvercookedGridworld object'
mdp_fn = (lambda _ignored: mdp)
return AgentEvaluator(env_params, mdp_fn, force_compute, mlam_params, debug)
def from_layout_name(mdp_params, env_params, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert ((type(mdp_params) is dict) and ('layout_name' in mdp_params))
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
return AgentEvaluator.from_mdp(mdp, env_params, force_compute, mlam_params, debug)
def from_mdp_lst(mdp_lst, env_params, sampling_freq=None, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert is_iterable(mdp_lst), 'mdp_lst must be a list'
assert all([(type(mdp) == OvercookedGridworld) for mdp in mdp_lst]), 'some mdps are not OvercookedGridworld objects'
if (sampling_freq is None):
sampling_freq = (np.ones(len(mdp_lst)) / len(mdp_lst))
mdp_fn = (lambda _ignored: np.random.choice(mdp_lst, p=sampling_freq))
return AgentEvaluator(env_params, mdp_fn, force_compute, mlam_params, debug)
def evaluate_random_pair(self, num_games=1, all_actions=True, display=False, native_eval=False):
agent_pair = AgentPair(RandomAgent(all_actions=all_actions), RandomAgent(all_actions=all_actions))
return self.evaluate_agent_pair(agent_pair, num_games=num_games, display=display, native_eval=native_eval)
def evaluate_human_model_pair(self, num_games=1, display=False, native_eval=False):
a0 = GreedyHumanModel(self.env.mlam)
a1 = GreedyHumanModel(self.env.mlam)
agent_pair = AgentPair(a0, a1)
return self.evaluate_agent_pair(agent_pair, num_games=num_games, display=display, native_eval=native_eval)
def evaluate_agent_pair(self, agent_pair, num_games, game_length=None, start_state_fn=None, metadata_fn=None, metadata_info_fn=None, display=False, dir=None, display_phi=False, info=True, native_eval=False):
if native_eval:
return self.env.get_rollouts(agent_pair, num_games=num_games, display=display, dir=dir, display_phi=display_phi, info=info, metadata_fn=metadata_fn, metadata_info_fn=metadata_info_fn)
else:
horizon_env = self.env.copy()
horizon_env.horizon = (self.env.horizon if (game_length is None) else game_length)
horizon_env.start_state_fn = (self.env.start_state_fn if (start_state_fn is None) else start_state_fn)
horizon_env.reset()
return horizon_env.get_rollouts(agent_pair, num_games=num_games, display=display, dir=dir, display_phi=display_phi, info=info, metadata_fn=metadata_fn, metadata_info_fn=metadata_info_fn)
def get_agent_pair_trajs(self, a0, a1=None, num_games=100, game_length=None, start_state_fn=None, display=False, info=True):
if (a1 is None):
ap = AgentPair(a0, a0, allow_duplicate_agents=True)
trajs_0 = trajs_1 = self.evaluate_agent_pair(ap, num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
else:
trajs_0 = self.evaluate_agent_pair(AgentPair(a0, a1), num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
trajs_1 = self.evaluate_agent_pair(AgentPair(a1, a0), num_games=num_games, game_length=game_length, start_state_fn=start_state_fn, display=display, info=info)
return (trajs_0, trajs_1)
def check_trajectories(trajectories, from_json=False, **kwargs):
if (not from_json):
AgentEvaluator._check_standard_traj_keys(set(trajectories.keys()))
AgentEvaluator._check_right_types(trajectories)
AgentEvaluator._check_trajectories_dynamics(trajectories, **kwargs)
def _check_standard_traj_keys(traj_keys_set):
default_traj_keys = DEFAULT_TRAJ_KEYS
assert (traj_keys_set == set(default_traj_keys)), 'Keys of traj dict did not match standard form.\nMissing keys: {}\nAdditional keys: {}'.format([k for k in default_traj_keys if (k not in traj_keys_set)], [k for k in traj_keys_set if (k not in default_traj_keys)])
def _check_right_types(trajectories):
for idx in range(len(trajectories['ep_states'])):
(states, actions, rewards) = (trajectories['ep_states'][idx], trajectories['ep_actions'][idx], trajectories['ep_rewards'][idx])
(mdp_params, env_params) = (trajectories['mdp_params'][idx], trajectories['env_params'][idx])
assert all(((type(j_a) is tuple) for j_a in actions))
assert all(((type(s) is OvercookedState) for s in states))
assert (type(mdp_params) is dict)
assert (type(env_params) is dict)
    def _check_trajectories_dynamics(trajectories, verbose=True):
        """Replay each episode through a rebuilt env and assert the recorded
        states and rewards match the simulated ones.

        Skipped entirely for variable MDPs, where deterministic replay is not
        supported.
        """
        if any((env_params['_variable_mdp'] for env_params in trajectories['env_params'])):
            if verbose:
                print('Skipping trajectory consistency checking because MDP was recognized as variable. Trajectory consistency checking is not yet supported for variable MDPs.')
            return
        (_, envs) = AgentEvaluator.get_mdps_and_envs_from_trajectories(trajectories)
        for idx in range(len(trajectories['ep_states'])):
            (states, actions, rewards) = (trajectories['ep_states'][idx], trajectories['ep_actions'][idx], trajectories['ep_rewards'][idx])
            simulation_env = envs[idx]
            assert (len(states) == len(actions) == len(rewards)), '# states {}\t# actions {}\t# rewards {}'.format(len(states), len(actions), len(rewards))
            # Step the simulator from each recorded state and compare the
            # outcome with the next recorded state/reward.
            for i in range((len(states) - 1)):
                curr_state = states[i]
                simulation_env.state = curr_state
                (next_state, reward, done, info) = simulation_env.step(actions[i])
                assert (states[(i + 1)] == next_state), 'States differed (expected vs actual): {}\n\nexpected dict: \t{}\nactual dict: \t{}'.format(simulation_env.display_states(states[(i + 1)], next_state), states[(i + 1)].to_dict(), next_state.to_dict())
                assert (rewards[i] == reward), '{} \t {}'.format(rewards[i], reward)
def get_mdps_and_envs_from_trajectories(trajectories):
(mdps, envs) = ([], [])
for idx in range(len(trajectories['ep_lengths'])):
mdp_params = copy.deepcopy(trajectories['mdp_params'][idx])
env_params = copy.deepcopy(trajectories['env_params'][idx])
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
env = OvercookedEnv.from_mdp(mdp, **env_params)
mdps.append(mdp)
envs.append(env)
return (mdps, envs)
def save_trajectories(trajectories, filename):
AgentEvaluator.check_trajectories(trajectories)
if any(((t['env_params']['start_state_fn'] is not None) for t in trajectories)):
print('Saving trajectories with a custom start state. This can currently cause things to break when loading in the trajectories.')
save_pickle(trajectories, filename)
def load_trajectories(filename):
trajs = load_pickle(filename)
AgentEvaluator.check_trajectories(trajs)
return trajs
def save_traj_as_json(trajectory, filename):
assert (set(DEFAULT_TRAJ_KEYS) == set(trajectory.keys())), '{} vs\n{}'.format(DEFAULT_TRAJ_KEYS, trajectory.keys())
AgentEvaluator.check_trajectories(trajectory)
trajectory = AgentEvaluator.make_trajectories_json_serializable(trajectory)
save_as_json(trajectory, filename)
def make_trajectories_json_serializable(trajectories):
dict_traj = copy.deepcopy(trajectories)
dict_traj['ep_states'] = [[ob.to_dict() for ob in one_ep_obs] for one_ep_obs in trajectories['ep_states']]
for k in dict_traj.keys():
dict_traj[k] = list(dict_traj[k])
dict_traj['ep_actions'] = [list(lst) for lst in dict_traj['ep_actions']]
dict_traj['ep_rewards'] = [list(lst) for lst in dict_traj['ep_rewards']]
dict_traj['ep_dones'] = [list(lst) for lst in dict_traj['ep_dones']]
dict_traj['ep_returns'] = [int(val) for val in dict_traj['ep_returns']]
dict_traj['ep_lengths'] = [int(val) for val in dict_traj['ep_lengths']]
del dict_traj['ep_infos']
del dict_traj['metadatas']
return dict_traj
def load_traj_from_json(filename):
traj_dict = load_from_json(filename)
traj_dict['ep_states'] = [[OvercookedState.from_dict(ob) for ob in curr_ep_obs] for curr_ep_obs in traj_dict['ep_states']]
traj_dict['ep_actions'] = [[tuple(((tuple(a) if (type(a) is list) else a) for a in j_a)) for j_a in ep_acts] for ep_acts in traj_dict['ep_actions']]
return traj_dict
def merge_trajs(trajs_n):
metadatas_merged = merge_dictionaries([trajs['metadatas'] for trajs in trajs_n])
merged_trajs = merge_dictionaries(trajs_n)
merged_trajs['metadatas'] = metadatas_merged
return merged_trajs
def remove_traj_idx(trajs, idx):
metadatas = trajs['metadatas']
del trajs['metadatas']
removed_idx_d = rm_idx_from_dict(trajs, idx)
removed_idx_metas = rm_idx_from_dict(metadatas, idx)
trajs['metadatas'] = metadatas
removed_idx_d['metadatas'] = removed_idx_metas
return removed_idx_d
def take_traj_indices(trajs, indices):
subset_trajs = take_indexes_from_dict(trajs, indices, keys_to_ignore=['metadatas'])
subset_trajs['metadatas'] = take_indexes_from_dict(trajs['metadatas'], indices)
return subset_trajs
def add_metadata_to_traj(trajs, metadata_fn, input_keys):
metadata_fn_input = [trajs[k] for k in input_keys]
(metadata_key, metadata_data) = metadata_fn(metadata_fn_input)
assert (metadata_key not in trajs['metadatas'].keys())
trajs['metadatas'][metadata_key] = metadata_data
return trajs
def add_observations_to_trajs_in_metadata(trajs, encoding_fn):
def metadata_fn(data):
traj_ep_states = data[0]
obs_metadata = []
for one_traj_states in traj_ep_states:
obs_metadata.append([encoding_fn(s) for s in one_traj_states])
return ('ep_obs_for_both_agents', obs_metadata)
return AgentEvaluator.add_metadata_to_traj(trajs, metadata_fn, ['ep_states'])
    def events_visualization(trajs, traj_index):
        """Placeholder for event-based trajectory visualization (not implemented)."""
        pass
# NOTE(review): the bare `_model` line below looks like a truncated decorator
# (presumably `@register_model`) — confirm against the original source.
_model
def seresnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build an SE-ResNeXt50 (32x4d) model.

    Args:
        pretrained: load weights described by the default config when True.
        num_classes: size of the classifier head.
        in_chans: number of input image channels.
        **kwargs: forwarded to the SENet constructor.
    """
    default_cfg = default_cfgs['seresnext50_32x4d']
    # [3, 4, 6, 3] is the per-stage block count of the ResNeXt-50 layout.
    model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def register_cv_hardcoded_model(name, *args, **kw):
    """Register the autogenerated CV model file `name` via a ParamDictCVMOdelHandler.

    Extra args/kwargs are forwarded to the handler constructor.
    """
    handler = ParamDictCVMOdelHandler(*args, **kw)
    handler.register_autogenerated(generated_file_name_or_path=name)
def test_phi_minus_phi_plus():
    """Across many seeded scenarios, reordering k1's state must recover phi_minus."""
    for seed in range(200):
        (k1, _k2, _k3, _k4, _a3) = create_scenario(phi_minus, phi_plus, seed)
        reordered = correct_order(k1.state, k1.keys)
        assert numpy.array_equal(reordered, phi_minus)
def process_checkpoint(in_file, out_file):
    """Strip optimizer state from a checkpoint and publish it with a hashed name.

    Loads `in_file` on CPU, drops the 'optimizer' entry if present, saves to
    `out_file`, then renames the result to
    `<out_file minus .pth>-<sha256[:8]>_<YYYYMMDD>.pth`.
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # Optimizer state is training-only and bloats released checkpoints.
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    torch.save(checkpoint, out_file)
    # Hash with hashlib instead of shelling out to `sha256sum` (portable, no
    # external-binary dependency); read in chunks to bound memory use.
    digest = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    sha = digest.hexdigest()
    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
    date_now = date.today().strftime('%Y%m%d')
    final_file = out_file_name + f'-{sha[:8]}_{date_now}.pth'
    # os.replace is synchronous and atomic, unlike the original fire-and-forget
    # `subprocess.Popen(['mv', ...])`, which could race with later readers.
    os.replace(out_file, final_file)
def getColsPermutations(cols, num):
    """Return comma-joined permutations of `cols` of every length from `num` down to 1.

    Args:
        cols: sequence of column names.
        num: maximum permutation length; non-positive values yield [].

    Returns:
        list[str]: each permutation rendered as 'a, b', longest lengths first.
    """
    # Iterative form of the original recursion: no recursion-depth limit for
    # large `num`, and no longer recurses forever when `num` is negative.
    return [', '.join(p) for k in range(num, 0, -1) for p in permutations(cols, k)]
def get_transformation(args):
    """Return the standard eval transform: tensor conversion + [-1, 1] normalization.

    `args` is accepted for interface compatibility but unused here.
    """
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    return transforms.Compose([transforms.ToTensor(), normalize])
def pred_fn_wrapper(pred_rng, params, batch, pred_fn, under_pmap):
    """Run `pred_fn` on `batch` and pair its outputs with the batch's indices.

    Pops '__idx__' out of `batch` first so the prediction function never sees
    it. Under pmap, the tagged results are all-gathered over the 'batch' axis.
    """
    example_indices = batch.pop('__idx__')
    raw = pred_fn(pred_rng=pred_rng, params=params, batch=batch)
    tagged = {'raw_preds': raw, '__idx__': example_indices}
    if not under_pmap:
        return tagged
    return jax.lax.all_gather(tagged, axis_name='batch')
def default_loader(path):
    """Load an image from `path` using whichever torchvision backend is active."""
    from torchvision import get_image_backend
    loader = accimage_loader if get_image_backend() == 'accimage' else pil_loader
    return loader(path)
# NOTE(review): `_start_docstrings(...)` below looks like a truncated decorator
# (presumably `@add_start_docstrings`) — confirm against the transformers source.
_start_docstrings('\n    CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', CAMEMBERT_START_DOCSTRING)
class CamembertForMultipleChoice(RobertaForMultipleChoice):
    """CamemBERT multiple-choice model: inherits all behavior from
    RobertaForMultipleChoice and only swaps in the CamemBERT config class."""
    config_class = CamembertConfig
def run(target, method, thread, round):
    """Run the `target` benchmark binary and parse its (X, Y) output pairs.

    The binary's stdout is captured in the module-level `tmp_file`; its first
    two lines are headers and each following line holds 'x y' (int, float).
    """
    exe = target + '/' + target
    print(exe, '-m', method, '-t', thread, '-r', round)
    cmd = [exe, '-m', method, '-t', str(thread), '-r', str(round)]
    with open(tmp_file, 'w') as ofs:
        subprocess.call(cmd, stdout=ofs)
    xs = []
    ys = []
    with open(tmp_file, 'r') as ifs:
        # Skip the two header lines.
        ifs.readline()
        ifs.readline()
        for line in ifs:
            fields = line.split()
            assert len(fields) == 2, 'output line must have exactly two numbers'
            xs.append(int(fields[0]))
            ys.append(float(fields[1]))
    return (xs, ys)
def create_cookie(name, value, **kwargs):
    """Build a cookielib.Cookie with sensible defaults, overridden by `kwargs`.

    Raises:
        TypeError: when a kwarg does not correspond to a known Cookie field.
    """
    spec = {'version': 0, 'name': name, 'value': value, 'port': None, 'domain': '', 'path': '/', 'secure': False, 'expires': None, 'discard': True, 'comment': None, 'comment_url': None, 'rest': {'HttpOnly': None}, 'rfc2109': False}
    unknown = set(kwargs) - set(spec)
    if unknown:
        raise TypeError('create_cookie() got unexpected keyword arguments: %s' % list(unknown))
    spec.update(kwargs)
    # Derived flags cookielib expects alongside the raw values.
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])
    return cookielib.Cookie(**spec)
class ImageReferenceLabelDataset(Dataset):
    """Dataset of (image, reference) pairs plus their segmentation label masks.

    For index i, __getitem__ returns:
        (image name, reference name, transformed image tensor,
         transformed reference tensor, image label mask, reference label mask)

    Label masks are read from '<name-prefix>_raw.png' files located next to
    each image, converted to label indices via the category palette, and
    resized (nearest-neighbor) to `resolution`.
    """
    def __init__(self, image_path: str, reference_path: str, resolution: int, category: str, transform=None):
        super().__init__()
        self.resolution = resolution
        self.transform = transform
        # NOTE(review): these are indexed per-sample in __getitem__, so they
        # are really parallel sequences of paths despite the `str` annotations
        # — confirm against callers.
        self.image_paths = image_path
        self.reference_paths = reference_path
        self.palette = get_palette(category)
    def __len__(self):
        return len(self.image_paths)
    def __getitem__(self, idx):
        image_path = self.image_paths[idx]
        reference_path = self.reference_paths[idx]
        # File names are '<name>_<suffix>'; the prefix identifies the sample.
        imagename = os.path.basename(image_path).split('_')[0]
        reference_path = self.reference_paths[idx]  # redundant re-assignment (kept as-is)
        referencename = os.path.basename(reference_path).split('_')[0]
        dirname = os.path.dirname(image_path)
        ref_dirname = os.path.dirname(reference_path)
        image_mask_path = os.path.join(dirname, (imagename + '_raw.png'))
        reference_mask_path = os.path.join(ref_dirname, (referencename + '_raw.png'))
        pil_image = Image.open(image_path)
        pil_image = pil_image.convert('RGB')
        assert (pil_image.size[0] == pil_image.size[1]), f'Only square images are supported: ({pil_image.size[0]}, {pil_image.size[1]})'
        tensor_image = self.transform(pil_image)
        pil_ref = Image.open(reference_path)
        pil_ref = pil_ref.convert('RGB')
        tensor_ref = self.transform(pil_ref)
        mask_tensor = []
        # Convert each raw mask image to a label map at the working resolution.
        for label in [image_mask_path, reference_mask_path]:
            label_np = cv2.imread(label)
            mask = to_labels(label_np, self.palette).astype('uint8')
            mask = cv2.resize(mask, (self.resolution, self.resolution), interpolation=cv2.INTER_NEAREST)
            tensor_label = torch.from_numpy(mask)
            mask_tensor.append(tensor_label)
        return (imagename, referencename, tensor_image, tensor_ref, mask_tensor[0], mask_tensor[1])
def load_colbert(args, do_print=True):
    """Load a ColBERT model + checkpoint, warning on mismatched saved arguments.

    For a fixed set of model hyperparameters, compares the values stored in
    the checkpoint's 'arguments' against those in `args`; rank 0 also dumps
    the stored arguments.
    """
    (colbert, checkpoint) = load_model(args, do_print)
    for key in ['query_maxlen', 'doc_maxlen', 'dim', 'similarity', 'amp']:
        if ('arguments' not in checkpoint) or (not hasattr(args, key)):
            continue
        saved = checkpoint['arguments']
        if (key in saved) and (saved[key] != getattr(args, key)):
            Run.warn(f"Got checkpoint['arguments']['{key}'] != args.{key} (i.e., {saved[key]} != {getattr(args, key)})")
    if 'arguments' in checkpoint:
        if args.rank < 1:
            print(ujson.dumps(checkpoint['arguments'], indent=4))
    if do_print:
        print('\n')
    return (colbert, checkpoint)
class _CallbacksManager(Copyable):
    """Maps event names to ordered lists of callback functions.

    Callbacks are deliberately excluded from pickling (see __getstate__), so
    copies and unpickled instances start with no registered callbacks.
    """
    callbacks: MutableMapping[(str, MutableSequence[Callable])]
    def __init__(self):
        self.callbacks = {}
    def __getstate__(self):
        # Callbacks may hold unpicklable closures; drop them from the state.
        state = self.__dict__.copy()
        state.pop('callbacks')
        return state
    def get(self, event: str) -> Sequence[Callable]:
        """Return the callbacks registered for `event` ([] when none)."""
        return self.callbacks.get(event, [])
    def add_callback(self, event: str, fn: Callable) -> None:
        """Register `fn` to be invoked for `event`."""
        self.callbacks.setdefault(event, []).append(fn)
def img_tensorize(im: str):
    """Return an RGB image array for `im`, a local file path or a URL."""
    assert isinstance(im, str)
    if not os.path.isfile(im):
        # Not a local file: treat the string as a URL.
        img = get_image_from_url(im)
        assert img is not None, f'could not connect to: {im}'
        return img
    return np.array(Image.open(im).convert('RGB'))
class ShuffleProduct(ShuffleProduct_abstract):
    """The shuffle product of two words: all interleavings of `l1` and `l2`
    that preserve the internal order of each word."""
    def __init__(self, l1, l2, element_constructor=None):
        """Store both words; default the element constructor to l1's parent's."""
        assert (isinstance(l1, Iterable) and isinstance(l2, Iterable))
        if (element_constructor is None):
            try:
                element_constructor = l1.parent()._element_constructor_
            except AttributeError:
                pass
        ShuffleProduct_abstract.__init__(self, list(l1), list(l2), element_constructor)
    def _repr_(self):
        return ('Shuffle product of: %s and %s' % (self._l1, self._l2))
    def _ascii_art_(self):
        from sage.typeset.ascii_art import ascii_art
        return (ascii_art('Shuffle product of:') * ((ascii_art(self._l1) + ascii_art(' and ')) + ascii_art(self._l2)))
    def __iter__(self):
        """Iterate over all shuffles of l1 and l2.

        Internally walks the binary words with m zeros (slots for l1) and n
        ones (slots for l2), mutating `l` in place. `gen`/`neg` yield once per
        single swap of two positions, so each yield corresponds to the next
        combination in a revolving-door-style ordering; `neg` walks the same
        combinations in the mirrored direction.
        """
        def swap(i, j):
            # Swap positions i and j of `l` (1-based indices).
            (l[(i - 1)], l[(j - 1)]) = (l[(j - 1)], l[(i - 1)])
        def gen(n, k):
            if (0 < k < n):
                for _ in gen((n - 1), k):
                    (yield)
                if (k == 1):
                    swap(n, (n - 1))
                else:
                    swap(n, (k - 1))
                (yield)
                for _ in neg((n - 1), (k - 1)):
                    (yield)
        def neg(n, k):
            # Mirror of `gen`: same swaps, reverse traversal order.
            if (0 < k < n):
                for _ in gen((n - 1), (k - 1)):
                    (yield)
                if (k == 1):
                    swap(n, (n - 1))
                else:
                    swap(n, (k - 1))
                (yield)
                for _ in neg((n - 1), k):
                    (yield)
        m = len(self._l1)
        n = len(self._l2)
        mn = (m + n)
        # l[k] == 0: position k takes the next letter of l1; 1: of l2.
        l = (([0] * m) + ([1] * n))
        EC = self._element_constructor_
        (yield EC((self._l1 + self._l2)))
        for _ in gen(mn, m):
            l1 = iter(self._l1)
            l2 = iter(self._l2)
            (yield EC([(next(l2) if l[k] else next(l1)) for k in range(mn)]))
    def __contains__(self, iterable):
        """Whether `iterable` is one of the shuffles of l1 and l2.

        Greedily matches each letter against the next unmatched letter of l1,
        then of l2; once either word is exhausted the remainder must equal the
        other word's tail. NOTE(review): the greedy match and the final
        off-by-one return condition assume non-ambiguous letter overlap and
        non-empty words — confirm against the upstream test suite.
        """
        if (not isinstance(iterable, type(self._element_constructor_([])))):
            return False
        l1 = self._l1
        l2 = self._l2
        len_l1 = len(l1)
        len_l2 = len(l2)
        i_l1 = i_l2 = 0
        iterable = list(iterable)
        for (i, el) in enumerate(iterable):
            if (l1[i_l1] == el):
                i_l1 += 1
            elif (l2[i_l2] == el):
                i_l2 += 1
            else:
                return False
            if (i_l1 == len_l1):
                return (iterable[(i + 1):] == l2[i_l2:])
            if (i_l2 == len_l2):
                return (iterable[(i + 1):] == l1[i_l1:])
        return (((i_l1 + 1) == len_l1) and ((i_l2 + 1) == len_l2))
    def cardinality(self):
        """Number of shuffles: binomial(len(l1) + len(l2), len(l1))."""
        ll1 = Integer(len(self._l1))
        ll2 = Integer(len(self._l2))
        return (ll1 + ll2).binomial(ll1)
class PDB2Fmap():
    """Embed a PDB protein structure as MolMap-style 2-D feature maps.

    Per-residue coordinates (and optional per-residue features) are arranged
    on a 2-D grid by an AggMolMap fitted on the residue xyz table.

    Attributes set by fit(): pdb_file, pdb, embd_chain, dfpdb, df_embd, dfx,
    mp (the fitted AggMolMap), fmap_shape.
    """
    def __init__(self, embd_grain='CA', fmap_shape=None):
        # embd_grain: which atom(s) represent each residue
        # ('CA', 'CB', 'mean' of all atoms, or 'all' atoms kept individually).
        self.embd_grain = embd_grain
        self.fmap_shape = fmap_shape
    def fit(self, pdb_file, embd_chain=None):
        """Parse `pdb_file`, build the per-residue table and fit the map layout.

        When `embd_chain` is given, only atoms of that chain are used.
        """
        self.pdb_file = pdb_file
        self.pdb = PandasPdb().read_pdb(self.pdb_file)
        self.embd_chain = embd_chain
        if (embd_chain != None):
            self.dfpdb = self.pdb.df['ATOM'][(self.pdb.df['ATOM'].chain_id == embd_chain)]
        else:
            self.dfpdb = self.pdb.df['ATOM']
        # Reduce each residue to a single (x, y, z, b_factor) row according to
        # the chosen grain; 'all' keeps one row per atom.
        if (self.embd_grain == 'mean'):
            df_embd = self.dfpdb.groupby(['residue_number', 'residue_name']).apply(get_pdb_xyzb_mean).apply(pd.Series)
            df_embd.columns = ['x_coord', 'y_coord', 'z_coord', 'b_factor']
            df_embd = df_embd.reset_index()
        if (self.embd_grain == 'CB'):
            df_embd = self.dfpdb.groupby(['residue_number', 'residue_name']).apply(get_pdb_xyzb_cb).apply(pd.Series)
            df_embd.columns = ['x_coord', 'y_coord', 'z_coord', 'b_factor']
            df_embd = df_embd.reset_index()
        if (self.embd_grain == 'CA'):
            df_embd = self.dfpdb.groupby(['residue_number', 'residue_name']).apply(get_pdb_xyzb_ca).apply(pd.Series)
            df_embd.columns = ['x_coord', 'y_coord', 'z_coord', 'b_factor']
            df_embd = df_embd.reset_index()
        if (self.embd_grain == 'all'):
            df_embd = self.dfpdb[['residue_name', 'residue_number', 'x_coord', 'y_coord', 'z_coord', 'b_factor']]
        # Index rows as '<row>-<one-letter amino acid>' for readability.
        df_embd['residue_name_1aa'] = df_embd['residue_name'].map(PDB.protein_letters_3to1)
        df_embd.index = ((df_embd.index.astype(str) + '-') + df_embd['residue_name_1aa'])
        dfx = df_embd[['x_coord', 'y_coord', 'z_coord']].T
        self.dfx = dfx
        self.df_embd = df_embd
        # Fit the 2-D grid layout on residue coordinates (euclidean metric).
        self.mp = AggMolMap(dfx, metric='euclidean')
        self.mp.fit(fmap_shape=self.fmap_shape, cluster_channels=1)
        self.fmap_shape = self.mp.fmap_shape
    def transform_xyz(self, scale=True, feature_range=(0, 1)):
        """Return feature maps of the (optionally min-max scaled) xyz coordinates."""
        if scale:
            scaler = MinMaxScaler(feature_range=feature_range)
            x = scaler.fit_transform(self.dfx.T).T
        else:
            x = self.dfx.values
        X = self.mp.batch_transform(x, scale=False)
        return X
    def transofrm_bf(self, scale=True, feature_range=(0, 1)):
        """Return a feature map of per-residue B-factors.

        NOTE(review): method name is misspelled ('transofrm'); kept for
        backward compatibility with existing callers.
        """
        if scale:
            scaler = MinMaxScaler(feature_range=feature_range)
            x = scaler.fit_transform(self.df_embd[['b_factor']]).T
        else:
            x = self.df_embd[['b_factor']].values.T
        X = self.mp.transform(x[0], scale=False)
        return X
    def transofrm_pkt(self, pkt_file):
        """Return a binary feature map marking residues present in `pkt_file`
        (a pocket PDB).

        NOTE(review): method name is misspelled ('transofrm'); kept for
        backward compatibility with existing callers.
        """
        self.pkt_file = pkt_file
        self.pkt = PandasPdb().read_pdb(self.pkt_file)
        if (self.embd_chain != None):
            self.dfpkt = self.pkt.df['ATOM'][(self.pkt.df['ATOM'].chain_id == self.embd_chain)]
        else:
            self.dfpkt = self.pkt.df['ATOM']
        pkt_residue_number = self.dfpkt.residue_number.unique()
        # 1 for residues that belong to the pocket, 0 otherwise.
        self.df_embd['pocket'] = (self.df_embd.residue_number.isin(pkt_residue_number) * 1)
        x = self.df_embd[['pocket']].values.T
        X = self.mp.transform(x[0], scale=False)
        return X
    def transform_custom(self, aap_df, scale=True, feature_range=(0, 1)):
        """Map per-amino-acid property columns of `aap_df` onto feature maps.

        `aap_df` columns are property names; rows are indexed by one-letter
        amino-acid codes.
        """
        df_custom = pd.DataFrame(index=self.df_embd.index)
        for (k, v) in aap_df.to_dict().items():
            df_custom[k] = self.df_embd.residue_name_1aa.map(v)
        self.df_custom = df_custom
        if scale:
            scaler = MinMaxScaler(feature_range=feature_range)
            x = scaler.fit_transform(self.df_custom).T
        else:
            x = self.df_custom.values.T
        X = self.mp.batch_transform(x, scale=False)
        return X
    def transform_intrinsic(self, scale=True, feature_range=(0, 1)):
        """Return feature maps for the built-in IntrinsicAAPs amino-acid properties."""
        df_intrinsic = pd.DataFrame(index=self.df_embd.index)
        for (k, v) in IntrinsicAAPs.items():
            df_intrinsic[k] = self.df_embd.residue_name_1aa.map(v)
        self.df_intrinsic = df_intrinsic
        if scale:
            scaler = MinMaxScaler(feature_range=feature_range)
            x = scaler.fit_transform(self.df_intrinsic).T
        else:
            x = self.df_intrinsic.values.T
        X = self.mp.batch_transform(x, scale=False)
        return X
class CongruenceSubgroupBase(ArithmeticSubgroup):
    """Base class for congruence subgroups of SL(2, Z) of a given level."""
    def __init__(self, level):
        """Validate and store the (positive integer) level."""
        level = ZZ(level)
        if (level <= 0):
            raise ArithmeticError('Congruence groups only defined for positive levels.')
        self.__level = level
        ArithmeticSubgroup.__init__(self)
    def _an_element_(self):
        """Return a sample element [[1-N, -N], [N, 1+N]] (determinant 1)."""
        N = self.level()
        return self([(1 - N), (- N), N, (1 + N)])
    def is_congruence(self):
        """Always True for this class, by definition."""
        return True
    def level(self):
        """Return the level N of this congruence subgroup."""
        return self.__level
    def __eq__(self, other):
        """Compare congruence subgroups by level, index and image mod N;
        compare against permutation subgroups via as_permutation_group()."""
        if (not isinstance(other, ArithmeticSubgroup)):
            return False
        elif is_CongruenceSubgroup(other):
            # Level 1 on both sides: both groups are all of SL(2, Z).
            if (self.level() == other.level() == 1):
                return True
            return ((self.level() == other.level()) and (self.index() == other.index()) and (self.image_mod_n() == other.image_mod_n()))
        from sage.modular.arithgroup.arithgroup_perm import ArithmeticSubgroup_Permutation_class
        if isinstance(other, ArithmeticSubgroup_Permutation_class):
            return (self.as_permutation_group() == other)
        else:
            raise NotImplementedError
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Must stay consistent with __eq__, which keys on level and index.
        return hash((self.level(), self.index()))
def main():
    """Train and evaluate a Camelyon17 patch classifier per training center.

    For each center 1-4: trains with the augmentation named by --name
    (early-stopped on validation PR-AUC), reloads the best checkpoint, then
    tests on every center (and on all held-out centers combined) and prints
    per-dataset PR-AUC and tumor F1.
    """
    parser = ArgumentParser()
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--name', default=None, type=str)
    parser.add_argument('--weighted', action='store_true', help='model trains with weighted loss when flag is set')
    parser = pl.Trainer.add_argparse_args(parser)
    parser = Classifier.add_model_specific_args(parser)
    args = parser.parse_args()
    # --name selects the training-time augmentation pipeline; unknown or
    # missing names fall back to no augmentation.
    augmentations = {None: no_augmentations, 'no_augmentations': no_augmentations, 'geom_augmentations': geom_augmentations, 'basic_augmentations': basic_augmentations, 'color_augmentations': color_augmentations, 'color_augmentations_light': color_augmentations_light, 'gan_augmentations': gan_augmentations}
    name = args.name
    if (name in augmentations.keys()):
        aug = augmentations[name]
    else:
        aug = no_augmentations
    print(aug)
    gan_aug = False
    if (args.name == 'gan_augmentations'):
        # GAN augmentation is memory-heavy; force a small batch size.
        args.batch_size = 8
        gan_aug = True
    print('gan_aug=', gan_aug)
    for center in [1, 2, 3, 4]:
        print('load data')
        data_dir = '/storage/groups/haicu/datasets/2101_camelyon17/patches/'
        train_dataset = OneCenterLoad(data_dir, center, 'train', transform=aug)
        val_dataset = OneCenterLoad(data_dir, center, 'val', transform=no_augmentations)
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=6)
        val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=6)
        model = Classifier(args.learning_rate, args.l2_reg, args.weighted, gan_aug=gan_aug, transform=no_augmentations)
        logger = TensorBoardLogger('lightning_logs', name=name)
        print(logger.log_dir)
        # NOTE(review): early stopping monitors 'val_metrics/PR_AUC' but
        # checkpointing monitors 'PR_AUC' — confirm both keys are logged.
        early_stop_callback = EarlyStopping(monitor='val_metrics/PR_AUC', min_delta=0.0, patience=20, verbose=False, mode='max')
        checkpoint_callback = ModelCheckpoint(monitor='PR_AUC', dirpath=(logger.log_dir + '/checkpoints/'), filename='Classifier-Center0-{epoch:02d}-{PR_AUC:.4f}', save_top_k=1, mode='max')
        trainer = pl.Trainer.from_argparse_args(args)
        trainer.logger = logger
        trainer.callbacks = [checkpoint_callback, early_stop_callback]
        trainer.val_check_interval = 0.5
        trainer.fit(model, train_loader, val_loader)
        del train_dataset, val_dataset
        # Evaluate the best (not last) checkpoint.
        model = Classifier.load_from_checkpoint(checkpoint_path=checkpoint_callback.best_model_path)
        print(checkpoint_callback.best_model_path)
        # Test on each single center plus the union of non-training centers.
        test_centers = [[i] for i in range(5)]
        test_all = list(range(5))
        test_all.remove(center)
        test_centers.append(test_all)
        results = []
        for c in test_centers:
            print(f'results for dataset {c}')
            if (c == [center]):
                # The training center is evaluated on its own validation split.
                test_dataset = OneCenterLoad(data_dir, center, 'val')
            else:
                test_dataset = MultipleCentersSeq(data_dir, c)
            test_loader = DataLoader(test_dataset, batch_size=128, num_workers=1)
            result = trainer.test(test_dataloaders=test_loader)
            results.append(result)
        print('center', center)
        print(test_centers)
        print('PR_AUC')
        pr_auc = [round(res[0]['PR_AUC'], 4) for res in results]
        print(pr_auc)
        print('F1_tumor')
        f1 = [round(res[0]['F1_tumor'], 4) for res in results]
        print(f1)
# NOTE(review): the bare `_function` line below looks like a truncated decorator
# (presumably `@cached_function`) — confirm against the original source.
_function
def does_backend_handle_base_ring(base_ring, backend):
    """Return True when the polyhedra `backend` supports `base_ring`.

    Probes by constructing a 0-dimensional Polyhedra parent; a ValueError
    from the constructor means the combination is unsupported.
    """
    try:
        Polyhedra(base_ring, 0, backend)
    except ValueError:
        return False
    return True
class Getitem(Expr):
    """Subscript expression node: `node[arg]`."""
    fields = ('node', 'arg', 'ctx')
    def as_const(self, eval_ctx=None):
        """Constant-fold the subscript; only 'load' contexts are foldable."""
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.ctx != 'load':
            raise Impossible()
        try:
            obj = self.node.as_const(eval_ctx)
            key = self.arg.as_const(eval_ctx)
            return self.environment.getitem(obj, key)
        except Exception:
            # Any runtime failure means the expression is not constant.
            raise Impossible()
    def can_assign(self):
        # Subscript targets cannot be assigned to in templates.
        return False
# NOTE(review): `_params({...})` below looks like a truncated decorator
# (presumably `@validate_params` with this constraints dict) — confirm.
_params({'data_home': [None, str], 'filter_data': [None, tuple], 'download_if_missing': ['boolean'], 'random_state': ['random_state'], 'shuffle': ['boolean'], 'verbose': ['boolean']}, prefer_skip_nested_validation=True)
def fetch_datasets(*, data_home=None, filter_data=None, download_if_missing=True, random_state=None, shuffle=False, verbose=False):
    """Fetch the Zenodo benchmark datasets, downloading them if needed.

    Args:
        data_home: cache directory (defaults to the standard data home).
        filter_data: tuple of dataset names (str) and/or IDs (int 1..27) to
            fetch; None fetches everything.
        download_if_missing: download missing files; otherwise raise IOError.
        random_state / shuffle: optionally shuffle each dataset's rows.
        verbose: print download progress.

    Returns:
        OrderedDict mapping dataset name -> Bunch(data, target, DESCR).
    """
    data_home = get_data_home(data_home=data_home)
    zenodo_dir = join(data_home, 'zenodo')
    datasets = OrderedDict()
    # Resolve the requested datasets to a list of canonical names.
    if (filter_data is None):
        filter_data_ = MAP_NAME_ID.keys()
    else:
        list_data = MAP_NAME_ID.keys()
        filter_data_ = []
        for it in filter_data:
            if isinstance(it, str):
                if (it not in list_data):
                    raise ValueError(f'{it} is not a dataset available. The available datasets are {list_data}')
                else:
                    filter_data_.append(it)
            elif isinstance(it, int):
                if ((it < 1) or (it > 27)):
                    raise ValueError(f'The dataset with the ID={it} is not an available dataset. The IDs are {range(1, 28)}')
                else:
                    filter_data_.append(MAP_ID_NAME[it])
            else:
                raise ValueError(f'The value in the tuple should be str or int. Got {type(it)} instead.')
    for it in filter_data_:
        filename = ((PRE_FILENAME + str(MAP_NAME_ID[it])) + POST_FILENAME)
        filename = join(zenodo_dir, filename)
        available = isfile(filename)
        if (download_if_missing and (not available)):
            makedirs(zenodo_dir, exist_ok=True)
            if verbose:
                print(('Downloading %s' % URL))
            # The archive contains every dataset, so one download covers all.
            f = BytesIO(urlopen(URL).read())
            tar = tarfile.open(fileobj=f)
            tar.extractall(path=zenodo_dir)
        elif ((not download_if_missing) and (not available)):
            raise IOError('Data not found and `download_if_missing` is False')
        data = np.load(filename)
        (X, y) = (data['data'], data['label'])
        if shuffle:
            # Shuffle X and y with the same permutation.
            ind = np.arange(X.shape[0])
            rng = check_random_state(random_state)
            rng.shuffle(ind)
            X = X[ind]
            y = y[ind]
        datasets[it] = Bunch(data=X, target=y, DESCR=it)
    return datasets
class VOCSegmentation(Dataset):
    """PASCAL VOC 2012 semantic-segmentation dataset.

    Reads the image/mask file lists from ImageSets/Segmentation/<split>.txt
    and yields {'image': PIL RGB image, 'label': PIL mask}, optionally passed
    through `transform`.
    """
    def __init__(self, base_dir=Path.db_root_dir('pascal'), split='train', transform=None):
        """Index all (image, mask) pairs for the requested split(s).

        Args:
            base_dir: VOC dataset root containing JPEGImages/ and
                SegmentationClass/.
            split: a split name or a list of split names (e.g. 'train', 'val').
            transform: callable applied to each sample dict.
        """
        super().__init__()
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.transform = transform
        _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation')
        self.im_ids = []
        self.images = []
        self.categories = []
        # Each split file lists one sample id per line; resolve both the jpg
        # image and the png mask and verify they exist up front.
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir, (splt + '.txt'))), 'r') as f:
                lines = f.read().splitlines()
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
        assert (len(self.images) == len(self.categories))
        print('Number of images in {}: {:d}'.format(split, len(self.images)))
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        (_img, _target) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        if (self.transform is not None):
            sample = self.transform(sample)
        return sample
    def _make_img_gt_point_pair(self, index):
        """Load the RGB image and its segmentation mask for `index`."""
        _img = Image.open(self.images[index]).convert('RGB')
        _target = Image.open(self.categories[index])
        return (_img, _target)
    def __str__(self):
        return (('VOC2012(split=' + str(self.split)) + ')')
class DataTrainingArguments():
    """Arguments controlling what data the model trains and evaluates on.

    NOTE(review): intended for use as a dataclass (decorated upstream), with
    `__post_init__` validating that some data source was provided.
    """
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The training data file (.txt or .csv).'})
    eval_file: Optional[str] = field(default=None, metadata={'help': 'The eval data file (.txt or .csv).'})
    max_seq_length: Optional[int] = field(default=32, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
    mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for MLM (only effective if --do_mlm)'})
    def __post_init__(self):
        """Require a dataset name or data file, and validate the train file extension.

        Raises:
            ValueError: when no data source at all was supplied.
        """
        # BUG FIX: previously checked `self.validation_file`, which is not a
        # field of this class, so the no-data case raised AttributeError
        # instead of the intended ValueError. The eval data field is `eval_file`.
        if ((self.dataset_name is None) and (self.train_file is None) and (self.eval_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        elif (self.train_file is not None):
            extension = self.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json', 'txt']), '`train_file` should be a csv, a json or a txt file.'
def compute_pointer_with_align(model, node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc):
    """Advance the decoder state one step and compute aligned pointer logits.

    Memory-pointer probabilities are projected through the memory-to-column or
    memory-to-table alignment matrix (depending on `node_type`), then taken
    back to log space, clamped away from zero to keep log() finite.
    """
    (new_state, attention_weights) = model._update_state(node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc)
    output = new_state[0]
    logits = model.pointers[node_type](output, desc_enc.memory)
    probs = torch.nn.functional.softmax(logits, dim=1)
    if node_type == 'column':
        align_mat = desc_enc.m2c_align_mat
    else:
        assert node_type == 'table'
        align_mat = desc_enc.m2t_align_mat
    aligned_probs = torch.mm(probs, align_mat).clamp(min=1e-09)
    return (output, new_state, torch.log(aligned_probs), attention_weights)
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale.

    Normalizes each spatial position of an NCHW tensor to unit L2 norm across
    channels, then multiplies by a learned per-channel weight (initialized to
    `scale`).
    """
    def __init__(self, n_channels, scale=1.0):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.scale = scale
        self.eps = 1e-10
        # Fixed: the weight was previously built from an *uninitialized*
        # torch.Tensor and relied on `*= 0.0; += scale`, which propagates
        # NaN/Inf if the uninitialized memory happens to contain them.
        # Initialize deterministically instead.
        self.weight = nn.Parameter(torch.full((self.n_channels,), float(self.scale)))
    def forward(self, x):
        """Args: x of shape (N, C, H, W). Returns a same-shape tensor."""
        # eps keeps the division finite for all-zero channel vectors.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        return (x / norm) * self.weight.view(1, -1, 1, 1)
def test_validate_times():
    """validate_times accepts None but rejects 2-D and unsorted arrays."""
    annotations.validate_times(None)
    bad_inputs = [
        np.array([[0, 1], [0, 2]]),  # wrong dimensionality
        np.array([2, 0]),            # not sorted
        np.array([(-1), 0]),         # negative time
    ]
    for bad in bad_inputs:
        with pytest.raises(ValueError):
            annotations.validate_times(bad)
def get_children(graph, p):
    """Return every node whose adjacency list contains `p`.

    `graph` maps node -> container of successor nodes; the result preserves
    the dict's key order.
    """
    return [node for node in graph if p in graph[node]]
def get_params(argv='1'):
    """Return the SELD (DCASE 2022) parameter dict for experiment preset `argv`.

    Presets '2'-'7' select dataset format (FOA/MIC), feature type (GCC/SALSA)
    and single vs multi ACCDOA; '1' uses the defaults, '999' is a quick test.
    Derived keys (feature_sequence_length, t_pool_size, patience,
    unique_classes) are filled in before returning.
    """
    params = dict(quick_test=True, finetune_mode=False, pretrained_model_weights='models/1_1_foa_dev_split6_model.h5', dataset_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset', feat_label_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset/seld_feat_label', model_dir='models/', dcase_output_dir='results/', mode='dev', dataset='foa', fs=24000, hop_len_s=0.02, label_hop_len_s=0.1, max_audio_len_s=60, nb_mel_bins=64, use_salsalite=False, fmin_doa_salsalite=50, fmax_doa_salsalite=2000, fmax_spectra_salsalite=9000, multi_accdoa=False, thresh_unify=15, label_sequence_length=50, batch_size=128, dropout_rate=0.05, nb_cnn2d_filt=64, f_pool_size=[4, 4, 2], nb_rnn_layers=2, rnn_size=128, self_attn=False, nb_heads=4, nb_fnn_layers=1, fnn_size=128, nb_epochs=100, lr=0.001, average='macro', lad_doa_thresh=20)
    if (argv == '1'):
        pass
    elif (argv == '2'):
        print('FOA + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'foa'
        params['multi_accdoa'] = False
    elif (argv == '3'):
        print('FOA + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'foa'
        params['multi_accdoa'] = True
    elif (argv == '4'):
        print('MIC + GCC + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = False
        params['multi_accdoa'] = False
    elif (argv == '5'):
        print('MIC + SALSA + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = True
        params['multi_accdoa'] = False
    elif (argv == '6'):
        print('MIC + GCC + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = False
        params['multi_accdoa'] = True
    elif (argv == '7'):
        print('MIC + SALSA + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = True
        params['multi_accdoa'] = True
    elif (argv == '999'):
        print('QUICK TEST MODE\n')
        params['quick_test'] = True
    else:
        print('ERROR: unknown argument {}'.format(argv))
        exit()
    # How many feature frames correspond to one label frame.
    feature_label_resolution = int((params['label_hop_len_s'] // params['hop_len_s']))
    params['feature_sequence_length'] = (params['label_sequence_length'] * feature_label_resolution)
    # Temporal pooling reduces feature resolution down to label resolution.
    params['t_pool_size'] = [feature_label_resolution, 1, 1]
    params['patience'] = int(params['nb_epochs'])
    # Class count depends on which year's dataset the directory points to.
    if ('2020' in params['dataset_dir']):
        params['unique_classes'] = 14
    elif ('2021' in params['dataset_dir']):
        params['unique_classes'] = 12
    elif ('2022' in params['dataset_dir']):
        params['unique_classes'] = 13
    for (key, value) in params.items():
        pass
    return params
def quat_to_rotmat(quaternions: Union[(torch.Tensor, numpy.ndarray)]) -> Union[(torch.Tensor, numpy.ndarray)]:
    """Convert quaternions with last dimension 4 to rotation matrices.

    Raises:
        ValueError: if the input's last dimension is not 4.
    """
    if (quaternions.shape[(- 1)] != 4):
        # Fixed: the message previously rendered a stray 'f' before the shape
        # ('shape f(...)') due to a duplicated f-string prefix character.
        raise ValueError(f'Invalid input quaternions shape {quaternions.shape}.')
    t = Compose([quaternion_to_matrix])
    return t(quaternions)
class LinearClassifier(nn.Module):
    """Three-layer MLP head with ReLU activations and a softmax output.

    Args:
        in_dim: input (and hidden) feature dimension.
        output_size: number of output classes.
        num_layers: kept for interface compatibility (not otherwise used here).
    """
    def __init__(self, in_dim, output_size, num_layers=1):
        super().__init__()
        self.num_layers = num_layers
        self.in_dim = in_dim
        self.linear1 = nn.Linear(in_dim, in_dim)
        self.linear2 = nn.Linear(in_dim, in_dim)
        self.linear3 = nn.Linear(in_dim, output_size)
    def forward(self, x):
        out = F.relu(self.linear1(x))
        # Fixed: previously fed `x` (not `out`) into linear2, silently
        # discarding the first layer's output.
        out = F.relu(self.linear2(out))
        out = self.linear3(out)
        return F.softmax(out, dim=1)
class ASM1684NameBreakpoint(Breakpoint):
    """Breakpoint that fires on known 1684 assembly opcode names."""
    type = '1684-asm'
    pattern = re.compile('^\\w+')

    def match_break(cls, text, tdb: TdbCmdBackend) -> bool:
        # Imported lazily so the opcode table is only loaded when matching.
        from ..target_1684.regdef import op_class_dic
        return text in op_class_dic
def changeCyclicTriangleC1(G, A, i):
delta = 0
for u in G.outIterator(i):
for v in G.outIterator(u):
if ((v != i) and G.isArc(v, i)):
delta += 1
return delta |
def test_pipeline_ducktyping():
    # A pipeline only exposes predict/transform/inverse_transform when its
    # steps support them; the bare attribute accesses below are deliberate
    # smoke checks (they raise AttributeError if the method is missing).
    pipeline = make_pipeline(Mult(5))
    pipeline.predict
    pipeline.transform
    pipeline.inverse_transform
    # Transformer-only pipeline: transform works, predict must not exist.
    pipeline = make_pipeline(Transf())
    assert (not hasattr(pipeline, 'predict'))
    pipeline.transform
    pipeline.inverse_transform
    # 'passthrough' acts as an identity transformer step.
    pipeline = make_pipeline('passthrough')
    assert (pipeline.steps[0] == ('passthrough', 'passthrough'))
    assert (not hasattr(pipeline, 'predict'))
    pipeline.transform
    pipeline.inverse_transform
    # One non-invertible step removes inverse_transform from the pipeline,
    # regardless of its position.
    pipeline = make_pipeline(Transf(), NoInvTransf())
    assert (not hasattr(pipeline, 'predict'))
    pipeline.transform
    assert (not hasattr(pipeline, 'inverse_transform'))
    pipeline = make_pipeline(NoInvTransf(), Transf())
    assert (not hasattr(pipeline, 'predict'))
    pipeline.transform
    assert (not hasattr(pipeline, 'inverse_transform'))
class ProxyAnchorLoss(WeightRegularizerMixin, BaseMetricLossFunction):
    """Proxy-anchor style metric-learning loss with one learnable proxy per class."""

    def __init__(self, num_classes, embedding_size, margin=0.1, alpha=32, **kwargs):
        super().__init__(**kwargs)
        # One proxy embedding per class, initialised by the configured init func.
        self.proxies = torch.nn.Parameter(torch.Tensor(num_classes, embedding_size))
        self.weight_init_func(self.proxies)
        self.num_classes = num_classes
        self.margin = margin
        self.alpha = alpha
        self.add_to_recordable_attributes(list_of_names=['num_classes', 'alpha', 'margin'], is_stat=False)

    def cast_types(self, dtype, device):
        # Keep proxies on the same device/dtype as the incoming embeddings.
        self.proxies.data = c_f.to_device(self.proxies.data, device=device, dtype=dtype)

    def compute_loss(self, embeddings, labels, indices_tuple):
        (dtype, device) = (embeddings.dtype, embeddings.device)
        self.cast_types(dtype, device)
        # NOTE(review): weights are shifted by -1 before entering the scaled
        # logsumexp -- presumably so the default miner weight of 1 contributes
        # 0; confirm against lmu.convert_to_weights.
        miner_weights = lmu.convert_to_weights(indices_tuple, labels, dtype=dtype).unsqueeze(1)
        miner_weights = (miner_weights - 1)
        cos = self.distance(embeddings, self.proxies)
        # pos_mask[i, c] == 1 iff sample i carries label c.
        pos_mask = torch.nn.functional.one_hot(labels, self.num_classes)
        neg_mask = (1 - pos_mask)
        # Indices of classes that actually appear in this batch.
        with_pos_proxies = torch.where((torch.sum(pos_mask, dim=0) != 0))[0]
        pos_exp = self.distance.margin(cos, self.margin)
        neg_exp = self.distance.margin((- self.margin), cos)
        # logsumexp over the batch dimension, restricted by each mask.
        pos_term = lmu.logsumexp(((self.alpha * pos_exp) + miner_weights), keep_mask=pos_mask.bool(), add_one=True, dim=0)
        neg_term = lmu.logsumexp(((self.alpha * neg_exp) + miner_weights), keep_mask=neg_mask.bool(), add_one=True, dim=0)
        loss_indices = c_f.torch_arange_from_size(self.proxies)
        # Positive term divided by the number of classes present in the batch,
        # negative term by the total class count (applied by DivisorReducer).
        loss_dict = {'pos_loss': {'losses': pos_term.squeeze(0), 'indices': loss_indices, 'reduction_type': 'element', 'divisor': len(with_pos_proxies)}, 'neg_loss': {'losses': neg_term.squeeze(0), 'indices': loss_indices, 'reduction_type': 'element', 'divisor': self.num_classes}}
        self.add_weight_regularization_to_loss_dict(loss_dict, self.proxies)
        return loss_dict

    def get_default_reducer(self):
        return DivisorReducer()

    def get_default_distance(self):
        return CosineSimilarity()

    def get_default_weight_init_func(self):
        return c_f.TorchInitWrapper(torch.nn.init.kaiming_normal_, mode='fan_out')

    def _sub_loss_names(self):
        return ['pos_loss', 'neg_loss']
class EnSpellCorrector():
    """Norvig-style English spelling corrector backed by a unigram frequency dict.

    Candidates are generated within edit distance 2 and ranked by frequency;
    a custom confusion dict (wrong -> right) takes precedence over generation.
    """

    def __init__(self, word_freq_dict: dict=None, custom_confusion_dict: dict=None, en_dict_path: str=None):
        """Load word frequencies either from `word_freq_dict` or a gzipped JSON file.

        Raises:
            ValueError: if both word_freq_dict and en_dict_path are given.
        """
        if (word_freq_dict and en_dict_path):
            raise ValueError('word_freq_dict and en_dict_path can not be set at the same time.')
        if (word_freq_dict is None):
            word_freq_dict = {}
        if (custom_confusion_dict is None):
            custom_confusion_dict = {}
        if ((not word_freq_dict) and (en_dict_path is None)):
            # No explicit data at all: fall back to the bundled dictionary.
            en_dict_path = default_en_dict_path
        self.word_freq_dict = word_freq_dict
        self.custom_confusion_dict = custom_confusion_dict
        if (en_dict_path and os.path.exists(en_dict_path)):
            with gzip.open(en_dict_path, 'rb') as f:
                all_word_freq_dict = json.loads(f.read())
            # Drop rare words to keep candidate generation clean.
            word_freq = {}
            for (k, v) in all_word_freq_dict.items():
                if (v > 400):
                    word_freq[k] = v
            self.word_freq_dict = word_freq
            logger.debug(('load en spell data: %s, size: %d' % (en_dict_path, len(self.word_freq_dict))))

    def edits1(self, word):
        """All strings within one edit (delete/transpose/replace/insert) of `word`.

        Fix: was defined without `self`, so every self.edits1(...) call raised
        TypeError.
        """
        letters = 'abcdefghijklmnopqrstuvwxyz'
        splits = [(word[:i], word[i:]) for i in range((len(word) + 1))]
        deletes = [(L + R[1:]) for (L, R) in splits if R]
        transposes = [(((L + R[1]) + R[0]) + R[2:]) for (L, R) in splits if (len(R) > 1)]
        replaces = [((L + c) + R[1:]) for (L, R) in splits if R for c in letters]
        inserts = [((L + c) + R) for (L, R) in splits for c in letters]
        return set((((deletes + transposes) + replaces) + inserts))

    def edits2(self, word):
        """All strings within two edits of `word` (lazy generator)."""
        return (e2 for e1 in self.edits1(word) for e2 in self.edits1(e1))

    def known(self, word_freq_dict):
        """Subset of the given words that appear in the frequency dictionary."""
        return set((w for w in word_freq_dict if (w in self.word_freq_dict)))

    def probability(self, word):
        """Unigram probability of `word` (0.0 for unknown words)."""
        N = sum(self.word_freq_dict.values())
        return (self.word_freq_dict.get(word, 0) / N)

    def candidates(self, word):
        """Known words at edit distance 0, else 1, else 2, else the word itself."""
        return (self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or {word})

    def correct_word(self, word):
        """Most probable candidate correction for a single word."""
        candi_prob = {i: self.probability(i) for i in self.candidates(word)}
        sort_candi_prob = sorted(candi_prob.items(), key=operator.itemgetter(1))
        return sort_candi_prob[(- 1)][0]

    def _get_custom_confusion_dict(self, path):
        """Load a whitespace-separated "wrong right" confusion file.

        Fix: was defined without `self`, so self._get_custom_confusion_dict(...)
        raised TypeError.
        """
        confusion = {}
        if (path and os.path.exists(path)):
            with open(path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line.startswith('#'):
                        continue
                    terms = line.split()
                    if (len(terms) < 2):
                        continue
                    wrong = terms[0]
                    right = terms[1]
                    confusion[wrong] = right
        return confusion

    def set_en_custom_confusion_dict(self, path):
        """Replace the confusion dict with one loaded from `path`."""
        self.custom_confusion_dict = self._get_custom_confusion_dict(path)
        logger.debug(('Loaded en spell confusion path: %s, size: %d' % (path, len(self.custom_confusion_dict))))

    def correct(self, sentence, include_symbol=True):
        """Correct every alphabetic token of `sentence`.

        Returns {'source': ..., 'target': ..., 'errors': [(wrong, right, idx), ...]}.
        """
        text_new = ''
        details = []
        blocks = split_text_into_sentences_by_symbol(sentence, include_symbol=include_symbol)
        for (w, idx) in blocks:
            # Only multi-letter alphabetic tokens are candidates for correction.
            if ((len(w) > 1) and is_alphabet_string(w)):
                if (w in self.custom_confusion_dict):
                    corrected_item = self.custom_confusion_dict[w]
                else:
                    corrected_item = self.correct_word(w)
                if (corrected_item != w):
                    begin_idx = idx
                    detail_word = (w, corrected_item, begin_idx)
                    details.append(detail_word)
                    w = corrected_item
            text_new += w
        details = sorted(details, key=operator.itemgetter(2))
        return {'source': sentence, 'target': text_new, 'errors': details}

    def correct_batch(self, sentences: List[str], **kwargs):
        """Correct a list of sentences; kwargs are forwarded to correct()."""
        return [self.correct(s, **kwargs) for s in sentences]
def test_unary():
    """Unary negation must carry over behavior and attrs by identity."""
    original = ak.Array([1, 2, 3], behavior={'foo': 'BAR'}, attrs={'hello': 'world'})
    negated = -original
    assert negated.attrs is original.attrs
    assert original.behavior is negated.behavior
def get_gpu_count():
    """Number of GPUs visible to torch (preferred) or TensorFlow, else 0."""
    if is_torch_available():
        import torch
        return torch.cuda.device_count()
    if is_tf_available():
        import tensorflow as tf
        return len(tf.config.list_physical_devices('GPU'))
    return 0
class EntryPoint(object):
    """Object/string representation of one "name = module:attrs [extras]" entry.

    NOTE(review): the classmethod decorators on parse/parse_group/parse_map/
    _parse_extras appear to have been stripped by the code generator; they are
    restored here because those methods take `cls` and are invoked on the
    class itself (see __repr__'s "EntryPoint.parse(...)" contract).
    """

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if (not MODULE(module_name)):
            raise ValueError('Invalid module name', module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        self.extras = tuple(extras)
        self.dist = dist

    def __str__(self):
        s = ('%s = %s' % (self.name, self.module_name))
        if self.attrs:
            s += (':' + '.'.join(self.attrs))
        if self.extras:
            s += (' [%s]' % ','.join(self.extras))
        return s

    def __repr__(self):
        return ('EntryPoint.parse(%r)' % str(self))

    def load(self, require=True, *args, **kwargs):
        """Resolve the entry point; passing require/args/kwargs is deprecated."""
        if ((not require) or args or kwargs):
            warnings.warn('Parameters to load are deprecated. Call .resolve and .require separately.', DeprecationWarning, stacklevel=2)
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """Import the module and walk the attribute path to the target object."""
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        """Ensure the distribution's requirements (for our extras) are on the path."""
        if (self.extras and (not self.dist)):
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))

    # name = module[:attr.attr]* [extra,extra]
    pattern = re.compile('\\s*(?P<name>.+?)\\s*=\\s*(?P<module>[\\w.]+)\\s*(:\\s*(?P<attr>[\\w.]+))?\\s*(?P<extras>\\[.*\\])?\\s*$')

    @classmethod
    def parse(cls, src, dist=None):
        """Parse one entry-point line into an EntryPoint instance."""
        m = cls.pattern.match(src)
        if (not m):
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = (res['attr'].split('.') if res['attr'] else ())
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        """Parse a "[a,b]" extras suffix into a tuple of extra names."""
        if (not extras_spec):
            return ()
        req = Requirement.parse(('x' + extras_spec))
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse one named group of entry-point lines into {name: EntryPoint}."""
        if (not MODULE(group)):
            raise ValueError('Invalid group name', group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if (ep.name in this):
                raise ValueError('Duplicate entry point', group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a dict or INI-style text of {group: entry point lines}."""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for (group, lines) in data:
            if (group is None):
                if (not lines):
                    continue
                raise ValueError('Entry points must be listed in groups')
            group = group.strip()
            if (group in maps):
                raise ValueError('Duplicate group name', group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def evaluate(ref_file, trans_file, metric, subword_option=None):
    """Score `trans_file` against `ref_file` with the named metric.

    Supported metrics (case-insensitive): bleu, rouge, accuracy, word_accuracy.

    Raises:
        ValueError: for an unrecognized metric name.
    """
    key = metric.lower()
    if key == 'bleu':
        return _bleu(ref_file, trans_file, subword_option=subword_option)
    if key == 'rouge':
        return _rouge(ref_file, trans_file, subword_option=subword_option)
    if key == 'accuracy':
        return _accuracy(ref_file, trans_file)
    if key == 'word_accuracy':
        return _word_accuracy(ref_file, trans_file)
    raise ValueError(('Unknown metric %s' % metric))
def advect(vf: ti.types.ndarray(ndim=2), qf: ti.types.ndarray(ndim=2), new_qf: ti.types.ndarray(ndim=2)):
    # Semi-Lagrangian advection: transport quantity field qf along velocity
    # field vf into new_qf.
    # NOTE(review): the ti.types.ndarray annotations suggest a Taichi kernel;
    # the @ti.kernel decorator appears to have been stripped -- confirm.
    for (i, j) in vf:
        # Cell-center position, traced backwards through the flow over dt.
        p = (ti.Vector([i, j]) + 0.5)
        p = backtrace(vf, p, dt)
        # Bilinearly sample the old field at the traced position; dye_decay
        # attenuates the advected quantity each step.
        new_qf[(i, j)] = (bilerp(qf, p) * dye_decay)
def ClientStateToString(state):
    """Human-readable name for a ClientState value ('UNKNOWN' if unmatched)."""
    labels = (
        (ClientState.idle, 'IDLE'),
        (ClientState.training, 'TRAINING'),
        (ClientState.validating, 'VALIDATING'),
    )
    for candidate, label in labels:
        if state == candidate:
            return label
    return 'UNKNOWN'
def create_dir(dir_path, cover=False):
    """Create `dir_path` if missing; with cover=True, replace any existing dir.

    Fix: the previous implementation used os.removedirs, which fails on a
    non-empty directory and also deletes empty parent directories; rmtree
    matches the intended "overwrite" semantics.
    """
    import shutil
    if cover and os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
def _create_luke_config(bert_config, entity_vocab_size, entity_emb_size):
    """Build a LukeConfig from a BERT config plus entity-vocabulary settings."""
    bert_kwargs = dict(bert_config.to_dict())
    return LukeConfig(entity_vocab_size=entity_vocab_size, bert_model_name=BERT_MODEL_NAME, entity_emb_size=entity_emb_size, **bert_kwargs)
class SubsetRandomSampler(Sampler):
    """Sample elements uniformly at random, without replacement, from a fixed
    list of indices."""

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        permutation = torch.randperm(len(self.indices))
        return (self.indices[position] for position in permutation)

    def __len__(self):
        return len(self.indices)
class GraphSearches(base_graph_filter.BaseGraphFilter, ABC):
    """Filtering helpers over a graph: by node, by edge, and by walks matching
    an ordered list of node matchers."""

    def _node_filter(self, node_matcher: node_matcher.BaseNodeMatcher) -> list:
        # All nodes accepted by the matcher.
        return [n for n in self.nodes if node_matcher.apply(n)]

    def _edge_filter(self, edge_matcher: edge_matcher.BaseEdgeMatcher) -> list:
        # Matching edges whose source node has at least one outgoing edge.
        edge_list = []
        for e in self.edges:
            if (edge_matcher.apply(e) and len(self.edges(e[0]))):
                edge_list.append(e)
        return edge_list

    def _walk_filter(self, walk_matcher: WalkMatcherList) -> List[BaseNode]:
        """Collect walks where the i-th node satisfies the i-th matcher.

        Walks start at nodes with exactly one successor and are extended
        recursively; only complete walks (length equal to the matcher list)
        are returned, flattened into a single list of nodes.
        """
        def walk_match(node: BaseNode, node_list: List[BaseNode], index: int, node_matcher_list: list) -> Any:
            # Accept the current node, then try to extend the walk one step.
            if node_matcher_list[index].apply(node):
                node_list.append(node)
                if ((index + 1) == len(node_matcher_list)):
                    return [node_list]
                # Extend only through unambiguous successors, except at the
                # final step where any successor may complete the walk.
                result_list = [walk_match(nn, node_list.copy(), (index + 1), node_matcher_list) for nn in self.get_next_nodes(node) if ((len(self.get_next_nodes(nn)) == 1) or ((index + 2) == len(node_matcher_list)))]
                # Flatten and keep only fully-matched walks.
                result_filter = [r for r_list in result_list if (r_list is not None) for r in r_list if ((r is not None) and (len(r) == len(node_matcher_list)))]
                if (len(result_filter) == 1):
                    return result_filter
                elif (len(result_filter) == 0):
                    return None
                else:
                    return result_filter
            else:
                return None
        matcher_list = (walk_matcher.matcher_list if isinstance(walk_matcher, WalkMatcherList) else [walk_matcher])
        result = []
        result_match_list = [walk_match(n, [], 0, matcher_list) for n in self.nodes if (len(self.get_next_nodes(n)) == 1)]
        result.extend([r for r_list in result_match_list if (r_list is not None) for r in r_list])
        return result
class RandomCrop(object):
    """Crop an image at a random location to the configured size."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img):
        crop_region = T.RandomCrop.get_params(img, self.size)
        return crop(img, crop_region)
def register_Ns3MmWaveBeamforming_methods(root_module, cls):
    """Register pybindgen constructors/methods for ns3::MmWaveBeamforming.

    Generated binding code: each add_method mirrors the C++ signature, so the
    order and parameter strings must stay exactly as emitted.
    """
    cls.add_constructor([param('ns3::MmWaveBeamforming const &', 'arg0')])
    cls.add_constructor([param('uint32_t', 'enbAntenna'), param('uint32_t', 'ueAntenna')])
    cls.add_method('CalcRxPowerSpectralDensity', 'ns3::Ptr< ns3::SpectrumValue >', [param('ns3::Ptr< ns3::SpectrumValue const >', 'txPsd'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True)
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetConfigurationParameters', 'ns3::Ptr< ns3::MmWavePhyMacCommon >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initial', 'void', [param('ns3::NetDeviceContainer', 'ueDevices'), param('ns3::NetDeviceContainer', 'enbDevices')])
    cls.add_method('LoadFile', 'void', [])
    cls.add_method('SetBeamformingVector', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'ueDevice'), param('ns3::Ptr< ns3::NetDevice >', 'enbDevice')])
    cls.add_method('SetConfigurationParameters', 'void', [param('ns3::Ptr< ns3::MmWavePhyMacCommon >', 'ptrConfig')])
    cls.add_method('UpdateMatrices', 'void', [param('bool', 'update')])
    cls.add_method('DoCalcRxPowerSpectralDensity', 'ns3::Ptr< ns3::SpectrumValue >', [param('ns3::Ptr< ns3::SpectrumValue const >', 'txPsd'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, visibility='private', is_virtual=True)
    return
def _create_dataset(uri, batch_size, shuffle, no_image_normalization, cache_dir, overwrite_cache, create_cache_explicitly, prepare_data_iterator, dataset_index):
    """Assemble a dataset holder whose .data_iterator lazily yields batches.

    Three paths: (a) explicit cache creation (rank-coordinated under MPI),
    then iterate from the cache; (b) iterate the CSV directly, building the
    cache implicitly (single process only); (c) iterate an existing cache
    directory. With prepare_data_iterator=False, data_iterator is None.
    """
    class Dataset():
        pass
    dataset = Dataset()
    dataset.uri = uri
    dataset.cache_dir = cache_dir
    dataset.normalize = (not no_image_normalization)
    comm = current_communicator()
    # Seed with the dataset index so each dataset shuffles reproducibly.
    rng = numpy.random.RandomState(dataset_index)
    # In-memory caching is only enabled without a multi-process communicator.
    use_memory_cache = ((comm.size == 1) if comm else True)
    if prepare_data_iterator:
        if (cache_dir == ''):
            cache_dir = None
        if (cache_dir and (create_cache_explicitly or comm)):
            cache_index = os.path.join(cache_dir, 'cache_index.csv')
            if ((not os.path.exists(cache_index)) or overwrite_cache):
                create_cache_flag = False
                if single_or_rankzero():
                    create_cache_flag = True
                    os.makedirs(cache_dir, exist_ok=True)
                if comm:
                    comm.barrier()
                    # The first local rank on each non-zero node creates its
                    # own cache copy if the directory is not shared.
                    if ((comm.local_rank == 0) and (comm.rank != 0) and (not os.path.exists(cache_dir))):
                        os.makedirs(cache_dir, exist_ok=True)
                        create_cache_flag = True
                if create_cache_flag:
                    # Avoid logging full s3 URIs (bucket/credential prefixes).
                    log_uri = (uri.split('/', 3)[(- 1)] if uri.startswith('s3') else uri)
                    if comm:
                        logger.log(99, f'Creating cache data for "{log_uri}" on node rank:{comm.rank} local_rank:{comm.local_rank}')
                    else:
                        logger.log(99, f'Creating cache data for "{log_uri}"')
                    if os.path.exists(uri):
                        cc = CreateCache(uri, rng=rng, shuffle=shuffle)
                        cc.create(cache_dir, normalize=False)
                    else:
                        # NOTE(review): constructing this iterator appears to
                        # populate cache_dir as a side effect -- confirm.
                        with data_iterator_csv_dataset(uri, batch_size, shuffle, rng=rng, normalize=False, cache_dir=cache_dir, with_memory_cache=False) as di:
                            pass
                if comm:
                    comm.barrier()
            # Fresh RNG so iteration does not depend on cache-creation draws.
            rng = numpy.random.RandomState(dataset_index)
            dataset.data_iterator = (lambda : data_iterator_cache(cache_dir, batch_size, shuffle, rng=rng, normalize=dataset.normalize, with_memory_cache=use_memory_cache))
        elif ((not cache_dir) or overwrite_cache or (not os.path.exists(cache_dir)) or (len(os.listdir(cache_dir)) == 0)):
            if comm:
                logger.critical('Implicit cache creation does not support with MPI')
                import sys
                sys.exit((- 1))
            else:
                if cache_dir:
                    try:
                        os.makedirs(cache_dir)
                    except OSError:
                        # Already exists; the iterator will reuse it.
                        pass
                dataset.data_iterator = (lambda : data_iterator_csv_dataset(uri, batch_size, shuffle, rng=rng, normalize=dataset.normalize, cache_dir=cache_dir))
        else:
            dataset.data_iterator = (lambda : data_iterator_cache(cache_dir, batch_size, shuffle, rng=rng, normalize=dataset.normalize, with_memory_cache=use_memory_cache))
    else:
        dataset.data_iterator = None
    return dataset
def compile_cuda_module(host_args):
    """Compile the CUDA extension sources into a static library under build/.

    Returns the (directory, library base name) pair of the produced artifact.
    """
    libname = ('_cext_gpu.lib' if (sys.platform == 'win32') else 'lib_cext_gpu.a')
    lib_out = ('build/' + libname)
    os.makedirs('build/', exist_ok=True)
    (_, nvcc) = get_cuda_path()
    print('NVCC ==> ', nvcc)
    arch_flags = '-arch=sm_37 -gencode=arch=compute_37,code=sm_37 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_75,code=compute_75'
    nvcc_command = f"-allow-unsupported-compiler shap/cext/_cext_gpu.cu -lib -o {lib_out} -Xcompiler {','.join(host_args)} --include-path {sysconfig.get_path('include')} --std c++14 --expt-extended-lambda --expt-relaxed-constexpr {arch_flags}"
    print('Compiling cuda extension, calling nvcc with arguments:')
    argv = ([nvcc] + nvcc_command.split(' '))
    print(argv)
    subprocess.run(argv, check=True)
    return ('build', '_cext_gpu')
def visualize_views(views, highlight_silhouette=False, show=True, save_path: Path=None):
    """Display and/or save a mosaic of view images, optionally with the
    silhouette mask brightened into the color image."""
    if highlight_silhouette:
        images = []
        for view in views:
            highlighted = ((view.mask + 1.0) * view.color).clamp_(min=0.0, max=1.0)
            images.append(highlighted.cpu())
    else:
        images = [view.color.cpu() for view in views]
    (fig, axs) = create_mosaic_figure(images)
    if show:
        plt.show()
    if save_path:
        plt.savefig(save_path)
    if not show:
        # Showing hands the figure to the GUI loop; otherwise free it here.
        plt.close(fig)
def cast_with_native_amp(func: Callable, mixed_precision: Optional[str]=None) -> Callable:
    """Wrap `func` in a torch autocast context for the requested precision mode.

    Unknown modes fall back to the unwrapped (fp32) function with a warning;
    outputs of the wrapped function are converted back to fp32.
    """
    if (mixed_precision not in ('fp16', 'bf16')):
        logger.warning(f'Unknown mixed precision mode: {mixed_precision}, falling back to fp32.')
        return func
    if ((mixed_precision == 'fp16') and is_torch_version('>=', '1.10')):
        output_func = torch.cuda.amp.autocast(dtype=torch.float16)(func)
    else:
        # NOTE(review): 'fp16' on torch < 1.10 also lands here and silently
        # uses bfloat16 autocast instead of float16 -- confirm this is intended.
        device_type = ('cuda' if torch.cuda.is_available() else 'cpu')
        output_func = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(func)
    output_func = convert_outputs_to_fp32(output_func)
    return output_func
class SelfOutput(nn.Module):
    """BERT-style residual output block: dense -> dropout -> add & LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project, regularize, then layer-normalize the residual sum."""
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)
class VmfSQVAETrainer(TrainerBase):
    """Trainer for a vMF-SQ-VAE on semantic segmentation.

    Tracks per-split loss/accuracy/perplexity (plus mIoU at eval time).
    """

    def __init__(self, cfgs, flgs, train_loader, val_loader, test_loader):
        super(VmfSQVAETrainer, self).__init__(cfgs, flgs, train_loader, val_loader, test_loader)
        self.metric_semseg = SegmentationMetric(cfgs.network.num_class)
        # History of metrics per split, appended to elsewhere for plotting.
        self.plots = {'loss_train': [], 'acc_train': [], 'perplexity_train': [], 'loss_val': [], 'acc_val': [], 'perplexity_val': [], 'miou_val': [], 'loss_test': [], 'acc_test': [], 'perplexity_test': [], 'miou_test': []}

    def _train(self, epoch):
        """Run one training epoch; returns mean loss/acc/perplexity."""
        train_loss = []
        acc = []
        perplexity = []
        self.model.train()
        start_time = time.time()
        for (batch_idx, (x, y)) in enumerate(self.train_loader):
            y = self.preprocess(x, y)
            if self.flgs.decay:
                # Anneal the quantizer temperature by global step (1-based).
                step = ((((epoch - 1) * len(self.train_loader)) + batch_idx) + 1)
                temperature_current = self._set_temperature(step, self.cfgs.quantization.temperature)
                self.model.module.quantizer.set_temperature(temperature_current)
            # Stochastic quantization during training (flg_quant_det=False).
            (_, _, loss) = self.model(y, flg_train=True, flg_quant_det=False)
            self.optimizer.zero_grad()
            loss['all'].backward()
            self.optimizer.step()
            train_loss.append(loss['all'].item())
            acc.append(loss['acc'].item())
            perplexity.append(loss['perplexity'].item())
        result = {}
        result['loss'] = np.asarray(train_loss).mean(0)
        result['acc'] = np.array(acc).mean(0)
        result['perplexity'] = np.array(perplexity).mean(0)
        self.print_loss(result, 'train', (time.time() - start_time))
        return result

    def _test(self, mode='val'):
        """Evaluate with stochastic then deterministic quantization; the LR
        scheduler steps on the deterministic-pass loss."""
        _ = self._test_sub(False)
        result = self._test_sub(True, mode)
        self.scheduler.step(result['loss'])
        return result

    def _test_sub(self, flg_quant_det, mode='val'):
        """One evaluation pass over the val/test loader (no gradients)."""
        test_loss = []
        acc = []
        perplexity = []
        self.metric_semseg.reset()
        if (mode == 'val'):
            data_loader = self.val_loader
        elif (mode == 'test'):
            data_loader = self.test_loader
        start_time = time.time()
        with torch.no_grad():
            for (x, y) in data_loader:
                y = self.preprocess(x, y)
                (x_reconst, _, loss) = self.model(y, flg_quant_det=flg_quant_det)
                self.metric_semseg.update(x_reconst, y)
                (pixAcc, mIoU, _) = self.metric_semseg.get()
                test_loss.append(loss['all'].item())
                acc.append(loss['acc'].item())
                perplexity.append(loss['perplexity'].item())
        # Final accumulated metrics over the whole split.
        (pixAcc, mIoU, _) = self.metric_semseg.get()
        result = {}
        result['loss'] = np.asarray(test_loss).mean(0)
        result['acc'] = np.array(acc).mean(0)
        result['miou'] = mIoU
        result['perplexity'] = np.array(perplexity).mean(0)
        self.print_loss(result, mode, (time.time() - start_time))
        myprint(('%15s' % 'PixAcc: {:5.4f} mIoU: {:5.4f}'.format(pixAcc, mIoU)), self.flgs.noprint)
        return result

    def generate_reconstructions(self, filename, nrows=4, ncols=8):
        # Discrete (categorical) reconstruction grid for segmentation maps.
        self._generate_reconstructions_discrete(filename, nrows=nrows, ncols=ncols)

    def print_loss(self, result, mode, time_interval):
        myprint((mode.capitalize().ljust(16) + 'Loss: {:5.4f}, ACC: {:5.4f}, Perplexity: {:5.4f}, Time: {:5.3f} sec'.format(result['loss'], result['acc'], result['perplexity'], time_interval)), self.flgs.noprint)
def fgsd(graph):
    """Compute the FGSD embedding of a single graph (seeded for reproducibility)."""
    model = FGSD()
    model._set_seed()
    model._check_graphs([graph])
    return model._calculate_fgsd(graph)
def parse_search_arg(search):
    """Expand a grid-search spec like "lr=0.1:0.01 bs=16:32" into CLI flags.

    Returns (matrix, entry_names) where matrix is the cartesian product of
    per-key '--key value' flag lists and entry_names the parsed key names.
    """
    groups = search.split()
    # split('=', 1): only the first '=' separates key from values, so values
    # containing '=' no longer crash the unpacking.
    entries = {key: values for (key, values) in (group.split('=', 1) for group in groups)}
    entry_names = list(entries.keys())
    sets = [[f'--{key} {value}' for value in values.split(':')] for (key, values) in entries.items()]
    matrix = [list(combo) for combo in itertools.product(*sets)]
    return (matrix, entry_names)
def test_mortality(tmp_path: pathlib.Path):
    """InpatientMortalityLabeler: an admission is positive iff an outcome code
    occurs during the stay; predictions are made at end-of-day of admission."""
    ontology = DummyMortalityOntology()
    labeler = InpatientMortalityLabeler(ontology)
    # Run the same scenario once per outcome code variant.
    for outcome_code in ['SNOMED/', 'DEATH_CHILD']:
        # (event, expected label); 'skip' marks events excluded from labeling.
        events_with_labels: EventsWithLabels = [(event((2000, 1, 1), 'Visit/IP', end=datetime.datetime(2000, 1, 1), omop_table='visit_occurrence'), 'skip'), (event((2001, 1, 1), 'Visit/IP', end=datetime.datetime(2001, 1, 11), omop_table='visit_occurrence'), False), (event((2002, 1, 1), 'Visit/IP', end=datetime.datetime(2002, 1, 11), omop_table='visit_occurrence'), True), (event((2002, 1, 10), outcome_code), 'skip'), (event((2003, 1, 1), 'Visit/IP', end=datetime.datetime(2003, 1, 11), omop_table='visit_occurrence'), 'skip')]
        assert (labeler.outcome_codes == {'Condition Type/OMOP4822053', 'Death Type/OMOP generated', 'DEATH_CHILD', 'SNOMED/'})
        # Prediction times: end of the admission-start day, for labeled events.
        true_prediction_times: List[datetime.datetime] = [move_datetime_to_end_of_day(x[0].start) for x in events_with_labels if (isinstance(x[1], bool) or (x[1] is None))]
        run_test_for_labeler(labeler, events_with_labels, true_prediction_times=true_prediction_times, help_text='test_mortality')
def test_totalvi_auto_transfer_mudata():
    """A fitted TOTALVI setup should auto-transfer to a fresh query MuData."""
    rna = synthetic_iid()
    protein = synthetic_iid(n_genes=50)
    training_mdata = MuData({'rna': rna, 'protein': protein})
    TOTALVI.setup_mudata(training_mdata, batch_key='batch', modalities={'rna_layer': 'rna', 'batch_key': 'rna', 'protein_layer': 'protein'})
    model = TOTALVI(training_mdata)
    # A brand-new MuData must be usable without an explicit setup call.
    query_rna = synthetic_iid()
    query_protein = synthetic_iid(n_genes=50)
    query_mdata = MuData({'rna': query_rna, 'protein': query_protein})
    model.get_elbo(query_mdata)
def transcribe(model, device, wav):
    """CTC transcription of a 16 kHz waveform with a wav2vec2-style model dict."""
    tokenizer = model['tokenizer']
    inputs = tokenizer(wav, sampling_rate=16000, return_tensors='pt', padding='longest')
    logits = model['model'](inputs.input_values.to(device), attention_mask=inputs.attention_mask.to(device)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return tokenizer.batch_decode(predicted_ids)[0]
# NOTE(review): the bare `_config` below looks like a stripped decorator
# reference (e.g. a sacred-style @_config); the locals in the function are
# config values harvested by that framework -- confirm against the original.
_config
def task_finetune_nlvr2_randaug():
    # Fine-tuning configuration for NLVR2 with RandAugment image transforms.
    exp_name = 'finetune_nlvr2_randaug'
    datasets = ['nlvr2']
    train_transform_keys = ['pixelbert_randaug']
    loss_names = _loss_names({'nlvr2': 1})
    batch_size = 128
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1  # presumably a fraction of total steps -- verify
    draw_false_image = 0
    learning_rate = 0.0001
def show_seg_result_meshlab(data, result, out_dir, palette, show=False, snapshot=False):
    """Visualize a 3D semantic-segmentation result and write it under out_dir.

    Returns the base file name (derived from the point-cloud file) used for
    the output.
    """
    points = data['points'][0][0].cpu().numpy()
    pts_filename = data['img_metas'][0][0]['pts_filename']
    file_name = osp.split(pts_filename)[(- 1)].split('.')[0]
    pred_seg = result[0]['semantic_mask'].numpy()
    if (palette is None):
        # Random RGB color per predicted class id.
        max_idx = pred_seg.max()
        palette = np.random.randint(0, 256, size=((max_idx + 1), 3))
    # Fix: np.int was removed in NumPy 1.24 (it was an alias of builtin int).
    palette = np.array(palette).astype(int)
    show_seg_result(points, None, pred_seg, out_dir, file_name, palette=palette, show=show, snapshot=snapshot)
    return file_name
def fallback_cmd_s3_sync(src_path, dest_path):
    """Shell command string for an `aws s3 sync` that ignores symlinks."""
    return 'aws s3 sync --no-follow-symlinks {} {}'.format(src_path, dest_path)
class CosWarmupAdamW(torch.optim.AdamW):
    """AdamW with linear warmup followed by cosine decay of the learning rate.

    During warmup the lr ramps linearly from `warmup_ratio` to the base lr;
    afterwards it follows a half-cosine from base lr down to 0 at `max_iter`.
    `power` is accepted for interface compatibility but unused.
    """

    def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
        super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-08)
        self.global_step = 0
        # Fix: np.float was removed in NumPy 1.24; the builtin float is the
        # drop-in replacement (np.float was an alias of it).
        self.warmup_iter = float(warmup_iter)
        self.warmup_ratio = warmup_ratio
        self.max_iter = float(max_iter)
        self.power = power
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        if (self.global_step < self.warmup_iter):
            # Linear ramp: warmup_ratio -> base lr.
            lr_mult = (self.global_step / self.warmup_iter)
            lr_add = ((1 - (self.global_step / self.warmup_iter)) * self.warmup_ratio)
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = ((self.__init_lr[i] * lr_mult) + lr_add)
        elif (self.global_step < self.max_iter):
            # Cosine decay: base lr -> 0 over the remaining iterations.
            lr_mult = ((np.cos((((self.global_step - self.warmup_iter) / (self.max_iter - self.warmup_iter)) * np.pi)) * 0.5) + 0.5)
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = (self.__init_lr[i] * lr_mult)
        super().step(closure)
        self.global_step += 1
class BufferDict(dict):
    """Dict of (possibly nested) array buffers with batched index/update helpers."""

    def capacity(self) -> int:
        """Largest leading-dimension size among all leaf buffers."""
        return max(leaf.shape[0] for (_, _, leaf) in iterate_recursively(self))

    def index(self, indices):
        """Return a new BufferDict with every leaf buffer indexed by `indices`."""
        return self.index_func(self, indices)

    def index_func(self, x, indices):
        if not isinstance(x, (dict, BufferDict)):
            # Leaf buffer: plain fancy/slice indexing.
            return x[indices]
        selected = BufferDict()
        for (key, value) in x.items():
            selected[key] = self.index_func(value, indices)
        return selected

    def set_data(self, index, new_data):
        """Write `new_data` (nested dicts of arrays/tensors) at `index` in place."""
        return self.set_data_func(self, index, new_data)

    def set_data_func(self, x, index, new_data):
        if isinstance(new_data, (dict, BufferDict)):
            for (sub_key, sub_value) in new_data.items():
                self.set_data_func(x[sub_key], index, sub_value)
            return
        if isinstance(new_data, torch.Tensor):
            payload = new_data.cpu().numpy()
        elif isinstance(new_data, np.ndarray):
            payload = new_data
        else:
            raise TypeError(f'Unexpected type for new insert data: {type(new_data)}, expected is np.ndarray')
        # Copy so later mutation of the source cannot alias into the buffer.
        x[index] = payload.copy()
def test_packages(packages, only_failures=False):
    """Attempt to load each GAP package and return a status table."""
    rows = [['Status', 'Package', 'GAP Output']]
    for pkgdir in packages:
        # Package name is the directory prefix before the version suffix.
        pkg = pkgdir.split('-')[0]
        # Silence InfoWarning during the load, restoring the level afterwards.
        saved_level = libgap.InfoLevel(libgap.InfoWarning)
        libgap.SetInfoLevel(libgap.InfoWarning, 0)
        try:
            output = libgap.LoadPackage(pkg)
        finally:
            libgap.SetInfoLevel(libgap.InfoWarning, saved_level)
        loaded_ok = bool(output)
        if loaded_ok and only_failures:
            continue
        rows.append(['' if loaded_ok else 'Failure', pkg, str(output)])
    from sage.misc.table import table
    return table(rows, header_row=True)
def _output_csv(file, results):
file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n')
for measurement in results:
metadata = measurement.metadata
(device, dim, shape, name, numel, contiguous) = (metadata['device'], metadata['dim'], metadata['shape'], metadata['name'], metadata['numel'], metadata['is_contiguous'])
if isinstance(dim, Iterable):
dim_str = '-'.join((str(d) for d in dim))
else:
dim_str = str(dim)
shape_str = 'x'.join((str(s) for s in shape))
print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, (measurement.mean * 1000000.0), (measurement.median * 1000000.0), (measurement.iqr * 1000000.0), sep=',', file=file) |
def _ffc(content, equality=False):
    # Driver for fixed-content generation: sets up the word/state arrays and
    # delegates to _fast_fixed_content.
    e = list(content)  # remaining multiplicity per symbol value
    # Current word, length sum(e), initialised to the largest symbol value.
    a = ([(len(e) - 1)] * sum(e))
    r = ([0] * sum(e))  # auxiliary per-position state used by the recursion
    # Fix the first position to symbol 0 and consume one unit of its supply.
    a[0] = 0
    e[0] -= 1
    k = len(e)
    rng_k = list(range(k))
    rng_k.reverse()
    # Symbols with remaining supply, linked in descending order; symbol 0 is
    # hidden immediately if its supply is already exhausted.
    dll = DoublyLinkedList(rng_k)
    if (not e[0]):
        dll.hide(0)
    (yield from _fast_fixed_content(a, e, 2, 1, k, r, 2, dll, equality=equality))
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.0):
    """Run a stack of RNN cells over `sequence`, one token at a time.

    Returns ((cell_states, hidden_states), outputs) where `outputs` holds one
    entry per token.

    NOTE(review): an empty `sequence` leaves cell_states/hidden_states unbound
    and the return raises NameError -- confirm callers guarantee non-empty
    input.
    """
    batch_size = 1
    layer_states = []
    for rnn in rnns:
        # Hidden size inferred from the recurrent weight matrix shape.
        hidden_size = rnn.weight_hh.size()[1]
        # Zero-initialise (h, c) per layer, on GPU if the cell's weights are.
        if rnn.weight_hh.is_cuda:
            h_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
            c_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
        else:
            h_0 = torch.zeros(batch_size, hidden_size)
            c_0 = torch.zeros(batch_size, hidden_size)
        layer_states.append((h_0, c_0))
    outputs = []
    for token in sequence:
        rnn_input = embedder(token)
        ((cell_states, hidden_states), output, layer_states) = forward_one_multilayer(rnns, rnn_input, layer_states, dropout_amount)
        outputs.append(output)
    return ((cell_states, hidden_states), outputs)
def get_data_parallel_group():
    """Return the data-parallel process group (megatron's mpu group if enabled)."""
    global _USE_MEGATRON
    if not _USE_MEGATRON:
        return get_global_group()
    from fairseq.model_parallel.megatron import mpu
    return mpu.get_data_parallel_group()
class PadCollate():
    """Collate function that pads variable-length first components to a common
    length along `dim` and appends a float mask of valid positions."""

    def __init__(self, dim=0):
        # Dimension along which samples vary in length.
        self.dim = dim

    def pad_collate(self, batch):
        """Pad, merge, and mask a batch of tuples whose first element is a
        variable-length tensor; returns (merged components..., masks)."""
        study_lens = list(map((lambda x: x[0].shape[self.dim]), batch))
        max_len = max(study_lens)
        num_components = max((len(x) for x in batch))
        batch = [((pad_tensor(x[0], pad=max_len, dim=self.dim),) + tuple(x[1:])) for x in batch]
        batch = tuple((self._merge(batch, component_idx=i) for i in range(num_components)))
        # 1 marks real entries, 0 marks padding.
        masks = [(([1] * sl) + ([0] * (max_len - sl))) for sl in study_lens]
        masks = torch.tensor(masks, dtype=torch.float32)
        return (batch + (masks,))

    def __call__(self, batch):
        return self.pad_collate(batch)

    def _merge(self, batch, component_idx):
        """Merge one tuple component across the batch (stack tensors, group dicts).

        Fix: was defined without `self`, so self._merge(batch, component_idx=i)
        raised TypeError (instance bound into `batch`, duplicate component_idx).
        """
        components = [x[component_idx] for x in batch]
        assert (len(components) > 0), 'Error in pad_collate: Cannot merge a batch of size 0'
        first_component = components[0]
        if isinstance(first_component, dict):
            merged_components = {k: [d[k] for d in components] for k in first_component}
        elif isinstance(first_component, torch.Tensor):
            merged_components = torch.stack(components, dim=0)
        else:
            raise ValueError('Unexpected type in PadCollate._merge: {}'.format(type(components[0])))
        return merged_components
class Node(object):
    """Parse-tree node: label/phrase plus parent/child links and span indices."""

    def __init__(self):
        # Tree-structure flags and links.
        self.root = False        # True only for the tree root
        self.terminal = False    # True for leaf (token) nodes
        self.children = []       # child Node list (fresh per instance)
        self.parent = None       # parent Node, None at the root
        self.parent_idx = None   # index of this node within its parent
        # Content and span.
        self.label = None        # constituent label
        self.phrase = ''         # covered surface text
        self.start_idx = 0       # span start index
        self.end_idx = 0         # span end index
def register_Ns3MinMaxAvgTotalCalculator__Unsigned_int_methods(root_module, cls):
    """Register pybindgen bindings for ns3::MinMaxAvgTotalCalculator<unsigned int>.

    Generated binding code: parameter strings mirror the C++ signatures and
    must stay exactly as emitted.
    """
    cls.add_constructor([param('ns3::MinMaxAvgTotalCalculator< unsigned int > const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')], is_const=True, is_virtual=True)
    cls.add_method('Reset', 'void', [])
    cls.add_method('Update', 'void', [param('unsigned int const', 'i')])
    cls.add_method('getCount', 'long int', [], is_const=True, is_virtual=True)
    cls.add_method('getMax', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getMean', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getMin', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getSqrSum', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getStddev', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getSum', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getVariance', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
_properties
class BufferTiling(transformation.SingleStateTransformation):
    """Tile two maps that communicate through transient buffer arrays.

    Matches the pattern ``map1_exit -> array -> map2_entry`` and tiles both
    maps so that each tile of the first map produces exactly the data that
    the corresponding tile of the second map consumes; the tiled maps are
    then fused over the (now tile-sized) intermediate buffers.
    """

    map1_exit = transformation.PatternNode(nodes.MapExit)
    array = transformation.PatternNode(nodes.AccessNode)
    map2_entry = transformation.PatternNode(nodes.MapEntry)

    tile_sizes = ShapeProperty(dtype=tuple, default=(128, 128, 128), desc='Tile size per dimension')

    # NOTE(review): the @classmethod decorator appears to have been lost in
    # transit (cf. the stray '_properties' remnant above this class); the
    # pattern-expression hook is conventionally a classmethod, and restoring
    # it keeps both `Cls.expressions()` and `inst.expressions()` working.
    @classmethod
    def expressions(cls):
        # Subgraph pattern: first map's exit, intermediate buffer, second map's entry.
        return [sdutil.node_path_graph(cls.map1_exit, cls.array, cls.map2_entry)]

    def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
        """Return True only if every node between the two maps is a transient
        access node written solely by map1 and read solely by map2, map1
        produces at least what map2 consumes, and each buffer occurs only
        once in the state."""
        map1_exit = self.map1_exit
        map2_entry = self.map2_entry
        for buf in graph.all_nodes_between(map1_exit, map2_entry):
            # Every intermediate node must be a transient buffer...
            if (not isinstance(buf, nodes.AccessNode)):
                return False
            if (not sdfg.arrays[buf.data].transient):
                return False
            # ...with exactly one producer (map1) and one consumer (map2).
            if (graph.in_degree(buf) != 1):
                return False
            if (graph.out_degree(buf) != 1):
                return False
            if (graph.in_edges(buf)[0].src != map1_exit):
                return False
            if (graph.out_edges(buf)[0].dst != map2_entry):
                return False

            # map1 must write at least the subset that map2 reads.
            provided = graph.in_edges(buf)[0].data.subset
            consumed = graph.out_edges(buf)[0].data.subset
            if (not provided.covers(consumed)):
                return False

            # BUG FIX: compare data names, not a name against the node
            # object. The original `n.data == buf` could never be True
            # (string vs. AccessNode), so this uniqueness guard was a no-op.
            num_occurrences = len([n for n in graph.nodes() if (isinstance(n, nodes.AccessNode) and (n.data == buf.data))])
            if (num_occurrences > 1):
                return False
        return True

    def apply(self, graph, sdfg):
        """Tile map1 (with overlap), tile map2, and fuse the tile maps."""
        map1_exit = self.map1_exit
        map1_entry = graph.entry_node(map1_exit)
        map2_entry = self.map2_entry
        buffers = graph.all_nodes_between(map1_exit, map2_entry)

        # How far map2's iteration range extends beyond map1's on each side;
        # map1's tiles must overlap by exactly that much so every map2 tile
        # finds its inputs produced by the corresponding map1 tile.
        lower_extents = tuple(((b - a) for (a, b) in zip(map1_entry.range.min_element(), map2_entry.range.min_element())))
        upper_extents = tuple(((a - b) for (a, b) in zip(map1_entry.range.max_element(), map2_entry.range.max_element())))

        MapTilingWithOverlap.apply_to(sdfg, map_entry=map1_entry, options={'tile_sizes': self.tile_sizes, 'lower_overlap': lower_extents, 'upper_overlap': upper_extents})
        tile_map1_exit = graph.out_edges(map1_exit)[0].dst
        tile_map1_entry = graph.entry_node(tile_map1_exit)
        tile_map1_entry.label = 'BufferTiling'

        MapTiling.apply_to(sdfg, map_entry=map2_entry, options={'tile_sizes': self.tile_sizes, 'tile_trivial': True})
        tile_map2_entry = graph.in_edges(map2_entry)[0].src

        # Fuse the two outer tile maps over one of the intermediate buffers.
        some_buffer = next(iter(buffers))
        MapFusion.apply_to(sdfg, first_map_exit=tile_map1_exit, array=some_buffer, second_map_entry=tile_map2_entry)

        # Collapse inner dimensions whose tile size is 1 (and, for map1, have
        # no overlap) to a single iteration, then eliminate trivial maps.
        map1_entry.range.ranges = [((r[0], r[0], r[2]) if ((l_ext == 0) and (u_ext == 0) and (ts == 1)) else r) for (r, l_ext, u_ext, ts) in zip(map1_entry.range.ranges, lower_extents, upper_extents, self.tile_sizes)]
        map2_entry.range.ranges = [((r[0], r[0], r[2]) if (ts == 1) else r) for (r, ts) in zip(map2_entry.range.ranges, self.tile_sizes)]
        if any(((ts == 1) for ts in self.tile_sizes)):
            if any(((r[0] == r[1]) for r in map1_entry.map.range)):
                TrivialMapElimination.apply_to(sdfg, map_entry=map1_entry)
            if any(((r[0] == r[1]) for r in map2_entry.map.range)):
                TrivialMapElimination.apply_to(sdfg, map_entry=map2_entry)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.