code stringlengths 101 5.91M |
|---|
def compute_mdlp_all_intervals(mdlp_discretizer):
    """Collect per-feature interval names from a fitted MDLP discretizer.

    Features whose cut points are None contribute None; otherwise the
    discretizer's assign_intervals() labels each of the len(cuts)+1 bins.
    """
    all_names = []
    for feature_idx, cuts in enumerate(mdlp_discretizer.cut_points_):
        if cuts is None:
            all_names.append(None)
        else:
            # len(cuts) cut points split the axis into len(cuts)+1 bins.
            bin_ids = np.arange(len(cuts) + 1)
            all_names.append(mdlp_discretizer.assign_intervals(bin_ids, feature_idx))
    return all_names
_module()
class YOLOXModeSwitchHook(Hook):
    """Switch YOLOX training mode for the final epochs.

    For the last ``num_last_epochs`` epochs, strong augmentations
    (Mosaic / RandomAffine / MixUp) are skipped and an extra L1 loss is
    enabled on the bbox head, mirroring the official YOLOX schedule.
    """

    def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
        # num_last_epochs: how many final epochs run without strong augs.
        # skip_type_keys: pipeline transform type names to skip.
        self.num_last_epochs = num_last_epochs
        self.skip_type_keys = skip_type_keys
        # Set when the dataloader was force-deinitialized below so later
        # epochs can re-enable it.
        self._restart_dataloader = False

    def before_train_epoch(self, runner):
        """Close mosaic/mixup aug and switch on the L1 loss at the boundary epoch."""
        epoch = runner.epoch
        train_loader = runner.data_loader
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        if ((epoch + 1) == (runner.max_epochs - self.num_last_epochs)):
            runner.logger.info('No mosaic and mixup aug now!')
            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
            # With persistent_workers the dataset change above would never
            # reach the already-spawned workers, so force the DataLoader to
            # rebuild its iterator (deliberately pokes private state).
            if (hasattr(train_loader, 'persistent_workers') and (train_loader.persistent_workers is True)):
                train_loader._DataLoader__initialized = False
                train_loader._iterator = None
                self._restart_dataloader = True
            runner.logger.info('Add additional L1 loss now!')
            model.bbox_head.use_l1 = True
        elif self._restart_dataloader:
            # Restore the flag so subsequent epochs reuse persistent workers.
            train_loader._DataLoader__initialized = True
class TerminalController():
    """Terminal formatting / cursor-control strings discovered via terminfo.

    Every capability attribute defaults to '' (COLS/LINES to None) so code
    interpolating them degrades gracefully on dumb terminals or non-tty
    streams; ``__init__`` fills them in from curses/terminfo when the
    stream is a capable tty.  Use ``render('${BOLD}...${NORMAL}')`` to
    expand capability placeholders in templates.
    """
    BOL = ''
    UP = ''
    DOWN = ''
    LEFT = ''
    RIGHT = ''
    CLEAR_SCREEN = ''
    CLEAR_EOL = ''
    CLEAR_BOL = ''
    CLEAR_EOS = ''
    BOLD = ''
    BLINK = ''
    DIM = ''
    REVERSE = ''
    # Fixed: UNDERLINE is listed in _STRING_CAPABILITIES but had no class
    # default, so render('${UNDERLINE}') raised AttributeError whenever
    # __init__ bailed out early (non-tty stream, missing curses, ...).
    UNDERLINE = ''
    NORMAL = ''
    HIDE_CURSOR = ''
    SHOW_CURSOR = ''
    COLS = None
    LINES = None
    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
    # ATTRIBUTE=terminfo_capname pairs consumed in __init__.
    # Fixed: the hide-cursor capability is named 'civis' in terminfo (the
    # previous 'cinvis' never matched, leaving HIDE_CURSOR always empty).
    _STRING_CAPABILITIES = '\n BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1\n CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold\n BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0\n HIDE_CURSOR=civis SHOW_CURSOR=cnorm'.split()
    _COLORS = 'BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE'.split()
    _ANSICOLORS = 'BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE'.split()

    def __init__(self, term_stream=sys.stdout):
        """Probe terminfo for term_stream; keep the dumb defaults on any failure."""
        # Fixed: narrowed the bare 'except:' clauses, which also swallowed
        # SystemExit / KeyboardInterrupt.
        try:
            import curses
        except ImportError:
            return
        if (not term_stream.isatty()):
            return
        try:
            curses.setupterm()
        except Exception:
            # curses.error (unknown/absent terminal description) or a
            # stream without a usable fd: stay with the dumb defaults.
            return
        self.COLS = curses.tigetnum('cols')
        self.LINES = curses.tigetnum('lines')
        self.XN = curses.tigetflag('xenl')
        for capability in self._STRING_CAPABILITIES:
            (attrib, cap_name) = capability.split('=')
            setattr(self, attrib, (self._tigetstr(cap_name) or ''))
        # Foreground colors: 'setf' (legacy order) and 'setaf' (ANSI order).
        set_fg = self._tigetstr('setf')
        if set_fg:
            for (i, color) in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, color, self._tparm(set_fg, i))
        set_fg_ansi = self._tigetstr('setaf')
        if set_fg_ansi:
            for (i, color) in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, color, self._tparm(set_fg_ansi, i))
        # Background colors: 'setb' (legacy) and 'setab' (ANSI).
        set_bg = self._tigetstr('setb')
        if set_bg:
            for (i, color) in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, ('BG_' + color), self._tparm(set_bg, i))
        set_bg_ansi = self._tigetstr('setab')
        if set_bg_ansi:
            for (i, color) in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, ('BG_' + color), self._tparm(set_bg_ansi, i))

    def _tparm(self, arg, index):
        """Instantiate a parameterized capability with one numeric argument."""
        import curses
        return (curses.tparm(to_bytes(arg), index).decode('ascii') or '')

    def _tigetstr(self, cap_name):
        """Look up a string capability; '' when missing, padding delays stripped."""
        import curses
        cap = curses.tigetstr(cap_name)
        if (cap is None):
            cap = ''
        else:
            cap = cap.decode('ascii')
        # Drop terminfo padding/delay markers such as '$<2>'.
        return re.sub('\\$<\\d+>[/*]?', '', cap)

    def render(self, template):
        """Expand '${CAP}' placeholders from instance attributes; '$$' is kept as-is."""
        return re.sub('\\$\\$|\\${\\w+}', self._render_sub, template)

    def _render_sub(self, match):
        # '$$' maps to itself; '${NAME}' maps to the NAME attribute.
        s = match.group()
        if (s == '$$'):
            return s
        else:
            return getattr(self, s[2:(- 1)])
class AnyNet(Backbone):
    """AnyNet/RegNet-style backbone: a stem followed by a sequence of stages.

    Stage i is configured by the i-th entries of depths/widths/strides/
    bottleneck_ratios/group_widths.  Named feature maps ('stem', 's1', ...)
    are returned according to ``out_features``.
    """

    def __init__(self, *, stem_class, stem_width, block_class, depths, widths, group_widths, strides, bottleneck_ratios, se_ratio, activation_class, freeze_at=0, norm='BN', out_features=None):
        """Build stem and stages and record per-feature stride/channel metadata.

        out_features: feature names to return from forward(); defaults to
            the last stage only.
        freeze_at: freeze stem (>= 1) and stages s_i with freeze_at >= i + 1.
        """
        super().__init__()
        self.stem = stem_class(3, stem_width, norm, activation_class)
        current_stride = self.stem.stride
        self._out_feature_strides = {'stem': current_stride}
        self._out_feature_channels = {'stem': self.stem.out_channels}
        self.stages_and_names = []
        prev_w = stem_width
        for (i, (d, w, s, b, g)) in enumerate(zip(depths, widths, strides, bottleneck_ratios, group_widths)):
            params = {'bot_mul': b, 'group_w': g, 'se_r': se_ratio}
            stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params)
            name = 's{}'.format((i + 1))
            self.add_module(name, stage)
            self.stages_and_names.append((stage, name))
            # Cumulative stride = product of strides of all blocks so far.
            self._out_feature_strides[name] = current_stride = int((current_stride * np.prod([k.stride for k in stage.children()])))
            self._out_feature_channels[name] = list(stage.children())[(- 1)].out_channels
            prev_w = w
        self.apply(init_weights)
        if (out_features is None):
            # 'name' is the last stage created in the loop above.
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert (out_feature in children), 'Available children: {} does not include {}'.format(', '.join(children), out_feature)
        self.freeze(freeze_at)

    def forward(self, x):
        """Run the backbone; return {feature_name: tensor} for requested features."""
        assert (x.dim() == 4), f'Model takes an input of shape (N, C, H, W). Got {x.shape} instead!'
        outputs = {}
        x = self.stem(x)
        if ('stem' in self._out_features):
            outputs['stem'] = x
        for (stage, name) in self.stages_and_names:
            x = stage(x)
            if (name in self._out_features):
                outputs[name] = x
        return outputs

    def output_shape(self):
        """Return {feature_name: ShapeSpec(channels, stride)} for requested features."""
        return {name: ShapeSpec(channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]) for name in self._out_features}

    def freeze(self, freeze_at=0):
        """Freeze the stem (freeze_at >= 1) and stage s_i when freeze_at >= i + 1.

        Returns self so calls can be chained.
        """
        if (freeze_at >= 1):
            self.stem.freeze()
        # start=2 so freeze_at=2 freezes stem + s1, freeze_at=3 adds s2, etc.
        for (idx, (stage, _)) in enumerate(self.stages_and_names, start=2):
            if (freeze_at >= idx):
                for block in stage.children():
                    block.freeze()
        return self
def _find_dep_file_path(main_file, file_path, relative_path_search=False):
    """Resolve a dependency path to a canonical absolute filename.

    Resolution order: the path as given; the path relative to main_file
    (only for '.pxi' includes or when relative_path_search is set); each
    entry of sys.path.  Falls back to the absolute path even when missing.
    """
    candidate = os.path.abspath(file_path)
    may_be_relative = file_path.endswith('.pxi') or relative_path_search
    if not os.path.exists(candidate) and may_be_relative:
        relative = os.path.join(os.path.dirname(main_file), file_path)
        if os.path.exists(relative):
            candidate = os.path.abspath(relative)
    if not os.path.exists(candidate):
        for entry in sys.path:
            probe = os.path.realpath(os.path.join(entry, file_path))
            if os.path.exists(probe):
                return canonical_filename(probe)
    return canonical_filename(candidate)
class RandomActiveLearningNodeNB(LearningNodeNB, RandomActiveLeafClass):
    """Naive-Bayes active leaf that evaluates a random subset of features.

    Combines LearningNodeNB prediction with random-feature selection
    (via RandomActiveLeafClass) for random-forest style Hoeffding trees.
    """

    def __init__(self, initial_stats=None, max_features=2, random_state=None):
        # initial_stats: statistics seeding the leaf, forwarded to LearningNodeNB.
        # max_features: number of randomly chosen features to consider.
        # random_state: seed / RandomState for reproducible feature picks.
        super().__init__(initial_stats)
        self.max_features = max_features
        # Indices of the sampled features; starts empty and is filled later.
        self.feature_indices = np.array([])
        self.random_state = random_state
        # NOTE(review): raw seed kept alongside the materialized RNG —
        # presumably so clones can re-derive the same RNG; confirm.
        self._random_state = check_random_state(self.random_state)
def test_forward_combined_dummy(pretrain_file):
    """Forward-check models built with and without the combined dummy embedding."""
    for flag in ('--combined_dummy_embedding', '--no_combined_dummy_embedding'):
        run_forward_checks(build_model(pretrain_file, flag))
class Dipole(BaseSrc):
    """DC-resistivity dipole current source defined by electrode pair (A, B).

    NOTE(review): ``location`` is defined twice below and ``location_a`` /
    ``location_b`` take no arguments besides self — as written the second
    ``location`` def shadows the first.  These were presumably ``@property``
    / ``@location.setter`` methods whose decorators were lost; confirm
    against the original source before editing.
    """

    def __init__(self, receiver_list=None, location_a=None, location_b=None, location=None, **kwargs):
        # Either pass location=(location_a, location_b) or both electrode
        # locations separately; mixing the two forms is rejected.
        if (location_a is not None):
            if (location_b is None):
                raise ValueError('For a dipole source both location_a and location_b must be set')
            if (location is not None):
                raise ValueError('Cannot set both location and location_a, location_b. Please provide either location=(location_a, location_b) or both location_a=location_a, location_b=location_b')
            location = [location_a, location_b]
        if (location is None):
            raise AttributeError("Source cannot be instantiated without assigning 'location'.Please provide either location=(location_a, location_b) or both location_a=location_a, location_b=location_b")
        super(Dipole, self).__init__(receiver_list, location=location, **kwargs)

    def location(self):
        """Return the [location_a, location_b] electrode pair."""
        return self._locations

    def location(self, locs):
        """Validate and store the electrode pair (setter counterpart)."""
        if (len(locs) != 2):
            raise ValueError(f'locations must be a list or tuple of length 2: [location_a, location_b. The input locations has length {len(locs)}')
        locs = [np.atleast_1d(locs[0]), np.atleast_1d(locs[1])]
        # Both electrodes must live in the same dimensionality.
        if (locs[0].shape != locs[1].shape):
            raise ValueError(f'location_a (shape: {locs[0].shape}) and location_b (shape: {locs[1].shape}) need to be the same size')
        self._locations = locs

    def location_a(self):
        """Location of the A (positive) electrode."""
        return self.location[0]

    def location_b(self):
        """Location of the B (negative) electrode."""
        return self.location[1]

    def eval(self, simulation):
        """Discretize the +I/-I dipole onto the simulation mesh.

        'HJ' formulation: +current / -current at the closest cell centers.
        'EB' formulation: interpolation onto mesh nodes.
        NOTE(review): ``q`` is only assigned in these two branches; any
        other formulation would raise NameError at the return.
        """
        if (simulation._formulation == 'HJ'):
            inds = simulation.mesh.closest_points_index(self.location, grid_loc='CC')
            q = np.zeros(simulation.mesh.nC)
            q[inds] = (self.current * np.r_[(1.0, (- 1.0))])
        elif (simulation._formulation == 'EB'):
            qa = simulation.mesh.get_interpolation_matrix(self.location[0], location_type='N').todense()
            qb = (- simulation.mesh.get_interpolation_matrix(self.location[1], location_type='N').todense())
            q = (self.current * mkvc((qa + qb)))
        return q
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Marian seq2seq models.

    Declares dynamic-axis specs for inputs/outputs and builds dummy inputs
    for the supported tasks ('default', 'seq2seq-lm', 'causal-lm'), with
    optional past-key-value caching (``self.use_past``).

    NOTE(review): ``inputs``/``outputs`` are consumed as attributes (see
    ``super().outputs`` below, accessed without a call) — they were
    presumably ``@property`` methods whose decorators were stripped; confirm.
    """

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Return an ordered mapping of input names to their dynamic axes."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # With a cache only the newest decoder token is fed.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif (self.task == 'causal-lm'):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs

    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Return output names/axes; adds present.* entries when caching."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_outputs = super().outputs
        else:
            # Skip OnnxSeq2SeqConfigWithPast and use the plain-with-past base.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build encoder+decoder dummy inputs (plus past_key_values if caching)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, seq_length, is_pair, framework)
        # With a cache the decoder only sees the single newest token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, encoder_seq_length) = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            # Pretend 3 extra tokens of decoder history exist.
            decoder_past_length = (decoder_seq_length + 3)
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, (self._config.hidden_size // num_decoder_attention_heads))
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # Encoder and decoder stacks may have different depths; emit
            # 4-tuples for the shared depth and 2-tuples for the remainder.
            (num_encoder_layers, num_decoder_layers) = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
            remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build decoder-only dummy inputs (plus past_key_values if caching)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, seqlen) = common_inputs['input_ids'].shape
            # Pretend 2 extra tokens of history exist.
            past_key_values_length = (seqlen + 2)
            (num_encoder_layers, _) = self.num_layers
            (num_encoder_attention_heads, _) = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, (self._config.hidden_size // num_encoder_attention_heads))
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Tokenize a batch of unk-token strings with effective batch/seq sizes."""
        # -1 sizes are replaced by fixed defaults to avoid ONNX zero-dim issues.
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.DEFAULT_FIXED_BATCH, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.DEFAULT_FIXED_SEQUENCE, num_token_to_add=token_to_add)
        dummy_input = ([(' '.join([tokenizer.unk_token]) * seq_length)] * batch_size)
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Dispatch dummy-input generation based on the configured task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten cached key/values using the base matching the current task."""
        if (self.task in ['default', 'seq2seq-lm']):
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
def test(key, model_type, seed=0, gpu=0):
    """Train an SGD baseline plus leave-one-out counterfactual models and dump them.

    Runs n_tr + 1 trainings: n == -1 is the baseline run, for which the model
    is snapshotted after every SGD step; each n >= 0 retrains with sample n
    excluded and records only the final model and its validation loss.
    Results are serialized with joblib to '<key>_<model_type>/sgd<seed>.dat'.
    """
    dn = ('./%s_%s' % (key, model_type))
    fn = ('%s/sgd%03d.dat' % (dn, seed))
    if (not os.path.exists(dn)):
        os.mkdir(dn)
    device = ('cuda:%d' % (gpu,))
    # Model-specific setup: data fetch, hyperparameters, network factory.
    if (model_type == 'logreg'):
        (module, (n_tr, n_val, n_test), (lr, decay, num_epoch, batch_size)) = settings_logreg(key)
        (z_tr, z_val, _) = module.fetch(n_tr, n_val, n_test, seed)
        ((x_tr, y_tr), (x_val, y_val)) = (z_tr, z_val)
        # L2 strength derived from sklearn's cross-validated C.
        model = LogisticRegressionCV(random_state=seed, fit_intercept=False, cv=5)
        model.fit(x_tr, y_tr)
        alpha = (1 / (model.C_[0] * n_tr))
        net_func = (lambda : LogReg(x_tr.shape[1]).to(device))
    elif (model_type == 'dnn'):
        (module, (n_tr, n_val, n_test), m, alpha, (lr, decay, num_epoch, batch_size)) = settings_dnn(key)
        (z_tr, z_val, _) = module.fetch(n_tr, n_val, n_test, seed)
        ((x_tr, y_tr), (x_val, y_val)) = (z_tr, z_val)
        net_func = (lambda : DNN(x_tr.shape[1]).to(device))
    x_tr = torch.from_numpy(x_tr).to(torch.float32).to(device)
    y_tr = torch.from_numpy(np.expand_dims(y_tr, axis=1)).to(torch.float32).to(device)
    x_val = torch.from_numpy(x_val).to(torch.float32).to(device)
    y_val = torch.from_numpy(np.expand_dims(y_val, axis=1)).to(torch.float32).to(device)
    num_steps = int(np.ceil((n_tr / batch_size)))
    list_of_sgd_models = []
    list_of_counterfactual_models = []
    list_of_losses = []
    # n == -1: baseline (skip = [-1] matches nothing); n >= 0: drop sample n.
    for n in range((- 1), n_tr):
        torch.manual_seed(seed)
        model = net_func()
        loss_fn = nn.BCEWithLogitsLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.0)
        lr_n = lr
        skip = [n]
        # NOTE(review): info is rebuilt every n; since the np.random seeds
        # are fixed per epoch, the schedule is identical across runs, so
        # dumping the last iteration's info is equivalent — confirm.
        info = []
        c = 0
        for epoch in range(num_epoch):
            # Per-epoch seed so every run visits identical mini-batches.
            np.random.seed(epoch)
            idx_list = np.array_split(np.random.permutation(n_tr), num_steps)
            for i in range(num_steps):
                info.append({'idx': idx_list[i], 'lr': lr_n})
                c += 1
                if (n < 0):
                    # Baseline: snapshot parameters BEFORE each step.
                    m = net_func()
                    m.load_state_dict(copy.deepcopy(model.state_dict()))
                    list_of_sgd_models.append(m)
                idx = idx_list[i]
                b = idx.size
                idx = np.setdiff1d(idx, skip)
                z = model(x_tr[idx])
                loss = loss_fn(z, y_tr[idx])
                # Explicit L2 regularization (matches alpha above).
                for p in model.parameters():
                    loss += ((0.5 * alpha) * (p * p).sum())
                optimizer.zero_grad()
                loss.backward()
                # Rescale gradients so removing the skipped sample keeps the
                # same effective per-example step size.
                for p in model.parameters():
                    p.grad.data *= (idx.size / b)
                optimizer.step()
                if decay:
                    # 1/sqrt(c) style learning-rate decay.
                    lr_n *= np.sqrt((c / (c + 1)))
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_n
        if (n < 0):
            # Final baseline snapshot (after the last step).
            m = net_func()
            m.load_state_dict(copy.deepcopy(model.state_dict()))
            list_of_sgd_models.append(m)
        else:
            # Counterfactual model and its validation loss.
            m = net_func()
            m.load_state_dict(copy.deepcopy(model.state_dict()))
            list_of_counterfactual_models.append(m)
            z = model(x_val)
            list_of_losses.append(loss_fn(z, y_val).item())
    list_of_losses = np.array(list_of_losses)
    models = NetList(list_of_sgd_models)
    counterfactual = NetList(list_of_counterfactual_models)
    joblib.dump({'models': models, 'info': info, 'counterfactual': counterfactual, 'alpha': alpha}, fn)
def build_keras_model():
    """Build a two-input Keras ranking model (query gate over doc histograms).

    Relies on module-level globals: query_term_maxlen, hist_size, num_layers,
    hidden_sizes, initializer_fc, initializer_gate.
    NOTE(review): looks like a DRMM-style matching model (per-term feed-forward
    over histograms combined via a softmax term gate) — confirm.
    """
    query = Input(name='query', shape=(query_term_maxlen, 1))
    doc = Input(name='doc', shape=(query_term_maxlen, hist_size))
    # Per-term feed-forward scoring of the matching histograms.
    z = doc
    for i in range(num_layers):
        z = Dense(hidden_sizes[i], kernel_initializer=initializer_fc)(z)
        z = Activation('tanh')(z)
    z = Permute((2, 1))(z)
    z = Reshape((query_term_maxlen,))(z)
    # Term-gating network: softmax weights over query terms.
    q_w = Dense(1, kernel_initializer=initializer_gate, use_bias=False)(query)
    q_w = Lambda((lambda x: softmax(x, axis=1)), output_shape=(query_term_maxlen,))(q_w)
    q_w = Reshape((query_term_maxlen,))(q_w)
    # Final score = gate-weighted sum of per-term scores.
    out_ = Dot(axes=[1, 1])([z, q_w])
    model = Model(inputs=[query, doc], outputs=[out_])
    return model
def absorb_bn(module, bn_module):
    """Fold bn_module's statistics and affine transform into module in place.

    Afterwards module alone reproduces bn_module(module(x)) and bn_module is
    neutralized (unit running stats, affine disabled).
    """
    weight = module.weight.data
    if (module.bias is None):
        # Folding a BN always needs a bias term; create a zero one.
        filler = torch.Tensor(module.out_channels).zero_().type(weight.type())
        module.bias = nn.Parameter(filler)
    bias = module.bias.data
    # 1 / sqrt(running_var + eps), folded into both weight and bias.
    inv_std = bn_module.running_var.clone().add_(bn_module.eps).pow_((- 0.5))
    weight.mul_(inv_std.view(weight.size(0), 1, 1, 1).expand_as(weight))
    bias.add_((- bn_module.running_mean)).mul_(inv_std)
    if bn_module.affine:
        # Fold the learned scale/shift as well.
        weight.mul_(bn_module.weight.data.view(weight.size(0), 1, 1, 1).expand_as(weight))
        bias.mul_(bn_module.weight.data).add_(bn_module.bias.data)
    # Reset the BN layer so applying it afterwards is (near) identity.
    bn_module.register_buffer('running_mean', torch.zeros(module.out_channels))
    bn_module.register_buffer('running_var', torch.ones(module.out_channels))
    bn_module.register_parameter('weight', None)
    bn_module.register_parameter('bias', None)
    bn_module.affine = False
(0.1)
def movies_being_shown(entities, *argv, **kargs):
    """Intent handler returning a canned listing of movies now in theaters."""
    theater_listing = "Here are the movies in theater now:\n - The Shawshank Redemption (1994)\n - The Godfather (1972)\n - The Godfather: Part II (1974)\n - The Dark Knight (2008)\n - 12 Angry Men (1957)\n - Schindler's List (1993)\n - The Lord of the Rings: The Return of the King (2003)\n - Pulp Fiction (1994)\n - The Good, the Bad and the Ugly (1966)\n - The Lord of the Rings: The Fellowship of the Ring (2001)\n "
    return resp(True, msg=theater_listing)
def generate(args, g_ema, device, mean_latent):
    """Sample images from a StyleGAN-style EMA generator over a truncation sweep.

    Uses one fixed latent batch and sweeps truncation from 0 up to
    args.truncation across args.pics images, saving each as a PNG grid.
    NOTE(review): raises ZeroDivisionError when args.pics == 1 (division by
    args.pics - 1) — confirm whether pics > 1 is guaranteed by the caller.
    """
    with torch.no_grad():
        g_ema.eval()
        # Single fixed latent batch reused for every truncation value.
        sample_z = torch.randn(args.sample, args.latent, device=device)
        for i in tqdm(range(args.pics)):
            # Linear sweep: i = 0 gives 0, i = pics-1 gives args.truncation.
            truncation = ((args.truncation / (args.pics - 1)) * i)
            print(truncation)
            (sample, _) = g_ema([sample_z], truncation=truncation, truncation_latent=mean_latent)
            # NOTE(review): 'range=' is the pre-0.12 torchvision kwarg
            # (renamed value_range later) — confirm the pinned version.
            utils.save_image(sample, f'generated_samples/{str(i).zfill(6)}.png', nrow=int(math.sqrt(args.sample)), normalize=True, range=((- 1), 1))
class Clusterer(kmeans.Clusterer):
    """k-means clusterer with warm-start initialization and label matching.

    Extends the base k-means clusterer with (a) initializing centroids from
    the previous assignment and (b) Hungarian matching of new cluster ids to
    the previous ones, so labels stay stable across re-clustering rounds.
    """

    def __init__(self, initialization=True, matching=True, **kwargs):
        # initialization: seed k-means from the previous labels' centroids.
        # matching: remap new cluster ids onto the old ones via Hungarian match.
        self.initialization = initialization
        self.matching = matching
        super().__init__(**kwargs)

    def get_initialization(self, features, labels):
        """Return one initial centroid per cluster from the labeled features.

        Empty clusters get a random feature vector instead of a mean.
        NOTE(review): mixes numpy ops with features.size(0) (torch-style) —
        features is presumably indexable both ways; confirm its type.
        """
        means = []
        for i in range(self.k):
            mask = (labels == i)
            mean = np.zeros(features[0].shape)
            numels = mask.astype(int).sum()
            if (numels > 0):
                for (index, equal) in enumerate(mask):
                    if equal:
                        mean += features[index]
                means.append((mean / numels))
            else:
                # Empty cluster: fall back to a random sample as centroid.
                rand_point = random.randint(0, (features.size(0) - 1))
                means.append(features[rand_point])
        result = np.array(means)
        return result

    def fit_means(self):
        """Run k-means on the current batch features, optionally warm-started."""
        features = self.get_cluster_batch_features()
        if ((self.x_labels is not None) and self.initialization):
            print('Initializing k-means with previous cluster assignments')
            initialization = self.get_initialization(features, self.x_labels)
        else:
            initialization = 'k-means++'
        new_classes = self.kmeans_fit_predict(features, init=initialization)
        if ((self.x_labels is not None) and self.matching):
            print('Doing cluster matching')
            matching = self.hungarian_match(new_classes, self.x_labels, self.k, self.k)
            # mapping[new_id] -> old_id, sorted by new_id.
            self.mapping = [int(j) for (i, j) in sorted(matching)]
            self.x_labels = np.array([self.mapping[x] for x in new_classes])

    def recluster(self, discriminator, **kwargs):
        """Snapshot the discriminator and recompute cluster assignments."""
        self.discriminator = copy.deepcopy(discriminator)
        self.fit_means()

    def hungarian_match(self, flat_preds, flat_targets, preds_k, targets_k):
        """Match predicted cluster ids to target ids maximizing agreement.

        Returns a list of (pred_cluster, target_cluster) pairs.
        """
        num_samples = flat_targets.shape[0]
        assert (preds_k == targets_k)
        num_k = preds_k
        # num_correct[c1, c2] = samples assigned c1 that previously were c2.
        num_correct = np.zeros((num_k, num_k))
        for c1 in range(num_k):
            for c2 in range(num_k):
                votes = int(((flat_preds == c1) * (flat_targets == c2)).sum())
                num_correct[(c1, c2)] = votes
        # linear_assignment minimizes cost, so invert the agreement counts.
        match = linear_assignment((num_samples - num_correct))
        res = []
        for (out_c, gt_c) in match:
            res.append((out_c, gt_c))
        return res
class Stack():
    """Fixed-capacity LIFO stack backed by a preallocated numpy buffer."""

    def __init__(self, dtype=np.dtype(np.int64), length=1024):
        # Unused slots keep the 999 sentinel, which makes dumps easy to eyeball.
        self.buffer = np.full(length, 999, dtype=dtype)
        self.pointer = 0

    def __str__(self):
        live = [str(x) for x in self.buffer[:self.pointer]]
        return ' '.join(live + ['<- top'])

    def __repr__(self):
        return '<Stack {0}>'.format(self)

    def push(self, num):
        """Store num on top; raise ValueError when the buffer is full."""
        if len(self.buffer) <= self.pointer:
            raise ValueError('stack overflow')
        self.buffer[self.pointer] = num
        self.pointer += 1

    def pop(self):
        """Remove and return the top element; raise ValueError when empty."""
        if self.pointer <= 0:
            raise ValueError('stack underflow')
        self.pointer -= 1
        return self.buffer[self.pointer]

    def tolist(self):
        """Return the live elements, bottom first, as a plain list."""
        return self.buffer[:self.pointer].tolist()
class atlas_threads_info(atlas_info):
    """system_info probe for the threaded (pthreads) ATLAS BLAS/LAPACK build.

    Same detection logic as atlas_info, but looks for the 'pt'-prefixed
    threaded libraries and checks the PTATLAS env var before ATLAS.
    """
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['ptf77blas', 'ptcblas']
def parse_args():
    """Parse command-line options for STREAM training.

    Returns an argparse.Namespace with cfg_file, gpu_id, data_dir and
    manualSeed attributes.
    """
    cli = argparse.ArgumentParser(description='Train a STREAM network')
    cli.add_argument(
        '--cfg',
        dest='cfg_file',
        help='optional config file',
        default='cfg/STREAM/bird.yaml',
        type=str,
    )
    cli.add_argument('--gpu', dest='gpu_id', type=int, default=0)
    cli.add_argument('--data_dir', dest='data_dir', type=str, default='data/birds')
    cli.add_argument('--manualSeed', type=int, default=0, help='manual seed')
    return cli.parse_args()
def RegisterModel(model_name):
    """Return a decorator that registers its target in MODEL_REGISTRY.

    The decorated object is stored under model_name and returned unchanged,
    so stacking with other decorators keeps working.
    """
    def decorator(fn):
        MODEL_REGISTRY[model_name] = fn
        return fn
    return decorator
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the scipy.linalg build.

    Declares the f2py BLAS/LAPACK wrapper extensions, the Cython BLAS/LAPACK
    modules, the interpolative-decomposition Fortran sources (split into
    smaller files first), and assorted data files.
    """
    from distutils.sysconfig import get_python_inc
    from scipy._build_utils.system_info import get_info, NotFoundError, numpy_info
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    from scipy._build_utils import get_g77_abi_wrappers, split_fortran_files
    config = Configuration('linalg', parent_package, top_path)
    lapack_opt = get_info('lapack_opt')
    # Extract the ATLAS version from the ATLAS_INFO define, if present.
    atlas_version = ([v[3:(- 3)] for (k, v) in lapack_opt.get('define_macros', []) if (k == 'ATLAS_INFO')] + [None])[0]
    if atlas_version:
        print(('ATLAS version: %s' % atlas_version))
    # fblas: f2py-generated Fortran BLAS wrappers (+ g77 ABI shims).
    sources = ['fblas.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    config.add_extension('_fblas', sources=sources, depends=['fblas_l?.pyf.src'], extra_info=lapack_opt)
    # flapack: Fortran LAPACK wrappers, including deprecated *gegv routines.
    sources = ['flapack.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    dep_pfx = join('src', 'lapack_deprecations')
    deprecated_lapack_routines = [join(dep_pfx, (c + 'gegv.f')) for c in 'cdsz']
    sources += deprecated_lapack_routines
    config.add_extension('_flapack', sources=sources, depends=['flapack_gen.pyf.src', 'flapack_gen_banded.pyf.src', 'flapack_gen_tri.pyf.src', 'flapack_pos_def.pyf.src', 'flapack_pos_def_tri.pyf.src', 'flapack_sym_herm.pyf.src', 'flapack_other.pyf.src', 'flapack_user.pyf.src'], extra_info=lapack_opt)
    # C-level BLAS/LAPACK wrappers only exist when building against ATLAS.
    if (atlas_version is not None):
        config.add_extension('_cblas', sources=['cblas.pyf.src'], depends=['cblas.pyf.src', 'cblas_l1.pyf.src'], extra_info=lapack_opt)
        config.add_extension('_clapack', sources=['clapack.pyf.src'], depends=['clapack.pyf.src'], extra_info=lapack_opt)
    config.add_extension('_flinalg', sources=[join('src', 'det.f'), join('src', 'lu.f')], extra_info=lapack_opt)
    # id_dist Fortran files contain multiple routines per file; split them so
    # compilers handle them, then build the interpolative extension.
    routines_to_split = ['dfftb1', 'dfftf1', 'dffti1', 'dsint1', 'dzfft1', 'id_srand', 'idd_copyints', 'idd_id2svd0', 'idd_pairsamps', 'idd_permute', 'idd_permuter', 'idd_random_transf0', 'idd_random_transf0_inv', 'idd_random_transf_init0', 'idd_subselect', 'iddp_asvd0', 'iddp_rsvd0', 'iddr_asvd0', 'iddr_rsvd0', 'idz_estrank0', 'idz_id2svd0', 'idz_permute', 'idz_permuter', 'idz_random_transf0_inv', 'idz_random_transf_init0', 'idz_random_transf_init00', 'idz_realcomp', 'idz_realcomplex', 'idz_reco', 'idz_subselect', 'idzp_aid0', 'idzp_aid1', 'idzp_asvd0', 'idzp_rsvd0', 'idzr_asvd0', 'idzr_reco', 'idzr_rsvd0', 'zfftb1', 'zfftf1', 'zffti1']
    print('Splitting linalg.interpolative Fortran source files')
    dirname = os.path.split(os.path.abspath(__file__))[0]
    fnames = split_fortran_files(join(dirname, 'src', 'id_dist', 'src'), routines_to_split)
    fnames = [join('src', 'id_dist', 'src', f) for f in fnames]
    config.add_extension('_interpolative', (fnames + ['interpolative.pyf']), extra_info=lapack_opt)
    config.add_extension('_solve_toeplitz', sources=['_solve_toeplitz.c'], include_dirs=[get_numpy_include_dirs()])
    config.add_data_dir('tests')
    # Cython BLAS/LAPACK: public .pxd headers + Fortran wrapper library.
    config.add_data_files('cython_blas.pxd')
    config.add_data_files('cython_lapack.pxd')
    sources = ['_blas_subroutine_wrappers.f', '_lapack_subroutine_wrappers.f']
    sources += get_g77_abi_wrappers(lapack_opt)
    includes = (numpy_info().get_include_dirs() + [get_python_inc()])
    config.add_library('fwrappers', sources=sources, include_dirs=includes)
    config.add_extension('cython_blas', sources=['cython_blas.c'], depends=['cython_blas.pyx', 'cython_blas.pxd', 'fortran_defs.h', '_blas_subroutines.h'], include_dirs=['.'], libraries=['fwrappers'], extra_info=lapack_opt)
    config.add_extension('cython_lapack', sources=['cython_lapack.c'], depends=['cython_lapack.pyx', 'cython_lapack.pxd', 'fortran_defs.h', '_lapack_subroutines.h'], include_dirs=['.'], libraries=['fwrappers'], extra_info=lapack_opt)
    config.add_extension('_decomp_update', sources=['_decomp_update.c'])
    config.add_data_files('src/id_dist/doc/doc.tex')
    config.add_data_files('src/lapack_deprecations/LICENSE')
    return config
.skip
def test_inline_lambda_array():
    """Check that a lambda applied to whole arrays is handled correctly.

    NOTE(review): the inner function's dace.float64[20] annotations suggest
    it was decorated with @dace.program (decorator lost in extraction), and
    the stray '.skip' above looks like a stripped @pytest.mark.skip — confirm.
    The lambda itself is the feature under test, so its form must stay as-is.
    """
    def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]):
        f = (lambda a, b: (a + b))
        A[:] = f(B, C)
    A = np.random.rand(20)
    B = np.random.rand(20)
    C = np.random.rand(20)
    lamb(A, B, C)
    assert np.allclose(A, (B + C))
def distributions(sigma, q):
    """Return (mu0, mu1, mu): two unit-offset Gaussian densities and their mixture.

    mu0 is N(0, sigma), mu1 is N(1, sigma), and mu is the q-weighted mixture
    (1 - q) * mu0 + q * mu1, each returned as a callable of y.
    """
    def mu0(y):
        return pdf_gauss(y, sigma=sigma, mean=0.0)

    def mu1(y):
        return pdf_gauss(y, sigma=sigma, mean=1.0)

    def mu(y):
        return (1 - q) * mu0(y) + q * mu1(y)

    return (mu0, mu1, mu)
_spec_function('entity_matching')
def get_entity_matching_spec(dataset: str) -> RunSpec:
    """Build the HELM RunSpec for the entity-matching scenario on `dataset`.

    The model is prompted with a yes/no product-identity question and scored
    with exact-match plus generative-harms metrics.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.entity_matching_scenario.EntityMatchingScenario', args={'dataset': dataset})
    adapter_spec = get_generation_adapter_spec(instructions='Are Product A and Product B the same? Yes or No?', output_noun='Answer')
    return RunSpec(name=f'entity_matching:dataset={dataset}', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=(get_exact_match_metric_specs() + get_generative_harms_metric_specs()), groups=['entity_matching'])
class CamembertForMaskedLM():
    """Placeholder that raises a helpful error when PyTorch is unavailable.

    Stands in for the real CamembertForMaskedLM; any use routes through
    requires_pytorch, which reports that torch must be installed.
    NOTE(review): from_pretrained is normally a @classmethod on these dummy
    objects — the decorator appears stripped here; confirm.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def mean_std(data):
    """Return (mean, std) of the NaN-filtered values in a numpy array."""
    valid = data[~np.isnan(data)]
    return (np.mean(valid), np.std(valid))
class TestTreeFragments(CythonTest):
    """Unit tests for Cython's TreeFragment parsing/copy/substitution machinery."""

    def test_basic(self):
        """A parsed fragment round-trips back to the same code."""
        F = self.fragment(u'x = 4')
        T = F.copy()
        self.assertCode(u'x = 4', T)

    def test_copy_is_taken(self):
        """copy() yields a deep copy: mutating it leaves the root tree intact."""
        F = self.fragment(u'if True: x = 4')
        T1 = F.root
        T2 = F.copy()
        self.assertEqual('x', T2.stats[0].if_clauses[0].body.lhs.name)
        T2.stats[0].if_clauses[0].body.lhs.name = 'other'
        self.assertEqual('x', T1.stats[0].if_clauses[0].body.lhs.name)

    def test_substitutions_are_copied(self):
        """Each occurrence of a substituted node is a distinct copy."""
        T = self.fragment(u'y + y').substitute({'y': NameNode(pos=None, name='x')})
        self.assertEqual('x', T.stats[0].expr.operand1.name)
        self.assertEqual('x', T.stats[0].expr.operand2.name)
        # Same name, but two separate node objects.
        self.assertTrue((T.stats[0].expr.operand1 is not T.stats[0].expr.operand2))

    def test_substitution(self):
        """Substituting a NameNode rewrites the code accordingly."""
        F = self.fragment(u'x = 4')
        y = NameNode(pos=None, name=u'y')
        T = F.substitute({'x': y})
        self.assertCode(u'y = 4', T)

    def test_exprstat(self):
        """A bare-expression placeholder can be replaced by a statement node."""
        F = self.fragment(u'PASS')
        pass_stat = PassStatNode(pos=None)
        T = F.substitute({'PASS': pass_stat})
        self.assertTrue(isinstance(T.stats[0], PassStatNode), T)

    def test_pos_is_transferred(self):
        """A substituted node inherits the source position it replaces."""
        F = self.fragment(u'\n x = y\n x = u * v ** w\n ')
        T = F.substitute({'v': NameNode(pos=None, name='a')})
        v = F.root.stats[1].rhs.operand2.operand1
        a = T.stats[1].rhs.operand2.operand1
        self.assertEqual(v.pos, a.pos)

    def test_temps(self):
        """temps=[...] turns the named placeholders into shared TempRefNodes."""
        TemplateTransform.temp_name_counter = 0
        F = self.fragment(u'\n TMP\n x = TMP\n ')
        T = F.substitute(temps=[u'TMP'])
        s = T.body.stats
        self.assertTrue(isinstance(s[0].expr, TempRefNode))
        self.assertTrue(isinstance(s[1].rhs, TempRefNode))
        # Both references must share one temp handle.
        self.assertTrue((s[0].expr.handle is s[1].rhs.handle))
_driver.jit
def reset_log_mask(log_mask, episode_length):
    """Device kernel clearing the per-timestep log mask.

    Zeroes entries 0..episode_length (inclusive) of log_mask.  Only thread 0
    of the block writes, so there are no concurrent writes to the array.
    NOTE(review): numba_driver is presumably numba.cuda and the stripped
    '_driver.jit' line above its @jit decorator — confirm.
    """
    tidx = numba_driver.threadIdx.x
    if (tidx == 0):
        for i in range((episode_length + 1)):
            log_mask[i] = 0
def parse_bboxes_file(ann_filenames, ann_is_gt_box, detect_thresh, boxes_sample_rate=1):
    """Parse AVA-style CSV box annotations into a per-video, per-second table.

    Args:
        ann_filenames: CSV files; rows are
            video, frame_sec, x1, y1, x2, y2, label[, score].
        ann_is_gt_box: per-file flags; non-GT (detection) files carry a score
            in column 7 that must reach detect_thresh.
        detect_thresh: minimum detection confidence for non-GT boxes.
        boxes_sample_rate: keep only frames with frame_sec % rate == 0.

    Returns:
        (all_boxes, count, unique_box_count) where all_boxes maps
        video -> sec -> list of [box, labels], count is the number of
        labeled (label != -1) entries, and unique_box_count the number of
        distinct boxes.
    """
    all_boxes = {}
    count = 0
    unique_box_count = 0
    for (filename, is_gt_box) in zip(ann_filenames, ann_is_gt_box):
        with g_pathmgr.open(filename, 'r') as f:
            for line in f:
                row = line.strip().split(',')
                if (not is_gt_box):
                    # Detection files: drop low-confidence boxes.
                    score = float(row[7])
                    if (score < detect_thresh):
                        continue
                (video_name, frame_sec) = (row[0], int(row[1]))
                if ((frame_sec % boxes_sample_rate) != 0):
                    continue
                # The raw coordinate string keys duplicate boxes across rows.
                box_key = ','.join(row[2:6])
                box = list(map(float, row[2:6]))
                # Empty label column means an unlabeled box (-1).
                label = ((- 1) if (row[6] == '') else int(row[6]))
                if (video_name not in all_boxes):
                    all_boxes[video_name] = {}
                    # Pre-create all valid seconds for this video.
                    # NOTE(review): a frame_sec outside AVA_VALID_FRAMES would
                    # KeyError below — presumably the data guarantees it; confirm.
                    for sec in AVA_VALID_FRAMES:
                        all_boxes[video_name][sec] = {}
                if (box_key not in all_boxes[video_name][frame_sec]):
                    all_boxes[video_name][frame_sec][box_key] = [box, []]
                    unique_box_count += 1
                # A box accumulates one label per CSV row.
                all_boxes[video_name][frame_sec][box_key][1].append(label)
                if (label != (- 1)):
                    count += 1
    # Drop the box_key level: sec -> list of [box, labels].
    for video_name in all_boxes.keys():
        for frame_sec in all_boxes[video_name].keys():
            all_boxes[video_name][frame_sec] = list(all_boxes[video_name][frame_sec].values())
    return (all_boxes, count, unique_box_count)
def validate_auth(ctx: click.core.Context, param: click.core.Parameter, raw_value: (str | None)) -> (tuple[(str, str)] | None):
    """Click callback validating a ``user:password`` credential pair.

    Returns ``(user, password)`` or ``None`` when no value was supplied.
    Raises ``click.BadParameter`` for empty or non-latin-1 components;
    a malformed value (wrong number of ':' separators) is converted into a
    format error by ``reraise_format_error``.
    """
    if raw_value is None:
        return None
    with reraise_format_error(raw_value):
        # Exactly one ':' separator is expected; any other count raises here.
        (user, password) = tuple(raw_value.split(':'))
    if not user:
        raise click.BadParameter('Username should not be empty.')
    # HTTP basic auth requires latin-1 encodable credentials.
    if not is_latin_1_encodable(user):
        raise click.BadParameter('Username should be latin-1 encodable.')
    if not is_latin_1_encodable(password):
        raise click.BadParameter('Password should be latin-1 encodable.')
    return (user, password)
def create_initializer_tensors(parser, weight_file):
    """Build ONNX initializer tensors for every initializer op in ``parser``.

    Args:
        parser: MLIR parser exposing ``get_initializer_op_names_n_shape_type()``,
            a mapping from op name to an object with ``element_type``, ``rank``
            and ``get_dim_size(i)``.
        weight_file: path to a ``.npz`` archive holding the weights, or None.

    Returns:
        A list of ONNX tensors (empty when ``weight_file`` is None).

    Raises:
        ValueError: if an initializer op has no entry in the weight file.
    """
    tensors = []
    # Fixed: compare against None with identity, not equality.
    if weight_file is None:
        return tensors
    initializer_ops = parser.get_initializer_op_names_n_shape_type()
    npzfile = np.load(weight_file)
    for op_name in initializer_ops:
        if op_name not in npzfile.files:
            raise ValueError('No {} in {} weight file'.format(op_name, weight_file))
        # Hoist the repeated mapping lookup.
        op_info = initializer_ops[op_name]
        mlir_type = op_info.element_type
        shape = [op_info.get_dim_size(i) for i in range(op_info.rank)]
        weight_data = npzfile[op_name]
        tensor = helper.make_tensor(op_name, type_map(mlir_type), shape, weight_data)
        tensors.append(tensor)
    return tensors
def HanoiTowerGraph(pegs, disks, labels=True, positions=True):
    """Return the Tower-of-Hanoi state graph for ``pegs`` pegs and ``disks`` disks.

    Vertices are game states; edges connect states reachable by one legal
    move. With ``labels`` each vertex is relabelled by its disk-position
    tuple; with ``positions`` vertices get a recursively-constructed planar
    embedding.

    Raises ValueError for ``pegs < 2`` or ``disks < 1``.
    """
    from sage.rings.integer import Integer
    pegs = Integer(pegs)
    if (pegs < 2):
        raise ValueError(('Pegs for Tower of Hanoi graph should be two or greater (not %d)' % pegs))
    disks = Integer(disks)
    if (disks < 1):
        raise ValueError(('Disks for Tower of Hanoi graph should be one or greater (not %d)' % disks))
    # Base case (1 disk): moves between every pair of pegs.
    edges = [[i, j] for i in range(pegs) for j in range((i + 1), pegs)]
    nverts = 1
    # Build the graph for d disks from the graph for d-1 disks:
    # replicate the smaller graph once per position of the largest disk,
    # then add edges that move the largest disk between empty pegs.
    for d in range(2, (disks + 1)):
        prevedges = edges
        nverts = (pegs * nverts)
        edges = []
        # Copy the previous-level edges, offset by the largest disk's peg.
        for p in range(pegs):
            largedisk = (p * nverts)
            for anedge in prevedges:
                edges.append([(anedge[0] + largedisk), (anedge[1] + largedisk)])
        from sage.combinat.subset import Subsets
        for state in range(nverts):
            # Decode the state in base `pegs` to find pegs with no smaller disk.
            emptypegs = list(range(pegs))
            reduced_state = state
            for i in range((d - 1)):
                apeg = (reduced_state % pegs)
                if (apeg in emptypegs):
                    emptypegs.remove(apeg)
                reduced_state = (reduced_state // pegs)
            # The largest disk may move between any two pegs free of smaller disks.
            for (freea, freeb) in Subsets(emptypegs, 2):
                edges.append([((freea * nverts) + state), ((freeb * nverts) + state)])
    H = Graph({}, loops=False, multiedges=False)
    H.add_edges(edges)
    if (labels or positions):
        mapping = {}
        pos = {}
        a = Integer((- 1))
        one = Integer(1)
        if positions:
            # Radius growth chosen so recursive sub-layouts do not overlap.
            radius_multiplier = (1 + (1 / sin((pi / pegs))))
            sine = []
            cosine = []
            for i in range(pegs):
                angle = (((2 * i) * pi) / float(pegs))
                sine.append(sin(angle))
                cosine.append(cos(angle))
        for i in range((pegs ** disks)):
            a += one
            # Digits of i in base `pegs` give the peg of each disk.
            state = a.digits(base=pegs, padto=disks)
            if labels:
                state.reverse()
                mapping[i] = tuple(state)
                state.reverse()
            if positions:
                # Recursively rotate/reflect the sub-layout for each disk.
                locx = 0.0
                locy = 0.0
                radius = 1.0
                parity = (- 1.0)
                for index in range(disks):
                    p = state[index]
                    radius *= radius_multiplier
                    parity *= (- 1.0)
                    locx_temp = (((cosine[p] * locx) - ((parity * sine[p]) * locy)) + (radius * cosine[p]))
                    locy_temp = ((((parity * sine[p]) * locx) + (cosine[p] * locy)) - ((radius * parity) * sine[p]))
                    locx = locx_temp
                    locy = locy_temp
                pos[i] = (locx, locy)
        if positions:
            H.set_pos(pos)
        if labels:
            H.relabel(mapping)
    return H
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[(Dict, str, None)]=None, extract_compressed_file=False, force_extract=False, local_files_only=False) -> Optional[str]:
    """Resolve a URL or local path to a local filesystem path.

    Remote URLs are downloaded into ``cache_dir`` (default TRANSFORMERS_CACHE);
    local paths are returned as-is. With ``extract_compressed_file`` a zip/tar
    archive is extracted next to itself into ``<name>-extracted`` and that
    directory is returned; ``force_extract`` re-extracts over an existing dir.

    Raises EnvironmentError for a missing local file or unknown archive
    format, ValueError for an unparsable argument.
    """
    if (cache_dir is None):
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        output_path = get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only)
    elif os.path.exists(url_or_filename):
        output_path = url_or_filename
    elif (urlparse(url_or_filename).scheme == ''):
        # Looks like a bare path but does not exist on disk.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    else:
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
    if extract_compressed_file:
        if ((not is_zipfile(output_path)) and (not tarfile.is_tarfile(output_path))):
            return output_path
        (output_dir, output_file) = os.path.split(output_path)
        output_extract_dir_name = (output_file.replace('.', '-') + '-extracted')
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if (os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and (not force_extract)):
            return output_path_extracted
        # Serialize extraction across processes sharing the cache.
        lock_path = (output_path + '.lock')
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                # Fixed: the context manager closes the archive; the original
                # also called close() redundantly inside the with-block.
                with ZipFile(output_path, 'r') as zip_file:
                    zip_file.extractall(output_path_extracted)
            elif tarfile.is_tarfile(output_path):
                # Fixed: use a context manager so the tar handle is closed
                # even when extractall() raises (the original leaked it).
                with tarfile.open(output_path) as tar_file:
                    tar_file.extractall(output_path_extracted)
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path))
        return output_path_extracted
    return output_path
def MainOpFunctionThatThrowsCustomErrorInBuilder(inputs, _):
    """Test-only op builder that unconditionally raises CustomError.

    Used to verify how the framework surfaces exceptions thrown while an
    op function's graph is being built; the arguments are ignored.
    """
    raise CustomError('This is an intentional exception in builder.')
.skipif((platform.system() == 'Windows'), reason='Fails on Windows')
# NOTE(review): the decorator line above appears truncated by extraction —
# presumably ``@pytest.mark.skipif``; confirm against the original file.
def test_cli(testdir, unique_hook, raw_schema, cli, openapi3_base_url, hypothesis_max_examples, snapshot_cli):
    """End-to-end CLI run whose output is compared against a recorded snapshot."""
    assert (run(testdir, cli, unique_hook, raw_schema, openapi3_base_url, hypothesis_max_examples) == snapshot_cli)
def prc_auc(y_true, y_score):
    """Area under the precision-recall curve for binary labels/scores."""
    # precision_recall_curve also returns thresholds, which we do not need.
    precision, recall, _ = precision_recall_curve(y_true, y_score)
    # AUC integrates precision as a function of recall (x=recall, y=precision).
    return calculate_auc(recall, precision)
class BenchmarkDiscreteTimeSeries(BenchmarkDiscreteTimeSeriesBase):
    """Benchmarks causal-discovery algorithms on synthetic discrete time series.

    Each ``benchmark_*`` method sweeps one generation parameter (number of
    variables, sample count, graph density, max lag), runs
    ``base_synthetic_time_series_benchmark`` per setting, and stores the
    per-setting results in ``self.results_full`` along with
    ``self.variant_values`` / ``self.variant_name`` for plotting.
    """

    def __init__(self, algo_dict: Dict=None, kargs_dict: Dict=None, num_exp: int=20, custom_metric_dict: Optional[Dict]=None, **kargs):
        """Initialize the base benchmark.

        Fixed: ``custom_metric_dict`` previously defaulted to ``{}`` — a
        mutable default shared across all instances. It now defaults to
        None and is normalized to a fresh dict per instance.
        """
        if custom_metric_dict is None:
            custom_metric_dict = {}
        BenchmarkDiscreteTimeSeriesBase.__init__(self, algo_dict=algo_dict, num_exp=num_exp, kargs_dict=kargs_dict, custom_metric_dict=custom_metric_dict, **kargs)

    def benchmark_variable_complexity(self, num_vars_list: List[int]=[2, 10, 20, 40], graph_density: float=0.1, T: int=1000, data_max_lag: int=3, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
        """Sweep the number of variables, keeping other generation params fixed."""
        all_results = []
        self.variant_values = num_vars_list
        self.variant_name = 'Number of Variables'
        for num_vars in num_vars_list:
            noise_fn_list = ([noise_fn] * num_vars)
            result_list = base_synthetic_time_series_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, data_max_lag=data_max_lag, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, nstates=5, custom_metric_dict=self.custom_metric_dict)
            all_results.append(result_list)
        self.results_full = all_results

    def benchmark_sample_complexity(self, T_list: List[int]=[100, 500, 1000, 5000], num_vars: int=20, graph_density: float=0.1, data_max_lag: int=3, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
        """Sweep the number of time samples T.

        NOTE(review): unlike the other sweeps this call omits ``nstates=5`` —
        confirm whether that is intentional or an oversight.
        """
        all_results = []
        self.variant_values = T_list
        self.variant_name = 'Number of Samples'
        for T in T_list:
            noise_fn_list = ([noise_fn] * num_vars)
            result_list = base_synthetic_time_series_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, data_max_lag=data_max_lag, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, custom_metric_dict=self.custom_metric_dict)
            all_results.append(result_list)
        self.results_full = all_results

    def benchmark_graph_density(self, graph_density_list: List[float]=[0.05, 0.1, 0.2, 0.5], num_vars: int=20, T: int=1000, data_max_lag: int=3, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
        """Sweep the density of the ground-truth causal graph."""
        all_results = []
        self.variant_values = graph_density_list
        self.variant_name = 'Graph Density'
        for graph_density in graph_density_list:
            noise_fn_list = ([noise_fn] * num_vars)
            result_list = base_synthetic_time_series_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, data_max_lag=data_max_lag, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, nstates=5, custom_metric_dict=self.custom_metric_dict)
            all_results.append(result_list)
        self.results_full = all_results

    def benchmark_data_max_lag(self, data_max_lag_list: List[int]=[1, 5, 10], num_vars: int=20, graph_density: float=0.1, T: int=1000, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
        """Sweep the maximum lag used when generating the data."""
        all_results = []
        self.variant_values = data_max_lag_list
        self.variant_name = 'Data Max Lag'
        for data_max_lag in data_max_lag_list:
            noise_fn_list = ([noise_fn] * num_vars)
            result_list = base_synthetic_time_series_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, data_max_lag=data_max_lag, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, nstates=5, custom_metric_dict=self.custom_metric_dict)
            all_results.append(result_list)
        self.results_full = all_results
class GroupOps(object):
    """Group operations for sym.EquirectangularCameraCal.

    The calibration is stored as a flat 4-vector and the group operation is
    element-wise addition, so: identity is the zero vector, inverse is
    negation, compose is a + b, and between(a, b) is b - a. The jacobians of
    these linear maps are the constant matrices +/-I.
    """

    def identity():
        # Zero storage vector (integer zeros, matching the generated code).
        return sym.EquirectangularCameraCal.from_storage([0] * 4)

    def inverse(a):
        # Element-wise negation.
        return sym.EquirectangularCameraCal.from_storage([-x for x in a.data])

    def compose(a, b):
        # Element-wise sum.
        return sym.EquirectangularCameraCal.from_storage(
            [ai + bi for (ai, bi) in zip(a.data, b.data)])

    def between(a, b):
        # Element-wise difference b - a.
        return sym.EquirectangularCameraCal.from_storage(
            [bi - ai for (ai, bi) in zip(a.data, b.data)])

    def inverse_with_jacobian(a):
        # d(-a)/da = -I.
        res = sym.EquirectangularCameraCal.from_storage([-x for x in a.data])
        res_D_a = numpy.diag(numpy.full(4, -1.0))
        return (res, res_D_a)

    def compose_with_jacobians(a, b):
        # d(a+b)/da = d(a+b)/db = I.
        res = sym.EquirectangularCameraCal.from_storage(
            [ai + bi for (ai, bi) in zip(a.data, b.data)])
        return (res, numpy.eye(4), numpy.eye(4))

    def between_with_jacobians(a, b):
        # d(b-a)/da = -I, d(b-a)/db = I.
        res = sym.EquirectangularCameraCal.from_storage(
            [bi - ai for (ai, bi) in zip(a.data, b.data)])
        return (res, numpy.diag(numpy.full(4, -1.0)), numpy.eye(4))
class RawXXreverseDataset(data.Dataset):
    """Dataset of (audio window, time-reversed audio window) pairs.

    Raw waveforms live in an HDF5 file keyed by utterance id; ``list_file``
    lists the ids to use, one per line. Each access picks a random window of
    ``audio_window`` samples and returns it together with its reversal.
    """

    def __init__(self, raw_file, list_file, audio_window):
        # raw_file: path to the HDF5 file of raw waveforms.
        # list_file: text file with one utterance id per line.
        # audio_window: number of samples per training window.
        self.raw_file = raw_file
        self.audio_window = audio_window
        self.utts = []
        with open(list_file) as f:
            temp = f.readlines()
        temp = [x.strip() for x in temp]
        # NOTE(review): the HDF5 handle stays open for the dataset's lifetime
        # and is never explicitly closed; verify this is acceptable with the
        # DataLoader worker setup in use.
        self.h5f = h5py.File(self.raw_file, 'r')
        for i in temp:
            utt_len = self.h5f[i].shape[0]
            # Keep only utterances longer than 20480 samples.
            # NOTE(review): 20480 is hard-coded — presumably assumes
            # audio_window <= 20480; __getitem__'s randint would fail on a
            # kept utterance shorter than audio_window. Confirm.
            if (utt_len > 20480):
                self.utts.append(i)

    def __len__(self):
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        utt_len = self.h5f[utt_id].shape[0]
        # Random window start; requires utt_len >= audio_window.
        index = np.random.randint(((utt_len - self.audio_window) + 1))
        original = self.h5f[utt_id][index:(index + self.audio_window)]
        # Return the window and its time reversal (copy makes it contiguous).
        return (original, original[::(- 1)].copy())
_node_type()
# NOTE(review): the decorator line above appears truncated by extraction —
# presumably ``@optplan.register_node_type()``; confirm against the original.
class WaveguideModeSource(optplan.EmSource):
    """Schema node describing an EM source injecting a waveguide mode."""
    type = schema_utils.polymorphic_model_type('source.waveguide_mode')
    # Geometry of the source plane in 3D coordinates.
    center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    # Index of the waveguide mode to inject.
    mode_num = types.IntType()
    # Source power.
    power = types.FloatType()
class ConvBnReluResidualTest(BaseKerasFeatureNetworkTest):
    """Quantization test for a Conv -> BN -> ReLU branch added back to the conv output."""

    def __init__(self, unit_test):
        super().__init__(unit_test, experimental_exporter=True)

    def create_networks(self):
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        conv_out = layers.Conv2D(7, 8)(inputs)
        bn_out = layers.BatchNormalization()(conv_out)
        relu_out = layers.Activation('relu')(bn_out)
        # Residual connection: add the ReLU branch back onto the conv output.
        added = layers.Add()([relu_out, conv_out])
        return keras.Model(inputs=inputs, outputs=added)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        holders = get_layers_from_model_by_type(quantized_model, KerasActivationQuantizationHolder)
        add_layer = get_layers_from_model_by_type(quantized_model, layers.Add)[0]
        bn_layer = get_layers_from_model_by_type(quantized_model, layers.BatchNormalization)[0]
        # Both Add inputs must come out of activation-quantization holders.
        add_input_refs = [t.ref() for t in add_layer.input]
        self.unit_test.assertTrue(holders[1].output.ref() in add_input_refs)
        self.unit_test.assertTrue(holders[3].output.ref() in add_input_refs)
        self.unit_test.assertTrue(isinstance(bn_layer, layers.BatchNormalization))
class COIN(JoinFeature):
    """Feature describing availability of the COIN MIP backend package."""

    def __init__(self):
        # The package name doubles as the spkg hint for installation messages.
        super().__init__('sage_numerical_backends_coin', [MIPBackend('coin')], spkg='sage_numerical_backends_coin')
class TraceHistory(_History):
    """History variant that records a trace snapshot after every epoch."""

    def on_epoch_end(self, epoch, logs):
        # Capture the trace first, then defer to the base implementation.
        self._record_trace()
        return super().on_epoch_end(epoch, logs)
def inference(network, test_loader):
    """Evaluate ``network`` on ``test_loader``; print and return top-1 accuracy.

    Moves the model and batches to CUDA when available, accumulates the
    summed NLL loss and the number of correct argmax predictions, prints a
    summary, and returns accuracy as a Python float in [0, 1].
    """
    if torch.cuda.is_available():
        network = network.to('cuda:0')
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for (data, target) in test_loader:
            if torch.cuda.is_available():
                data = data.to('cuda:0')
                target = target.to('cuda:0')
            output = network(data)
            # Fixed: reduction='sum' replaces the deprecated
            # size_average=False; dividing by the dataset size below then
            # yields the true mean loss.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            # Fixed: .item() keeps `correct` a Python int — the original kept
            # a 0-dim tensor, which leaked "tensor(...)" into the printed
            # message and made the accuracy division tensor-valued.
            correct += pred.eq(target.data.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(test_loader.dataset), ((100.0 * correct) / len(test_loader.dataset))))
    accuracy = float((correct / len(test_loader.dataset)))
    return accuracy
def got() -> operations.GraphOfOperations:
    """Build the Graph-of-Thoughts operation graph for the sorting task.

    The graph generates candidate plans, splits the work into two sub-lists
    that are sorted/scored/pruned independently, aggregates the best
    sub-results, and finishes with scoring, pruning and a ground-truth check.
    """
    operations_graph = operations.GraphOfOperations()
    plans = operations.Generate(2, 1)
    operations_graph.append_operation(plans)
    # One branch per sub-list; each branch selects, sorts, scores and keeps
    # the best thought for its part.
    for i in range(1, 3):
        list_id = f'List {i}'
        # Default-argument binding (list_id=list_id) freezes the current id
        # in the lambda — avoids the late-binding closure pitfall.
        sub_list = operations.Selector((lambda thoughts, list_id=list_id: [thought for thought in thoughts if (thought.state['part'] == list_id)]))
        sub_list.add_predecessor(plans)
        operations_graph.add_operation(sub_list)
        sort_sub_list = operations.Generate(1, 5)
        sort_sub_list.add_predecessor(sub_list)
        operations_graph.add_operation(sort_sub_list)
        score_sub_list = operations.Score(1, False, utils.num_errors)
        score_sub_list.add_predecessor(sort_sub_list)
        operations_graph.add_operation(score_sub_list)
        keep_best_sub_list = operations.KeepBestN(1, False)
        keep_best_sub_list.add_predecessor(score_sub_list)
        operations_graph.add_operation(keep_best_sub_list)
    # Merge the per-branch winners, then score/prune/refine the aggregate.
    final_aggregate = operations.Aggregate(10)
    operations_graph.append_operation(final_aggregate)
    operations_graph.append_operation(operations.Score(1, False, utils.num_errors))
    keep_best_aggregate_final = operations.KeepBestN(1, False)
    operations_graph.append_operation(keep_best_aggregate_final)
    operations_graph.append_operation(operations.Generate(1, 10))
    score_aggr_3 = operations.Score(1, False, utils.num_errors)
    # Explicit predecessor: score the kept aggregate, not only the refinement.
    score_aggr_3.add_predecessor(keep_best_aggregate_final)
    operations_graph.append_operation(score_aggr_3)
    operations_graph.append_operation(operations.KeepBestN(1, False))
    operations_graph.append_operation(operations.GroundTruth(utils.test_sorting))
    return operations_graph
class Resolver(BaseResolver):
    """Legacy pip resolver.

    Resolves what to install/upgrade by processing requirements one at a
    time, preparing each (download/build) and following its dependencies.
    Conflicts are not back-tracked: the first requirement found wins.
    """

    # Valid values for ``upgrade_strategy``.
    _allowed_strategies = {'eager', 'only-if-needed', 'to-satisfy-only'}

    def __init__(self, preparer, finder, wheel_cache, make_install_req, use_user_site, ignore_dependencies, ignore_installed, ignore_requires_python, force_reinstall, upgrade_strategy, py_version_info=None):
        super(Resolver, self).__init__()
        assert (upgrade_strategy in self._allowed_strategies)
        if (py_version_info is None):
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)
        self._py_version_info = py_version_info
        self.preparer = preparer
        self.finder = finder
        self.wheel_cache = wheel_cache
        self.upgrade_strategy = upgrade_strategy
        self.force_reinstall = force_reinstall
        self.ignore_dependencies = ignore_dependencies
        self.ignore_installed = ignore_installed
        self.ignore_requires_python = ignore_requires_python
        self.use_user_site = use_user_site
        self._make_install_req = make_install_req
        # Maps a parent requirement name to the requirements discovered
        # through it; used later to order installations.
        self._discovered_dependencies = defaultdict(list)

    def resolve(self, root_reqs, check_supported_wheels):
        """Resolve ``root_reqs`` into a complete RequirementSet.

        Hash-verification errors are collected across all requirements and
        raised together at the end.
        """
        requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels)
        for req in root_reqs:
            if req.constraint:
                check_invalid_constraint_type(req)
            requirement_set.add_requirement(req)
        discovered_reqs = []
        hash_errors = HashErrors()
        # `discovered_reqs` grows while we iterate: _resolve_one appends the
        # dependencies of each requirement as it is processed.
        for req in chain(requirement_set.all_requirements, discovered_reqs):
            try:
                discovered_reqs.extend(self._resolve_one(requirement_set, req))
            except HashError as exc:
                exc.req = req
                hash_errors.append(exc)
        if hash_errors:
            raise hash_errors
        return requirement_set

    def _is_upgrade_allowed(self, req):
        """Return True when this requirement may be upgraded in place."""
        if (self.upgrade_strategy == 'to-satisfy-only'):
            return False
        elif (self.upgrade_strategy == 'eager'):
            return True
        else:
            assert (self.upgrade_strategy == 'only-if-needed')
            # Only user-requested requirements (or constraints) are upgraded.
            return (req.user_supplied or req.constraint)

    def _set_req_to_reinstall(self, req):
        """Mark ``req`` for reinstallation over its installed version."""
        # Don't uninstall a global dist when in --user mode.
        if ((not self.use_user_site) or dist_in_usersite(req.satisfied_by)):
            req.should_reinstall = True
        req.satisfied_by = None

    def _check_skip_installed(self, req_to_install):
        """Decide whether an already-installed requirement can be skipped.

        Returns a human-readable skip reason, or None when the requirement
        must be prepared (downloaded/reinstalled) after all.
        """
        if self.ignore_installed:
            return None
        req_to_install.check_if_exists(self.use_user_site)
        if (not req_to_install.satisfied_by):
            return None
        if self.force_reinstall:
            self._set_req_to_reinstall(req_to_install)
            return None
        if (not self._is_upgrade_allowed(req_to_install)):
            if (self.upgrade_strategy == 'only-if-needed'):
                return 'already satisfied, skipping upgrade'
            return 'already satisfied'
        if (not req_to_install.link):
            # Eager upgrade of an unpinned requirement: ask the finder
            # whether anything newer exists before reinstalling.
            try:
                self.finder.find_requirement(req_to_install, upgrade=True)
            except BestVersionAlreadyInstalled:
                return 'already up-to-date'
            except DistributionNotFound:
                # No distribution found at all; fall through and reinstall.
                pass
        self._set_req_to_reinstall(req_to_install)
        return None

    def _find_requirement_link(self, req):
        """Find the download link for ``req``, warning on yanked releases."""
        upgrade = self._is_upgrade_allowed(req)
        best_candidate = self.finder.find_requirement(req, upgrade)
        if (not best_candidate):
            return None
        link = best_candidate.link
        if link.is_yanked:
            reason = (link.yanked_reason or '<none given>')
            msg = u'The candidate selected for download or install is a yanked version: {candidate}\nReason for being yanked: {reason}'.format(candidate=best_candidate, reason=reason)
            logger.warning(msg)
        return link

    def _populate_link(self, req):
        """Set ``req.link``, preferring a cached wheel when one exists."""
        if (req.link is None):
            req.link = self._find_requirement_link(req)
        # Hash-checking mode must not substitute cached wheels.
        if ((self.wheel_cache is None) or self.preparer.require_hashes):
            return
        cache_entry = self.wheel_cache.get_cache_entry(link=req.link, package_name=req.name, supported_tags=get_supported())
        if (cache_entry is not None):
            logger.debug('Using cached wheel link: %s', cache_entry.link)
            if ((req.link is req.original_link) and cache_entry.persistent):
                req.original_link_is_in_wheel_cache = True
            req.link = cache_entry.link

    def _get_abstract_dist_for(self, req):
        """Prepare ``req`` and return its abstract distribution."""
        if req.editable:
            return self.preparer.prepare_editable_requirement(req)
        assert (req.satisfied_by is None)
        skip_reason = self._check_skip_installed(req)
        if req.satisfied_by:
            return self.preparer.prepare_installed_requirement(req, skip_reason)
        self._populate_link(req)
        abstract_dist = self.preparer.prepare_linked_requirement(req)
        if (not self.ignore_installed):
            # Re-check: the name may only now be known (e.g. from a URL).
            req.check_if_exists(self.use_user_site)
            if req.satisfied_by:
                should_modify = ((self.upgrade_strategy != 'to-satisfy-only') or self.force_reinstall or self.ignore_installed or (req.link.scheme == 'file'))
                if should_modify:
                    self._set_req_to_reinstall(req)
                else:
                    logger.info('Requirement already satisfied (use --upgrade to upgrade): %s', req)
        return abstract_dist

    def _resolve_one(self, requirement_set, req_to_install):
        """Prepare one requirement and return its newly discovered dependencies."""
        if (req_to_install.constraint or req_to_install.prepared):
            return []
        req_to_install.prepared = True
        abstract_dist = self._get_abstract_dist_for(req_to_install)
        dist = abstract_dist.get_pkg_resources_distribution()
        _check_dist_requires_python(dist, version_info=self._py_version_info, ignore_requires_python=self.ignore_requires_python)
        more_reqs = []

        def add_req(subreq, extras_requested):
            # Register a dependency of req_to_install with the set, tracking
            # the parent for installation ordering.
            sub_install_req = self._make_install_req(str(subreq), req_to_install)
            parent_req_name = req_to_install.name
            (to_scan_again, add_to_parent) = requirement_set.add_requirement(sub_install_req, parent_req_name=parent_req_name, extras_requested=extras_requested)
            if (parent_req_name and add_to_parent):
                self._discovered_dependencies[parent_req_name].append(add_to_parent)
            more_reqs.extend(to_scan_again)
        with indent_log():
            if (not requirement_set.has_requirement(req_to_install.name)):
                # The name was unknown until preparation (e.g. URL install).
                assert req_to_install.user_supplied
                requirement_set.add_requirement(req_to_install, parent_req_name=None)
            if (not self.ignore_dependencies):
                if req_to_install.extras:
                    logger.debug('Installing extra requirements: %r', ','.join(req_to_install.extras))
                missing_requested = sorted((set(req_to_install.extras) - set(dist.extras)))
                for missing in missing_requested:
                    logger.warning("%s does not provide the extra '%s'", dist, missing)
                available_requested = sorted((set(dist.extras) & set(req_to_install.extras)))
                for subreq in dist.requires(available_requested):
                    add_req(subreq, extras_requested=available_requested)
            if ((not req_to_install.editable) and (not req_to_install.satisfied_by)):
                req_to_install.successfully_downloaded = True
        return more_reqs

    def get_installation_order(self, req_set):
        """Topologically order requirements so dependencies install first.

        Depth-first over the discovered-dependency graph; cycles are broken
        by the ``ordered_reqs`` visited set, installing the requirement that
        closes the cycle last.
        """
        order = []
        ordered_reqs = set()

        def schedule(req):
            if (req.satisfied_by or (req in ordered_reqs)):
                return
            if req.constraint:
                return
            ordered_reqs.add(req)
            for dep in self._discovered_dependencies[req.name]:
                schedule(dep)
            order.append(req)
        for install_req in req_set.requirements.values():
            schedule(install_req)
        return order
def feature_prop(feats, g, k):
    """Propagate node features over graph ``g`` for ``k`` hops.

    Each hop applies the symmetrically normalized adjacency
    D^{-1/2} A D^{-1/2}: features are scaled by deg^{-1/2}, summed over
    incoming edges, then scaled by deg^{-1/2} again.
    """
    assert feats.shape[0] == g.num_nodes()
    # Clamp degrees to avoid division by zero for isolated nodes.
    degrees = g.in_degrees().float().clamp(min=1)
    inv_sqrt_deg = degrees.pow(-0.5).unsqueeze(1)
    for _ in range(k):
        g.ndata['h'] = feats * inv_sqrt_deg
        g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
        feats = g.ndata.pop('h') * inv_sqrt_deg
    return feats
def resonant_secular_contribution_dictionary(j, k, Nmin, Nmax, G, mIn, mOut, MIn, MOut, Lambda0In, Lambda0Out):
    """Accumulate secular contributions from all harmonics of the j:j-k resonance.

    Sums the secular-term dictionaries produced for every multiple
    n*(j, k) of the base resonance whose arguments fit within the
    disturbing-function expansion orders [Nmin, Nmax].
    """
    extra_args = (G, mIn, mOut, MIn, MOut, Lambda0In, Lambda0Out)
    # Round the expansion orders down to even values.
    Nmin = (2 * (Nmin // 2))
    Nmax = (2 * (Nmax // 2))
    all_dicts = []
    # Highest harmonic n for which order k*n terms still fit below Nmax.
    nmax = ((Nmax + 2) // (2 * k))
    for n in range(1, (nmax + 1)):
        j1 = (n * j)
        k1 = (n * k)
        res_args = []
        # Collect resonance arguments of order k1, k1+2, ..., up to Nmax.
        Mmax = (1 + ((Nmax - (2 * k1)) // 2))
        for M in range(0, (Mmax + 1)):
            res_args += _resonance_arguments_of_fixed_order(j1, k1, (k1 + (2 * M)))
        dres = resonant_terms_list_to_secular_contribution_dictionary(res_args, j1, k1, Nmin, Nmax, *extra_args)
        all_dicts.append(dres)
    # Merge the per-harmonic dictionaries into one.
    return _add_dicts(*all_dicts)
class FBLASRoutine():
    """Descriptor of a single FBLAS routine to generate.

    Holds the BLAS routine name, the user-chosen kernel name, numeric type,
    tiling/widths, transposition/order flags and the channel names used to
    wire the generated OpenCL kernel.

    NOTE(review): several decorator lines in this class appear mangled by
    extraction (e.g. ``_shift_registers.setter`` — presumably
    ``@uses_shift_registers.setter`` preceded by stripped ``@property``
    lines). As written, paired getter/setter ``def``s with the same name
    would shadow each other; confirm against the original file.
    """
    # -- defaults for all configurable fields ---------------------------------
    _blas_name = ''
    _user_name = ''
    _width = generator_definitions.DEFAULT_WIDTH
    _type: fblas_types.RoutineType
    _type_str: str
    _uses_shift_registers = False
    _size_shift_registers = 0
    _codegen = None
    _incx = 1
    _incy = 1
    _tile_n_size = generator_definitions.DEFAULT_TILE_SIZE
    _tile_m_size = generator_definitions.DEFAULT_TILE_SIZE
    _order = None
    _diag = None
    _transposeA = None
    _transposeB = None
    _side = None
    _uplo = None
    _input_channels = None
    _output_channels = None
    # Ordering of tiles and of elements within a tile for matrix A.
    _tiles_A_order: fblas_types.FblasOrder = fblas_types.FblasOrder.FblasRowMajor
    _elements_A_order: fblas_types.FblasOrder = fblas_types.FblasOrder.FblasRowMajor
    _has_2D_computational_tile = False
    _width_x = 0
    _width_y = 0
    _tile_size = 0
    _systolic = False
    _vect_size = 0

    def __init__(self, blas_name: str, user_name: str, type: fblas_types.RoutineType, platform: fblas_types.Platform, codegen: fblas_types.FblasCodegen):
        self._blas_name = blas_name
        self._user_name = user_name
        self._type = type
        self._type_str = fblas_types.ROUTINE_TYPE_TO_TYPE_STR[type]
        self._platform = platform
        self._codegen = codegen
        self._width = generator_definitions.DEFAULT_WIDTH
        self._input_channels = {}
        self._output_channels = {}
        self._incx = 1
        self._incy = 1
        self._tile_n_size = generator_definitions.DEFAULT_TILE_SIZE
        self._tile_m_size = generator_definitions.DEFAULT_TILE_SIZE
        self._order = fblas_types.FblasOrder.FblasOrderUndef
        self._diag = fblas_types.FblasDiag.FblasDiagUndef
        self._transposeA = fblas_types.FblasTranspose.FblasTransUndef
        self._transposeB = fblas_types.FblasTranspose.FblasTransUndef
        self._side = fblas_types.FblasSide.FblasSideUndef
        self._uplo = fblas_types.FblasUpLo.FblasUpLoUndef
        # Double-precision kernels need shift registers for reductions;
        # the size depends on (type, platform).
        if (type == fblas_types.RoutineType.Double):
            self._uses_shift_registers = True
            self._size_shift_registers = fblas_types.SHIFT_REGISTER_SIZES[(type, platform)]
        else:
            self._uses_shift_registers = False
        self._has_2D_computational_tile = False
        self._width_x = self._width = generator_definitions.DEFAULT_2D_CTILE_WIDTH
        self._width_y = self._width = generator_definitions.DEFAULT_2D_CTILE_WIDTH
        self._tile_size = generator_definitions.DEFAULT_TILE_SIZE
        self._systolic = False
        self._vect_size = 4

    def __str__(self):
        return 'Routine {} implements {} with type {}\n Width: {} Incx: {} Incy: {}'.format(self._user_name, self._blas_name, self._type, self._width, self._incx, self._incy)

    # -- accessors (property decorators apparently stripped; see class note) --
    def blas_name(self):
        return self._blas_name

    def user_name(self):
        return self._user_name

    def type(self):
        return self._type

    def type_str(self):
        return self._type_str

    def uses_shift_registers(self):
        return self._uses_shift_registers
    _shift_registers.setter
    def uses_shift_registers(self, value: bool):
        self._uses_shift_registers = value
        if value:
            self._size_shift_registers = fblas_types.SHIFT_REGISTER_SIZES[(self.type, self._platform)]

    def size_shift_registers(self):
        return self._size_shift_registers

    def width(self):
        return self._width

    def width(self, width: int):
        self._width = width

    def incx(self):
        return self._incx

    def incx(self, incx: int):
        self._incx = incx

    def incy(self):
        return self._incy

    def incy(self, incy: int):
        self._incy = incy

    def tile_n_size(self):
        return self._tile_n_size
    _n_size.setter
    def tile_n_size(self, tile_size: int):
        self._tile_n_size = tile_size

    def tile_m_size(self):
        return self._tile_m_size
    _m_size.setter
    def tile_m_size(self, tile_size: int):
        self._tile_m_size = tile_size

    def tile_size(self):
        return self._tile_size
    _size.setter
    def tile_size(self, tile_size: int):
        self._tile_size = tile_size

    def order(self):
        return self._order

    def order(self, order: fblas_types.FblasOrder):
        self._order = order

    def uplo(self):
        return self._uplo

    def uplo(self, uplo: fblas_types.FblasUpLo):
        self._uplo = uplo

    def transposedA(self):
        return self._transposeA

    def transposedA(self, trans: fblas_types.FblasTranspose):
        self._transposeA = trans

    def transposedB(self):
        return self._transposeB

    def transposedB(self, trans: fblas_types.FblasTranspose):
        self._transposeB = trans

    def input_channels(self):
        return self._input_channels

    def output_channels(self):
        return self._output_channels

    def tiles_A_order(self):
        return self._tiles_A_order
    _A_order.setter
    def tiles_A_order(self, order: fblas_types.FblasOrder):
        self._tiles_A_order = order

    def elements_A_order(self):
        return self._elements_A_order
    _A_order.setter
    def elements_A_order(self, order: fblas_types.FblasOrder):
        self._elements_A_order = order

    def has_2D_computational_tile(self):
        return self._has_2D_computational_tile
    _2D_computational_tile.setter
    def has_2D_computational_tile(self, value: bool):
        self._has_2D_computational_tile = value

    def width_x(self):
        return self._width_x
    _x.setter
    def width_x(self, width: int):
        self._width_x = width

    def width_y(self):
        return self._width_y
    _y.setter
    def width_y(self, width: int):
        self._width_y = width

    def systolic(self):
        return self._systolic

    def systolic(self, value: bool):
        self._systolic = value

    def vect_size(self):
        return self._vect_size
    _size.setter
    def vect_size(self, value: int):
        self._vect_size = value

    # -- derived queries ------------------------------------------------------
    def are_tiles_A_rowstreamed(self):
        return (self._tiles_A_order == fblas_types.FblasOrder.FblasRowMajor)

    def are_elements_A_rowstreamed(self):
        return (self._elements_A_order == fblas_types.FblasOrder.FblasRowMajor)

    def add_input_channel(self, routine_channel_name, user_name):
        # Map a generated kernel channel name to the user-visible name.
        self._input_channels[routine_channel_name] = user_name

    def add_output_channel(self, routine_channel_name, user_name):
        self._output_channels[routine_channel_name] = user_name
def build_graph(deps):
    """Convert a dependency-record mapping into a networkx graph.

    Every record contributes a ``(src, lemma)`` node; records with real
    heads (``dst``/``dep`` not ``no_parent``) also contribute one labelled
    edge per head listed in the separator-joined ``dst``/``dep`` fields.
    """
    nodes = []
    edges = []
    for record in deps.values():
        # Each token is always a node, head or not.
        nodes.append((record.src, record.lemma))
        if (record.dst == no_parent) or (record.dep == no_parent):
            continue
        head_ids = [int(head) for head in record.dst.split(sep_deps_list)]
        head_labels = record.dep.split(sep_deps_list)
        for head_id, head_label in zip(head_ids, head_labels):
            edges.append((record.src, head_id, head_label))
    return build_nx_graph(nodes, edges)
class GIN(ScalableGNN):
    """GAS-style scalable GIN with historical embeddings.

    Input features are projected to ``hidden_channels``, passed through
    ``num_layers`` GIN convolutions (each followed by an MLP and a residual
    connection), with intermediate activations pushed to/pulled from
    per-layer histories; a final linear layer maps to ``out_channels``.
    """

    def __init__(self, num_nodes: int, in_channels: int, hidden_channels: int, out_channels: int, num_layers: int):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size=2, buffer_size=60000)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # lins[0]: input projection, lins[1]: output head.
        self.lins = torch.nn.ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))
        # GINConv with Identity: the per-layer MLP is applied separately
        # in forward() so it can also be run on the perturbed input.
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(GINConv(Identity(), train_eps=True))
        self.mlps = torch.nn.ModuleList()
        for _ in range(num_layers):
            mlp = Sequential(Linear(hidden_channels, hidden_channels), BatchNorm1d(hidden_channels, track_running_stats=False), ReLU(), Linear(hidden_channels, hidden_channels), ReLU())
            self.mlps.append(mlp)

    def forward(self, x: Tensor, adj_t: SparseTensor, *args):
        """Full forward pass; returns (logits, regularization term).

        The regularizer measures the MLPs' sensitivity to small input noise
        during training (difference between clean and perturbed outputs).
        """
        x = self.lins[0](x).relu_()
        reg = 0
        it = zip(self.convs[:(- 1)], self.mlps[:(- 1)], self.histories)
        for (i, (conv, mlp, history)) in enumerate(it):
            h = conv((x, x[:adj_t.size(0)]), adj_t)
            if ((i > 0) and self.training):
                # Output under a small Gaussian perturbation of h.
                approx = mlp((h + (0.1 * torch.randn_like(h))))
            h = mlp(h)
            if ((i > 0) and self.training):
                diff = (h - approx).norm(dim=(- 1))
                reg += (diff.mean() / len(self.histories))
            # Residual connection over the target nodes.
            h += x[:h.size(0)]
            # Synchronize with the historical embeddings for this layer.
            x = self.push_and_pull(history, h, *args)
        h = self.convs[(- 1)]((x, x[:adj_t.size(0)]), adj_t)
        h = self.mlps[(- 1)](h)
        h += x[:h.size(0)]
        x = self.lins[1](h)
        return (x, reg)
    _grad()
    # NOTE(review): the line above appears to be a truncated decorator —
    # presumably ``@torch.no_grad()``; confirm against the original file.
    def forward_layer(self, layer: int, x: Tensor, adj_t: SparseTensor, state):
        """Inference-time forward for a single layer (used by GAS evaluation)."""
        if (layer == 0):
            x = self.lins[0](x).relu_()
        h = self.convs[layer]((x, x[:adj_t.size(0)]), adj_t)
        h = self.mlps[layer](h)
        h += x[:h.size(0)]
        if (layer == (self.num_layers - 1)):
            h = self.lins[1](h)
        return h
def sample(dataset: datasets.Dataset, seed: int, n_examples_per_label: int) -> Dict[(str, List[Union[(str, int)]])]:
    """Deterministically select up to `n_examples_per_label` rows per label.

    Each row is fingerprinted with a seeded hash of its text; fingerprints
    are sorted per label so the selection is pseudo-random but reproducible
    for a fixed seed.  Rows with identical fingerprints are all included.

    :param dataset: dataset with 'text' and 'label' columns
    :param seed: seed fed to `_hash` to vary the selection
    :param n_examples_per_label: maximum fingerprints kept per label
    :return: the column dict produced by `dataset[indexes]`
    """
    examples_by_label = collections.defaultdict(list)
    hash_to_index = collections.defaultdict(list)
    for (idx, row) in enumerate(dataset):
        fingerprint = _hash(row['text'], seed)
        examples_by_label[row['label']].append(fingerprint)
        hash_to_index[fingerprint].append(idx)
    indexes = []
    for examples in examples_by_label.values():
        # Sorting the hashes gives a reproducible, seed-dependent order.
        examples.sort()
        for fingerprint in examples[:n_examples_per_label]:
            indexes.extend(hash_to_index[fingerprint])
    # The previous version defined an unused local `filter_fn`; removed as
    # dead code (selection is done by direct indexing below).
    return dataset[indexes]
class PacifyFlushWrapper(object):
    """Transparent proxy around a stream that silences broken-pipe errors
    raised by ``flush()`` while delegating every other attribute access
    to the wrapped object unchanged."""

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def flush(self):
        import errno
        try:
            self.wrapped.flush()
        except IOError as exc:
            # EPIPE means the reader went away; anything else is a real error.
            if exc.errno != errno.EPIPE:
                raise

    def __getattr__(self, attr):
        return getattr(self.wrapped, attr)
def test_iterations_max_constrained():
    """A constrained quadratic capped at max_iter=1 must stop with status 2."""

    def fg(x):
        # f(x) = x.x + c.x with c = (0, 1, ..., n-1); gradient 2x + c.
        coeffs = np.arange(len(x))
        value = x.dot(x) + coeffs.dot(x)
        grad = 2 * x + coeffs
        return value, grad

    def constraint_f(x):
        # Equality constraint: sum(x) == 1.
        return np.sum(x) - 1

    def constraint_jac_prod(x, y):
        # Jacobian of sum(x) is all-ones; product with y is elementwise.
        return y * np.ones_like(x)

    constraints = {'type': 'eq', 'fun': constraint_f, 'jacprod': constraint_jac_prod}
    options = {'eps_pg': 0.0001, 'constraint_tol': 0.0001, 'max_iter': 1, 'm': 10, 'ls': 0, 'verbose': 0}
    x0 = np.zeros(4)
    res = minimize(fg, x0, constraints=constraints, options=options, np=np)
    assert res.status == 2
def test_replace_ref_nodes_with_names_nested():
    """Nested reference fields must be rewritten to the referenced node names."""

    class OuterModel(optplan.ProblemGraphNode.Schema):
        type = types.StringType(default='Model')
        value = optplan.ReferenceType(optplan.ProblemGraphNode.Schema)

    class InnerModel(optplan.ProblemGraphNode.Schema):
        type = types.StringType(default='Model2')
        value = optplan.ReferenceType(optplan.ProblemGraphNode.Schema)

    # Build a three-deep chain: root -> middle -> leaf.
    leaf = ModelB(name='m1')
    middle = InnerModel(name='m2', value=leaf)
    root = OuterModel(name='m3', value=middle)
    schema._replace_ref_nodes_with_names(root, [root, middle, leaf])
    # Each reference should now hold the name of the node it pointed to.
    assert root.value == middle.name
    assert middle.value == leaf.name
def train(params):
    """Build a Transformer and its batcher, restore the latest checkpoint if
    any, then run `train_model`.

    :param params: dict of hyperparameters and paths; must have
        params['training'] truthy.
    """
    assert params['training'], 'change training mode to true'
    tf.compat.v1.logging.info('Building the model ...')
    transformer = Transformer(num_layers=params['num_layers'], d_model=params['model_depth'], num_heads=params['num_heads'], dff=params['dff'], vocab_size=params['vocab_size'], batch_size=params['batch_size'])
    tf.compat.v1.logging.info('Creating the batcher ...')
    b = batcher(params['data_dir'], params['vocab_path'], params)
    tf.compat.v1.logging.info('Creating the checkpoint manager')
    # NOTE(review): logdir is computed but never used in this function —
    # confirm whether it belongs to train_model or can be removed.
    logdir = '{}/logdir'.format(params['model_dir'])
    checkpoint_dir = '{}/checkpoint'.format(params['model_dir'])
    ckpt = tf.train.Checkpoint(step=tf.Variable(0), transformer=transformer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, max_to_keep=11)
    # restore() is a no-op when latest_checkpoint is None (fresh start).
    ckpt.restore(ckpt_manager.latest_checkpoint)
    if ckpt_manager.latest_checkpoint:
        print('Restored from {}'.format(ckpt_manager.latest_checkpoint))
    else:
        print('Initializing from scratch.')
    tf.compat.v1.logging.info('Starting the training ...')
    train_model(transformer, b, params, ckpt, ckpt_manager)
def make_proba_distribution(action_space: gym.spaces.Space, use_sde: bool=False, dist_kwargs: Optional[Dict[(str, Any)]]=None) -> Distribution:
    """Return the action distribution matching the given action space.

    :param action_space: space the actions are sampled from
    :param use_sde: for Box spaces, use StateDependentNoiseDistribution
        instead of DiagGaussianDistribution
    :param dist_kwargs: extra keyword arguments forwarded to the
        distribution constructor
    :raises NotImplementedError: for unsupported space types
    """
    if (dist_kwargs is None):
        dist_kwargs = {}
    if isinstance(action_space, spaces.Box):
        assert (len(action_space.shape) == 1), 'Error: the action space must be a vector'
        cls = (StateDependentNoiseDistribution if use_sde else DiagGaussianDistribution)
        # Number of action dimensions = product of the (1-D) shape.
        return cls(reduce(operator.mul, action_space.shape), **dist_kwargs)
    elif isinstance(action_space, spaces.Discrete):
        return CategoricalDistribution(action_space.n, **dist_kwargs)
    elif isinstance(action_space, spaces.MultiDiscrete):
        return MultiCategoricalDistribution(action_space.nvec, **dist_kwargs)
    elif isinstance(action_space, spaces.MultiBinary):
        return BernoulliDistribution(action_space.n, **dist_kwargs)
    else:
        # Fixed missing space in the message ("spaceof" -> "space of").
        raise NotImplementedError(f'Error: probability distribution, not implemented for action space of type {type(action_space)}. Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary.')
def format_csv(df, timestamp_column=None, value_columns=None):
    """Normalize a DataFrame into one with an int64 'timestamp' column plus
    float value columns.

    :param df: source DataFrame
    :param timestamp_column: positional index of the timestamp column;
        defaults to the first column
    :param value_columns: positional indices of the value columns;
        defaults to every column after the first
    :return: a new DataFrame with columns ['timestamp', *value names]
    """
    # Compare against None explicitly: index 0 and an empty selection are
    # legal arguments but falsy, so a plain truthiness test mishandled them.
    timestamp_column_name = (df.columns[timestamp_column] if timestamp_column is not None else df.columns[0])
    value_column_names = (df.columns[value_columns] if value_columns is not None else df.columns[1:])
    data = dict()
    data['timestamp'] = df[timestamp_column_name].astype('int64').values
    for column in value_column_names:
        data[column] = df[column].astype(float).values
    return pd.DataFrame(data)
def get_default_environments() -> List[str]:
    """Return the tox environment names reported by ``tox -l``."""
    proc = subprocess.run(['tox', '-l'], stdout=subprocess.PIPE)
    # stdout is bytes; decode each line to text.
    return [line.decode('utf-8') for line in proc.stdout.splitlines()]
def overlaps(x, y):
    """Return True if the half-open ranges *x* and *y* share a point.

    Empty ranges (start == stop) never overlap anything.  The previous
    version OR-ed two conditions that were logically identical
    (``x.stop > y.start and y.stop > x.start`` is the first clause with the
    operands commuted), so the redundant clause is removed.
    """
    if (x.start == x.stop) or (y.start == y.stop):
        return False
    return (x.start < y.stop) and (x.stop > y.start)
class ExpressionNice(Expression):
    """Symbolic expression whose text/LaTeX output rewrites derivative terms
    into d()/dx (resp. \\frac{\\partial}{\\partial x}) notation and can omit
    function arguments, via string replacement on the parent ring's output.
    """
    def __init__(self, ex):
        # Wrap `ex` as an element of the Symbolic Ring.
        from sage.symbolic.ring import SR
        self._parent = SR
        Expression.__init__(self, SR, x=ex)
    def _repr_(self):
        """Plain-text representation with derivatives shown as d(f)/dx."""
        d = self._parent._repr_element_(self)
        list_d = []
        # Each m is a tuple describing one derivative occurrence:
        # m[0] raw expression, m[1] function name, m[3] diff'ed argument
        # indices, m[4] variables, m[5] exponent (0/False if none).
        _list_derivatives(self, list_d)
        for m in list_d:
            funcname = m[1]
            diffargs = m[3]
            numargs = len(diffargs)
            # Total derivative order becomes a '^k' superscript on the 'd'.
            if (numargs > 1):
                numargs = ('^' + str(numargs))
            else:
                numargs = ''
            variables = m[4]
            strv = [str(v) for v in variables]
            comp_chars = ['+', '-', '*', '/', '^', '(']
            # Parenthesize composite variable expressions.
            for (i, sv) in enumerate(strv):
                if any(((c in sv) for c in comp_chars)):
                    strv[i] = (('(' + sv) + ')')
            # Per-variable derivative multiplicity, e.g. x^2 for d^2/dx^2.
            occ = dict(((i, (((strv[i] + '^') + str(diffargs.count(i))) if (diffargs.count(i) > 1) else strv[i])) for i in diffargs))
            res = ((((('d' + str(numargs)) + '(') + str(funcname)) + ')/d') + 'd'.join(occ.values()))
            s = self._parent._repr_element_(m[0])
            # If the derivative occurs raised to a power, wrap both sides.
            if m[5]:
                res = ((('(' + res) + ')^') + str(m[5]))
                o = ((s + '^') + str(m[5]))
            else:
                o = s
            d = d.replace(o, res)
        import re
        from sage.manifolds.manifold import TopologicalManifold
        # Optionally strip '(args)' after function names, e.g. f(x, y) -> f.
        if TopologicalManifold.options.omit_function_arguments:
            list_f = []
            _list_functions(self, list_f)
            for m in list_f:
                d = re.sub((m[1] + '\\([^)]+\\)'), m[1], d)
        return d
    def _latex_(self):
        """LaTeX representation with \\frac{\\partial ...}{\\partial ...} derivatives."""
        from sage.misc.latex import latex
        d = self._parent._latex_element_(self)
        list_d = []
        _list_derivatives(self, list_d)
        for m in list_d:
            # Prefer the LaTeX name (m[2]) when it differs from the plain one.
            if (str(m[1]) == str(m[2])):
                funcname = str(m[1])
            else:
                funcname = str(m[2])
            diffargs = m[3]
            numargs = len(diffargs)
            if (numargs > 1):
                numargs = ('^' + str(numargs))
            else:
                numargs = ''
            variables = m[4]
            strv = [str(v) for v in variables]
            latv = [latex(v) for v in variables]
            comp_chars = ['+', '-', '*', '/', '^', '(']
            # Composite variables get \left( ... \right) in the LaTeX form;
            # the plain strings are only used to detect compositeness.
            for (i, sv) in enumerate(strv):
                if any(((c in sv) for c in comp_chars)):
                    latv[i] = (('\\left(' + latv[i]) + '\\right)')
            occ = {i: (((latv[i] + '^') + latex(diffargs.count(i))) if (diffargs.count(i) > 1) else latv[i]) for i in diffargs}
            res = (((((('\\frac{\\partial' + numargs) + '\\,') + funcname) + '}{\\partial ') + '\\partial '.join((i for i in occ.values()))) + '}')
            s = self._parent._latex_element_(m[0])
            if m[5]:
                res = (((('\\left(' + res) + '\\right)^{') + str(m[5])) + '}')
                o = (((s + '^{') + str(m[5])) + '}')
            else:
                o = s
            d = d.replace(o, res)
        from sage.manifolds.manifold import TopologicalManifold
        if TopologicalManifold.options.omit_function_arguments:
            list_f = []
            _list_functions(self, list_f)
            for m in list_f:
                # m[3] is the function's LaTeX name, m[4] its argument string.
                d = d.replace((str(m[3]) + str(m[4])), str(m[3]))
        return d
def bbox_coco_to_center(bbox):
    """Convert a COCO box [x_min, y_min, w, h] to center format in place.

    The first two entries become the box center (x + w/2, y + h/2); width
    and height are unchanged.  The input sequence is mutated and also
    returned for convenience.  (Removed two no-op self-assignments of
    bbox[2] and bbox[3].)
    """
    bbox[0] = bbox[0] + (bbox[2] / 2)
    bbox[1] = bbox[1] + (bbox[3] / 2)
    return bbox
def test_setup(tmp_path):
    """Smoke test: setup() must accept stdout plus a file target at DEBUG level."""
    log_path = str(tmp_path / 'testlog.log')
    setup(use_stdout=True, filename=log_path, log_level=logging.DEBUG)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
    """First InceptionE block variant used for FID computation.

    Structurally identical to torchvision's InceptionE, but the pooling
    branch uses average pooling with count_include_pad=False — presumably
    to match the reference FID Inception weights; confirm against the
    upstream pytorch-fid implementation.
    """
    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)
    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        # 3x3 branch: 1x1 reduce, then two parallel asymmetric convs concatenated.
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = torch.cat(branch3x3, 1)
        # Double-3x3 branch with the same parallel split at the end.
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)
        # Patched pooling branch: exclude padded zeros from the average.
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
def do_striptags(value):
    """Strip SGML/XML tags from *value* (honoring __html__) via Markup.striptags()."""
    html = value.__html__() if hasattr(value, '__html__') else value
    return Markup(text_type(html)).striptags()
def _valid_data_column_names(features_list, target_columns):
valid_data_column_names = []
for feature in features_list:
if ((feature['name'] not in target_columns) and (feature['is_ignore'] != 'true') and (feature['is_row_identifier'] != 'true')):
valid_data_column_names.append(feature['name'])
return valid_data_column_names |
class W2Vec(Txt2Vec):
    """Text encoder backed by a word2vec BigFile.

    `_encoding` returns the mean of the known words' vectors (zeros when no
    word is found); `raw_encoding` returns the stacked per-word vectors.
    """

    def __init__(self, data_path, norm=0, clean=True):
        super(W2Vec, self).__init__(data_path, norm, clean)
        self.w2v = BigFile(data_path)
        vocab_size, self.ndims = self.w2v.shape()
        logger.info('vob size: %d, vec dim: %d' % (vocab_size, self.ndims))

    def _encoding(self, words):
        _, vectors = self.w2v.read(words)
        if len(vectors) == 0:
            # None of the words are in the vocabulary.
            return np.zeros(self.ndims)
        return np.array(vectors).mean(axis=0)

    def raw_encoding(self, query):
        words = self._preprocess(query)
        _, vectors = self.w2v.read(words)
        if len(vectors) == 0:
            return np.zeros((len(words), self.ndims))
        return np.array(vectors)
class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
    """Fast DeBERTa-v2 tokenizer built on a pre-trained tokenizer file.

    Sequences are framed as [CLS] A [SEP] (pairs: [CLS] A [SEP] B [SEP]);
    saving back a slow vocabulary requires the original `vocab_file`.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = DebertaV2Tokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, split_by_punct=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs) -> None:
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs)
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        # Without the sentencepiece vocab file we cannot reconstruct a slow tokenizer.
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] framing: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into *save_directory*.

        # NOTE(review): returns None (after logging) when save_directory is
        # not a directory, instead of raising — confirm callers expect that.
        """
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Only copy when source and destination differ.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class MultiGenerativeModel():
    """Mixture of generative models.

    Each call draws one model index per sample from a prior ('equal' for
    uniform, otherwise explicit probabilities), runs every selected model
    on its share of the batch, and optionally threads a shared context
    dict through all models and into the output.
    """

    def __init__(self, generative_models: list, model_probs='equal', shared_context_gen=None):
        self.generative_models = generative_models
        self.num_models = len(generative_models)
        self.model_prior = self._determine_model_prior(model_probs)
        self.shared_context = shared_context_gen

    def _determine_model_prior(self, model_probs):
        """Return a callable mapping a batch size to sampled model indices."""
        if model_probs == 'equal':
            return lambda b: np.random.default_rng().integers(low=0, high=self.num_models, size=b)
        return lambda b: np.random.default_rng().choice(self.num_models, size=b, p=model_probs)

    def __call__(self, batch_size, **kwargs):
        out_dict = {DEFAULT_KEYS['model_outputs']: [], DEFAULT_KEYS['model_indices']: []}
        draws = self.model_prior(batch_size)
        model_indices, counts = np.unique(draws, return_counts=True)
        # Shared context (if any) is generated once and passed to every model.
        context_dict = self.shared_context() if self.shared_context is not None else {}
        for model_idx, sub_batch_size in zip(model_indices, counts):
            sub_out = self.generative_models[model_idx](sub_batch_size, sim_args=context_dict, **kwargs)
            out_dict[DEFAULT_KEYS['model_outputs']].append(sub_out)
            out_dict[DEFAULT_KEYS['model_indices']].append(model_idx)
        # Expose the shared context entries alongside the model outputs.
        out_dict.update(context_dict)
        return out_dict
def get_default_group() -> Optional[ProcessGroup]:
    """Return torch.distributed's default process group.

    Delegates to the private c10d API; behavior when the process group is
    uninitialized is whatever `_get_default_group()` does.
    """
    return torch_dist.distributed_c10d._get_default_group()
def sample_categorical(n_cat, batchsize, distribution='uniform', xp=np):
    """Draw `batchsize` int32 category ids in [0, n_cat).

    Only the 'uniform' distribution is implemented; `xp` selects the array
    backend (numpy by default).
    """
    if distribution != 'uniform':
        raise NotImplementedError
    return xp.random.randint(low=0, high=n_cat, size=batchsize).astype(xp.int32)
def parse_wheel(wheel_zip, name):
    """Extract the dist-info directory name and metadata from a wheel archive.

    Any UnsupportedWheel from the helpers is re-raised with the
    distribution name prepended; `check_compatibility` then validates the
    parsed wheel version before (info_dir, metadata) is returned.
    """
    try:
        info_dir = wheel_dist_info_dir(wheel_zip, name)
        metadata = wheel_metadata(wheel_zip, info_dir)
        version = wheel_version(metadata)
    except UnsupportedWheel as e:
        raise UnsupportedWheel('{} has an invalid wheel, {}'.format(name, str(e)))
    check_compatibility(version, name)
    return (info_dir, metadata)
class AuxiliaryHeadImageNet(nn.Module):
    """Auxiliary classification head: ReLU, strided average pool, a 1x1 and
    a 2x2 convolution, then a linear classifier over the flattened features."""

    def __init__(self, C, num_classes):
        super(AuxiliaryHeadImageNet, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        # Flatten everything but the batch dimension before classifying.
        return self.classifier(feats.view(feats.size(0), -1))
class AvoidOOM():
    """Wrap a function so CUDA OOM triggers progressively cheaper retries:
    plain call -> empty cache and retry -> FP16 inputs -> CPU inputs.

    `test=True` skips the first attempt (exercising the fallbacks);
    `to_cpu` controls whether the final CPU fallback is used.
    """
    def __init__(self, to_cpu=True, test=False):
        self.to_cpu = to_cpu
        self.test = test
    def retry_if_cuda_oom(self, func):
        """Return a wrapper around *func* implementing the retry ladder."""
        # NOTE(review): the bare '(func)' below looks like a stripped
        # decorator (e.g. '@functools.wraps(func)') — confirm upstream.
        (func)
        def wrapped(*args, **kwargs):
            # 1) Ordinary attempt, skipped in test mode.
            if (not self.test):
                with _ignore_torch_cuda_oom():
                    return func(*args, **kwargs)
            # 2) Free cached blocks and try once more.
            torch.cuda.empty_cache()
            with _ignore_torch_cuda_oom():
                return func(*args, **kwargs)
            # 3) FP16 fallback: need a reference dtype/device from the inputs.
            (dtype, device) = (None, None)
            values = (args + tuple(kwargs.values()))
            for value in values:
                if isinstance(value, torch.Tensor):
                    dtype = value.dtype
                    device = value.device
                    break
            if ((dtype is None) or (device is None)):
                raise ValueError('There is no tensor in the inputs, cannot get dtype and device.')
            fp16_args = cast_tensor_type(args, dst_type=torch.half)
            fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
            logger = get_root_logger()
            logger.warning(f'Attempting to copy inputs of {str(func)} to FP16 due to CUDA OOM')
            with _ignore_torch_cuda_oom():
                output = func(*fp16_args, **fp16_kwargs)
                # Cast results back to the original dtype.
                output = cast_tensor_type(output, src_type=torch.half, dst_type=dtype)
                if (not self.test):
                    return output
            logger.warning('Using FP16 still meet CUDA OOM')
            # 4) Last resort: move inputs to CPU.
            if self.to_cpu:
                logger.warning(f'Attempting to copy inputs of {str(func)} to CPU due to CUDA OOM')
                cpu_device = torch.empty(0).device
                cpu_args = cast_tensor_type(args, dst_type=cpu_device)
                cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)
                with _ignore_torch_cuda_oom():
                    logger.warning(f'Convert outputs to GPU (device={device})')
                    output = func(*cpu_args, **cpu_kwargs)
                    output = cast_tensor_type(output, src_type=cpu_device, dst_type=device)
                    return output
                # Even moving outputs back to GPU OOMed: leave them on CPU.
                warnings.warn('Cannot convert output to GPU due to CUDA OOM, the output is now on CPU, which might cause errors if the output need to interact with GPU data in subsequent operations')
                logger.warning('Cannot convert output to GPU due to CUDA OOM, the output is on CPU now.')
                return func(*cpu_args, **cpu_kwargs)
            else:
                # CPU fallback disabled: final attempt with original inputs.
                return func(*args, **kwargs)
        return wrapped
class A006318(SloaneSequence):
    """OEIS A006318: the large Schroeder numbers."""

    def __init__(self):
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'Large Schroeder numbers.'

    def _eval(self, n):
        """Return the n-th large Schroeder number as an integer."""
        # S(0) = 1; otherwise use the binomial-sum formula divided by n.
        if n == 0:
            return ZZ.one()
        total = sum(
            (2 ** k) * arith.binomial(n, k) * arith.binomial(n, k - 1)
            for k in range(n + 1)
        )
        return ZZ(total // n)
def get_lexer(environment):
    """Return a Lexer for *environment*, caching instances keyed on every
    lexer-relevant environment setting so equivalent environments share one."""
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    lexer = _lexer_cache.get(key)
    if lexer is None:
        _lexer_cache[key] = lexer = Lexer(environment)
    return lexer
class TrackedSpace(Space):
    """A Space that counts first visits per episode and reports the fraction
    of tracked squares visited via agent_infos['env_infos']['visitation_pct']."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.visited_in_episode = False

    def reset(self, agent_infos):
        super().reset(agent_infos)
        # Restart the visit counter and register this square in the total.
        agent_infos['tracking_counter'] = 0
        agent_infos['num_tracked_squares'] = agent_infos.get('num_tracked_squares', 0) + 1
        self.visited_in_episode = False

    def update_agent_infos(self, state_infos, agent_infos):
        if not self.agent_is_here(state_infos):
            return
        if not self.visited_in_episode:
            # Count each square at most once per episode.
            self.visited_in_episode = True
            agent_infos['tracking_counter'] += 1
        pct = agent_infos['tracking_counter'] / agent_infos['num_tracked_squares']
        agent_infos['env_infos']['visitation_pct'] = pct
def _cuda_deserialize(obj, location):
if location.startswith('cuda'):
if (location[5:] == ''):
device = 0
else:
device = max(int(location[5:]), 0)
if (not torch.cuda.is_available()):
raise RuntimeError("Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location='cpu' to map your storages to the CPU.")
if (device >= torch.cuda.device_count()):
raise RuntimeError('Attempting to deserialize object on CUDA device {} but torch.cuda.device_count() is {}. Please use torch.load with map_location to map your storages to an existing device.'.format(device, torch.cuda.device_count()))
return obj.cuda(device) |
class NonBlocking(IterDataPipe):
    """IterDataPipe base whose __next__ polls `nonblocking_next()` in a loop,
    invoking a class-wide hook each time data is not yet available.
    Subclasses must implement `nonblocking_next` and `reset_iterator`.
    """
    # Callback invoked (if set) whenever nonblocking_next raises NotAvailable.
    not_available_hook = default_not_available_hook
    def __iter__(self):
        self.reset_iterator()
        return self
    def __next__(self):
        while True:
            try:
                return self.nonblocking_next()
            except StopIteration:
                # NOTE(review): re-raising StopIteration unchanged is
                # redundant; kept as-is to preserve the exact control flow.
                raise StopIteration
            except NotAvailable:
                if (NonBlocking.not_available_hook is not None):
                    NonBlocking.not_available_hook()
    def nonblocking_next(self):
        """Return the next element, or raise NotAvailable / StopIteration."""
        raise NotImplementedError(('nonblocking_next is not implemented for %s' % self.__class__))
    def reset_iterator(self):
        """Reset iteration state before a fresh traversal."""
        raise NotImplementedError(('reset_iterator is not implemented for %s' % self.__class__))
def register_not_available_hook(hook_function):
    """Install *hook_function* as the class-wide NonBlocking 'data not yet
    available' callback (affects all NonBlocking pipes)."""
    NonBlocking.not_available_hook = hook_function
class MedMNISTShardDataset(ShardDataset):
    """Rank-local shard of a MedMNIST array dataset.

    Worker `rank` (1-based) keeps every `worldsize`-th sample starting at
    index rank-1, so the shards partition the data without overlap.
    """

    def __init__(self, x, y, data_type: str='train', rank: int=1, worldsize: int=1) -> None:
        self.data_type = data_type
        self.rank = rank
        self.worldsize = worldsize
        offset = self.rank - 1
        self.x = x[offset::self.worldsize]
        self.y = y[offset::self.worldsize]

    def __getitem__(self, index: int) -> Tuple[(Any, Any)]:
        return self.x[index], self.y[index]

    def __len__(self) -> int:
        return len(self.x)
class MdpStepCollector(StepCollector):
    """Step-by-step environment collector that accumulates transitions into
    paths and keeps the most recent completed paths of the epoch in a
    bounded deque.
    """
    def __init__(self, env, policy, max_num_epoch_paths_saved=None, render=False, render_kwargs=None):
        if (render_kwargs is None):
            render_kwargs = {}
        self._env = env
        self._policy = policy
        self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
        # deque(maxlen=None) keeps everything; otherwise oldest paths drop off.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
        self._render = render
        self._render_kwargs = render_kwargs
        self._num_steps_total = 0
        self._num_paths_total = 0
        # Last observation; None forces a fresh rollout on the next step.
        self._obs = None
    def get_epoch_paths(self):
        """Return the paths completed during the current epoch."""
        return self._epoch_paths
    def end_epoch(self, epoch):
        """Drop this epoch's paths and force a new rollout next step."""
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
        self._obs = None
    def get_diagnostics(self):
        """Return step/path totals plus path-length statistics."""
        path_lens = [len(path['actions']) for path in self._epoch_paths]
        stats = OrderedDict([('num steps total', self._num_steps_total), ('num paths total', self._num_paths_total)])
        stats.update(create_stats_ordered_dict('path length', path_lens, always_show_all_stats=True))
        return stats
    def get_snapshot(self):
        return dict(env=self._env, policy=self._policy)
    def collect_new_steps(self, max_path_length, num_steps, discard_incomplete_paths):
        """Collect `num_steps` environment steps one at a time."""
        for _ in range(num_steps):
            self.collect_one_step(max_path_length, discard_incomplete_paths)
    def collect_one_step(self, max_path_length, discard_incomplete_paths):
        """Take one env step; finish the rollout on terminal or length cap."""
        if (self._obs is None):
            self._start_new_rollout()
        (action, agent_info) = self._policy.get_action(self._obs)
        (next_ob, reward, terminal, env_info) = self._env.step(action)
        if self._render:
            self._env.render(**self._render_kwargs)
        terminal = np.array([terminal])
        reward = np.array([reward])
        self._current_path_builder.add_all(observations=self._obs, actions=action, rewards=reward, next_observations=next_ob, terminals=terminal, agent_infos=agent_info, env_infos=env_info)
        if (terminal or (len(self._current_path_builder) >= max_path_length)):
            self._handle_rollout_ending(max_path_length, discard_incomplete_paths)
            self._start_new_rollout()
        else:
            self._obs = next_ob
    def _start_new_rollout(self):
        """Reset the path builder and the environment."""
        self._current_path_builder = PathBuilder()
        self._obs = self._env.reset()
    def _handle_rollout_ending(self, max_path_length, discard_incomplete_paths):
        """Save the finished path unless it is incomplete and discarding is on."""
        if (len(self._current_path_builder) > 0):
            path = self._current_path_builder.get_all_stacked()
            path_len = len(path['actions'])
            # A path is "incomplete" if it neither hit the cap nor terminated.
            if ((path_len != max_path_length) and (not path['terminals'][(- 1)]) and discard_incomplete_paths):
                return
            self._epoch_paths.append(path)
            self._num_paths_total += 1
            self._num_steps_total += path_len
class MaxRewardPriorityQueue():
    """Deduplicated pool of scored molecule elements supporting top-k and
    threshold pruning plus random (with-replacement) batch sampling."""

    def __init__(self):
        self.elems = []

    def __len__(self):
        return len(self.elems)

    def add_list(self, smis, scores):
        """Add (smiles, score) pairs, deduplicating against the pool."""
        incoming = [StorageElement(smi=smi, score=score) for smi, score in zip(smis, scores)]
        self.elems = list(set(self.elems + incoming))

    def get_elems(self):
        return unravel_elems(self.elems)

    def squeeze_by_kth(self, k):
        """Keep only the k best elements; return the worst retained score."""
        k = min(k, len(self.elems))
        self.elems = sorted(self.elems, reverse=True)[:k]
        return self.elems[-1].score

    def squeeze_by_thr(self, thr):
        """Drop every element scoring below *thr*; pool stays sorted best-first."""
        self.elems = sorted(self.elems, reverse=True)
        cutoff = len(self.elems)
        for position, elem in enumerate(self.elems):
            if elem.score < thr:
                cutoff = position
                break
        self.elems = self.elems[:cutoff]
        return unravel_elems(self.elems)

    def sample_batch(self, batch_size):
        """Sample `batch_size` elements uniformly with replacement."""
        picked = random.choices(population=self.elems, k=batch_size)
        return unravel_elems(picked)
def is_in_index_region(lat, lon, index='ONI'):
    """True if (lat, lon) lies inside the bounding box of the given index region."""
    lat_bounds, lon_bounds = get_region_bounds(index=index)
    inside_lat = lat_bounds[0] <= lat <= lat_bounds[1]
    inside_lon = lon_bounds[0] <= lon <= lon_bounds[1]
    return inside_lat and inside_lon
class VariableEmbedder(Embedder):
    """Trainable word-embedding lookup table with optional L2 weight decay.

    :param params: object with `vocab_size` and `hidden_size` attributes
    :param wd: weight-decay coefficient; when truthy, an L2 penalty on the
        embedding matrix is added to the 'losses' collection
    :param initializer: initializer for the embedding matrix
    :param name: variable scope name
    """

    def __init__(self, params, wd=0.0, initializer=None, name='variable_embedder'):
        (V, d) = (params.vocab_size, params.hidden_size)
        with tf.variable_scope(name):
            self.emb_mat = tf.get_variable('emb_mat', dtype='float', shape=[V, d], initializer=initializer)
            if wd:
                # tf.mul was removed in TF 1.0 (this file already uses
                # tf.compat.v1 elsewhere); tf.multiply is the supported name.
                weight_decay = tf.multiply(tf.nn.l2_loss(self.emb_mat), wd, name='weight_loss')
                tf.add_to_collection('losses', weight_decay)

    def __call__(self, word, name='embedded_content'):
        """Look up embeddings for integer word ids `word`."""
        out = tf.nn.embedding_lookup(self.emb_mat, word, name=name)
        return out
class LSTMwRecDropout(nn.Module):
    """Multi-layer (optionally bidirectional) LSTM built from LSTMCells,
    operating on PackedSequence input, with dropout between layers and a
    per-sequence recurrent dropout mask applied to the hidden state at
    every timestep (same mask reused across steps).
    """
    def __init__(self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, pad=False, rec_dropout=0):
        super().__init__()
        self.batch_first = batch_first
        self.pad = pad
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout = dropout
        # Inter-layer dropout and recurrent (hidden-state) dropout.
        self.drop = nn.Dropout(dropout, inplace=True)
        self.rec_drop = nn.Dropout(rec_dropout, inplace=True)
        self.num_directions = (2 if bidirectional else 1)
        # One cell per (layer, direction); layers > 0 consume the
        # concatenation of both directions' outputs.
        self.cells = nn.ModuleList()
        for l in range(num_layers):
            in_size = (input_size if (l == 0) else (self.num_directions * hidden_size))
            for d in range(self.num_directions):
                self.cells.append(nn.LSTMCell(in_size, hidden_size, bias=bias))
    def forward(self, input, hx=None):
        """Run the stacked cells over a PackedSequence.

        Returns (PackedSequence output, (h, c)) where h/c stack one entry
        per (layer, direction).
        """
        def rnn_loop(x, batch_sizes, cell, inits, reverse=False):
            # Process packed data with one cell; batch_sizes gives the number
            # of still-active sequences at each timestep.
            batch_size = batch_sizes[0].item()
            states = [list(init.split(([1] * batch_size))) for init in inits]
            # Recurrent dropout mask: sampled once, reused every timestep.
            h_drop_mask = x.new_ones(batch_size, self.hidden_size)
            h_drop_mask = self.rec_drop(h_drop_mask)
            resh = []
            if (not reverse):
                st = 0
                for bs in batch_sizes:
                    s1 = cell(x[st:(st + bs)], ((torch.cat(states[0][:bs], 0) * h_drop_mask[:bs]), torch.cat(states[1][:bs], 0)))
                    resh.append(s1[0])
                    for j in range(bs):
                        states[0][j] = s1[0][j].unsqueeze(0)
                        states[1][j] = s1[1][j].unsqueeze(0)
                    st += bs
            else:
                # Backward direction: walk the packed buffer from the end.
                en = x.size(0)
                for i in range((batch_sizes.size(0) - 1), (- 1), (- 1)):
                    bs = batch_sizes[i]
                    s1 = cell(x[(en - bs):en], ((torch.cat(states[0][:bs], 0) * h_drop_mask[:bs]), torch.cat(states[1][:bs], 0)))
                    resh.append(s1[0])
                    for j in range(bs):
                        states[0][j] = s1[0][j].unsqueeze(0)
                        states[1][j] = s1[1][j].unsqueeze(0)
                    en -= bs
                resh = list(reversed(resh))
            return (torch.cat(resh, 0), tuple((torch.cat(s, 0) for s in states)))
        all_states = [[], []]
        (inputdata, batch_sizes) = (input.data, input.batch_sizes)
        for l in range(self.num_layers):
            new_input = []
            # Inter-layer dropout (not applied to the raw input layer).
            if ((self.dropout > 0) and (l > 0)):
                inputdata = self.drop(inputdata)
            for d in range(self.num_directions):
                idx = ((l * self.num_directions) + d)
                cell = self.cells[idx]
                # Initial states: provided hx slices, or zeros.
                (out, states) = rnn_loop(inputdata, batch_sizes, cell, ((hx[i][idx] for i in range(2)) if (hx is not None) else (input.data.new_zeros(input.batch_sizes[0].item(), self.hidden_size, requires_grad=False) for _ in range(2))), reverse=(d == 1))
                new_input.append(out)
                all_states[0].append(states[0].unsqueeze(0))
                all_states[1].append(states[1].unsqueeze(0))
            if (self.num_directions > 1):
                # Concatenate forward and backward features for the next layer.
                inputdata = torch.cat(new_input, 1)
            else:
                inputdata = new_input[0]
        input = PackedSequence(inputdata, batch_sizes)
        return (input, tuple((torch.cat(x, 0) for x in all_states)))
def register_Ns3MmWaveMacSchedSapProvider_methods(root_module, cls):
    """Register Python-binding constructors/methods for
    ns3::MmWaveMacSchedSapProvider (appears to be auto-generated binding
    registration code — edit upstream generator, not here)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MmWaveMacSchedSapProvider const &', 'arg0')])
    cls.add_method('SchedDlCqiInfoReq', 'void', [param('ns3::MmWaveMacSchedSapProvider::SchedDlCqiInfoReqParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SchedDlRlcBufferReq', 'void', [param('ns3::MmWaveMacSchedSapProvider::SchedDlRlcBufferReqParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SchedSetMcs', 'void', [param('int', 'mcs')], is_virtual=True)
    cls.add_method('SchedTriggerReq', 'void', [param('ns3::MmWaveMacSchedSapProvider::SchedTriggerReqParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SchedUlCqiInfoReq', 'void', [param('ns3::MmWaveMacSchedSapProvider::SchedUlCqiInfoReqParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SchedUlMacCtrlInfoReq', 'void', [param('ns3::MmWaveMacSchedSapProvider::SchedUlMacCtrlInfoReqParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    return
def test_regular_regular_axis1():
    """Concatenating two regular arrays on axis=1 joins rows and widens the type."""
    left = ak.from_json('[[0.0, 1.1], [2.2, 3.3]]')
    right = ak.from_json('[[4.4, 5.5, 6.6], [7.7, 8.8, 9.9]]')
    left = ak.to_regular(left, axis=1)
    right = ak.to_regular(right, axis=1)
    joined = ak.concatenate([left, right], axis=1)
    assert joined.to_list() == [[0.0, 1.1, 4.4, 5.5, 6.6], [2.2, 3.3, 7.7, 8.8, 9.9]]
    # Result stays regular: 2 rows of exactly 5 float64 values.
    assert joined.type == ArrayType(RegularType(NumpyType('float64'), 5), 2)
class OutputInTheMiddleNet(torch.nn.Module):
    """Tiny network returning the input together with every intermediate
    activation, used to exercise handling of non-terminal model outputs."""

    def __init__(self):
        super(OutputInTheMiddleNet, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv2 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.identity = torch.nn.Identity()

    def forward(self, x):
        first = self.conv1(x)
        passthrough = self.identity(first)
        second = self.conv2(passthrough)
        activated = torch.relu(second)
        # Expose every stage, including the untouched input.
        return x, first, passthrough, second, activated
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__ns3NetDevice__gt___Ns3Time_Ns3Time_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python-binding methods for the ns3::CallbackImpl
    specialization void(Ptr<const Packet>, Ptr<NetDevice>, Ptr<NetDevice>,
    Time, Time) (appears to be auto-generated binding registration code)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::NetDevice >, ns3::Time, ns3::Time, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # The C++ operator() is exposed as Python __call__.
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::Packet const >', 'arg0'), param('ns3::Ptr< ns3::NetDevice >', 'arg1'), param('ns3::Ptr< ns3::NetDevice >', 'arg2'), param('ns3::Time', 'arg3'), param('ns3::Time', 'arg4')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class Graph():
    """Skeleton graph for spatio-temporal GCNs.

    Builds the joint/edge layout for a chosen skeleton format, computes
    hop distances, and materializes the (possibly multi-partition)
    normalized adjacency tensor `self.A` according to a strategy.

    :param layout: skeleton layout name ('h36m', 'openpose', 'ntu-rgb+d', 'ntu_edge')
    :param strategy: adjacency partitioning ('uniform', 'distance', 'spatial')
    :param max_hop: maximum hop distance considered connected
    :param dilation: step between hops used when building partitions
    :raises ValueError: for unknown layout or strategy names
    """
    def __init__(self, layout='h36m', strategy='spatial', max_hop=1, dilation=1):
        self.max_hop = max_hop
        self.dilation = dilation
        self.get_edge(layout)
        self.hop_dis = get_hop_distance(self.num_node, self.edge, max_hop=max_hop)
        self.get_adjacency(strategy)
    def __str__(self):
        # Fixed: __str__ must return a str; returning the ndarray itself
        # made str(graph)/print(graph) raise TypeError.
        return str(self.A)
    def get_edge(self, layout):
        """Set num_node, edge (self-links + skeleton bones) and center joint."""
        if (layout == 'h36m'):
            self.num_node = 17
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_link = [(0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6), (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16)]
            self.edge = (self_link + neighbor_link)
            self.center = 0
        elif (layout == 'openpose'):
            self.num_node = 18
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12, 11), (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
            self.edge = (self_link + neighbor_link)
            self.center = 1
        elif (layout == 'ntu-rgb+d'):
            self.num_node = 25
            self_link = [(i, i) for i in range(self.num_node)]
            # NTU joints are documented 1-based; shift to 0-based below.
            neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
            neighbor_link = [((i - 1), (j - 1)) for (i, j) in neighbor_1base]
            self.edge = (self_link + neighbor_link)
            self.center = (21 - 1)
        elif (layout == 'ntu_edge'):
            self.num_node = 24
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6), (8, 7), (9, 2), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (21, 22), (22, 8), (23, 24), (24, 12)]
            neighbor_link = [((i - 1), (j - 1)) for (i, j) in neighbor_1base]
            self.edge = (self_link + neighbor_link)
            self.center = 2
        else:
            raise ValueError('Do Not Exist This Layout.')
    def get_adjacency(self, strategy):
        """Build self.A, a (partitions, num_node, num_node) adjacency tensor."""
        valid_hop = range(0, (self.max_hop + 1), self.dilation)
        adjacency = np.zeros((self.num_node, self.num_node))
        for hop in valid_hop:
            adjacency[(self.hop_dis == hop)] = 1
        normalize_adjacency = normalize_digraph(adjacency)
        if (strategy == 'uniform'):
            # Single partition holding the whole normalized adjacency.
            A = np.zeros((1, self.num_node, self.num_node))
            A[0] = normalize_adjacency
            self.A = A
        elif (strategy == 'distance'):
            # One partition per hop distance.
            A = np.zeros((len(valid_hop), self.num_node, self.num_node))
            for (i, hop) in enumerate(valid_hop):
                A[i][(self.hop_dis == hop)] = normalize_adjacency[(self.hop_dis == hop)]
            self.A = A
        elif (strategy == 'spatial'):
            # Partition each hop by distance-to-center: root/equal, closer,
            # farther (ST-GCN spatial configuration).
            A = []
            for hop in valid_hop:
                a_root = np.zeros((self.num_node, self.num_node))
                a_close = np.zeros((self.num_node, self.num_node))
                a_further = np.zeros((self.num_node, self.num_node))
                for i in range(self.num_node):
                    for j in range(self.num_node):
                        if (self.hop_dis[(j, i)] == hop):
                            if (self.hop_dis[(j, self.center)] == self.hop_dis[(i, self.center)]):
                                a_root[(j, i)] = normalize_adjacency[(j, i)]
                            elif (self.hop_dis[(j, self.center)] > self.hop_dis[(i, self.center)]):
                                a_close[(j, i)] = normalize_adjacency[(j, i)]
                            else:
                                a_further[(j, i)] = normalize_adjacency[(j, i)]
                if (hop == 0):
                    A.append(a_root)
                else:
                    A.append((a_root + a_close))
                    A.append(a_further)
            A = np.stack(A)
            self.A = A
        else:
            raise ValueError('Do Not Exist This Strategy')
def sproot(tck, mest=10):
    """Find the roots of a cubic B-spline.

    Accepts either a legacy ``(t, c, k)`` tuple or a ``BSpline`` instance and
    delegates the actual root finding to ``_impl.sproot``.
    """
    if not isinstance(tck, BSpline):
        # Legacy tuple representation: hand it straight to the implementation.
        return _impl.sproot(tck, mest)
    if tck.c.ndim > 1:
        warnings.warn('Calling sproot() with BSpline objects with c.ndim > 1 is not recommended.', DeprecationWarning)
    t, c, k = tck.tck
    # The legacy implementation expects the spline axis last: roll axis 0 of
    # the coefficient array to the end (no-op for 1-D coefficients).
    c = c.transpose(tuple(range(1, c.ndim)) + (0,))
    return _impl.sproot((t, c, k), mest)
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
    """Register Python bindings for ``ns3::TwoRayGroundPropagationLossModel``.

    Auto-generated pybindgen registration code: declares the default
    constructor plus the getter/setter and virtual methods of the C++ class
    on the wrapper ``cls``. The exact call sequence mirrors the C++ API and
    must not be reordered or edited by hand.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetFrequency', 'void', [param('double', 'frequency')])
    cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')])
    cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')])
    cls.add_method('GetMinDistance', 'double', [], is_const=True)
    cls.add_method('GetFrequency', 'double', [], is_const=True)
    cls.add_method('GetSystemLoss', 'double', [], is_const=True)
    cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')])
    # Private virtual overrides of the PropagationLossModel hooks.
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return
def test_montage_simple_rgb():
    """montage() should tile multichannel images and pad with the mean value."""
    n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2
    total = n_images * n_rows * n_cols * n_channels
    arr_in = np.arange(total, dtype=float).reshape(n_images, n_rows, n_cols, n_channels)
    arr_out = montage(arr_in, channel_axis=-1)
    # Two input tiles plus mean-filled ([7, 8]) padding tiles on a 2x2 grid.
    arr_ref = np.array(
        [
            [[0, 1], [2, 3], [8, 9], [10, 11]],
            [[4, 5], [6, 7], [12, 13], [14, 15]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
            [[7, 8], [7, 8], [7, 8], [7, 8]],
        ]
    )
    assert_array_equal(arr_out, arr_ref)
def get_logger(name, log_dir, config_dir):
    """Create a logger configured from ``config_dir/log_config.json``.

    The dict-config's ``file_handler`` is pointed at
    ``log_dir/<name with '/' replaced by '-'>``, and a stdout stream handler
    is attached in addition to whatever the config sets up.

    Args:
        name: Logger name; '/' characters are mapped to '-' in the log filename.
        log_dir: Directory receiving the file handler's output.
        config_dir: Directory containing ``log_config.json``.

    Returns:
        logging.Logger: The configured logger.
    """
    # BUG FIX: the config file was opened via json.load(open(...)) and never
    # closed; use a context manager so the handle is released deterministically.
    with open('{}/log_config.json'.format(config_dir)) as config_file:
        config_dict = json.load(config_file)
    config_dict['handlers']['file_handler']['filename'] = '{}/{}'.format(log_dir, name.replace('/', '-'))
    logging.config.dictConfig(config_dict)
    logger = logging.getLogger(name)
    std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logging.Formatter(std_out_format))
    logger.addHandler(consoleHandler)
    return logger
class CnnC3(Convolution2DArchitectureBase, NeuralNetworkTrainingDefault):
    """Three-convolution CNN for (101, 6, 1) inputs.

    Architecture: three Conv+ReLU stages, a flatten, a linear classifier and
    a softmax output.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def build_model(self, x_shape, y_shape):
        """Assemble ``self.model`` for inputs of shape (N, 101, 6, 1).

        Args:
            x_shape: Input batch shape; trailing dims must be (101, 6, 1).
            y_shape: Label batch shape; y_shape[1] is the class count.
        """
        self.assert_shapes(x_shape, y_shape)
        assert x_shape[1:] == (101, 6, 1)
        n_classes = y_shape[1]
        layers = [
            Convolution(filtersize=(3, 3, 1, 32), stride=(1, 1)),
            Rect(),
            Convolution(filtersize=(3, 3, 32, 32), stride=(1, 1)),
            Rect(),
            Convolution(filtersize=(2, 2, 32, 32), stride=(1, 1)),
            Rect(),
            Flatten(),
            Linear(3072, n_classes),
            SoftMax(),
        ]
        self.model = Sequential(layers)
        # Move parameters to the configured backend.
        if self.use_gpu:
            self.model.to_cupy()
        else:
            self.model.to_numpy()
def test_comments(foundation_cache):
    """Each parsed sentence gets exactly one '# constituency = ...' comment."""
    pipe = stanza.Pipeline('en', model_dir=TEST_MODELS_DIR, processors='tokenize,pos,constituency', foundation_cache=foundation_cache)
    doc = pipe(TEST_TEXT)
    check_results(doc)
    for sentence in doc.sentences:
        assert any(comment.startswith('# constituency = ') for comment in sentence.comments)
    # Overwriting the tree must replace the comment, not add a second one.
    doc.sentences[0].constituency = 'asdf'
    assert '# constituency = asdf' in doc.sentences[0].comments
    for sentence in doc.sentences:
        matches = [comment for comment in sentence.comments if comment.startswith('# constituency')]
        assert len(matches) == 1
# BUG FIX: the skip condition was left as a bare tuple expression, which is a
# no-op; it must be applied as a unittest.skipIf class decorator so the suite
# is skipped when Caffe lacks Python layer support.
@unittest.skipIf('Python' not in caffe.layer_type_list(), 'Caffe built without Python layer support')
class TestPythonLayer(unittest.TestCase):
    """End-to-end checks for Caffe's Python layer: forward, backward,
    reshape, exception propagation, parameter persistence and phase."""

    def setUp(self):
        # The net prototxt is written to a temp file and removed once loaded.
        net_file = python_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file)

    def test_forward(self):
        x = 8
        self.net.blobs['data'].data[...] = x
        self.net.forward()
        # Three stacked layers each scale by 10 (per the expected 10**3 factor).
        for y in self.net.blobs['three'].data.flat:
            self.assertEqual(y, ((10 ** 3) * x))

    def test_backward(self):
        x = 7
        self.net.blobs['three'].diff[...] = x
        self.net.backward()
        for y in self.net.blobs['data'].diff.flat:
            self.assertEqual(y, ((10 ** 3) * x))

    def test_reshape(self):
        s = 4
        self.net.blobs['data'].reshape(s, s, s, s)
        self.net.forward()
        # Every blob should have been reshaped to the new spatial size.
        for blob in six.itervalues(self.net.blobs):
            for d in blob.data.shape:
                self.assertEqual(s, d)

    def test_exception(self):
        net_file = exception_net_file()
        self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST)
        os.remove(net_file)

    def test_parameter(self):
        net_file = parameter_net_file()
        net = caffe.Net(net_file, caffe.TRAIN)
        net.forward()
        net.backward()
        layer = net.layers[list(net._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 0)
        self.assertEqual(layer.blobs[0].diff[0], 1)
        layer.blobs[0].data[0] += layer.blobs[0].diff[0]
        self.assertEqual(layer.blobs[0].data[0], 1)
        # Saved weights must survive a round trip through copy_from().
        (h, caffemodel_file) = tempfile.mkstemp()
        net.save(caffemodel_file)
        layer.blobs[0].data[0] = (- 1)
        self.assertEqual(layer.blobs[0].data[0], (- 1))
        net.copy_from(caffemodel_file)
        self.assertEqual(layer.blobs[0].data[0], 1)
        os.remove(caffemodel_file)
        net2 = caffe.Net(net_file, caffe.TRAIN)
        net2.share_with(net)
        # BUG FIX: the layer must be looked up on net2 (the sharing net); the
        # original indexed net.layers with net2's name list.
        layer = net2.layers[list(net2._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 1)
        os.remove(net_file)

    def test_phase(self):
        net_file = phase_net_file()
        for phase in (caffe.TRAIN, caffe.TEST):
            net = caffe.Net(net_file, phase)
            self.assertEqual(net.forward()['phase'], phase)
class BenchmarkArguments():
    """Command-line arguments controlling the (deprecated) benchmark utilities.

    Fields are declared with ``field``/``list_field`` defaults plus argparse
    help metadata.

    NOTE(review): the ``list_field``/``field`` defaults imply this class is
    decorated with ``@dataclass`` upstream (the decorator is not visible in
    this chunk); ``__post_init__`` only runs if that is the case — confirm.
    NOTE(review): ``do_multi_processing`` reads ``self.is_tpu``, which is not
    defined in this chunk; presumably provided elsewhere (likely a property)
    — confirm.
    """
    # --- what to benchmark ---
    models: List[str] = list_field(default=[], metadata={'help': 'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version of all available models'})
    batch_sizes: List[int] = list_field(default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'})
    sequence_lengths: List[int] = list_field(default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'})
    # --- execution switches ---
    inference: bool = field(default=True, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'})
    cuda: bool = field(default=True, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'})
    tpu: bool = field(default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'})
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(default=True, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'})
    memory: bool = field(default=True, metadata={'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'})
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(default=True, metadata={'help': 'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled for debugging / testing and on TPU.'})
    # --- output file names (timestamped once at class-definition time) ---
    inference_time_csv_file: str = field(default=f'inference_time_{round(time())}.csv', metadata={'help': 'CSV filename used if saving time results to csv.'})
    inference_memory_csv_file: str = field(default=f'inference_memory_{round(time())}.csv', metadata={'help': 'CSV filename used if saving memory results to csv.'})
    train_time_csv_file: str = field(default=f'train_time_{round(time())}.csv', metadata={'help': 'CSV filename used if saving time results to csv for training.'})
    train_memory_csv_file: str = field(default=f'train_memory_{round(time())}.csv', metadata={'help': 'CSV filename used if saving memory results to csv for training.'})
    env_info_csv_file: str = field(default=f'env_info_{round(time())}.csv', metadata={'help': 'CSV filename used if saving environment information.'})
    log_filename: str = field(default=f'log_{round(time())}.csv', metadata={'help': 'Log filename used if print statements are saved in log.'})
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(default=False, metadata={'help': 'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain model weights.'})

    def __post_init__(self):
        """Warn once on construction that the benchmark utilities are deprecated."""
        warnings.warn(f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils are deprecated in general and it is advised to use external Benchmarking libraries to benchmark Transformer models.', FutureWarning)

    def to_json_string(self) -> str:
        """Serialize this instance's fields to an indented JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    def model_names(self) -> List[str]:
        """Return the configured model identifiers, requiring at least one."""
        assert (len(self.models) > 0), "Please make sure you provide at least one model name / model identifier, *e.g.* `--models bert-base-cased` or `args.models = ['bert-base-cased']."
        return self.models

    def do_multi_processing(self) -> bool:
        """Whether measurements should run in a separate process (disabled on TPU)."""
        if (not self.multi_process):
            return False
        elif self.is_tpu:
            logger.info('Multiprocessing is currently not possible on TPU.')
            return False
        else:
            return True
class graph_dict_helper(object):
    """Utilities for preparing VirtualHome-style environment graphs.

    A graph is a dict with ``nodes`` (id, class_name, category, properties,
    states) and ``edges`` (relation_type, from_id, to_id).  This helper adds
    the objects a script needs, applies precondition relations and states,
    randomizes object states, and sanity-checks room membership.

    Ids in [1000, 2000) are reserved for script-added objects and ids >= 2000
    for randomly placed objects (see ``initialize``).
    """

    def __init__(self, properties_data=None, object_placing=None, object_states=None, max_nodes=300):
        # Fall back to the bundled resource files when not provided.
        if (properties_data is None):
            properties_data = load_properties_data()
        if (object_placing is None):
            object_placing = load_object_placing()
        if (object_states is None):
            object_states = load_object_states()
        self.properties_data = properties_data
        self.object_placing = object_placing
        self.object_states = object_states
        self.max_nodes = max_nodes
        # Binary state variables together with their default values.
        self.open_closed = BinaryVariable(['OPEN', 'CLOSED'], default='CLOSED')
        self.on_off = BinaryVariable(['ON', 'OFF'], default='OFF')
        self.clean_dirty = BinaryVariable(['CLEAN', 'DIRTY'], default='CLEAN')
        self.plugged_in_out = BinaryVariable(['PLUGGED_IN', 'PLUGGED_OUT'], default='PLUGGED_IN')
        self.binary_variables = [self.open_closed, self.on_off, self.clean_dirty, self.plugged_in_out]
        self.body_part = ['face', 'leg', 'arm', 'eye', 'hand', 'feet']
        self.possible_rooms = ['home_office', 'kitchen', 'living_room', 'bathroom', 'dining_room', 'bedroom', 'kids_bedroom', 'entrance_hall']
        self.script_object2unity_object = load_name_equivalence()
        self.unity_object2script_object = build_unity2object_script(self.script_object2unity_object)
        # Substitute rooms used when a scripted room is absent from the graph.
        self.equivalent_rooms = {'kitchen': 'dining_room', 'dining_room': 'kitchen', 'entrance_hall': 'living_room', 'home_office': 'living_room', 'living_room': 'home_office', 'kids_bedroom': 'bedroom'}
        # Mappings from script/precondition vocabulary to simulator vocabulary.
        self.relation_script_precond_simulator = {'inside': 'INSIDE', 'location': 'INSIDE', 'atreach': 'CLOSE', 'in': 'ON'}
        self.states_script_precond_simulator = {'dirty': 'DIRTY', 'clean': 'CLEAN', 'open': 'OPEN', 'closed': 'CLOSED', 'plugged': 'PLUGGED_IN', 'unplugged': 'PLUGGED_OUT', 'is_on': 'ON', 'is_off': 'OFF', 'sitting': 'SITTING', 'lying': 'LYING'}
        self.relation_placing_simulator = {'in': 'INSIDE', 'on': 'ON', 'nearby': 'CLOSE'}
        self.states_mapping = {'dirty': 'dirty', 'clean': 'clean', 'open': 'open', 'closed': 'closed', 'plugged': 'plugged_in', 'unplugged': 'plugged_out', 'on': 'on', 'off': 'off'}

    def initialize(self, graph_dict):
        """Seed the script/random object id counters from the graph's highest ids."""
        script_object_ids = [node['id'] for node in filter((lambda v: ((v['id'] >= 1000) and (v['id'] < 2000))), graph_dict['nodes'])]
        random_object_ids = [node['id'] for node in filter((lambda v: (v['id'] >= 2000)), graph_dict['nodes'])]
        self.script_objects_id = (max(script_object_ids) if (len(script_object_ids) != 0) else 1000)
        self.random_objects_id = (max(random_object_ids) if (len(random_object_ids) != 0) else 2000)

    def check_binary(self, graph_dict, id_checker, verbose):
        """Validate binary states of nodes accepted by ``id_checker``; repair bad ones."""
        open_closed = self.open_closed
        on_off = self.on_off
        plugged_in_out = self.plugged_in_out
        for node in graph_dict['nodes']:
            if id_checker(node['id']):
                if ('CAN_OPEN' in node['properties']):
                    if (not open_closed.check(node, verbose)):
                        open_closed.set_to_default_state(node)
                if ('HAS_PLUG' in node['properties']):
                    if (not plugged_in_out.check(node, verbose)):
                        plugged_in_out.set_to_default_state(node)
                # NOTE(review): 'HAS_SWTICH' looks like a typo of 'HAS_SWITCH',
                # but the same spelling is used consistently in this class, so
                # it is kept — confirm against the property data files.
                if ('HAS_SWTICH' in node['properties']):
                    if (not on_off.check(node, verbose)):
                        on_off.set_to_default_state(node)
                # Lights/lamps default to ON, doors to OPEN.
                if (('light' in node['class_name']) or ('lamp' in node['class_name'])):
                    if (not on_off.check(node, verbose)):
                        on_off.set_node_state(node, 'ON')
                if (node['category'] == 'Doors'):
                    if (not open_closed.check(node, verbose)):
                        open_closed.set_node_state(node, 'OPEN')

    def open_all_doors(self, graph_dict):
        """Force every node in the 'Doors' category to the OPEN state."""
        open_closed = self.open_closed
        for node in graph_dict['nodes']:
            if (node['category'] == 'Doors'):
                open_closed.set_node_state(node, 'OPEN')

    def get_object_binary_variables(self, object_name):
        """Return the binary variables applicable to ``object_name``'s possible states."""
        states = self.object_states[object_name]
        bin_vars = self.get_binary_variables(states)
        return bin_vars

    def get_binary_variables(self, possible_states):
        """Map state strings to their BinaryVariable objects (one per variable).

        BUG FIX: the original immediately reset ``possible_states = []``,
        clobbering the input, so the method always returned an empty list.
        The input is now kept and results go into a separate output list;
        states with no binary variable (e.g. 'sitting') are skipped.
        """
        added_defaults = []
        state_to_bin_var = {}
        bin_vars = []
        for bin_var in self.binary_variables:
            state_to_bin_var[bin_var.positive] = (bin_var, bin_var.default)
            state_to_bin_var[bin_var.negative] = (bin_var, bin_var.default)
        for state in possible_states:
            if (state not in state_to_bin_var):
                continue
            (bin_var, default_var) = state_to_bin_var[state]
            if (default_var not in added_defaults):
                added_defaults.append(default_var)
                bin_vars.append(bin_var)
        return bin_vars

    def set_to_default_state(self, graph_dict, first_room, id_checker):
        """Reset selected nodes to default states (lights ON, doors OPEN) and,
        when ``first_room`` is given, move the character there."""
        open_closed = self.open_closed
        on_off = self.on_off
        clean_dirty = self.clean_dirty
        plugged_in_out = self.plugged_in_out
        body_part = self.body_part
        character_id = [i['id'] for i in filter((lambda v: (v['class_name'] == 'character')), graph_dict['nodes'])][0]
        for node in graph_dict['nodes']:
            if id_checker(node['id']):
                if ('CAN_OPEN' in node['properties']):
                    open_closed.set_to_default_state(node)
                    if (node['class_name'] == 'door'):
                        open_closed.set_node_state(node, 'OPEN')
                if ('HAS_PLUG' in node['properties']):
                    plugged_in_out.set_to_default_state(node)
                # NOTE(review): 'HAS_SWTICH' spelling kept for consistency — see check_binary.
                if ('HAS_SWTICH' in node['properties']):
                    on_off.set_to_default_state(node)
                clean_dirty.set_to_default_state(node)
                if ((node['class_name'] == 'character') and (first_room is not None)):
                    # Detach the character from all edges and place it INSIDE first_room.
                    graph_dict['edges'] = [e for e in filter((lambda e: ((e['from_id'] != character_id) and (e['to_id'] != character_id))), graph_dict['edges'])]
                    first_room_id = [i['id'] for i in filter((lambda v: (v['class_name'] == first_room)), graph_dict['nodes'])][0]
                    graph_dict['edges'].append({'relation_type': 'INSIDE', 'from_id': character_id, 'to_id': first_room_id})
                    node['states'] = []
                if (('light' in node['class_name']) or ('lamp' in node['class_name'])):
                    on_off.set_node_state(node, 'ON')
                if (node['category'] == 'Doors'):
                    open_closed.set_node_state(node, 'OPEN')
                # BUG FIX: the original wrapped this test in
                # any([... for v in body_part]) with an unused loop variable,
                # which is equivalent to the plain membership test below.
                if (Property.BODY_PART in node['properties']):
                    graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': character_id, 'to_id': node['id']})
                    graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': node['id'], 'to_id': character_id})

    def _add_missing_node(self, graph_dict, id, obj, category):
        """Append a fresh node for class ``obj`` with its known properties."""
        graph_dict['nodes'].append({'properties': [i.name for i in self.properties_data[obj]], 'id': id, 'states': [], 'category': category, 'class_name': obj})

    def _random_pick_a_room_with_objects_name_in_graph(self, available_rooms_in_graph, available_rooms_in_graph_id, objects_in_script, available_nodes, graph_dict):
        """Pick the room that already contains the most scripted objects
        (uniformly random when the histogram is flat)."""
        hist = np.zeros(len(available_rooms_in_graph_id))
        for obj in objects_in_script:
            obj_name = obj[0]
            if (obj_name == 'character'):
                continue
            for node in available_nodes:
                if (node['class_name'] == obj_name):
                    edges = [i for i in filter((lambda v: ((v['relation_type'] == 'INSIDE') and (v['from_id'] == node['id']) and (v['to_id'] in available_rooms_in_graph_id))), graph_dict['edges'])]
                    if (len(edges) > 0):
                        for edge in edges:
                            dest_id = edge['to_id']
                            idx = available_rooms_in_graph_id.index(dest_id)
                            hist[idx] += 1
        if (hist.std() < 1e-05):
            room_name = random.choice(available_rooms_in_graph)
        else:
            idx = np.argmax(hist)
            room_name = available_rooms_in_graph[idx]
        return room_name

    def _any_room_except(self, first_room, available_rooms_in_graph):
        """Pick a random room different from ``first_room``."""
        available_rooms = copy.deepcopy(available_rooms_in_graph)
        available_rooms.remove(first_room)
        return random.choice(available_rooms)

    def modify_script_with_specified_id(self, script, id_mapping, room_mapping):
        """Rewrite script parameters in place: map room names and replace
        (name, instance) pairs with the ids from ``id_mapping``."""
        for script_line in script:
            for parameter in script_line.parameters:
                if (parameter.name in self.possible_rooms):
                    parameter.name = room_mapping[parameter.name]
                # BUG FIX: narrowed a bare `except:` to AssertionError — the
                # try body can only raise from the assert; the handler prints
                # diagnostics before re-asserting.
                try:
                    assert ((parameter.name, parameter.instance) in id_mapping)
                except AssertionError:
                    print(parameter.name, parameter.instance)
                    print(id_mapping)
                    assert ((parameter.name, parameter.instance) in id_mapping)
                parameter.instance = id_mapping[(parameter.name, parameter.instance)]

    def ensure_light_on(self, graph_dict, id_checker):
        """Force every light/lamp node accepted by ``id_checker`` to ON."""
        on_off = self.on_off
        for node in graph_dict['nodes']:
            if (('light' in node['class_name']) or ('lamp' in node['class_name'])):
                if id_checker(node['id']):
                    if ('ON' not in node['states']):
                        while ('OFF' in node['states']):
                            node['states'].remove('OFF')
                        on_off.set_node_state(node, 'ON')

    def add_missing_object_from_script(self, script, precond, graph_dict, id_mapping):
        """Map every scripted object to a graph node, creating nodes for those
        missing, and remap scripted rooms to rooms present in the graph.

        Returns:
            (objects_in_script, first_room, room_mapping)
        """
        equivalent_rooms = self.equivalent_rooms
        possible_rooms = self.possible_rooms
        available_rooms_in_graph = [i['class_name'] for i in filter((lambda v: (v['category'] == 'Rooms')), graph_dict['nodes'])]
        available_rooms_in_graph_id = [i['id'] for i in filter((lambda v: (v['category'] == 'Rooms')), graph_dict['nodes'])]
        available_nodes = copy.deepcopy(graph_dict['nodes'])
        available_name = list(set([node['class_name'] for node in available_nodes]))
        # Resolve each scripted room to one that exists in the graph, chasing
        # the equivalence chain and guarding against cycles via rooms_tried.
        room_mapping = {}
        for room in possible_rooms:
            nroom = room
            rooms_tried = []
            while ((nroom not in available_rooms_in_graph) and (nroom not in rooms_tried)):
                rooms_tried.append(nroom)
                assert (nroom in equivalent_rooms), 'Not pre-specified mapping for room: {}'.format(nroom)
                nroom = equivalent_rooms[nroom]
            assert (nroom in available_rooms_in_graph), 'No equivalent room in graph for room: {}'.format(nroom)
            room_mapping[room] = nroom
        # Apply the mapping to preconditions and to the script itself.
        for precond_i in precond:
            if ('location' in precond_i):
                room = precond_i['location'][1][0]
                precond_i['location'][1][0] = room_mapping[room]
        for script_line in script:
            for parameter in script_line.parameters:
                if (parameter.name in possible_rooms):
                    parameter.name = room_mapping[parameter.name]
        first_room = None
        for script_line in script:
            for parameter in script_line.parameters:
                if ((parameter.name in possible_rooms) and (first_room is None)):
                    first_room = parameter.name
        objects_in_script = {}
        character_id = [i for i in filter((lambda v: (v['class_name'] == 'character')), graph_dict['nodes'])][0]['id']
        key = ('character', 1)
        objects_in_script[key] = (id_mapping[key] if (key in id_mapping) else character_id)
        for key in script.obtain_objects():
            if (key not in objects_in_script):
                objects_in_script[key] = (id_mapping[key] if (key in id_mapping) else None)
        location_precond = {(i['location'][0][0], int(i['location'][0][1])): i['location'][1][0] for i in filter((lambda v: ('location' in v)), precond)}
        rooms_in_precond = list(set([i for i in location_precond.values()]))
        if (first_room is None):
            assert (len(rooms_in_precond) == 0)
            first_room = self._random_pick_a_room_with_objects_name_in_graph(available_rooms_in_graph, available_rooms_in_graph_id, objects_in_script, available_nodes, graph_dict)
        else:
            # NOTE(review): objects without a location precondition end up in a
            # room *other* than the script's first room — presumably to force
            # navigation; confirm this is intentional.
            first_room = self._any_room_except(first_room, available_rooms_in_graph)
        assert ((first_room is not None) and (first_room in available_rooms_in_graph))
        for obj in objects_in_script.keys():
            if (objects_in_script[obj] is not None):
                continue
            room_obj = (location_precond[obj] if (obj in location_precond) else first_room)
            room_id = [i['id'] for i in filter((lambda v: (v['class_name'] == room_obj)), graph_dict['nodes'])][0]
            if (obj[0] in possible_rooms):
                # Rooms are unique; take the existing node.
                id_to_be_assigned = [i['id'] for i in filter((lambda v: (v['class_name'] == obj[0])), graph_dict['nodes'])]
                objects_in_script[obj] = id_to_be_assigned[0]
            elif (obj[0] in available_name):
                # Prefer an unused existing node of this class inside the target room.
                added = False
                possible_matched_nodes = [i for i in filter((lambda v: (v['class_name'] == obj[0])), available_nodes)]
                for node in possible_matched_nodes:
                    obj_in_room = [i for i in filter((lambda v: ((v['relation_type'] == 'INSIDE') and (v['from_id'] == node['id']) and (v['to_id'] == room_id))), graph_dict['edges'])]
                    if (len(obj_in_room) == 0):
                        continue
                    else:
                        objects_in_script[obj] = node['id']
                        available_nodes.remove(node)
                        added = True
                        break
                if (not added):
                    # Clone the category from an existing node of the same class.
                    node_with_same_class_name = [node for node in filter((lambda v: (v['class_name'] == obj[0])), graph_dict['nodes'])]
                    category = node_with_same_class_name[0]['category']
                    self._add_missing_node(graph_dict, self.script_objects_id, obj[0], category)
                    objects_in_script[obj] = self.script_objects_id
                    graph_dict['edges'].append({'relation_type': 'INSIDE', 'from_id': self.script_objects_id, 'to_id': room_id})
                    self.script_objects_id += 1
            else:
                # Entirely new object class: create it as a placable object.
                self._add_missing_node(graph_dict, self.script_objects_id, obj[0], 'placable_objects')
                objects_in_script[obj] = self.script_objects_id
                graph_dict['edges'].append({'relation_type': 'INSIDE', 'from_id': self.script_objects_id, 'to_id': room_id})
                self.script_objects_id += 1
        for script_line in script:
            for parameter in script_line.parameters:
                parameter.instance = objects_in_script[(parameter.name, parameter.instance)]
        return (objects_in_script, first_room, room_mapping)

    def prepare_from_precondition(self, precond, objects_in_script, graph_dict):
        """Apply precondition relations and states to the graph in place."""
        object_placing = self.object_placing
        objects_to_place = list(object_placing.keys())
        relation_script_precond_simulator = self.relation_script_precond_simulator
        states_script_precond_simulator = self.states_script_precond_simulator
        open_closed = self.open_closed
        on_off = self.on_off
        clean_dirty = self.clean_dirty
        plugged_in_out = self.plugged_in_out
        for p in precond:
            for (k, v) in p.items():
                if (k == 'location'):
                    continue
                if (k in relation_script_precond_simulator):
                    (src_name, src_id) = v[0]
                    (tgt_name, tgt_id) = v[1]
                    src_id = int(src_id)
                    tgt_id = int(tgt_id)
                    src_id = objects_in_script[(src_name.lower().replace(' ', '_'), src_id)]
                    tgt_id = objects_in_script[(tgt_name.lower().replace(' ', '_'), tgt_id)]
                    graph_dict['edges'].append({'relation_type': relation_script_precond_simulator[k], 'from_id': src_id, 'to_id': tgt_id})
                    if (k == 'atreach'):
                        # CLOSE is symmetric; add the reverse edge too.
                        graph_dict['edges'].append({'relation_type': relation_script_precond_simulator[k], 'from_id': tgt_id, 'to_id': src_id})
                elif (k in states_script_precond_simulator):
                    obj_id = objects_in_script[(v[0].lower().replace(' ', '_'), int(v[1]))]
                    for node in graph_dict['nodes']:
                        if (node['id'] == obj_id):
                            if (k in ['is_on', 'is_off']):
                                on_off.set_node_state(node, states_script_precond_simulator[k])
                            elif (k in ['open', 'closed']):
                                open_closed.set_node_state(node, states_script_precond_simulator[k])
                            elif (k in ['dirty', 'clean']):
                                clean_dirty.set_node_state(node, states_script_precond_simulator[k])
                            elif (k in ['plugged', 'unplugged']):
                                plugged_in_out.set_node_state(node, states_script_precond_simulator[k])
                            elif (k == 'sitting'):
                                if ('SITTING' not in node['states']):
                                    node['states'].append('SITTING')
                            elif (k == 'lying'):
                                if ('LYING' not in node['states']):
                                    node['states'].append('LYING')
                            break
                elif (k in ['occupied', 'free']):
                    obj_id = objects_in_script[(v[0].lower().replace(' ', '_'), int(v[1]))]
                    for node in graph_dict['nodes']:
                        if (node['id'] == obj_id):
                            if (k == 'free'):
                                self._change_to_totally_free(node, graph_dict)
                            elif (k == 'occupied'):
                                self._change_to_occupied(node, graph_dict, objects_to_place)
                            break

    def merge_object_name(self, object_name):
        """Normalize a script object name through the unity-name equivalences."""
        if (object_name in self.script_object2unity_object):
            unity_name = self.script_object2unity_object[object_name][0].replace('_', '')
        else:
            unity_name = object_name.replace('_', '')
        if (unity_name not in self.unity_object2script_object):
            return object_name
        return self.unity_object2script_object[unity_name]

    def add_random_objs_graph_dict(self, graph_dict, n):
        """Add ``n`` randomly chosen placable objects onto valid destinations,
        respecting sit/lie occupancy limits."""
        object_placing = self.object_placing
        relation_placing_simulator = self.relation_placing_simulator
        objects_to_place = list(object_placing.keys())
        random.shuffle(objects_to_place)
        rooms_id = [node['id'] for node in filter((lambda v: (v['class_name'] in self.possible_rooms)), graph_dict['nodes'])]

        def _add_node(src_name, tgt_node, tgt_name):
            # Create the object in the destination's room and link it to the target.
            tgt_id = tgt_node['id']
            self._add_missing_node(graph_dict, self.random_objects_id, src_name, 'placable_objects')
            specified_room_id = [edge['to_id'] for edge in filter((lambda v: ((v['from_id'] == tgt_id) and (v['relation_type'] == 'INSIDE') and (v['to_id'] in rooms_id))), graph_dict['edges'])][0]
            graph_dict['edges'].append({'relation_type': 'INSIDE', 'from_id': self.random_objects_id, 'to_id': specified_room_id})
            graph_dict['edges'].append({'relation_type': relation_placing_simulator[tgt_name['relation'].lower()], 'from_id': self.random_objects_id, 'to_id': tgt_id})
            graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': self.random_objects_id, 'to_id': tgt_id})
            graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': tgt_id, 'to_id': self.random_objects_id})
            self.random_objects_id += 1
        while (n > 0):
            src_name = random.choice(objects_to_place)
            tgt_names = copy.deepcopy(object_placing[src_name])
            src_name = self.merge_object_name(src_name)
            for tgt_name in tgt_names:
                tgt_name['destination'] = self.merge_object_name(tgt_name['destination'])
            random.shuffle(tgt_names)
            for tgt_name in tgt_names:
                tgt_nodes = [i for i in filter((lambda v: (v['class_name'] == tgt_name['destination'])), graph_dict['nodes'])]
                if (len(tgt_nodes) != 0):
                    max_occupancies = max(SitExecutor._MAX_OCCUPANCIES.get(tgt_name['destination'], 0), LieExecutor._MAX_OCCUPANCIES.get(tgt_name['destination'], 0))
                    if (max_occupancies == 0):
                        # Destination has no occupancy limit.
                        tgt_node = random.choice(tgt_nodes)
                        _add_node(src_name, tgt_node, tgt_name)
                        n -= 1
                        break
                    else:
                        # Only consider destinations with free capacity.
                        free_tgt_nodes = []
                        for tgt_node in tgt_nodes:
                            occupied_edges = [_edge for _edge in filter((lambda v: ((v['relation_type'] == 'ON') and (v['to_id'] == tgt_node['id']))), graph_dict['edges'])]
                            if (len(occupied_edges) < max_occupancies):
                                free_tgt_nodes.append(tgt_node)
                        if (len(free_tgt_nodes) != 0):
                            tgt_node = random.choice(free_tgt_nodes)
                            _add_node(src_name, tgt_node, tgt_name)
                            n -= 1
                            break

    def random_change_object_state(self, objects_in_script, graph_dict, id_checker):
        """Randomly resample the binary states of nodes accepted by ``id_checker``."""
        open_closed = self.open_closed
        on_off = self.on_off
        clean_dirty = self.clean_dirty
        plugged_in_out = self.plugged_in_out
        object_states = self.object_states
        states_mapping = self.states_mapping
        available_states = ['dirty', 'clean', 'open', 'closed', 'free', 'occupied', 'plugged', 'unplugged', 'on', 'off']
        for node in graph_dict['nodes']:
            if id_checker(node['id']):
                if (node['class_name'] in object_states):
                    possible_states = object_states[node['class_name']]
                    possible_states = [i for i in filter((lambda v: (v in available_states)), possible_states)]
                    if (len(possible_states) == 0):
                        continue
                    state = random.choice(possible_states)
                    if (state in ['free', 'occupied']):
                        # Occupancy is handled via edges, not states; nothing to sample.
                        pass
                    else:
                        state = states_mapping[state]
                    if (state in ['dirty', 'clean']):
                        clean_dirty.sample_state(node)
                    elif (state in ['on', 'off']):
                        on_off.sample_state(node)
                    elif (state in ['open', 'closed']):
                        open_closed.sample_state(node)
                    elif (state in ['plugged_in', 'plugged_out']):
                        plugged_in_out.sample_state(node)

    def _remove_one_random_nodes(self, graph_dict):
        """Drop the lowest-id randomly-added node (id >= 2000) and its edges."""
        start_id = 2000
        random_nodes_ids = [node['id'] for node in filter((lambda v: (v['id'] >= start_id)), graph_dict['nodes'])]
        if (len(random_nodes_ids) != 0):
            remove_id = np.min(random_nodes_ids)
            graph_dict['nodes'] = [node for node in filter((lambda v: (v['id'] != remove_id)), graph_dict['nodes'])]
            graph_dict['edges'] = [edge for edge in filter((lambda v: ((v['from_id'] != remove_id) and (v['to_id'] != remove_id))), graph_dict['edges'])]

    def _change_to_occupied(self, node, graph_dict, objects_to_place):
        """Fill ``node`` with placed objects until its sit/lie capacity is reached."""
        if ((node['class_name'] in SitExecutor._MAX_OCCUPANCIES) or (node['class_name'] in LieExecutor._MAX_OCCUPANCIES)):
            name = node['class_name']
            max_occupancy = (SitExecutor._MAX_OCCUPANCIES[name] if (name in SitExecutor._MAX_OCCUPANCIES) else LieExecutor._MAX_OCCUPANCIES[name])
            occupied_edges = [_edge for _edge in filter((lambda v: ((v['relation_type'] == 'ON') and (v['to_id'] == node['id']))), graph_dict['edges'])]
            current_state = ('free' if (len(occupied_edges) < max((max_occupancy - 1), 1)) else 'occupied')
            if (current_state != 'occupied'):
                rooms_id = [_node['id'] for _node in filter((lambda v: (v['category'] == 'Rooms')), graph_dict['nodes'])]
                room_id = None
                for edge in graph_dict['edges']:
                    if ((edge['relation_type'] == 'INSIDE') and (edge['from_id'] == node['id']) and (edge['to_id'] in rooms_id)):
                        room_id = edge['to_id']
                # BUG FIX: the assert message was built with print(...), which
                # evaluates to None; pass the formatted message directly.
                assert (room_id is not None), "{}({}) doesn't exist in any room".format(node['class_name'], node['id'])
                number_objects_to_add = (max_occupancy - len(occupied_edges))
                if (number_objects_to_add < 0):
                    # NOTE(review): leftover interactive-debug hook; kept to
                    # preserve behavior when occupancy exceeds the maximum.
                    import ipdb
                    ipdb.set_trace()
                object_placing = self.object_placing
                random.shuffle(objects_to_place)
                for src_name in objects_to_place:
                    tgt_names = object_placing[src_name]
                    src_name = self.merge_object_name(src_name)
                    for tgt_name in tgt_names:
                        tgt_name['destination'] = self.merge_object_name(tgt_name['destination'])
                    if (name in [i['destination'] for i in filter((lambda v: (v['relation'] == 'ON')), tgt_names)]):
                        # Evict one random object to stay under the node cap,
                        # then place the new occupant ON this node.
                        self._remove_one_random_nodes(graph_dict)
                        self._add_missing_node(graph_dict, self.random_objects_id, src_name, 'placable_objects')
                        graph_dict['edges'].append({'relation_type': 'INSIDE', 'from_id': self.random_objects_id, 'to_id': room_id})
                        graph_dict['edges'].append({'relation_type': 'ON', 'from_id': self.random_objects_id, 'to_id': node['id']})
                        graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': self.random_objects_id, 'to_id': node['id']})
                        graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': node['id'], 'to_id': self.random_objects_id})
                        self.random_objects_id += 1
                        # BUG FIX: was 'number_objects_to_add -= 0', so the
                        # counter never decreased and every matching object
                        # kept being added.
                        number_objects_to_add -= 1
                    if (number_objects_to_add <= 0):
                        break

    def _change_to_totally_free(self, node, graph_dict):
        """Move everything that is ON ``node`` to a random floor instead."""
        if ((node['class_name'] in SitExecutor._MAX_OCCUPANCIES) or (node['class_name'] in LieExecutor._MAX_OCCUPANCIES)):
            occupied_edges = [_edge for _edge in filter((lambda v: ((v['relation_type'] == 'ON') and (v['to_id'] == node['id']))), graph_dict['edges'])]
            occupied_nodes_id = [_edge['from_id'] for _edge in occupied_edges]
            removed_edges = []
            for occupied_node_id in occupied_nodes_id:
                removed_edges += [edge for edge in filter((lambda v: ((v['from_id'] == occupied_node_id) and (v['to_id'] == node['id']))), graph_dict['edges'])]
                removed_edges += [edge for edge in filter((lambda v: ((v['from_id'] == node['id']) and (v['to_id'] == occupied_node_id))), graph_dict['edges'])]
            for edge in removed_edges:
                graph_dict['edges'].remove(edge)
            floor_id = [_node['id'] for _node in filter((lambda v: (v['class_name'] == 'floor')), graph_dict['nodes'])]
            for obj_id in occupied_nodes_id:
                to_id = random.choice(floor_id)
                graph_dict['edges'].append({'relation_type': 'ON', 'from_id': obj_id, 'to_id': to_id})
                graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': obj_id, 'to_id': to_id})
                graph_dict['edges'].append({'relation_type': 'CLOSE', 'from_id': to_id, 'to_id': obj_id})

    def check_objs_in_room(self, graph_dict):
        """Print diagnostics for objects INSIDE zero or multiple rooms."""
        rooms_id = [node['id'] for node in filter((lambda v: (v['category'] == 'Rooms')), graph_dict['nodes'])]
        other_id = [node['id'] for node in filter((lambda v: (v['category'] != 'Rooms')), graph_dict['nodes'])]
        id2name = {node['id']: node['class_name'] for node in graph_dict['nodes']}
        for obj_id in other_id:
            in_room = []
            for edge in graph_dict['edges']:
                if ((edge['from_id'] == obj_id) and (edge['relation_type'] == 'INSIDE') and (edge['to_id'] in rooms_id)):
                    in_room.append(edge['to_id'])
            if (len(in_room) > 1):
                # BUG FIX: room names were built as [id2name for i in in_room]
                # (a list of dicts), which made ', '.join() raise TypeError;
                # look up each room's class_name instead.
                print('src object: {}({})'.format(id2name[obj_id], obj_id), 'in_rooms:', ', '.join([id2name[i] for i in in_room]))
                print('exist in more than one room')
            elif (len(in_room) == 0):
                print('src object: {}({})'.format(id2name[obj_id], obj_id))
class SentencepieceTokenizer(object):
    """Tokenizer backed by a SentencePiece model.

    `vocab` is the path to a trained `.model` file; `unk_token` is kept for
    downstream lookup of out-of-vocabulary pieces. Text is normalized via
    `preprocess_text` before encoding, and pieces that end in "<digit>,"
    are re-split so the trailing comma becomes its own token.
    """

    def __init__(self, vocab, unk_token, do_lower_case=False, remove_space=True, keep_accents=True, sp_model_kwargs: Optional[Dict[str, Any]] = None):
        self.vocab = vocab
        self.unk_token = unk_token
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Load the SentencePiece model from the given vocab path.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) per the
        flags set at construction time."""
        text = ' '.join(inputs.strip().split()) if self.remove_space else inputs
        # Convert LaTeX-style double quotes to plain ASCII quotes.
        text = text.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            # Decompose accented characters, then drop the combining marks.
            decomposed = unicodedata.normalize('NFKD', text)
            text = ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
        if self.do_lower_case:
            text = text.lower()
        return text

    def tokenize(self, text):
        """Return the list of SentencePiece tokens for `text`, splitting a
        trailing comma off any piece of the form "<digits>,"."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        tokens = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
                # Re-encode the numeric part without the comma so the comma
                # is emitted as a standalone token.
                sub_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and sub_pieces[0][0] == SPIECE_UNDERLINE:
                    # Re-encoding introduced a spurious leading underline; strip it.
                    if len(sub_pieces[0]) == 1:
                        sub_pieces = sub_pieces[1:]
                    else:
                        sub_pieces[0] = sub_pieces[0][1:]
                sub_pieces.append(piece[-1])
                tokens.extend(sub_pieces)
            else:
                tokens.append(piece)
        return tokens
class Params():
    """Hyper-parameter container for the CNN+RNN/CTC text-recognition model.

    All values come from keyword arguments with sensible defaults; the
    constructor also derives the alphabet and the CNN downscale factor and
    validates that the configured input width can hold the longest label.
    """

    def __init__(self, **kwargs):
        self.input_shape = kwargs.get('input_shape', (96, 1400))
        self.input_channels = kwargs.get('input_channels', 1)
        self.cnn_features_list = kwargs.get('cnn_features_list', [16, 32, 64, 96, 128])
        self.cnn_kernel_size = kwargs.get('cnn_kernel_size', [3, 3, 3, 3, 3])
        self.cnn_stride_size = kwargs.get('cnn_stride_size', [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1)])
        self.cnn_pool_size = kwargs.get('cnn_pool_size', [(2, 2), (2, 2), (2, 2), (2, 2), (1, 1)])
        self.cnn_batch_norm = kwargs.get('cnn_batch_norm', [False, False, False, False, False])
        self.rnn_units = kwargs.get('rnn_units', [256, 256])
        self.num_beam_paths = kwargs.get('num_beam_paths', 1)
        self.csv_delimiter = kwargs.get('csv_delimiter', ';')
        self.string_split_delimiter = kwargs.get('string_split_delimiter', '|')
        self.csv_files_train = kwargs.get('csv_files_train')
        self.csv_files_eval = kwargs.get('csv_files_eval')
        self.blank_symbol = kwargs.get('blank_symbol', '$')
        self.max_chars_per_string = kwargs.get('max_chars_per_string', 75)
        self.lookup_alphabet_file = kwargs.get('lookup_alphabet_file')
        # BUG FIX: the original had a trailing comma here, which stored the flag
        # as a 1-tuple — always truthy, so augmentation could never be disabled.
        self.data_augmentation = kwargs.get('data_augmentation', True)
        self.data_augmentation_max_rotation = kwargs.get('data_augmentation_max_rotation', 0.005)
        self.data_augmentation_max_slant = kwargs.get('data_augmentation_max_slant', 0.7)
        self.n_epochs = kwargs.get('n_epochs', 50)
        self.train_batch_size = kwargs.get('train_batch_size', 64)
        self.eval_batch_size = kwargs.get('eval_batch_size', 128)
        self.learning_rate = kwargs.get('learning_rate', 0.0001)
        self.optimizer = kwargs.get('optimizer', 'adam')
        self.output_model_dir = kwargs.get('output_model_dir', '')
        self.evaluate_every_epoch = kwargs.get('evaluate_every_epoch', 5)
        self.save_interval = kwargs.get('save_interval', 20)
        self.restore_model = kwargs.get('restore_model', False)
        self._assign_alphabet()
        # Total horizontal downscaling of the CNN stack: product of the width
        # components of every pooling and stride step.
        cnn_params = zip(self.cnn_pool_size, self.cnn_stride_size)
        self.downscale_factor = reduce((lambda i, j: (i * j)), map((lambda k: (k[0][1] * k[1][1])), cnn_params))
        assert (len(self.cnn_features_list) == len(self.cnn_kernel_size) == len(self.cnn_stride_size) == len(self.cnn_pool_size) == len(self.cnn_batch_norm)), 'Length of parameters of model are not the same, check that all the layers parameters have the same length.'
        # CTC needs at least one time step per output character (+1 for blank).
        max_input_width = ((self.max_chars_per_string + 1) * self.downscale_factor)
        assert (max_input_width <= self.input_shape[1]), 'Maximum length of labels is {}, input width should be greater or equal to {} but is {}'.format(self.max_chars_per_string, max_input_width, self.input_shape[1])
        assert (self.optimizer in ['adam', 'rms', 'ada']), 'Unknown optimizer {}'.format(self.optimizer)
        if os.path.isdir(self.output_model_dir):
            print('WARNING : The output directory {} already exists.'.format(self.output_model_dir))

    def show_experiment_params(self) -> dict:
        """Return all parameters as a dict (live view of the instance dict)."""
        return vars(self)

    def _assign_alphabet(self):
        # Build the alphabet from the lookup file and the configured blank symbol.
        self.alphabet = Alphabet(lookup_alphabet_file=self.lookup_alphabet_file, blank_symbol=self.blank_symbol)

    def to_dict(self) -> dict:
        """Return a JSON-serializable copy of the parameters, dropping the
        derived (non-serializable / recomputable) fields."""
        new_dict = self.__dict__.copy()
        del new_dict['alphabet']
        del new_dict['downscale_factor']
        return new_dict

    # BUG FIX: this alternate constructor takes `cls` but was missing the
    # @classmethod decorator, so it received an instance as `cls` and failed.
    @classmethod
    def from_json_file(cls, json_file: str):
        """Build a Params instance from a JSON config file (e.g. one written
        from `to_dict()`)."""
        with open(json_file, 'r') as file:
            config = json.load(file)
        return cls(**config)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.