code stringlengths 101 5.91M |
|---|
def build_lstm_lm3(input_shape, output_size):
    """Build and compile a two-layer LSTM language model.

    Args:
        input_shape: input shape tuple; input_shape[0] is the sequence length.
        output_size: vocabulary size, which is also the softmax output width.

    Returns:
        A compiled Keras ``Sequential`` model (adam + sparse categorical CE).
    """
    seq_len = input_shape[0]
    n_vocab = output_size
    net = Sequential()
    # +1 on the embedding input dim keeps index 0 free for the mask token.
    net.add(Embedding(n_vocab + 1, 128, mask_zero=True, input_length=seq_len))
    net.add(LSTM(650, unroll=True, return_sequences=True))
    net.add(Dropout(0.5))
    net.add(LSTM(650, unroll=True))
    net.add(Dropout(0.5))
    net.add(Dense(output_size))
    net.add(Activation('softmax'))
    net.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    return net
class OpenAIGPTTokenizerFast(metaclass=DummyObject):
    # Placeholder class emitted when the optional `tokenizers` backend is not
    # installed; instantiation raises an informative error via
    # requires_backends instead of a bare ImportError at import time.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
class Res16UNetSN34(Res16UNet34):
    # Res16UNet34 variant that swaps in sparse switch-normalization and the
    # matching SN basic block; architecture is otherwise inherited unchanged.
    NORM_TYPE = NormType.SPARSE_SWITCH_NORM
    BLOCK = BasicBlockSN
class AugmentationConfig(object):
    """Default on/off switches and parameters for data augmentation."""

    def __init__(self):
        # Photometric augmentations.
        self.color = ColorAugmentation.AGGRESSIVE
        # Geometric augmentations.
        self.crop = True
        self.distort_aspect_ratio = AspectRatioAugmentation.NORMAL
        # presumably JPEG-quality / compression jitter — TODO confirm
        self.quality = True
        self.erasing = True
        # Rotations: fixed 90/45-degree rotations disabled; small random
        # rotation bounded by rotate_max (presumably degrees — confirm).
        self.rotate90 = False
        self.rotate45 = False
        self.rotate_max = 13
        # Flips enabled on both axes.
        self.flip_vertical = True
        self.flip_horizontal = True
        self.padding_square = False
def parse_cmdline_kwargs(args):
    """Turn unknown command-line arguments into a kwargs dict.

    Each value is evaluated as a Python expression when possible; values
    that do not evaluate (unknown names, bad syntax) stay raw strings.
    """
    def _coerce(raw):
        assert isinstance(raw, str)
        try:
            # SECURITY NOTE: eval on CLI input executes arbitrary
            # expressions; acceptable only because the invoking user
            # controls the command line. Do not feed untrusted input here.
            return eval(raw)
        except (NameError, SyntaxError):
            return raw

    return {key: _coerce(val) for key, val in parse_unknown_args(args).items()}
class SingleDataset(BaseDataset):
    """Dataset over a single image folder (domain 'A' only, no pairing)."""

    def initialize(self, opt):
        # Gather every image path under opt.dataroot, in sorted order, and
        # build the configured preprocessing transform.
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot)
        self.A_paths = sorted(make_dataset(self.dir_A))
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        # Load one image as RGB and apply the transform.
        path = self.A_paths[index]
        img = self.transform(Image.open(path).convert('RGB'))
        return {'A': img, 'A_paths': path}

    def __len__(self):
        return len(self.A_paths)

    def name(self):
        return 'SingleImageDataset'
def load_pretrained(cfg: NamespaceMap, Module: Optional[Type[pl.LightningModule]], stage: str, **kwargs) -> pl.LightningModule:
    """Restore `Module` from the best checkpoint saved for `stage`.

    The checkpoint is located under cfg.paths.pretrained.load using the
    BEST_CHECKPOINT filename pattern; the latest match wins.
    """
    ckpt_dir = Path(cfg.paths.pretrained.load)
    ckpt_name = BEST_CHECKPOINT.format(stage=stage)
    ckpt_path = get_latest_match(ckpt_dir / ckpt_name)
    return Module.load_from_checkpoint(ckpt_path, **kwargs)
# NOTE(review): this bare call is almost certainly a class decorator whose
# leading '@' (and registry prefix, e.g. `@DATASETS.register_module()`) was
# lost in extraction — it should decorate the class defined below. Confirm
# against the upstream repository.
_module()
class GPT4Gen(MInstrDataset):
    """Grounded-QA dataset wrapper supporting three answer formats.

    version 'a'  -> plain answer, no boxes in the answer.
    version 'c'  -> chain-of-thought answer, phrase markers stripped.
    version 'bc' -> chain-of-thought answer with phrase-end markers replaced
                    by box placeholders plus the matching answer box sequence.
    """

    def __init__(self, *args, version, **kwargs):
        super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, QUESTION_PLACEHOLDER))
        self.version = version
        assert (version in ['a', 'c', 'bc'])

    def __getitem__(self, item):
        # Returns a conversation dict: one human turn (templated question)
        # and one gpt turn (answer formatted per self.version).
        raw = self.get_raw_item(item)
        image = self.get_image(raw['img_path'])
        boxes = raw['boxes']
        question = raw['question']
        # Drop phrase start markers; turn end markers into box placeholders
        # so the question text references its grounded boxes.
        question = question.replace(PHRASE_ST_PLACEHOLDER, '').replace(PHRASE_ED_PLACEHOLDER, BOXES_PLACEHOLDER)
        final_question = self.get_template().replace(QUESTION_PLACEHOLDER, question)
        query_boxes_seq = raw['question_boxes_seq']
        if (self.version == 'a'):
            final_answer = raw['answer']
            answer_boxes_seq = None
        elif (self.version == 'c'):
            final_answer = raw['cot_with_ans'].replace(PHRASE_ST_PLACEHOLDER, '').replace(PHRASE_ED_PLACEHOLDER, '')
            answer_boxes_seq = None
        elif (self.version == 'bc'):
            final_answer = raw['cot_with_ans'].replace(PHRASE_ST_PLACEHOLDER, '').replace(PHRASE_ED_PLACEHOLDER, BOXES_PLACEHOLDER)
            answer_boxes_seq = raw['answer_boxes_seq']
        else:
            # Unreachable: version validated in __init__.
            assert False
        ret = {'image': image, 'target': {'boxes': boxes}, 'conversations': [{'from': 'human', 'value': final_question, 'boxes_seq': query_boxes_seq}, {'from': 'gpt', 'value': final_answer, 'boxes_seq': answer_boxes_seq}]}
        return ret
def check_target_type(y, indicate_one_vs_all=False):
    """Validate and normalize a target array.

    Multilabel-indicator (one-hot) targets with exactly one positive per row
    are collapsed to class indices; anything else is flattened to 1d.
    When `indicate_one_vs_all` is True, also return whether the input was a
    multilabel-indicator matrix.
    """
    target_kind = type_of_target(y)
    is_indicator = (target_kind == 'multilabel-indicator')
    if is_indicator:
        # More than one positive per row means true multilabel — unsupported.
        if np.any(y.sum(axis=1) > 1):
            raise ValueError('Imbalanced-learn currently supports binary, multiclass and binarized encoded multiclasss targets. Multilabel and multioutput targets are not supported.')
        y = y.argmax(axis=1)
    else:
        y = column_or_1d(y)
    if indicate_one_vs_all:
        return (y, is_indicator)
    return y
class Rescaling(nn.Module):
    """Fixed affine input normalization: subtract `bias`, then convolve with
    the frozen kernel `scaling_mat`."""

    def __init__(self, bias, scaling_mat):
        super(Rescaling, self).__init__()
        # Registered as frozen parameters so they follow the module across
        # devices (.to/.cuda) while staying out of gradient updates.
        self.bias = nn.Parameter(bias, requires_grad=False)
        self.scaling_mat = nn.Parameter(scaling_mat, requires_grad=False)

    def forward(self, x):
        shifted = x - self.bias
        return nn.functional.conv2d(shifted, self.scaling_mat)
def test_digits_precomputed_two_stage():
    # Regression test: a FacilityLocation + GraphCut mixture (weights
    # 1.0 / 0.3, 100 selections) optimized with the two-stage optimizer on a
    # precomputed cosine-similarity matrix (cupy) must reproduce the stored
    # reference ranking exactly and the gains to 4 decimal places.
    model1 = FacilityLocationSelection(100)
    model2 = GraphCutSelection(100)
    model = MixtureSelection(100, [model1, model2], [1.0, 0.3], metric='precomputed', optimizer='two-stage')
    model.fit(X_digits_cosine_cupy)
    assert_array_equal(model.ranking, digits_cosine_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_gains, 4)
class MCD(BaseDetector):
    """Outlier detection with the Minimum Covariance Determinant estimator.

    Wraps sklearn's MinCovDet: Mahalanobis distances to the robustly
    estimated location/covariance serve as outlier scores.
    """

    def __init__(self, contamination=0.1, store_precision=True, assume_centered=False, support_fraction=None, random_state=None):
        super(MCD, self).__init__(contamination=contamination)
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        # Fit the robust covariance model; training scores are the robust
        # Mahalanobis distances (dist_) computed during the MCD fit.
        X = check_array(X)
        self._set_n_classes(y)
        self.detector_ = MinCovDet(store_precision=self.store_precision, assume_centered=self.assume_centered, support_fraction=self.support_fraction, random_state=self.random_state)
        self.detector_.fit(X=X, y=y)
        self.decision_scores_ = self.detector_.dist_
        self._process_decision_scores()
        return self

    def decision_function(self, X):
        # Score new samples by Mahalanobis distance under the fitted model.
        check_is_fitted(self, ['decision_scores_', 'threshold_', 'labels_'])
        X = check_array(X)
        return self.detector_.mahalanobis(X)

    # NOTE(review): the trailing-underscore accessors below read like
    # @property methods that lost their decorators in extraction — as written
    # they must be *called* (model.location_()); confirm upstream.
    def raw_location_(self):
        return self.detector_.raw_location_

    def raw_covariance_(self):
        return self.detector_.raw_covariance_

    def raw_support_(self):
        return self.detector_.raw_support_

    def location_(self):
        return self.detector_.location_

    def covariance_(self):
        return self.detector_.covariance_

    def precision_(self):
        return self.detector_.precision_

    def support_(self):
        return self.detector_.support_
def main(A, t_max, M, N_max, R, exec_type, theta):
    """Compare Thompson sampling against inverse-Pfa (tGaussian) sampling on
    an A-armed Bernoulli bandit.

    Runs R realizations of t_max steps, pickles the bandits and labels under
    ../results/..., and renders the standard suite of regret/action plots.
    """
    print('{}-armed Bernoulli bandit with optimal, TS and sampling policies with {} MC samples for {} time-instants and {} realizations'.format(A, M, t_max, R))
    theta_str = str.replace(str.strip(np.array_str(theta.flatten()), ' []'), ' ', '_')
    dir_string = '../results/{}/A={}/t_max={}/R={}/M={}/N_max={}/theta={}'.format(os.path.basename(__file__).split('.')[0], A, t_max, R, M, N_max, theta_str)
    os.makedirs(dir_string, exist_ok=True)
    context = None
    reward_function = {'dist': stats.bernoulli, 'theta': theta}
    reward_prior = {'dist': stats.beta, 'alpha': np.ones((A, 1)), 'beta': np.ones((A, 1))}
    bandits = []
    bandits_labels = []
    # Baseline: plain Thompson sampling (single MC sample, single arm draw).
    thompsonSampling = {'sampling_type': 'static', 'MC_type': 'MC_arms', 'M': 1, 'arm_N_samples': 1}
    bandits.append(BayesianBanditSampling(A, reward_function, reward_prior, thompsonSampling))
    bandits_labels.append('Thompson Sampling')
    # Candidate: inverse-Pfa sampling with a truncated-Gaussian Pfa model.
    invPfaSampling = {'sampling_type': 'infPfa', 'Pfa': 'tGaussian', 'f(1/Pfa)': np.log10, 'MC_type': 'MC_arms', 'M': M, 'N_max': N_max}
    bandits.append(BayesianBanditSampling(A, reward_function, reward_prior, invPfaSampling))
    bandits_labels.append('tGaussian: log10(1/Pfa), M={}'.format(invPfaSampling['M']))
    for (n, bandit) in enumerate(bandits):
        bandit.execute_realizations(R, t_max, context, exec_type)
    with open(dir_string + '/bandits.pickle', 'wb') as f:
        pickle.dump(bandits, f)
    with open(dir_string + '/bandits_labels.pickle', 'wb') as f:
        pickle.dump(bandits_labels, f)
    # BUGFIX: the original also built a 16-color palette here and immediately
    # overwrote it with this two-color one; the dead list is removed.
    bandits_colors = [colors.cnames['black'], colors.cnames['red']]
    dir_plots = dir_string + '/plots'
    os.makedirs(dir_plots, exist_ok=True)
    t_plot = t_max
    # (plot function, plot_std) pairs replicating the original call sequence.
    plot_calls = [
        (bandits_plot_regret, False), (bandits_plot_regret, True),
        (bandits_plot_cumregret, False), (bandits_plot_cumregret, True),
        (bandits_plot_rewards_expected, True),
        (bandits_plot_arm_density, True),
        (bandits_plot_actions, False), (bandits_plot_actions, True),
        (bandits_plot_actions_correct, False), (bandits_plot_actions_correct, True),
    ]
    for (plot_fn, plot_std) in plot_calls:
        plot_fn(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots)
def mergeGuide(dct_lst):
    """Merge consecutive 'Guide' turns of the same episode into single rows.

    `dct_lst` rows are dicts with keys Episode_ID, Turn_ID, Speaker, Utter,
    Intent, SlotTags (IDs may be strings). Consecutive Guide rows within an
    episode are folded together — Utter/SlotTags tab-joined, Intent joined
    with ';', keeping the later Turn_ID — while every other row passes
    through unchanged. Returns the merged list.
    """
    merged = []
    prev = dct_lst[0]
    for cur in dct_lst[1:]:
        same_episode = int(cur['Episode_ID']) == int(prev['Episode_ID'])
        if same_episode and cur['Speaker'] == prev['Speaker'] == 'Guide':
            # Fold the current Guide turn into the pending one; note the
            # merged row carries int-typed IDs (as the original did).
            prev = {
                'Turn_ID': int(cur['Turn_ID']),
                'Speaker': 'Guide',
                'Episode_ID': int(cur['Episode_ID']),
                'Utter': '{}\t{}'.format(prev['Utter'], cur['Utter'].strip()),
                'Intent': '{};{}'.format(prev['Intent'], cur['Intent'].strip()),
                'SlotTags': '{}\t{}'.format(prev['SlotTags'], cur['SlotTags'].strip()),
            }
        else:
            # BUGFIX/cleanup: the original spelled this out as four separate
            # elif branches with byte-identical bodies; they collapse to one
            # flush-and-advance branch with no behavior change.
            merged.append(prev)
            prev = cur
    merged.append(prev)
    return merged
def if_skip(api):
    """Return True when `api` should be skipped by the conversion check."""
    # Exact-name exclusions.
    if api in ('tf.keras.Input', 'tf.keras.layers.Input'):
        return True
    # Substring exclusions (initializer helpers, pretrained applications).
    return any(token in api for token in ('initializers', 'tf.keras.applications.'))
def get_dataset(task):
    # Fetch the feature matrix and target vector for an OpenML-style task;
    # the two discarded return values are presumably the categorical mask
    # and attribute names — confirm against the task API.
    (X, y, _, _) = task.get_dataset().get_data(task.target_name)
    return (X, y)
def load_or_encode_corpus(model_args: ModelArguments, data_args: DataArguments, eval_args: EvalArguments):
    """Return (faiss index, corpus id array), reusing cached artifacts.

    When both the serialized index and the id array already exist under
    data_args.out_corpus_dir they are loaded directly; otherwise the corpus
    is encoded from scratch and (on the main process only) persisted.
    """
    out_index_path = os.path.join(data_args.out_corpus_dir, 'index')
    out_corpus_ids_path = os.path.join(data_args.out_corpus_dir, 'corpus_ids.npy')
    if (os.path.exists(out_index_path) and os.path.exists(out_corpus_ids_path)):
        index = faiss.read_index(out_index_path)
        corpus_ids = np.load(out_corpus_ids_path)
        logger.info('Load pre-computed corpus representations')
    else:
        doc_tokenizer = ANCETokenizerFast.from_pretrained(model_args.doc_encoder_path)
        doc_encoder = ance_repconc_from_pretrained(model_args.doc_encoder_path, False, None, None)
        # Corpus loading differs per data format; both variants join fields
        # with the tokenizer's sep token.
        if (data_args.data_format == 'msmarco'):
            corpus = load_corpus(data_args.corpus_path, doc_tokenizer.sep_token, verbose=is_main_process(eval_args.local_rank))
        elif (data_args.data_format == 'beir'):
            corpus = load_beir_corpus(data_args.corpus_path, doc_tokenizer.sep_token, verbose=is_main_process(eval_args.local_rank))
        else:
            raise NotImplementedError()
        (index, corpus_ids) = encode_corpus(corpus, doc_encoder, doc_tokenizer, model_args.max_seq_length, eval_args)
        # Only the main process writes the cache, avoiding concurrent writes.
        if is_main_process(eval_args.local_rank):
            os.makedirs(data_args.out_corpus_dir, exist_ok=True)
            faiss.write_index(index, out_index_path)
            np.save(out_corpus_ids_path, corpus_ids)
    return (index, corpus_ids)
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the tensor addressed by dotted `key` on `hf_pointer`.

    `weight_type` selects which sub-tensor of the resolved module receives
    the value ('weight', 'bias', 'running_mean', ...); None (or an
    unrecognized type) writes into the resolved object's own .data.
    Raises ValueError on a shape mismatch. `full_name` is only used in
    messages.
    """
    # The recognized weight types map one-to-one onto attribute names.
    _DIRECT_ATTRS = ('weight', 'weight_g', 'weight_v', 'bias',
                     'running_mean', 'running_var', 'num_batches_tracked')
    # Walk the dotted path down to the target submodule/parameter.
    for part in key.split('.'):
        hf_pointer = getattr(hf_pointer, part)
    # Shape check runs against the named sub-tensor when a weight_type is
    # given, otherwise against the resolved object itself.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(f"Shape of hf {(((key + '.') + weight_type) if (weight_type is not None) else '')} is {hf_shape}, but should be {value.shape} for {full_name}")
    if weight_type in _DIRECT_ATTRS:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f"{(key + (('.' + weight_type) if (weight_type is not None) else ''))} was initialized from {full_name}.")
def move(board: Board, action: int) -> Tuple[(Board, float)]:
    # A move in direction `action` is implemented as: transform the board so
    # the chosen direction becomes "left", perform the canonical left move,
    # then apply the same transform again to map back (which implies the
    # transform is self-inverse — confirm against transform_board).
    board = transform_board(board, action)
    (board, reward) = move_left(board)
    board = transform_board(board, action)
    return (board, reward)
class BertForNextSentencePrediction(metaclass=DummyObject):
    # Placeholder class used when the optional `torch` backend is missing;
    # instantiation raises an informative error via requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TensorFlowTransformer(object):
    """Converts a Caffe-style graph definition (plus optional trained
    weights) into TensorFlow parameter dicts and generated source code."""

    def __init__(self, def_path, data_path, verbose=True, phase='test'):
        self.verbose = verbose
        self.phase = phase
        self.load(def_path, data_path, phase)
        # Lazily-computed caches for transform_data / transform_source.
        self.params = None
        self.source = None

    def load(self, def_path, data_path, phase):
        # Build the graph from the definition file; inject trained weights
        # when a data path is supplied.
        graph = GraphBuilder(def_path, phase).build()
        if (data_path is not None):
            graph = DataInjector(def_path, data_path)(graph)
        # Fuse batch-norm scale/bias nodes, attach ReLUs to eligible parents,
        # and make node names TF-safe ('/' is a scope separator in TF).
        transformers = [BatchNormScaleBiasFuser(), ReLUFuser(allowed_parent_types=[NodeKind.Convolution, NodeKind.InnerProduct, NodeKind.BatchNorm]), NodeRenamer((lambda node: node.name.replace('/', '_')))]
        self.graph = graph.transformed(transformers)
        if self.verbose:
            print_stderr(self.graph)

    def transform_data(self):
        # Reorder weight tensors for TF (the (2, 3, 1, 0) permutation maps
        # NCHW-style conv kernels to HWCN — assuming Caffe source layout),
        # preprocess batch-norm params, and name them. Cached after first call.
        if (self.params is None):
            transformers = [DataReshaper({NodeKind.Convolution: (2, 3, 1, 0), NodeKind.InnerProduct: (1, 0)}), BatchNormPreprocessor(), ParameterNamer()]
            self.graph = self.graph.transformed(transformers)
            self.params = {node.name: node.data for node in self.graph.nodes if node.data}
        return self.params

    def transform_source(self):
        # Emit TensorFlow source for the mapped graph; cached after first call.
        if (self.source is None):
            mapper = TensorFlowMapper(self.graph)
            chains = mapper.map()
            emitter = TensorFlowEmitter()
            self.source = emitter.emit(self.graph.name, chains)
        return self.source
def collect_results(args):
    """Scan TPE trial directories under args.path and print the best config.

    Each trial directory is expected to hold a result.json with an 'EAO'
    score and a 'config' dict of hyper-parameters; unreadable or incomplete
    trials are skipped. Prints the parameters of the trial with the highest
    EAO. Returns None.
    """
    dirs = os.listdir(args.path)
    print('[*] ===== total {} files in TPE dir'.format(len(dirs)))
    penalty_k = []
    scale_lr = []
    wi = []
    big_sz = []
    small_sz = []
    ratio = []
    eao = []
    count = 0
    for d in dirs:
        json_path = os.path.join(args.path, d, 'result.json')
        if not os.path.exists(json_path):
            continue
        try:
            # BUGFIX: the original used a bare `except:` around
            # json.load(open(...)), swallowing every error and leaking the
            # file handle; catch only read/parse failures and close the file.
            with open(json_path, 'r') as f:
                js = json.load(f)
        except (OSError, ValueError):
            continue
        if 'EAO' not in js:
            continue
        count += 1
        eao.append(js['EAO'])
        cfg = js['config']
        scale_lr.append(cfg['scale_lr'])
        wi.append(cfg['window_influence'])
        penalty_k.append(cfg['penalty_k'])
        ratio.append(cfg['ratio'])
        small_sz.append(cfg['small_sz'])
        big_sz.append(cfg['big_sz'])
    print('{} params group have been tested'.format(count))
    eao = np.array(eao)
    max_idx = np.argmax(eao)
    max_eao = eao[max_idx]
    print('penalty_k: {:.4f}, scale_lr: {:.4f}, wi: {:.4f}, ratio: {:.4f}, small_sz: {}, big_sz: {:.4f}, eao: {}'.format(penalty_k[max_idx], scale_lr[max_idx], wi[max_idx], ratio[max_idx], small_sz[max_idx], big_sz[max_idx], max_eao))
class KeypointRCNNFeatureExtractor(nn.Module):
    """ROI feature extractor for the keypoint head: pools proposal features,
    then applies a configurable stack of 3x3 conv + ReLU layers."""

    def __init__(self, cfg):
        super(KeypointRCNNFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(output_size=(resolution, resolution), scales=scales, sampling_ratio=sampling_ratio)
        self.pooler = pooler
        input_features = cfg.MODEL.BACKBONE.OUT_CHANNELS
        layers = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS
        next_feature = input_features
        # Conv layers are registered by name (conv_fcn1, conv_fcn2, ...) so
        # forward() can retrieve them in order via getattr.
        self.blocks = []
        for (layer_idx, layer_features) in enumerate(layers, 1):
            layer_name = 'conv_fcn{}'.format(layer_idx)
            module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
            # He initialization matches the ReLU activations applied later.
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            nn.init.constant_(module.bias, 0)
            self.add_module(layer_name, module)
            next_feature = layer_features
            self.blocks.append(layer_name)

    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        for layer_name in self.blocks:
            x = F.relu(getattr(self, layer_name)(x))
        return x
def get_dataset_splits(dataset):
    """Split a dataset dict into (train, val, test) views.

    `dataset` maps each key in ('x', 't', 'd', 'y', 'y_normalized') to an
    indexable array and carries a 'metadata' dict holding 'train_index',
    'val_index' and 'test_index'. The shared metadata dict is attached to
    every split unchanged (not copied).
    """
    data_keys = ['x', 't', 'd', 'y', 'y_normalized']
    meta = dataset['metadata']
    splits = []
    for index_name in ('train_index', 'val_index', 'test_index'):
        idx = meta[index_name]
        part = {key: dataset[key][idx] for key in data_keys}
        part['metadata'] = meta
        splits.append(part)
    return tuple(splits)
def terminal_format(args):
    """Render a list of value tuples as a space-separated status line.

    Each item is either (label, value, fmt) -> 'label=<value:fmt>' or
    (value, fmt) -> '<value:fmt>', where fmt is a format-spec fragment such
    as ':.4f'. Every item (including unrecognized arities, which contribute
    an empty segment) is followed by a single space.
    """
    segments = []
    for item in args:
        if len(item) == 3:
            template = '{}={' + str(item[2]) + '}'
            segments.append(template.format(str(item[0]), item[1]))
        elif len(item) == 2:
            template = '{' + str(item[1]) + '}'
            segments.append(template.format(item[0]))
        else:
            segments.append('')
    return ''.join(seg + ' ' for seg in segments)
def _graph_network_no_node_update(graph_tuple):
    # Build a GraphNetwork with the node-update function disabled (None);
    # edge and global updates are identity lambdas, so nodes, edges and
    # globals all pass through unchanged. Returns the network applied to
    # `graph_tuple`.
    update_node_fn = None
    update_edge_fn = (lambda e, sn, rn, g: e)
    update_global_fn = (lambda gn, ge, g: g)
    net = nn.GraphNetwork(update_edge_fn, update_node_fn, update_global_fn)
    return net(graph_tuple)
# NOTE(review): the two leading attribute fragments are the tails of pytest
# mark decorators whose '@pytest.mark'-style prefixes were lost in extraction
# (presumably @pytest.mark.no_cover and @pytest.mark.timeout(40)) — confirm
# against the upstream test module.
.no_cover
.timeout(40)
def test_trpo_cubecrash():
    # Smoke test: run the TRPO cubecrash example script for one epoch with a
    # tiny batch size and assert the subprocess exits with status 0.
    env = os.environ.copy()
    env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1'
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'tf/trpo_cubecrash.py')), '--batch_size', '4'], check=False, env=env).returncode == 0)
def get_parser():
    """Build the CLI argument parser (model path, input dimension, channel
    count, GPU id)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', dest='model', required=True, type=str, help='Path to .pt model.', metavar=imed_utils.Metavar.file)
    parser.add_argument('-d', '--dimension', dest='dimension', required=True, type=int, help='Input dimension (2 for 2D inputs, 3 for 3D inputs).', metavar=imed_utils.Metavar.int)
    parser.add_argument('-n', '--n_channels', dest='n_channels', default=1, type=int, help='Number of input channels of the model.', metavar=imed_utils.Metavar.int)
    # NOTE(review): gpu_id is parsed as str but defaults to int 0 (so the
    # default and a CLI-supplied value differ in type) — confirm what the
    # downstream device-selection code expects.
    parser.add_argument('-g', '--gpu_id', dest='gpu_id', default=0, type=str, help='GPU number if available.', metavar=imed_utils.Metavar.int)
    return parser
# NOTE(review): '.parametrize(...)' is a pytest parametrize decorator that
# lost its '@pytest.mark' prefix in extraction — confirm upstream.
.parametrize('proj_head_dims', [[None, [16, 8]], [[16, 8], None], [[16, 8], [16, 8]]])
def test_projection_head_value_error(proj_head_dims):
    # Each parametrized projection-head configuration must make the
    # ContrastiveDenoisingTrainer constructor raise ValueError.
    cat_embed_cols = ['col1', 'col2']
    continuous_cols = ['col3', 'col4']
    preprocessor = TabPreprocessor(cat_embed_cols=cat_embed_cols, continuous_cols=continuous_cols, with_attention=True, with_cls_token=True)
    X_tab = preprocessor.fit_transform(test_df)
    tr_model = _build_transf_model('saint', preprocessor, preprocessor.cat_embed_input, preprocessor.continuous_cols)
    with pytest.raises(ValueError):
        cd_trainer = ContrastiveDenoisingTrainer(model=tr_model, preprocessor=preprocessor, projection_head1_dims=proj_head_dims[0], projection_head2_dims=proj_head_dims[1], verbose=0)
def convert_type(function):
    """Decorator: run an image (and optional bbox) augmentation in float32.

    Converts the image to float32 (and absolute-pixel bboxes to relative,
    clipped coordinates) before calling `function`, then converts the
    results back to their original dtypes/coordinate systems, saturating
    and clipping on the way out.
    """
    # NOTE(review): the bare '(function)' below is almost certainly the
    # argument list of a stripped decorator, presumably
    # '@functools.wraps(function)' — confirm against the upstream source.
    (function)
    def wrap(image, *args, **kwargs):
        image_type = image.dtype
        image = tf.image.convert_image_dtype(image, tf.float32)
        if (len(args) >= 1):
            # Bboxes path: first positional arg is the bbox tensor.
            bboxes = args[0]
            bboxes_type = bboxes.dtype
            # Integer dtype is taken to mean absolute pixel coordinates.
            bboxes_absolute = bboxes_type.is_integer
            bboxes = tf.cast(bboxes, tf.float32)
            bboxes = (_bboxes_to_relative(image, bboxes) if bboxes_absolute else bboxes)
            bboxes = tf.clip_by_value(bboxes, 0.0, 1.0)
            (image, bboxes) = function(image, bboxes, *args[1:], **kwargs)
            # Undo the conversions: back to original image dtype and, when
            # the input was absolute, back to pixel coordinates.
            image = tf.clip_by_value(image, 0.0, 1.0)
            image = tf.image.convert_image_dtype(image, image_type, saturate=True)
            bboxes = tf.clip_by_value(bboxes, 0.0, 1.0)
            bboxes = (_bboxes_to_absolute(image, bboxes) if bboxes_absolute else bboxes)
            bboxes = tf.cast(bboxes, bboxes_type)
            return (image, bboxes)
        # Image-only path.
        image = function(image, **kwargs)
        image = tf.clip_by_value(image, 0.0, 1.0)
        image = tf.image.convert_image_dtype(image, image_type, saturate=True)
        return image
    return wrap
class HeuristicMultivariateDifferentiablePointProcess(POMultivariatePointProcess):
    """Partially-observed multivariate point process trained by alternating
    E-steps (variational side) and M-steps (model side), with the concrete
    relaxation temperature annealed once per epoch."""

    def expconcrete_dist(self):
        # Lazily build and cache the ExpConcrete distribution over
        # hidden_dim + 1 categories.
        if (not hasattr(self, '_expconcrete_dist')):
            self._expconcrete_dist = ExpConcreteDistribution((self.hidden_dim + 1))
        return self._expconcrete_dist

    def e_step_obj_func(self, history: MultivariateEventSeq, variational_dist, beta=1.0, **kwargs) -> torch.Tensor:
        """Monte-Carlo E-step objective: average over `n_sampling` sampled
        hidden sequences of (negative log-likelihood - beta * entropy),
        plus the model's regularization term."""
        obj = 0
        n_sampling = kwargs.get('n_sampling', 1)
        for _ in range(n_sampling):
            history_with_hidden = variational_dist.sample_hidden_seq(history=history)
            entropy = self.hidden_entropy(history_with_hidden, **kwargs)
            obj = ((obj + self.neg_ll(history_with_hidden, **kwargs)) - (beta * entropy))
        return ((obj / n_sampling) + self.regularize())

    def fit(self, history_list: List[MultivariateEventSeq], variational_dist=None, n_epochs=100, obj_func_kwargs=None, optimizer='Adagrad', optimizer_kwargs=None, temperature_rate=0.99, shuffle=False, print_freq=10, logger=print, **fit_kwargs):
        """Fit the process on `history_list` by alternating E and M steps.

        print_freq > 0 logs loss/temperature every print_freq epochs;
        print_freq == -1 logs only the temperature each epoch. Returns self.
        """
        # BUGFIX: obj_func_kwargs / optimizer_kwargs previously used mutable
        # dict defaults ({} and {'lr': 0.01}) shared across calls; normalize
        # None to fresh dicts instead. Passing dicts explicitly still works.
        if obj_func_kwargs is None:
            obj_func_kwargs = {}
        if optimizer_kwargs is None:
            optimizer_kwargs = {'lr': 0.01}
        # Without an explicit variational distribution the model acts as its
        # own posterior and shares a single optimizer.
        if (not obj_func_kwargs.get('use_variational', False)):
            variational_dist = self
        optimizer_model = getattr(torch.optim, optimizer)(params=self.parameters(), **optimizer_kwargs)
        if (variational_dist is not self):
            optimizer_var = getattr(torch.optim, optimizer)(params=variational_dist.parameters(), **optimizer_kwargs)
        else:
            optimizer_var = optimizer_model
        for iter_idx in range(n_epochs):
            if shuffle:
                self.rng.shuffle(history_list)
            running_loss = 0
            for each_history in history_list:
                loss = self.e_step(each_history, variational_dist, optimizer_var, **obj_func_kwargs)
                self.m_step(each_history, variational_dist, optimizer_model, **obj_func_kwargs)
                if (print_freq != (- 1)):
                    running_loss += loss.item()
            if ((print_freq > 0) and ((iter_idx % print_freq) == 0)):
                logger('#(iter) = {}\t loss = {}'.format(iter_idx, (running_loss / len(history_list))))
                logger('\t\t temperature = {}'.format(self.temperature))
            if (print_freq == (- 1)):
                logger('#(iter) = {}\ttemperature = {}'.format(iter_idx, self.temperature))
            # Anneal the concrete-relaxation temperature each epoch.
            self.temperature = (self.temperature * temperature_rate)
        return self
class EventWriter():
    """Abstract base for event writers: subclasses must implement write();
    close() is a no-op by default so writers without resources need not
    override it."""

    def write(self):
        raise NotImplementedError

    def close(self):
        pass
class UploadCommand(Command):
    """setup.py `upload` command: build sdist/wheel, publish via twine, and
    push a version git tag."""

    description = 'Build and publish the package.'
    user_options = []

    # BUGFIX: restored the @staticmethod decorator (apparently lost in
    # extraction, like other stripped decorators in this file). Without it,
    # self.status('...') passes the instance as `s` plus the message as an
    # extra positional argument and raises TypeError.
    @staticmethod
    def status(s):
        """Print `s` in bold using ANSI escapes."""
        print('\x1b[1m{0}\x1b[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status('Removing previous builds...')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            # No previous build directory — nothing to clean.
            pass
        self.status('Building Source and Wheel (universal) distribution...')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine...')
        os.system('twine upload dist/*')
        self.status('Pushing git tags...')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')
        sys.exit()
class Top5Accuracy(PytorchMetric):
    """Running top-5 accuracy: a sample counts as correct when its target
    appears among the five highest-scoring predicted classes."""

    def __init__(self):
        # Accumulators: examples seen and correct top-5 hits.
        self.total = torch.tensor(0)
        self.correct = torch.tensor(0)

    def __call__(self, preds, targets):
        batch_size = targets.size(0)
        # Top-5 class indices per sample, transposed to (5, batch).
        (_, preds) = preds.topk(5, dim=(- 1), largest=True, sorted=True)
        preds = preds.type_as(targets).t()
        # Broadcast targets against each of the five candidate rows.
        targets = targets.view(1, (- 1)).expand_as(preds)
        self.correct += preds.eq(targets).contiguous().view((- 1)).sum()
        self.total += batch_size

    def compute(self):
        # Fraction of examples whose target appeared in the top 5.
        return (self.correct.float() / self.total)
# NOTE(review): the bare call below is a registration decorator missing its
# leading '@' (presumably @_REGISTRY.register()) intended to register
# build_timm_backbone — confirm against the upstream repository.
_REGISTRY.register()
def build_timm_backbone(cfg, input_shape):
    """Construct a TIMM backbone from cfg.MODEL.TIMM settings; `input_shape`
    is accepted for builder-interface parity but not used here."""
    model = TIMM(cfg.MODEL.TIMM.BASE_NAME, cfg.MODEL.TIMM.OUT_LEVELS, freeze_at=cfg.MODEL.TIMM.FREEZE_AT, norm=cfg.MODEL.TIMM.NORM, pretrained=cfg.MODEL.TIMM.PRETRAINED)
    return model
def copy_fold(in_folder: str, out_folder: str):
    """Copy one training fold's artifacts into `out_folder`.

    The debug log, final checkpoint (+ pkl) and progress plot are required;
    the architecture PDF is copied only when present.
    """
    required = ('debug.json', 'model_final_checkpoint.model',
                'model_final_checkpoint.model.pkl', 'progress.png')
    for fname in required:
        shutil.copy(join(in_folder, fname), join(out_folder, fname))
    optional = 'network_architecture.pdf'
    if isfile(join(in_folder, optional)):
        shutil.copy(join(in_folder, optional), join(out_folder, optional))
class WarpCTC(chainer.Chain):
    """Chainer CTC loss head backed by the warp-ctc implementation.

    Projects encoder states to the output vocabulary with a linear layer and
    computes CTC loss against unpadded label sequences.
    """

    def __init__(self, odim, eprojs, dropout_rate):
        super(WarpCTC, self).__init__()
        # Imported lazily so chainer_ctc is required only when this head is
        # actually instantiated.
        from chainer_ctc.warpctc import ctc as warp_ctc
        self.ctc = warp_ctc
        self.dropout_rate = dropout_rate
        self.loss = None
        with self.init_scope():
            self.ctc_lo = L.Linear(eprojs, odim)

    def forward(self, hs, ys):
        # hs: padded batch of encoder state sequences; ys: list of label
        # sequences. Returns the CTC loss variable.
        self.loss = None
        # Every sequence in the padded batch shares the padded time length.
        ilens = ([hs.shape[1]] * hs.shape[0])
        olens = [x.shape[0] for x in ys]
        # Project to vocab logits, then reorder to (time, batch, vocab) as
        # warp-ctc expects.
        y_hat = self.ctc_lo(F.dropout(hs, ratio=self.dropout_rate), n_batch_axes=2).transpose(1, 0, 2)
        logging.info(((self.__class__.__name__ + ' input lengths: ') + str(ilens)))
        logging.info(((self.__class__.__name__ + ' output lengths: ') + str(olens)))
        self.loss = self.ctc(y_hat, ilens, ys)[0]
        logging.info(('ctc loss:' + str(self.loss.data)))
        return self.loss

    def log_softmax(self, hs):
        # Frame-wise log-probabilities over the vocabulary (shape preserved).
        y_hat = self.ctc_lo(F.pad_sequence(hs), n_batch_axes=2)
        return F.log_softmax(y_hat.reshape((- 1), y_hat.shape[(- 1)])).reshape(y_hat.shape)

    def argmax(self, hs_pad):
        # Greedy per-frame best label indices.
        return F.argmax(self.ctc_lo(F.pad_sequence(hs_pad), n_batch_axes=2), axis=(- 1))
def extract_hyperparameters_from_keras(model):
    """Collect a minimal hyper-parameter summary from a Keras model.

    Returns a dict with the optimizer config (None when the model has no
    compiled optimizer) and the active mixed-precision policy name.
    """
    import tensorflow as tf
    optimizer = getattr(model, 'optimizer', None)
    return {
        'optimizer': optimizer.get_config() if optimizer is not None else None,
        'training_precision': tf.keras.mixed_precision.global_policy().name,
    }
class GPTJModel(metaclass=DummyObject):
    # Placeholder class used when the optional `torch` backend is missing;
    # instantiation raises an informative error via requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TestImageResizeTransform(unittest.TestCase):
    def test_image_resize_1(self):
        # A batch of 3 uniform 100x100x3 uint8 images (constant value 100,
        # NHWC layout) must come out of ImageResizeTransform as float NCHW
        # tensors of size (3, 3, 800, 800) with the constant value preserved.
        images_batch = (torch.ones((3, 100, 100, 3), dtype=torch.uint8) * 100)
        transform = ImageResizeTransform()
        images_transformed = transform(images_batch)
        IMAGES_GT = (torch.ones((3, 3, 800, 800), dtype=torch.float) * 100)
        self.assertEqual(images_transformed.size(), IMAGES_GT.size())
        self.assertAlmostEqual(torch.abs((IMAGES_GT - images_transformed)).max().item(), 0.0)
# NOTE(review): the bare "(scope='module')" below is the argument list of a
# fixture decorator whose "@pytest.fixture" prefix was lost in extraction —
# confirm against the upstream test module.
(scope='module')
def sconv2dlstm_hidden_reset_subtract_instance():
    # Fixture: SConv2dLSTM cell (1 input channel, 8 hidden channels, 3x3
    # kernel) with self-managed hidden state and 'subtract' reset mechanism.
    return snn.SConv2dLSTM(1, 8, 3, init_hidden=True, reset_mechanism='subtract')
def write_json(json_data):
    """Serialize `json_data` to <parent_path>/data/all_data/all_data.json,
    creating the directory tree when needed."""
    file_name = 'all_data.json'
    dir_path = os.path.join(parent_path, 'data', 'all_data')
    # BUGFIX: the exists()+mkdir() pair raced with concurrent writers and
    # failed outright when the intermediate 'data' directory was missing;
    # makedirs(exist_ok=True) handles both cases.
    os.makedirs(dir_path, exist_ok=True)
    file_path = os.path.join(dir_path, file_name)
    print('writing {}'.format(file_name))
    with open(file_path, 'w') as outfile:
        json.dump(json_data, outfile)
class Adam_GC(Optimizer):
    """Adam with Gradient Centralization (GC).

    Identical to Adam except that every gradient with more than one
    dimension is re-centered to zero mean over its non-leading dimensions
    before the moment updates. Supports the optional AMSGrad variant.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam_GC, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before amsgrad existed lack the key; default off.
        super(Adam_GC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        closure: optional callable that re-evaluates the model and returns
        the loss; its return value is passed through.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy per-parameter state initialization on first use.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                # NOTE(review): add_/addcmul_/addcdiv_ below use the legacy
                # positional (scalar, tensor) call order that newer torch
                # releases reject in favor of alpha=/value= keywords — this
                # presumably targets an older torch; confirm before upgrading.
                if (group['weight_decay'] != 0):
                    grad.add_(group['weight_decay'], p.data)
                # Gradient Centralization: subtract the mean over all
                # non-leading dimensions for multi-dimensional gradients.
                if (len(list(grad.size())) > 1):
                    grad.add_((- grad.mean(dim=tuple(range(1, len(list(grad.size())))), keepdim=True)))
                exp_avg.mul_(beta1).add_((1 - beta1), grad)
                exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
                if amsgrad:
                    # AMSGrad keeps the running max of the second moment.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = (group['lr'] / bias_correction1)
                p.data.addcdiv_((- step_size), exp_avg, denom)
        return loss
def is_verified_rect(rect):
    """Decide whether an annotation rect counts as human-verified.

    Precedence: a 'uhrs' judgement wins outright; rects missing 'class' or
    'rect' are rejected; an explicit 'uhrs_confirm' wins; low-confidence
    rects ('conf' < 1) are rejected; merged rects require every source rect
    to verify; anything remaining is accepted.
    """
    if 'uhrs' in rect:
        judgement = rect['uhrs']
        # Sanity check: positive votes must not be outnumbered by negatives.
        assert judgement.get('1', 0) >= judgement.get('2', 0)
        return True
    if 'class' not in rect or 'rect' not in rect:
        return False
    if 'uhrs_confirm' in rect:
        assert rect['uhrs_confirm'] > 0
        return True
    if 'conf' in rect and rect['conf'] < 1:
        return False
    if 'merge_from' in rect:
        return all(is_verified_rect(source) for source in rect['merge_from'])
    return True
def evaluate_3rd_user_task_fastgcnnew(valid_batch_index, model, sess, valid_data, is_training):
    """Evaluate the third-order user-embedding task over validation batches.

    Returns (mean loss, mean Pearson correlation) averaged over the batch
    indices in `valid_batch_index`.
    """
    (valid_target_user, valid_k_shot_item, valid_second_order_uesrs, valid_third_order_items, valid_oracle_user_ebd, valid_mask_num_second_order_user, valid_mask_num_third_order_item) = valid_data
    (evaluate_loss, evaluate_pearson) = (0.0, 0.0)
    for index in tqdm.tqdm(valid_batch_index):
        # Slice one batch out of the full validation arrays.
        (batch_target_user, batch_kshot_item, batch_2nd_user, batch_3rd_item, batch_oracle_user_ebd, batch_mask_num_2nd_user, batch_mask_num_3rd_item) = gfn.split_batch_user(valid_target_user, valid_k_shot_item, valid_second_order_uesrs, valid_third_order_items, valid_oracle_user_ebd, valid_mask_num_second_order_user, valid_mask_num_third_order_item, index)
        # NOTE(review): the oracle embedding is fed as model.target_user
        # while batch_target_user goes unused — presumably the oracle
        # embedding is the regression target; confirm against training code.
        feed_dict = {model.target_user: batch_oracle_user_ebd, model.support_item_1st_: batch_kshot_item, model.training_phrase_user_task: is_training, model.support_user_2nd_: batch_2nd_user, model.training_phrase_item_task: is_training, model.support_item_3rd: batch_3rd_item}
        (batch_evaluate_loss, batch_predict_ebd, batch_target_ebd) = sess.run([model.loss_3rd_user, model.predict_u_3rd, model.target_user], feed_dict)
        evaluate_loss += batch_evaluate_loss
        batch_pearson = Pearson_correlation(batch_predict_ebd, batch_target_ebd)
        evaluate_pearson += batch_pearson
    return ((evaluate_loss / len(valid_batch_index)), (evaluate_pearson / len(valid_batch_index)))
def add_scores():
    """Attach DRD3 docking scores to the moses_train dataset.

    Loads precomputed per-SMILES scores from drd3_scores.pickle, adds a raw
    'drd3' column and a binned 'drd3_binned' column (2 if score < -9, 1 if
    score > -7, else 0; 0 also marks missing data), and writes the result
    to ../data/moses_train.csv.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(script_dir, '../data/drd3_scores.pickle'), 'rb') as f:
        scored_smiles = pickle.load(f)
    df = pd.read_csv(os.path.join(script_dir, 'moses_train.csv'), index_col=0)
    smiles = df.smiles
    scores = []
    scores_raw = []
    # Binning thresholds on the docking score.
    low = (- 9)
    high = (- 7)
    print('>>> Adding scores to moses_train dataset')
    for s in smiles:
        if (s in scored_smiles):
            scores_raw.append(scored_smiles[s])
            if (scored_smiles[s] < low):
                scores.append(2)
            elif (scored_smiles[s] > high):
                scores.append(1)
            else:
                scores.append(0)
        else:
            # No docking data for this SMILES: zero in both columns.
            scores.append(0)
            scores_raw.append(0)
    df['drd3'] = pd.Series(scores_raw, index=df.index)
    df['drd3_binned'] = pd.Series(scores, index=df.index)
    df.to_csv(os.path.join(script_dir, '../data/moses_train.csv'))
    print('Saved train set csv with "drd3" and "drd3_binned" columns. Value 0 indicates no data or uninformative score')
    # Report how many rows carry informative values in the two new
    # (trailing) columns.
    cpt = np.count_nonzero(df, axis=0)
    print('Number of non zero raw docking scores :', cpt[(- 2)])
    print('Number of non zero binned scores (excluding uninformative scores): ', cpt[(- 1)])
class EpsProposal(object):
    """Iterator producing T scheduled epsilon values.

    Subclasses are expected to implement ``__call__(t)`` returning the
    epsilon for step ``t`` (the base class calls ``self(self.t)``);
    iteration raises StopIteration after T steps.
    """

    def __init__(self, T):
        self.T = T
        self.reset()

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the epsilon for the current step and advance the counter."""
        if self.t < self.T:
            value = self(self.t)
            self.t += 1
            return value
        raise StopIteration()

    def reset(self):
        """Rewind the schedule back to step 0."""
        self.t = 0
class RetreivalDataset(Dataset):
    """Image-text retrieval dataset (ViLBERT-style 4-way sampling).

    Each ``__getitem__`` returns a bundle of four caption/image pairs:
      slot 0: true caption + true image (the positive; ``target`` is 0)
      slot 1: caption from another image + true image
      slot 2: true caption + features from another image
      slot 3: hard-negative caption (train split) or random caption (eval)
              + true image

    Tokenized/tensorized entries are cached under
    ``<dataroot>/cache/<task>_<split>_<max_seq_length>.pkl``.
    """

    def __init__(self, task: str, dataroot: str, annotations_jsonpath: str, split: str, image_features_reader: ImageFeaturesH5Reader, gt_image_features_reader: ImageFeaturesH5Reader, tokenizer: BertTokenizer, padding_index: int=0, max_seq_length: int=20, max_region_num: int=37):
        (self._entries, self.imgid2entry) = _load_annotations(annotations_jsonpath, task)
        self.image_id_list = [*self.imgid2entry]
        self._image_features_reader = image_features_reader
        self._tokenizer = tokenizer
        self.num_labels = 1
        self._split = split
        self._padding_index = padding_index
        self._max_region_num = max_region_num
        self._max_seq_length = max_seq_length
        if (self._split == 'train'):
            # Hard-negative metadata (e.g. train_image_list, train_hard_pool)
            # is attached as instance attributes straight from the pickle.
            image_info = cPickle.load(open(os.path.join(dataroot, 'hard_negative.pkl'), 'rb'))
            for (key, value) in image_info.items():
                setattr(self, key, value)
            self.train_imgId2pool = {imageId: i for (i, imageId) in enumerate(self.train_image_list)}
        cache_path = os.path.join(dataroot, 'cache', (((((task + '_') + split) + '_') + str(max_seq_length)) + '.pkl'))
        if (not os.path.exists(cache_path)):
            self.tokenize()
            self.tensorize()
            cPickle.dump(self._entries, open(cache_path, 'wb'))
        else:
            print(('loading entries from %s' % cache_path))
            self._entries = cPickle.load(open(cache_path, 'rb'))

    def tokenize(self):
        """Tokenize every caption in place, adding 'token', 'input_mask'
        and 'segment_ids' (all padded/truncated to _max_seq_length).

        Note: truncation happens after adding [CLS]/[SEP], so a long
        caption may lose its trailing [SEP] token.
        """
        for entry in self._entries:
            sentence_tokens = self._tokenizer.tokenize(entry['caption'])
            sentence_tokens = ((['[CLS]'] + sentence_tokens) + ['[SEP]'])
            tokens = [self._tokenizer.vocab.get(w, self._tokenizer.vocab['[UNK]']) for w in sentence_tokens]
            tokens = tokens[:self._max_seq_length]
            segment_ids = ([0] * len(tokens))
            input_mask = ([1] * len(tokens))
            if (len(tokens) < self._max_seq_length):
                padding = ([self._padding_index] * (self._max_seq_length - len(tokens)))
                tokens = (tokens + padding)
                input_mask += padding
                segment_ids += padding
            assert_eq(len(tokens), self._max_seq_length)
            entry['token'] = tokens
            entry['input_mask'] = input_mask
            entry['segment_ids'] = segment_ids

    def tensorize(self):
        """Convert the tokenized list fields of every entry to LongTensors."""
        for entry in self._entries:
            entry['token'] = torch.from_numpy(np.array(entry['token']))
            entry['input_mask'] = torch.from_numpy(np.array(entry['input_mask']))
            entry['segment_ids'] = torch.from_numpy(np.array(entry['segment_ids']))

    def _padded_image_features(self, image_id):
        """Read region features for ``image_id`` and pad/truncate to
        ``_max_region_num`` regions.

        Returns (features, spatials, image_mask) tensors of shapes
        (R, 2048) float, (R, 5) float and (R,) long, R = _max_region_num.
        """
        (features, num_boxes, boxes, _) = self._image_features_reader[image_id]
        num_keep = min(int(num_boxes), self._max_region_num)
        boxes_pad = np.zeros((self._max_region_num, 5))
        features_pad = np.zeros((self._max_region_num, 2048))
        boxes_pad[:num_keep] = boxes[:num_keep]
        features_pad[:num_keep] = features[:num_keep]
        image_mask = (([1] * num_keep) + ([0] * (self._max_region_num - num_keep)))
        return (torch.tensor(features_pad).float(), torch.tensor(boxes_pad).float(), torch.tensor(image_mask).long())

    def __getitem__(self, index):
        entry = self._entries[index]
        image_id = entry['image_id']
        # Pair 0: matching caption/image.
        (features1, spatials1, image_mask1) = self._padded_image_features(image_id)
        caption1 = entry['token']
        input_mask1 = entry['input_mask']
        segment_ids1 = entry['segment_ids']
        # Pair 1: true image, caption drawn from a different image.
        while True:
            img_id2 = random.choice(self.image_id_list)
            if (img_id2 != image_id):
                break
        entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]
        features2 = features1
        image_mask2 = image_mask1
        spatials2 = spatials1
        caption2 = entry2['token']
        input_mask2 = entry2['input_mask']
        segment_ids2 = entry2['segment_ids']
        # Pair 2: true caption, features from a different image.
        # FIX: the original reused the pair-0 padding buffers here, leaking
        # stale image-1 rows into pair 2 whenever image 3 had fewer boxes,
        # and built image_mask3 from the untruncated box count (which broke
        # torch.stack when num_boxes3 > _max_region_num).
        while True:
            img_id3 = random.choice(self.image_id_list)
            if (img_id3 != image_id):
                break
        (features3, spatials3, image_mask3) = self._padded_image_features(img_id3)
        caption3 = caption1
        input_mask3 = input_mask1
        segment_ids3 = segment_ids1
        # Pair 4: true image with a hard-negative caption during training,
        # otherwise a random non-matching caption.
        if (self._split == 'train'):
            rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]
            # Index 0 of the pool is skipped — presumably the query image
            # itself; TODO confirm against how hard_negative.pkl is built.
            pool_img_idx = int(rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))])
            img_id4 = self.train_image_list[pool_img_idx]
        else:
            while True:
                img_id4 = random.choice(self.image_id_list)
                if (img_id4 != image_id):
                    break
        entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]
        features4 = features1
        image_mask4 = image_mask1
        spatials4 = spatials1
        caption4 = entry4['token']
        input_mask4 = entry4['input_mask']
        segment_ids4 = entry4['segment_ids']
        features = torch.stack([features1, features2, features3, features4], dim=0)
        spatials = torch.stack([spatials1, spatials2, spatials3, spatials4], dim=0)
        image_mask = torch.stack([image_mask1, image_mask2, image_mask3, image_mask4], dim=0)
        caption = torch.stack([caption1, caption2, caption3, caption4], dim=0)
        input_mask = torch.stack([input_mask1, input_mask2, input_mask3, input_mask4], dim=0)
        segment_ids = torch.stack([segment_ids1, segment_ids2, segment_ids3, segment_ids4], dim=0)
        co_attention_mask = torch.zeros((4, self._max_region_num, self._max_seq_length))
        # The positive pair is always slot 0.
        target = 0
        return (features, spatials, image_mask, caption, target, input_mask, segment_ids, co_attention_mask, image_id)

    def __len__(self):
        return len(self._entries)
class AttResU_Net(nn.Module):
    """Attention Residual U-Net: 5-level encoder/decoder with attention
    gates on each skip connection and residual conv blocks throughout.

    Args:
        img_ch: number of input channels.
        output_ch: number of output channels of the final 1x1 conv;
            the output is sigmoid-activated.
    """

    def __init__(self, img_ch=3, output_ch=1):
        super(AttResU_Net, self).__init__()
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder: residual conv blocks widening 64 -> 1024, each followed
        # by a 2x2 max-pool in forward().
        self.Conv1 = res_conv_block(ch_in=img_ch, ch_out=64)
        self.Conv2 = res_conv_block(ch_in=64, ch_out=128)
        self.Conv3 = res_conv_block(ch_in=128, ch_out=256)
        self.Conv4 = res_conv_block(ch_in=256, ch_out=512)
        self.Conv5 = res_conv_block(ch_in=512, ch_out=1024)
        # Decoder: upsample, gate the matching skip connection with an
        # attention block, concatenate, then fuse with a residual block.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Att5 = Attention_block(F_g=512, F_l=512, F_int=256)
        self.Up_conv5 = res_conv_block(ch_in=1024, ch_out=512)
        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128)
        self.Up_conv4 = res_conv_block(ch_in=512, ch_out=256)
        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64)
        self.Up_conv3 = res_conv_block(ch_in=256, ch_out=128)
        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32)
        self.Up_conv2 = res_conv_block(ch_in=128, ch_out=64)
        self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Return a sigmoid probability map with ``output_ch`` channels."""
        # --- encoder ---
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)
        # --- decoder with attention-gated skip connections ---
        d5 = self.Up5(x5)
        x4 = self.Att5(g=d5, x=x4)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        x3 = self.Att4(g=d4, x=x3)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)
        d1 = self.Conv_1x1(d2)
        # Functional sigmoid instead of allocating a fresh nn.Sigmoid module
        # on every forward pass (identical result).
        return torch.sigmoid(d1)
def center_crop():
    """Smoke-test CenterCrop on a small 3x5 integer grid, printing the
    input, the transform and the cropped result."""
    grid = np.arange(15).reshape(3, 5)
    print(grid)
    crop = CenterCrop(size=(3, 3), p=1.0)
    print(crop)
    cropped = crop(grid)
    print(cropped)
class Speech2TextPreTrainedModel(metaclass=DummyObject):
    # Auto-generated placeholder: instantiating this name without the
    # 'torch' backend installed raises a helpful error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, vad, frames):
    """Filter out non-voiced audio, yielding voiced PCM segments.

    Generator over ``frames`` (objects with .bytes, .timestamp, .duration).
    Classifies each frame with ``vad.is_speech`` and uses a ring buffer of
    ``padding_duration_ms`` worth of frames so a segment only starts once
    more than 90% of the buffered frames are voiced, and only ends once
    more than 90% are unvoiced. Debug markers are written to stdout:
    '1'/'0' per frame, '+(ts)' on segment start, '-(ts)' on segment end.

    Yields:
        bytes: concatenated PCM data for one voiced segment.
    """
    # Number of frames that make up padding_duration_ms of audio.
    num_padding_frames = int((padding_duration_ms / frame_duration_ms))
    # Buffer of (frame, is_speech) pairs driving both trigger decisions.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # True while we are inside a voiced segment.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        sys.stdout.write(('1' if is_speech else '0'))
        if (not triggered):
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for (f, speech) in ring_buffer if speech])
            # Enter the voiced state when >90% of buffered frames are speech;
            # flush the buffer so the segment keeps its leading padding.
            if (num_voiced > (0.9 * ring_buffer.maxlen)):
                triggered = True
                sys.stdout.write(('+(%s)' % (ring_buffer[0][0].timestamp,)))
                for (f, s) in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for (f, speech) in ring_buffer if (not speech)])
            # Leave the voiced state when >90% of buffered frames are silence
            # and yield everything collected so far as one segment.
            if (num_unvoiced > (0.9 * ring_buffer.maxlen)):
                sys.stdout.write(('-(%s)' % (frame.timestamp + frame.duration)))
                triggered = False
                (yield b''.join([f.bytes for f in voiced_frames]))
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        sys.stdout.write(('-(%s)' % (frame.timestamp + frame.duration)))
    sys.stdout.write('\n')
    # Flush any trailing voiced audio as a final segment.
    if voiced_frames:
        (yield b''.join([f.bytes for f in voiced_frames]))
class DPDataset(Dataset):
    """Dialog dataset of (context, reply) pairs.

    For every window of ``context_size`` consecutive turns in each dialog,
    the following turn becomes the reply and the window's tokens (flattened)
    become the context. Replies outside [min_reply_length, max_reply_length]
    are skipped when those bounds are given.
    """

    def __init__(self, corpus, dialogs, context_size=2, min_reply_length=None, max_reply_length=None):
        self.corpus = corpus
        self.contexts = []
        self.replies = []
        for dialog in dialogs:
            for start in range(len(dialog) - context_size):
                reply = dialog[start + context_size]
                too_short = (min_reply_length is not None) and (len(reply) < min_reply_length)
                too_long = (max_reply_length is not None) and (len(reply) > max_reply_length)
                if too_short or too_long:
                    continue
                context = []
                for turn in dialog[start:start + context_size]:
                    context.extend(turn)
                self.contexts.append(context)
                self.replies.append(reply)

    def __len__(self):
        return len(self.contexts)

    def __getitem__(self, item):
        """Return (context, reply) as a pair of LongTensors."""
        return (LongTensor(self.contexts[item]), LongTensor(self.replies[item]))
class Trainer(object):
    """Second-stage trainer: fits a feature extractor plus a distribution
    discriminator against a frozen generator/encoder pair.
    """

    def __init__(self, output_dir):
        # makedirs without exist_ok: rerunning into the same output_dir
        # raises FileExistsError — presumably intentional to avoid clobbering.
        self.model_dir = os.path.join(output_dir, 'Model')
        os.makedirs(self.model_dir)
        self.image_dir = os.path.join(output_dir, 'Image')
        os.makedirs(self.image_dir)
        self.dataloader = get_dataloader()
        self.batch_size = cfg.TRAIN.BATCH_SIZE
        # One fixed real-image batch, reused every epoch for visualization.
        self.fixed_image = self.prepare_data(next(iter(self.dataloader)))[0]
        save_img_results(self.fixed_image.cpu(), None, (- 1), self.image_dir)

    def prepare_data(self, data):
        """Move a real batch to device and sample latent codes.

        Returns (real_img, real_z, real_b, real_p, real_c):
        z is Gaussian noise; p is a random softmax distribution over the
        super categories 20% of the time, otherwise a one-hot; b and c are
        one-hot over the fine-grained categories.
        """
        # assumes data[1] holds the image tensor — TODO confirm loader format
        real_img = data[1]
        real_img = real_img.to(device)
        real_z = torch.FloatTensor(self.batch_size, cfg.GAN.Z_DIM).normal_(0, 1).to(device)
        if (random.uniform(0, 1) < 0.2):
            real_p = torch.softmax(torch.FloatTensor(self.batch_size, cfg.SUPER_CATEGORIES).normal_(0, 1), dim=1).to(device)
        else:
            real_p = torch.zeros(self.batch_size, cfg.SUPER_CATEGORIES).to(device)
            idxs = torch.LongTensor(self.batch_size).random_(0, cfg.SUPER_CATEGORIES)
            for (i, idx) in enumerate(idxs):
                real_p[(i, idx)] = 1
        real_c = torch.zeros(self.batch_size, cfg.FINE_GRAINED_CATEGORIES).to(device)
        idxs = torch.LongTensor(self.batch_size).random_(0, cfg.FINE_GRAINED_CATEGORIES)
        for (i, idx) in enumerate(idxs):
            real_c[(i, idx)] = 1
        real_b = torch.zeros(self.batch_size, cfg.FINE_GRAINED_CATEGORIES).to(device)
        idxs = torch.LongTensor(self.batch_size).random_(0, cfg.FINE_GRAINED_CATEGORIES)
        for (i, idx) in enumerate(idxs):
            real_b[(i, idx)] = 1
        return (real_img, real_z, real_b, real_p, real_c)

    def train(self):
        """Adversarial training loop for the extractor/discriminator.

        The generator and encoder are frozen (eval mode); only the
        extractor and the distribution discriminator are optimized.
        """
        (self.netG, self.encoder, self.extractor, self.dis_dis) = load_network()
        self.netG.eval()
        self.encoder.eval()
        (self.optimizerEX, self.optimizerDD) = define_optimizers(self.extractor, self.dis_dis)
        self.RF_loss = nn.BCELoss()
        self.L1 = nn.L1Loss()
        for epoch in range(cfg.TRAIN.SECOND_MAX_EPOCH):
            for data in self.dataloader:
                (real_img, real_z, real_b, real_p, real_c) = self.prepare_data(data)
                # Generator output is treated as the "real" distribution;
                # the extractor's output on real images is the "fake" one.
                with torch.no_grad():
                    (real_distribution, real_fake_image) = self.netG(real_z, real_c, real_p, real_b, 'code', only=True)
                fake_distribution = self.extractor(real_img)
                # --- discriminator step (detach extractor output) ---
                self.optimizerDD.zero_grad()
                fake_pred = self.dis_dis(fake_distribution.detach())
                real_pred = self.dis_dis(real_distribution)
                DD_loss = (self.RF_loss(fake_pred, torch.zeros_like(fake_pred)) + self.RF_loss(real_pred, torch.ones_like(real_pred)))
                DD_loss.backward()
                self.optimizerDD.step()
                # --- extractor step: fool the discriminator + L1 match ---
                self.optimizerEX.zero_grad()
                fake_pred = self.dis_dis(fake_distribution)
                l1loss = self.L1(self.extractor(real_fake_image), real_distribution)
                EX_loss = self.RF_loss(fake_pred, torch.ones_like(fake_pred))
                (EX_loss + l1loss).backward()
                self.optimizerEX.step()
            # End-of-epoch visualization on the fixed batch.
            self.extractor.eval()
            with torch.no_grad():
                (code_z, code_b, _, code_c) = self.encoder(self.fixed_image, 'softmax')
                feat_p = self.extractor(self.fixed_image)
                (fake_imgs, fg_imgs, mk_imgs, fg_mk) = self.netG(code_z, code_c, feat_p, code_b, 'feature')
                save_img_results(None, (((fake_imgs + fg_imgs) + mk_imgs) + fg_mk), epoch, self.image_dir)
            self.extractor.train()
            # Checkpoint index is always 0, so each epoch overwrites the last.
            save_model(self.dis_dis, self.extractor, 0, self.model_dir)
            print((str(epoch) + 'th epoch finished'))
def load_custom_testing_dataset_multiclass_str():
    """Return a tiny fixture DataFrame with a categorical feature, a
    numerical feature and a string-valued multiclass outcome."""
    rows = [
        ('a', 1, 'zero'),
        ('b', 5, 'one'),
        ('c', 2, 'two'),
        ('a', 3, 'one'),
        ('c', 4, 'zero'),
    ]
    return pd.DataFrame(rows, columns=['Categorical', 'Numerical', 'Outcome'])
class Equalizer(Processor):
    """Five-band equalizer: low shelf, three peaking bands, high shelf.

    Each band's gain and center frequency (plus Q for the peaking bands)
    is exposed as a randomizable Parameter; matching IIR filters are built
    once in setup_filters() and applied in series in process().
    """

    def __init__(self, name='EQUALIZER', block_size=512, sample_rate=44100, gain_range=((- 10.0), 5.0), q_range=(5.0, 30.0), hard_clip=False):
        super().__init__(name, None, block_size, sample_rate)
        MIN_GAIN = gain_range[0]
        MAX_GAIN = gain_range[1]
        MIN_Q = q_range[0]
        MAX_Q = q_range[1]
        self.parameters = ParameterList()
        # NOTE(review): the parameter kind is 'int' although defaults and
        # bounds are floats — confirm what 'int' means to Parameter here.
        self.parameters.add(Parameter('low_shelf_gain', 0.0, 'int', processor=self, minimum=MIN_GAIN, maximum=MAX_GAIN))
        self.parameters.add(Parameter('low_shelf_freq', 80.0, 'int', processor=self, minimum=20.0, maximum=1000.0))
        self.parameters.add(Parameter('first_band_gain', 0.0, 'int', processor=self, minimum=MIN_GAIN, maximum=MAX_GAIN))
        self.parameters.add(Parameter('first_band_freq', 400.0, 'int', processor=self, minimum=200.0, maximum=5000.0))
        self.parameters.add(Parameter('first_band_q', 5.0, 'int', processor=self, minimum=MIN_Q, maximum=MAX_Q))
        self.parameters.add(Parameter('second_band_gain', 0.0, 'int', processor=self, minimum=MIN_GAIN, maximum=MAX_GAIN))
        self.parameters.add(Parameter('second_band_freq', 1000.0, 'int', processor=self, minimum=500.0, maximum=6000.0))
        self.parameters.add(Parameter('second_band_q', 5.0, 'int', processor=self, minimum=MIN_Q, maximum=MAX_Q))
        self.parameters.add(Parameter('third_band_gain', 0.0, 'int', processor=self, minimum=MIN_GAIN, maximum=MAX_GAIN))
        self.parameters.add(Parameter('third_band_freq', 5000.0, 'int', processor=self, minimum=2000.0, maximum=10000.0))
        self.parameters.add(Parameter('third_band_q', 5.0, 'int', processor=self, minimum=MIN_Q, maximum=MAX_Q))
        self.parameters.add(Parameter('high_shelf_gain', 0.0, 'int', processor=self, minimum=MIN_GAIN, maximum=MAX_GAIN))
        self.parameters.add(Parameter('high_shelf_freq', 10000.0, 'int', processor=self, minimum=8000.0, maximum=20000.0))
        (self.bands, self.filters) = self.setup_filters()
        self.hard_clip = hard_clip

    def setup_filters(self):
        """Build one stereo IIRfilter per band from current parameter values."""
        filters = {}
        for band in BANDS:
            G = getattr(self.parameters, (band + '_gain')).value
            fc = getattr(self.parameters, (band + '_freq')).value
            rate = self.sample_rate
            if (band in ['low_shelf', 'high_shelf']):
                # Shelving bands use a fixed Butterworth-style Q.
                Q = 0.707
                filter_type = band
            else:
                Q = getattr(self.parameters, (band + '_q')).value
                filter_type = 'peaking'
            filters[band] = IIRfilter(G, Q, fc, rate, filter_type, n_channels=2)
        return (BANDS, filters)

    def update_filter(self, band):
        """Push the current gain/freq (and Q for peaking bands) into the
        band's IIRfilter."""
        self.filters[band].G = getattr(self.parameters, (band + '_gain')).value
        self.filters[band].fc = getattr(self.parameters, (band + '_freq')).value
        self.filters[band].rate = self.sample_rate
        if (band in ['first_band', 'second_band', 'third_band']):
            self.filters[band].Q = getattr(self.parameters, (band + '_q')).value

    def update(self, parameter_name):
        # NOTE(review): no-op — parameter changes are only applied via
        # update_filter(); confirm callers do not expect update() to do it.
        pass

    def reset_state(self):
        """Clear the delay lines of every band filter."""
        for (band, iirfilter) in self.filters.items():
            iirfilter.reset_state()

    def process(self, data):
        """Apply all band filters in series; optionally hard-clip to [-1, 1]."""
        for (band, irrfilter) in self.filters.items():
            data = irrfilter.apply_filter(data)
        if self.hard_clip:
            data = np.clip(data, (- 1.0), 1.0)
        return data
def llama_model_quantize(fname_inp: bytes, fname_out: bytes, ftype: c_int, nthread: c_int) -> int:
    """Thin ctypes wrapper around the native ``llama_model_quantize``.

    Quantizes the model file at ``fname_inp`` into ``fname_out`` using
    quantization type ``ftype`` and ``nthread`` worker threads; returns
    the library's integer status code (0 on success, per the C API).
    Paths must be encoded ``bytes``, not ``str``.
    """
    return _lib.llama_model_quantize(fname_inp, fname_out, ftype, nthread)
def random_subset_indices(np_array, n_first_subset):
    """Randomly partition the index range of ``np_array`` in two.

    Returns (idx1, idx2): a random subset of ``n_first_subset`` indices
    and the remaining indices, drawn via an in-place shuffle of
    ``np.arange(len(np_array))`` (uses the global NumPy RNG).
    """
    indices = np.arange(len(np_array))
    np.random.shuffle(indices)
    return (indices[:n_first_subset], indices[n_first_subset:])
class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    # Auto-generated placeholder: any use of this name without the 'flax'
    # and 'transformers' backends installed raises via requires_backends.
    _backends = ['flax', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax', 'transformers'])

    # FIX: these alternate constructors are class-level entry points and
    # were missing @classmethod, so Cls.from_pretrained(x) bound x to the
    # `cls` parameter. Both variants still raise the same backend error.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax', 'transformers'])
def ani(index):
    # Animation callback: refresh the two image artists i1 and i2
    # (module-level, presumably matplotlib AxesImage objects — confirm)
    # with frame `index` of `observations`. Each observation slot is
    # channel-first (C, H, W); transpose to (H, W, C) for display.
    i1.set_data(observations[index][0].transpose(1, 2, 0))
    i2.set_data(observations[index][1].transpose(1, 2, 0))
# FIX: the decorator line was garbled to a bare `.register('Constant')`
# (a syntax error); restored the standard MXNet custom-op registration.
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    """Property class for a 'Constant' custom op that outputs a fixed
    tensor parsed from comma-separated value/shape strings.

    Args:
        val_str: comma-separated values, e.g. "1,2,3,4".
        shape_str: comma-separated shape, e.g. "2,2".
        type_str: numpy dtype name for the constant (default 'float32').
    """

    def __init__(self, val_str, shape_str, type_str='float32'):
        # need_top_grad=False: a constant needs no gradient from above.
        super(OpConstantProp, self).__init__(need_top_grad=False)
        val = [float(x) for x in val_str.split(',')]
        shape = [int(x) for x in shape_str.split(',')]
        self.val = mx.nd.array(val, dtype=type_str).reshape(shape)

    def list_arguments(self):
        # The op takes no inputs.
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # No inputs, one output with the constant's shape, no aux states.
        return (in_shape, [self.val.shape], [])

    def infer_type(self, in_type):
        return (in_type, [self.val.dtype], [])

    def create_operator(self, ctx, shapes, dtypes):
        # Materialize the constant on the target device.
        return OpConstant(self.val.as_in_context(ctx))
def get_covariance_matrix(f_map, eye=None):
    """Compute the per-sample channel covariance of a feature map.

    Args:
        f_map: tensor of shape (B, C, H, W).
        eye: optional (C, C) identity used for diagonal regularization;
            allocated on CUDA when not supplied.

    Returns:
        (f_cor, B): the (B, C, C) covariance matrices, regularized by
        eps * eye, and the batch size.
    """
    eps = 1e-05
    B, C, H, W = f_map.shape
    if eye is None:
        eye = torch.eye(C).cuda()
    flat = f_map.contiguous().view(B, C, -1)
    # Unbiased estimate: divide by (H*W - 1), then add eps on the diagonal.
    f_cor = torch.bmm(flat, flat.transpose(1, 2)).div(H * W - 1) + eps * eye
    return (f_cor, B)
def main():
    """Train a ResNet on CIFAR-10 with mmcv's Runner, optionally distributed.

    Config comes from a file given on the command line; distributed mode is
    selected by the --launcher flag ('none' disables it).
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    logger = get_logger(cfg.log_level)
    if (args.launcher == 'none'):
        dist = False
        logger.info('Disabled distributed training.')
    else:
        dist = True
        init_dist(**cfg.dist_params)
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        # Only rank 0 logs at the configured level; other ranks stay quiet.
        if (rank != 0):
            logger.setLevel('ERROR')
        logger.info('Enabled distributed training.')
    normalize = transforms.Normalize(mean=cfg.mean, std=cfg.std)
    # Standard CIFAR-10 augmentation for train, plain tensor + normalize for val.
    train_dataset = datasets.CIFAR10(root=cfg.data_root, train=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    val_dataset = datasets.CIFAR10(root=cfg.data_root, train=False, transform=transforms.Compose([transforms.ToTensor(), normalize]))
    if dist:
        num_workers = cfg.data_workers
        # The global batch size must split evenly across ranks.
        assert ((cfg.batch_size % world_size) == 0)
        batch_size = (cfg.batch_size // world_size)
        train_sampler = DistributedSampler(train_dataset, world_size, rank)
        val_sampler = DistributedSampler(val_dataset, world_size, rank)
        # The sampler shuffles; the loader must not.
        shuffle = False
    else:
        num_workers = (cfg.data_workers * len(cfg.gpus))
        batch_size = cfg.batch_size
        train_sampler = None
        val_sampler = None
        shuffle = True
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, sampler=train_sampler, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, sampler=val_sampler, num_workers=num_workers)
    # Model class is looked up by name from the resnet_cifar module.
    model = getattr(resnet_cifar, cfg.model)()
    if dist:
        model = DistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()])
    else:
        model = DataParallel(model, device_ids=cfg.gpus).cuda()
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir, log_level=cfg.log_level)
    runner.register_training_hooks(lr_config=cfg.lr_config, optimizer_config=cfg.optimizer_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
    if dist:
        # Reseeds each epoch so DistributedSampler shuffles differently.
        runner.register_hook(DistSamplerSeedHook())
    # resume_from restores optimizer/epoch state; load_from only loads weights.
    if (cfg.get('resume_from') is not None):
        runner.resume(cfg.resume_from)
    elif (cfg.get('load_from') is not None):
        runner.load_checkpoint(cfg.load_from)
    runner.run([train_loader, val_loader], cfg.workflow, cfg.total_epochs)
def tag_arxiv_more(line):
    """Normalize catch-up arXiv identifiers and wrap old-style arXiv report
    numbers in <cds.ARXIV> tags.

    RE_ARXIV_CATCHUP rewrites catch-up style ids to `suffix/YYMMnum`;
    each (pattern, prefix) pair in RE_OLD_ARXIV then tags matches as
    `<cds.ARXIV>prefix/num</cds.ARXIV>`.
    """
    line = RE_ARXIV_CATCHUP.sub('\\g<suffix>/\\g<year>\\g<month>\\g<num>', line)
    for pattern, prefix in RE_OLD_ARXIV:
        replacement = u'<cds.ARXIV>' + prefix + '/\\g<num>' + u'</cds.ARXIV>'
        line = pattern.sub(replacement, line)
    return line
class OmniglotConv(nn.Module):
    """Four-layer subnet-masked CNN for Omniglot continual learning.

    Each conv layer is a SubnetConv2d whose weights can be gated by
    per-task binary masks; ``taskcla`` is a list of (task_id, n_classes)
    pairs and one Linear head is created per task.
    """

    def __init__(self, taskcla, sparsity=0.5):
        super(OmniglotConv, self).__init__()
        self.conv1 = SubnetConv2d(1, 64, 3, sparsity=sparsity, bias=False)
        # Track the spatial size through the unpadded 3x3 convs and the
        # 2x2 pools so the flattened feature size for the heads is known.
        s = compute_conv_output_size(28, 3, stride=1, padding=0)
        self.conv2 = SubnetConv2d(64, 64, 3, sparsity=sparsity, bias=False)
        s = compute_conv_output_size(s, 3, stride=1, padding=0)
        s = (s // 2)
        self.conv3 = SubnetConv2d(64, 64, 3, sparsity=sparsity, bias=False)
        s = compute_conv_output_size(s, 3, stride=1, padding=0)
        self.conv4 = SubnetConv2d(64, 64, 3, sparsity=sparsity, bias=False)
        s = compute_conv_output_size(s, 3, stride=1, padding=0)
        s = (s // 2)
        self.maxpool = nn.MaxPool2d(2)
        self.relu = nn.ReLU()
        self.taskcla = taskcla
        self.last = nn.ModuleList()
        for (t, n) in self.taskcla:
            self.last.append(nn.Linear(((s * s) * 64), n, bias=False))
        # Sentinel all-None mask dict used when forward() is given mask=None.
        self.none_masks = {}
        for (name, module) in self.named_modules():
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                self.none_masks[(name + '.weight')] = None
                self.none_masks[(name + '.bias')] = None

    def forward(self, x, task_id, mask, mode='train'):
        """Run the masked backbone on ``x`` and classify with the head
        for ``task_id``; ``mask`` maps '<layer>.weight'/'<layer>.bias'
        to masks (None entries leave a layer unmasked)."""
        if (mask is None):
            mask = self.none_masks
        # FIX: dropped the original's deepcopy of an immutable int and an
        # unused `h_keys` local further down.
        bsz = x.size(0)
        x = self.relu(self.conv1(x, weight_mask=mask['conv1.weight'], bias_mask=mask['conv1.bias'], mode=mode))
        x = self.relu(self.conv2(x, weight_mask=mask['conv2.weight'], bias_mask=mask['conv2.bias'], mode=mode))
        x = self.maxpool(x)
        x = self.relu(self.conv3(x, weight_mask=mask['conv3.weight'], bias_mask=mask['conv3.bias'], mode=mode))
        x = self.relu(self.conv4(x, weight_mask=mask['conv4.weight'], bias_mask=mask['conv4.bias'], mode=mode))
        x = self.maxpool(x)
        x = x.view(bsz, (- 1))
        y = self.last[task_id](x)
        return y

    def get_masks(self, task_id):
        """Snapshot the current binary (uint8) masks of every masked layer,
        keeping only the head belonging to ``task_id``."""
        task_mask = {}
        for (name, module) in self.named_modules():
            if ('last' in name):
                # Skip the heads of all other tasks.
                if (name != ('last.' + str(task_id))):
                    continue
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                print(name)
                task_mask[(name + '.weight')] = (module.weight_mask.detach().clone() > 0).type(torch.uint8)
                if (getattr(module, 'bias') is not None):
                    task_mask[(name + '.bias')] = (module.bias_mask.detach().clone() > 0).type(torch.uint8)
                else:
                    task_mask[(name + '.bias')] = None
        return task_mask
class _RandomGPBase():
    """Base class for a GP whose hyperparameters carry a prior distribution.

    Builds a Normal prior over every parameter group of a VectorizedGP
    (constant mean, lengthscales, noise, and mean/kernel NN weights) and
    exposes sampling of parameter vectors / forward functions from it.
    """

    def __init__(self, size_in, prior_factor=1.0, weight_prior_std=1.0, bias_prior_std=3.0, **kwargs):
        self._params = OrderedDict()
        self._param_dists = OrderedDict()
        self.prior_factor = prior_factor
        self.gp = VectorizedGP(size_in, **kwargs)
        # One prior per GP parameter group, keyed by the GP's own names.
        for (name, shape) in self.gp.parameter_shapes().items():
            if (name == 'constant_mean'):
                mean_p_loc = torch.zeros(1).to(device)
                mean_p_scale = torch.ones(1).to(device)
                self._param_dist(name, Normal(mean_p_loc, mean_p_scale).to_event(1))
            if (name == 'lengthscale_raw'):
                # One lengthscale per input dimension (last axis of shape).
                lengthscale_p_loc = torch.zeros(shape[(- 1)]).to(device)
                lengthscale_p_scale = torch.ones(shape[(- 1)]).to(device)
                self._param_dist(name, Normal(lengthscale_p_loc, lengthscale_p_scale).to_event(1))
            if (name == 'noise_raw'):
                # Prior mean of -1 biases toward small (raw) noise.
                noise_p_loc = ((- 1.0) * torch.ones(1).to(device))
                noise_p_scale = torch.ones(1).to(device)
                self._param_dist(name, Normal(noise_p_loc, noise_p_scale).to_event(1))
            if (('mean_nn' in name) or ('kernel_nn' in name)):
                mean = torch.zeros(shape).to(device)
                if ('weight' in name):
                    std = (weight_prior_std * torch.ones(shape).to(device))
                elif ('bias' in name):
                    std = (bias_prior_std * torch.ones(shape).to(device))
                else:
                    raise NotImplementedError
                self._param_dist(name, Normal(mean, std).to_event(1))
        # Sanity check: prior dict must align 1:1 (and in order) with the
        # GP's parameters so a sampled flat vector maps back correctly.
        for (param_name_gp, param_name_prior) in zip(self.gp.named_parameters().keys(), self._param_dists.keys()):
            assert (param_name_gp == param_name_prior)
        self.hyper_prior = CatDist(self._param_dists.values())

    def sample_params_from_prior(self, shape=torch.Size()):
        """Draw flat hyperparameter vectors from the concatenated prior."""
        return self.hyper_prior.sample(shape)

    def sample_fn_from_prior(self, shape=torch.Size()):
        """Sample hyperparameters and return a GP configured with them."""
        params = self.sample_params_from_prior(shape=shape)
        return self.get_forward_fn(params)

    def get_forward_fn(self, params):
        """Return a deep copy of the GP with `params` loaded as its
        parameter vector (the template GP stays untouched)."""
        gp_model = copy.deepcopy(self.gp)
        gp_model.set_parameters_as_vector(params)
        return gp_model

    def _param_dist(self, name, dist):
        """Register a (reparameterizable) prior distribution under `name`."""
        assert (type(name) == str)
        assert isinstance(dist, torch.distributions.Distribution)
        assert (name not in list(self._param_dists.keys()))
        # rsample is required so gradients can flow through samples.
        assert hasattr(dist, 'rsample')
        self._param_dists[name] = dist
        return dist

    def _log_prob_prior(self, params):
        return self.hyper_prior.log_prob(params)

    def _log_prob_likelihood(self, *args):
        # Subclass responsibility.
        raise NotImplementedError

    def log_prob(self, *args):
        # Subclass responsibility.
        raise NotImplementedError

    def parameter_shapes(self):
        """Map each registered prior name to its event shape."""
        param_shapes_dict = OrderedDict()
        for (name, dist) in self._param_dists.items():
            param_shapes_dict[name] = dist.event_shape
        return param_shapes_dict
def schedule(epoch):
    """Piecewise-linear (SWA-style) learning-rate schedule.

    Holds lr_init for the first half of training, linearly anneals toward
    lr_ratio * lr_init between 50% and 90% progress, then holds the final
    rate. Reads the module-level ``args`` (swa, swa_start/epochs, swa_lr,
    lr_init).
    """
    total = args.swa_start if args.swa else args.epochs
    progress = epoch / total
    lr_ratio = (args.swa_lr / args.lr_init) if args.swa else 0.01
    if progress <= 0.5:
        scale = 1.0
    elif progress <= 0.9:
        scale = 1.0 - (1.0 - lr_ratio) * (progress - 0.5) / 0.4
    else:
        scale = lr_ratio
    return args.lr_init * scale
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
send_example_telemetry('run_mlm', model_args, data_args)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), streaming=data_args.streaming)
if ('validation' not in raw_datasets.keys()):
raw_datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), streaming=data_args.streaming)
raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), streaming=data_args.streaming)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (extension == 'txt'):
extension = 'text'
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
if ('validation' not in raw_datasets.keys()):
raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if (model_args.config_overrides is not None):
logger.info(f'Overriding config: {model_args.config_overrides}')
config.update_from_string(model_args.config_overrides)
logger.info(f'New config: {config}')
tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), low_cpu_mem_usage=model_args.low_cpu_mem_usage)
else:
logger.info('Training new model from scratch')
model = AutoModelForMaskedLM.from_config(config)
embedding_size = model.get_input_embeddings().weight.shape[0]
if (len(tokenizer) > embedding_size):
model.resize_token_embeddings(len(tokenizer))
if training_args.do_train:
column_names = list(raw_datasets['train'].features)
else:
column_names = list(raw_datasets['validation'].features)
text_column_name = ('text' if ('text' in column_names) else column_names[0])
if (data_args.max_seq_length is None):
max_seq_length = tokenizer.model_max_length
if (max_seq_length > 1024):
logger.warning('The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can override this default with `--block_size xxx`.')
max_seq_length = 1024
else:
if (data_args.max_seq_length > tokenizer.model_max_length):
logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
padding = ('max_length' if data_args.pad_to_max_length else False)
def tokenize_function(examples):
    """Tokenize one batch line-by-line, dropping empty/whitespace-only lines.

    `tokenizer`, `padding`, `max_seq_length` and `text_column_name` are
    captured from the enclosing scope.
    """
    # Remove empty lines so they do not become empty training examples.
    examples[text_column_name] = [line for line in examples[text_column_name] if ((len(line) > 0) and (not line.isspace()))]
    return tokenizer(examples[text_column_name], padding=padding, truncation=True, max_length=max_seq_length, return_special_tokens_mask=True)
with training_args.main_process_first(desc='dataset map tokenization'):
if (not data_args.streaming):
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset line_by_line')
else:
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, remove_columns=[text_column_name])
else:
def tokenize_function(examples):
    """Tokenize a batch with no truncation or padding; the token streams are
    later concatenated and chunked to `max_seq_length` by `group_texts`."""
    return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
with training_args.main_process_first(desc='dataset map tokenization'):
if (not data_args.streaming):
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on every text in dataset')
else:
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, remove_columns=column_names)
def group_texts(examples):
    """Concatenate all sequences in the batch, then split them into chunks of
    exactly `max_seq_length` tokens; a trailing remainder shorter than one
    chunk is dropped."""
    # Flatten each feature's list-of-lists into one long sequence.
    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # Truncate to an exact multiple of max_seq_length (only when there is at
    # least one full chunk; shorter batches are kept as-is and yield no chunks).
    if (total_length >= max_seq_length):
        total_length = ((total_length // max_seq_length) * max_seq_length)
    result = {k: [t[i:(i + max_seq_length)] for i in range(0, total_length, max_seq_length)] for (k, t) in concatenated_examples.items()}
    return result
with training_args.main_process_first(desc='grouping texts together'):
if (not data_args.streaming):
tokenized_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc=f'Grouping texts in chunks of {max_seq_length}')
else:
tokenized_datasets = tokenized_datasets.map(group_texts, batched=True)
if training_args.do_train:
if ('train' not in tokenized_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = tokenized_datasets['train']
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
if training_args.do_eval:
if ('validation' not in tokenized_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = tokenized_datasets['validation']
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
def preprocess_logits_for_metrics(logits, labels):
    """Reduce model logits to argmax token ids before metric accumulation,
    so only predictions (not full vocabulary-sized logits) are kept."""
    if isinstance(logits, tuple):
        # Some models return (logits, extra_outputs); keep only the logits.
        logits = logits[0]
    return logits.argmax(dim=(- 1))
metric = evaluate.load('accuracy')
def compute_metrics(eval_preds):
    """Compute accuracy over positions whose label is not the ignore index.

    `preds` are already argmax token ids (see `preprocess_logits_for_metrics`);
    both arrays are flattened and positions labeled -100 are masked out.
    """
    (preds, labels) = eval_preds
    labels = labels.reshape((- 1))
    preds = preds.reshape((- 1))
    # -100 marks tokens excluded from the loss; exclude them from accuracy too.
    mask = (labels != (- 100))
    labels = labels[mask]
    preds = preds[mask]
    return metric.compute(predictions=preds, references=labels)
pad_to_multiple_of_8 = (data_args.line_by_line and training_args.fp16 and (not data_args.pad_to_max_length))
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability, pad_to_multiple_of=(8 if pad_to_multiple_of_8 else None))
trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if (training_args.do_eval and (not is_torch_tpu_available())) else None), preprocess_logits_for_metrics=(preprocess_logits_for_metrics if (training_args.do_eval and (not is_torch_tpu_available())) else None))
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate()
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics['eval_loss'])
except OverflowError:
perplexity = float('inf')
metrics['perplexity'] = perplexity
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'fill-mask'}
if (data_args.dataset_name is not None):
kwargs['dataset_tags'] = data_args.dataset_name
if (data_args.dataset_config_name is not None):
kwargs['dataset_args'] = data_args.dataset_config_name
kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
else:
kwargs['dataset'] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs) |
def uncompressed_rle(mask):
    """Encode a 2-D binary mask as COCO-style uncompressed RLE.

    The mask is scanned in column-major (Fortran) order and encoded as run
    lengths of alternating values, starting from an implicit background
    (0/False) run — so a leading 0 count is emitted when the very first
    pixel is set.

    Args:
        mask: 2-D numpy array of 0/1 (or bool) values.

    Returns:
        dict with 'counts' (list of run lengths) and 'size' ([height, width]).
    """
    pixels = mask.flatten(order='F').tolist()
    counts = []
    run_value = False  # encoding always begins with a (possibly empty) 0-run
    run_length = 0
    for pixel in pixels:
        if pixel != run_value:
            # Close the current run and open a new one for the new value.
            counts.append(run_length)
            run_value = pixel
            run_length = 1
        else:
            run_length += 1
    counts.append(run_length)  # flush the final run
    return {'counts': counts, 'size': [mask.shape[0], mask.shape[1]]}
class MinitaurEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
    """Randomizes physical parameters of the minitaur robot.

    Each `randomize_env` call perturbs base and leg masses (relative error
    ranges), battery voltage, motor viscous damping and foot friction
    (absolute ranges).
    """

    def __init__(self, minitaur_base_mass_err_range=MINITAUR_BASE_MASS_ERROR_RANGE, minitaur_leg_mass_err_range=MINITAUR_LEG_MASS_ERROR_RANGE, battery_voltage_range=BATTERY_VOLTAGE_RANGE, motor_viscous_damping_range=MOTOR_VISCOUS_DAMPING_RANGE):
        self._minitaur_base_mass_err_range = minitaur_base_mass_err_range
        self._minitaur_leg_mass_err_range = minitaur_leg_mass_err_range
        self._battery_voltage_range = battery_voltage_range
        self._motor_viscous_damping_range = motor_viscous_damping_range

    def randomize_env(self, env):
        """Randomize the minitaur instance owned by `env`."""
        self._randomize_minitaur(env.minitaur)

    def _randomize_minitaur(self, minitaur):
        """Draw new physical parameters and push them onto the robot."""
        base_mass = minitaur.GetBaseMassesFromURDF()
        # random.uniform with array bounds applies ONE shared random factor,
        # scaling all base masses together (stdlib a + (b - a) * random()).
        randomized_base_mass = random.uniform((np.array(base_mass) * (1.0 + self._minitaur_base_mass_err_range[0])), (np.array(base_mass) * (1.0 + self._minitaur_base_mass_err_range[1])))
        minitaur.SetBaseMasses(randomized_base_mass)
        leg_masses = minitaur.GetLegMassesFromURDF()
        leg_masses_lower_bound = (np.array(leg_masses) * (1.0 + self._minitaur_leg_mass_err_range[0]))
        leg_masses_upper_bound = (np.array(leg_masses) * (1.0 + self._minitaur_leg_mass_err_range[1]))
        # `range` instead of the Python-2-only `xrange`; each leg mass is
        # drawn independently.
        randomized_leg_masses = [np.random.uniform(leg_masses_lower_bound[i], leg_masses_upper_bound[i]) for i in range(len(leg_masses))]
        minitaur.SetLegMasses(randomized_leg_masses)
        # BUG FIX: previously read the module-level BATTERY_VOLTAGE_RANGE and
        # MOTOR_VISCOUS_DAMPING_RANGE constants, silently ignoring the ranges
        # passed to __init__.
        randomized_battery_voltage = random.uniform(self._battery_voltage_range[0], self._battery_voltage_range[1])
        minitaur.SetBatteryVoltage(randomized_battery_voltage)
        randomized_motor_damping = random.uniform(self._motor_viscous_damping_range[0], self._motor_viscous_damping_range[1])
        minitaur.SetMotorViscousDamping(randomized_motor_damping)
        # Foot friction has no constructor knob; the module-level range is the
        # only source for it.
        randomized_foot_friction = random.uniform(MINITAUR_LEG_FRICTION[0], MINITAUR_LEG_FRICTION[1])
        minitaur.SetFootFriction(randomized_foot_friction)
# NOTE(review): these decorator lines arrived as bare `.parametrize(...)` —
# the `@pytest.mark` prefix was evidently lost (extraction artifact); restored
# below. `sample_batch_size` is not parametrized here, so it is presumably
# supplied by a fixture or a decorator outside this view — confirm.
@pytest.mark.parametrize('add_batch_size, max_length, add_sequence_length, sample_sequence_length, sample_period', [(1, 8, 3, 3, 4)])
@pytest.mark.parametrize('add_iter, expected_priority_indices, expected_priority_values', [(0, [0], [1.0]), (1, [1], [0.0]), (2, [1], [1.0]), (3, [0], [1.0]), (4, [1], [1.0]), (5, [0], [0.0]), (6, [0], [1.0]), (7, [1], [1.0])])
def test_item_and_priority_calculation_case3(fake_transition: chex.ArrayTree, rng_key: chex.PRNGKey, add_batch_size: int, sample_batch_size: int, max_length: int, add_sequence_length: int, sample_sequence_length: int, sample_period: int, add_iter: int, expected_priority_indices: List[int], expected_priority_values: List[float]):
    """Case 3: check sampled item indices and priority values after each add step."""
    check_index_calc(fake_transition, add_batch_size, sample_batch_size, max_length, add_sequence_length, sample_sequence_length, sample_period, add_iter, expected_priority_indices, expected_priority_values)
class Concat(Container):
    """Container concatenating its child modules' outputs along `dimension`.

    NOTE(review): presumably the BigDL Concat container (`bigdl_type` selects
    the backend tensor dtype) — confirm against the Container base class.
    """

    def __init__(self, dimension, bigdl_type='float'):
        # Container receives (None, bigdl_type, dimension); the leading None
        # presumably means "create a new backend object from these args" —
        # confirm with Container.__init__.
        super(Concat, self).__init__(None, bigdl_type, dimension)
def process_flickr8k():
    """Download Flickr8k data and dump two human-judgement JSON files.

    Produces:
      - flickr8k/crowdflower_flickr8k.json: per-image CrowdFlower judgements
        (rating = fraction of "yes" votes).
      - flickr8k/flickr8k.json: per-image expert judgements, three ratings
        per caption, one entry each.
    """
    # Fetch and unzip the dataset archives once; skipped when already present.
    if (not os.path.exists('flickr8k')):
        os.makedirs('flickr8k')
    if (not os.path.exists('flickr8k/Flickr8k_Dataset.zip')):
        gdd.download_file_from_google_drive(file_id='1WNY8pV-u8xtBYBVal03qwjQs4VKurUZn', dest_path='./flickr8k/Flickr8k_Dataset.zip', unzip=True, showsize=True)
    if (not os.path.exists('flickr8k/Flickr8k_text.zip')):
        gdd.download_file_from_google_drive(file_id='1ljB7DR-YM-q9WKnHDW5dHjauK029B2s6', dest_path='./flickr8k/Flickr8k_text.zip', unzip=True, showsize=True)
    # image filename -> list of reference captions; caption id ("img#n") -> text.
    flickr8k_image2ann = collections.defaultdict(list)
    captionid2caption = {}
    with open('flickr8k/Flickr8k.token.txt') as f:
        for line in f:
            (image, ann) = line.strip().split('\t')
            flickr8k_image2ann[image.split('#')[0]].append(ann)
            captionid2caption[image] = ann
    # Re-key the references by the image stem (drop the file extension).
    flickr8k_image2ann = {k.split('.')[0]: v for (k, v) in flickr8k_image2ann.items()}
    (all_8k_expert, all_8k_crowdflower) = ([], [])
    total_ann = 0
    # CrowdFlower annotations: binary yes/no votes per (image, caption) pair.
    with open('flickr8k/CrowdFlowerAnnotations.txt') as f:
        for line in f:
            (image_id, caption_id, yes_prec, n_yes, n_no) = line.strip().split('\t')
            all_8k_crowdflower.append({'image_id': image_id, 'caption_id': caption_id, 'caption': captionid2caption[caption_id], 'n_yes': int(n_yes), 'n_no': int(n_no), 'yes_prec': float(yes_prec), 'image_filepath': 'Flicker8k_Dataset/{}'.format(image_id)})
            # Sanity check: the stored yes-fraction matches the vote counts.
            assert (np.abs((float(yes_prec) - (int(n_yes) / (int(n_yes) + int(n_no))))) < 1e-05)
            total_ann += (int(n_yes) + int(n_no))
    all_index = {}
    for d in all_8k_crowdflower:
        if (d['image_id'] not in all_index):
            all_index[d['image_id']] = {'human_judgement': [], 'image_id': d['image_id'], 'image_path': d['image_filepath'], 'ground_truth': [x for x in flickr8k_image2ann[d['image_id'].split('.')[0]]]}
        # A rated caption must not double as one of its own references.
        if (d['caption'] in all_index[d['image_id']]['ground_truth']):
            all_index[d['image_id']]['ground_truth'].remove(d['caption'])
        all_index[d['image_id']]['human_judgement'].append({'image_id': d['image_id'], 'image_path': d['image_filepath'], 'caption': d['caption'], 'rating': d['yes_prec']})
    print('For crowdflower, we are dumping {} judgments between {} images'.format(len(all_8k_crowdflower), len(all_index)))
    with open('flickr8k/crowdflower_flickr8k.json', 'w') as f:
        f.write(json.dumps(all_index))
    # Expert annotations: three float ratings per (image, caption) pair.
    skip = 0
    with open('flickr8k/ExpertAnnotations.txt') as f:
        for line in f:
            (image_id, caption_id, ex1, ex2, ex3) = line.strip().split('\t')
            caption = captionid2caption[caption_id]
            # Skip pairs whose caption is one of the image's own references.
            if (caption in flickr8k_image2ann[image_id.split('.')[0]]):
                skip += 1
                continue
            all_8k_expert.append({'image_id': image_id.split('.')[0], 'image_filepath': 'Flicker8k_Dataset/{}'.format(image_id), 'caption': caption, 'expert1': float(ex1), 'expert2': float(ex2), 'expert3': float(ex3)})
    all_index = {}
    for d in all_8k_expert:
        if (d['image_id'] not in all_index):
            all_index[d['image_id']] = {'human_judgement': [], 'image_id': d['image_id'], 'image_path': d['image_filepath'], 'ground_truth': flickr8k_image2ann[d['image_id']]}
        # Each expert rating becomes its own judgement entry (three per caption).
        all_index[d['image_id']]['human_judgement'].append({'image_id': d['image_id'], 'image_path': d['image_filepath'], 'caption': d['caption'], 'rating': d['expert1']})
        all_index[d['image_id']]['human_judgement'].append({'image_id': d['image_id'], 'image_path': d['image_filepath'], 'caption': d['caption'], 'rating': d['expert2']})
        all_index[d['image_id']]['human_judgement'].append({'image_id': d['image_id'], 'image_path': d['image_filepath'], 'caption': d['caption'], 'rating': d['expert3']})
    print('For expert, we are dumping {} judgments between {} images'.format((len(all_8k_expert) * 3), len(all_index)))
    with open('flickr8k/flickr8k.json', 'w') as f:
        f.write(json.dumps(all_index))
def placeholder_fit(trainer, module, datamodule):
    """Perform the wiring part of a Lightning `Trainer.fit` without training.

    Attaches the data, validates the loop configuration, hooks up logging and
    sets `trainer.model` — presumably so the trainer can be used afterwards
    (e.g. for evaluation) without running an actual fit; confirm with callers.
    """
    trainer.data_connector.attach_data(module, datamodule=datamodule)
    if hasattr(module, 'hparams'):
        # Tidy the hparams namespace before use
        # (pytorch_lightning.utilities.parsing.clean_namespace).
        parsing.clean_namespace(module.hparams)
    trainer.config_validator.verify_loop_configurations(module)
    trainer.callback_connector.attach_model_logging_functions(module)
    trainer.model = module
def test_aggregator_pipeline(saliency_mt_model: HuggingfaceEncoderDecoderModel):
    """Aggregation order matters: span-then-score and score-then-span produce
    the same shapes but different attribution values, both for explicit
    pipelines and for their registered-name shorthands."""
    out = saliency_mt_model.attribute('This is a test.', attribute_target=True, step_scores=['probability'], device='cpu', show_progress=False)
    seqattr = out.sequence_attributions[0]
    span_kwargs = dict(source_spans=(3, 5), target_spans=[(0, 3), (4, 6)])

    def assert_expected_shapes(agg):
        # Shared shape contract for every aggregation order.
        assert (agg.source_attributions.shape == (5, 4))
        assert (agg.target_attributions.shape == (4, 4))
        assert (agg.step_scores['probability'].shape == (4,))

    squeeze_then_sum = AggregatorPipeline([ContiguousSpanAggregator, SequenceAttributionAggregator])
    out_squeeze_sum = seqattr.aggregate(squeeze_then_sum, **span_kwargs)
    assert_expected_shapes(out_squeeze_sum)
    sum_then_squeeze = AggregatorPipeline([SequenceAttributionAggregator, ContiguousSpanAggregator])
    out_sum_squeeze = seqattr.aggregate(sum_then_squeeze, **span_kwargs)
    assert_expected_shapes(out_sum_squeeze)
    # Different orders must yield different attribution values.
    assert (not torch.allclose(out_squeeze_sum.source_attributions, out_sum_squeeze.source_attributions))
    assert (not torch.allclose(out_squeeze_sum.target_attributions, out_sum_squeeze.target_attributions))
    # Same pipelines, but specified via their registered names.
    out_squeeze_sum_named = seqattr.aggregate(['spans', 'scores'], **span_kwargs)
    out_sum_squeeze_named = seqattr.aggregate(['scores', 'spans'], **span_kwargs)
    assert_expected_shapes(out_squeeze_sum_named)
    assert_expected_shapes(out_sum_squeeze_named)
    assert (not torch.allclose(out_squeeze_sum_named.source_attributions, out_sum_squeeze_named.source_attributions))
    assert (not torch.allclose(out_squeeze_sum_named.target_attributions, out_sum_squeeze_named.target_attributions))
# NOTE(review): this decorator line appears truncated — `_name(...)` has lost
# its leading `@<object>.` prefix (extraction artifact). Recover the original
# decorator from version control before relying on this test.
_name('contract_matseq')
def test_contract_matseq_large_bonddim(benchmark):
    """Benchmark the matrix-sequence contraction at bond dimension 100."""
    contract_matseq_runner(benchmark, bond_dim=100)
def build_vocab(vocab_root_path, train_all_text, text_min_count):
    """Build a vocabulary from whitespace-tokenized training texts.

    Counts word frequencies over `train_all_text`, persists the counts to
    `<vocab_root_path>/vocab/freq.csv` (only when not already present), and
    writes the kept words to `<vocab_root_path>/vocab/vocab-<min_count>.txt`.

    Fixes vs. the previous version:
      - single-pass counting with collections.Counter instead of an O(n^2)
        `word not in list` scan followed by a second counting pass;
      - the freq.csv existence check now uses the same path the file is
        written to (it previously checked `<root>/freq.csv` but wrote
        `<root>/vocab/freq.csv`, so the cache check could never pass);
      - the `vocab/` output directory is created if missing.

    Args:
        vocab_root_path: root directory under which `vocab/` outputs go.
        train_all_text: iterable of space-separated text strings.
        text_min_count: minimum frequency for a word to be kept.

    Returns:
        List of vocabulary words in first-occurrence order, with 'PAD' and
        'UNK' sentinels prepended at indices 0 and 1.
    """
    import collections

    print('building vocab,train')
    # Counter preserves first-occurrence order (insertion-ordered dict),
    # matching the original vocabulary ordering.
    freq = collections.Counter()
    for text in train_all_text:
        freq.update(text.split(' '))
    vocab_dir = os.path.join(vocab_root_path, 'vocab')
    os.makedirs(vocab_dir, exist_ok=True)
    freq_path = os.path.join(vocab_dir, 'freq.csv')
    if (not os.path.exists(freq_path)):
        print('no freq.csv, so save it')
        with open(freq_path, 'w') as f:
            writer = csv.writer(f)
            writer.writerows(freq.items())
    # Keep words meeting the frequency threshold, then prepend the sentinels.
    results = [word for (word, count) in freq.items() if (count >= text_min_count)]
    results.insert(0, 'PAD')
    results.insert(1, 'UNK')
    with open(os.path.join(vocab_dir, (('vocab-' + str(text_min_count)) + '.txt')), 'w') as f:
        f.write('\n'.join(results))
    return results
def concretize_op(op: Union[(AbsOpBase, Placeholder)], model: Optional[z3.ModelRef]) -> Union[(AbsOpBase, Placeholder)]:
    """Replace every symbolic (z3) value in `op` with the concrete integer
    assigned by the solver `model`.

    Handles three cases: Constant/Input (concretize shape dims on a deep
    copy), Placeholder (rebuild from an evaluated shape), and generic ops
    (rebuild via the constructor with symbolic params evaluated). The input
    `op` is never mutated.
    """
    if (isinstance(op, Constant) or isinstance(op, Input)):
        # Leaf tensors: evaluate symbolic shape dims in-place on a deep copy.
        ret_op = deepcopy(op)
        values = []  # NOTE(review): unused in this branch — dead code.
        for (idx, s) in enumerate(op.abs_tensor.shape):
            if isinstance(s, z3.ExprRef):
                ret_op.abs_tensor.shape[idx] = model.eval(s).as_long()
        return ret_op
    elif isinstance(op, Placeholder):
        # NOTE(review): non-symbolic dims are silently dropped here — this
        # appears to assume every Placeholder dim is symbolic; confirm.
        shape = []
        for (idx, s) in enumerate(op.ttype.shape):
            if isinstance(s, z3.ExprRef):
                shape.append(model.eval(s).as_long())
        return Placeholder(AbsTensor(shape=shape, dtype=op.ttype.dtype))
    # Generic ops: collect constructor arguments from the op's attributes,
    # evaluate the symbolic ones, and rebuild a fresh instance.
    construct_param_dict = signature(op.__init__).parameters
    values = []
    symbolic_idx = []
    if (op.num_var_param is not None):
        # Variadic ops store all constructor args under the first parameter name.
        key = list(construct_param_dict.keys())[0]
        values = list(getattr(op, key))
        symbolic_idx = [i for i in range(len(values)) if isinstance(values[i], z3.ExprRef)]
    else:
        for (idx, key) in enumerate(construct_param_dict):
            param = getattr(op, key)
            values.append(param)
            if isinstance(param, z3.ExprRef):
                symbolic_idx.append(idx)
    for idx in symbolic_idx:
        values[idx] = model.eval(values[idx]).as_long()
    concrete_op = type(op)(*values)
    # Carry over metadata that is not part of the constructor signature.
    concrete_op.inp_ranks = op.inp_ranks
    concrete_op.out_ranks = op.out_ranks
    concrete_op.extra_attrs = op.extra_attrs
    return concrete_op
def backward(W, h=[0.0625, 0.25, 0.375, 0.25, 0.0625]):
    """Reconstruct data from coefficients `W` — presumably an inverse starlet
    (undecimated wavelet) transform via `sp2.Starlet2D.backward_omp`; confirm
    against the sp2 library.

    Args:
        W: coefficient array; np.shape(W) is consumed as
           (dim0, ny, nx, n_scales) — axis meaning inferred from the
           Starlet2D(nX[1], nX[2], nX[0], nX[3]-1, ...) call, TODO confirm.
        h: filter coefficients (B3-spline by default). NOTE(review): only
           len(h) is used here (np.size); the values themselves are never
           passed on. Also a mutable default argument — harmless since it is
           never mutated, but a tuple would be safer.

    Returns:
        The reconstruction produced by backward_omp on the real part of W.
    """
    nX = np.shape(W)
    Lh = np.size(h)
    rec = sp2.Starlet2D(nX[1], nX[2], nX[0], (nX[3] - 1), Lh).backward_omp(np.real(W))
    return rec
def sigmoid_ce_loss_(inputs: torch.Tensor, targets: torch.Tensor):
    """Per-mask mean binary cross-entropy, averaged over the number of masks.

    NOTE(review): despite the name, this calls `F.binary_cross_entropy`,
    which expects probabilities in [0, 1] — confirm callers apply a sigmoid
    (or switch to `binary_cross_entropy_with_logits`) upstream.
    """
    # Guard the denominator so an empty batch does not divide by zero.
    num_masks = max(inputs.size(0), 1.0)
    elementwise = F.binary_cross_entropy(inputs, targets, reduction='none')
    per_mask_mean = elementwise.flatten(1).mean(1)
    return per_mask_mean.sum() / num_masks
def show_curves():
    """Interactive notebook widget: plot mean±std "blocks vs distance" curves
    per experiment, filtered by obstacle configuration.

    Relies on module-level `logs_dir`, `obstacle_configs`, `load_data`,
    `get_curve_for_run`, `extend_curves`, `get_label`, matplotlib and
    ipywidgets.
    """
    step_size = 0.1  # distance resampling step
    all_curves = {}
    # Collect one resampled curve per run, bucketed by experiment name.
    for log_dir in tqdm(list(logs_dir.iterdir())):
        cfg = utils.read_config(str((log_dir / 'config.yml')))
        data = load_data(cfg)
        if (data is None):
            continue
        if (cfg.experiment_name not in all_curves):
            all_curves[cfg.experiment_name] = []
        all_curves[cfg.experiment_name].append(get_curve_for_run(data, step_size))

    def plot_curves(obstacle_config, experiment_names, fontsize=20):
        # Draw one mean curve with a ±1 std band per selected experiment.
        for experiment_name in experiment_names:
            curves = extend_curves(all_curves[experiment_name])
            # Upper bound (N - 0.5) * step keeps arange at exactly N points.
            x = np.arange(0, ((len(curves[0]) - 0.5) * step_size), step_size)
            y_mean = np.mean(curves, axis=0)
            y_std = np.std(curves, axis=0)
            plt.plot(x, y_mean, label=get_label(experiment_name))
            plt.fill_between(x, (y_mean - y_std), (y_mean + y_std), alpha=0.2)
        plt.xlabel('Distance (m)', fontsize=fontsize)
        plt.ylabel('Num Blocks', fontsize=fontsize)
        if (obstacle_config == 'large_divider'):
            plt.xlim(0, 120)
        # "large*" configurations cap at 20 cubes, the rest at 10.
        num_cubes = (20 if obstacle_config.startswith('large') else 10)
        plt.ylim(0, num_cubes)
        plt.xticks(fontsize=(fontsize - 2))
        plt.yticks(range(0, (num_cubes + 1), 2), fontsize=(fontsize - 2))
        plt.legend(fontsize=(fontsize - 2), loc='upper left')

    def f(obstacle_config, experiment_names, save_to_pdf):
        # Widget callback: nothing to draw until an experiment is selected.
        if (len(experiment_names) == 0):
            return
        plot_curves(obstacle_config, experiment_names)
        if save_to_pdf:
            plt.savefig('curves-{}.pdf'.format(obstacle_config), bbox_inches='tight')
        else:
            plt.show()

    obstacle_config_dropdown = widgets.Dropdown(options=obstacle_configs)
    experiment_names_select = widgets.SelectMultiple(layout=widgets.Layout(width='50%'))
    save_toggle = widgets.ToggleButton(description='Save to PDF')

    def update_experiment_names_options(*_):
        # Restrict the multi-select to experiments matching the chosen config.
        matching_experiment_names = []
        for experiment_name in all_curves:
            if experiment_name.startswith(obstacle_config_dropdown.value):
                matching_experiment_names.append(experiment_name)
        experiment_names_select.options = matching_experiment_names
        experiment_names_select.rows = len(matching_experiment_names)

    obstacle_config_dropdown.observe(update_experiment_names_options)
    interact(f, obstacle_config=obstacle_config_dropdown, experiment_names=experiment_names_select, save_to_pdf=save_toggle)
class RandomStatePredictor():
    """Interface stub for state predictors; `predict` must be overridden.

    NOTE(review): abstract by convention only (raises NotImplementedError)
    rather than via abc.ABC.
    """

    def __init__(self):
        pass

    def predict(self, state: State, next_states) -> dict:
        """Return a prediction dict for `state` given candidate `next_states`.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
def reindex(es, source_index, target_index):
    """Copy all documents from `source_index` into `target_index` on the
    Elasticsearch client `es`, using the `helpers.reindex` bulk helper."""
    helpers.reindex(es, source_index=source_index, target_index=target_index)
def require_non_multigpu(test_case):
    """Decorator: run `test_case` only in a 0-or-1-GPU PyTorch environment.

    Skips the test when PyTorch is unavailable, or when more than one CUDA
    device is visible; otherwise returns the test case unchanged.
    """
    if not _torch_available:
        return unittest.skip('test requires PyTorch')(test_case)
    # Imported lazily so the decorator itself never requires torch.
    import torch

    if torch.cuda.device_count() > 1:
        return unittest.skip('test requires 0 or 1 GPU')(test_case)
    return test_case
class TestTorchOP(unittest.TestCase):
def setUpClass(self):
pass
def tearDownClass(self):
os.remove('conf.yaml')
pass
def test_1(self):
text = "\nmodel:\n name: model\n operator:\n input_data:\n type: Input\n output:\n input_ids.1:\n dtype: s32\n shape: [-1, -1]\n pastkv.1:\n dtype: fp32\n shape: [-1, -1]\n mask.1:\n dtype: s32\n shape: [-1, -1]\n self.model.gpt_neox.embed_in.weight:\n dtype: fp32\n shape: [50280, 2560]\n location: [0, ]\n self.model.gpt_neox.layers.0.input_layernorm.weight:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.layers.0.input_layernorm.bias:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.layers.0.attention.query_key_value.weight:\n dtype: fp32\n shape: [7680, 2560]\n location: [, ]\n self.model.gpt_neox.layers.0.attention.query_key_value.bias:\n dtype: fp32\n shape: [7680]\n location: [, 30720]\n '13':\n dtype: fp32\n shape: [1, 1, 2048, 20]\n location: [, 163840]\n '12':\n dtype: fp32\n shape: [1, 1, 2048, 20]\n location: [, 163840]\n '19':\n dtype: s32\n shape: [1]\n location: [, 4]\n aten::neg_133_mul_val:\n dtype: fp32\n shape: [1]\n location: [, 4]\n aten::neg_135_mul_val:\n dtype: fp32\n shape: [1]\n location: [, 4]\n '6':\n shape: [1, 1, 2048, 2048]\n location: [, 4194304]\n '4':\n dtype: fp32\n shape: [1]\n location: [, 4]\n self.model.gpt_neox.layers.0.attention.dense.weight:\n dtype: fp32\n shape: [2560, 2560]\n location: [, ]\n self.model.gpt_neox.layers.0.attention.dense.bias:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.layers.0.post_attention_layernorm.weight:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.layers.0.post_attention_layernorm.bias:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.layers.0.mlp.dense_h_to_4h.weight:\n dtype: fp32\n shape: [10240, 2560]\n location: [, ]\n self.model.gpt_neox.layers.0.mlp.dense_h_to_4h.bias:\n dtype: fp32\n shape: [10240]\n location: [, 40960]\n self.model.gpt_neox.layers.0.mlp.dense_4h_to_h.weight:\n dtype: fp32\n shape: [2560, 10240]\n location: [, ]\n 
self.model.gpt_neox.layers.0.mlp.dense_4h_to_h.bias:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.final_layer_norm.weight:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.gpt_neox.final_layer_norm.bias:\n dtype: fp32\n shape: [2560]\n location: [, 10240]\n self.model.embed_out.weight:\n dtype: fp32\n shape: [50280, 2560]\n location: [, ]\n prim::padding_sequence_3:\n type: PaddingSequence\n input:\n mask.1: {}\n output:\n '197': {}\n attr:\n dst_shape: -1,1,1,-1\n dims: 1\n padding_value: -3.e+38\n prim::TupleUnpack_2:\n type: TupleUnpack\n input:\n pastkv.1: {}\n output:\n '51': {}\n prim::TupleUnpack_119:\n type: TupleUnpack\n input:\n '51': {}\n output:\n past_key.1: {}\n past_value.1: {}\n prim::gather_indices_1:\n type: prim::gather_indices\n input:\n input_ids.1: {}\n output:\n '202': {}\n aten::embedding_0:\n type: Gather\n input:\n self.model.gpt_neox.embed_in.weight: {}\n input_ids.1: {}\n output:\n '74': {}\n attr:\n batch_dims: 0\n axis: 1\n embedding: true\n aten::layer_norm_17:\n type: LayerNorm\n input:\n '74': {}\n self.model.gpt_neox.layers.0.input_layernorm.weight: {}\n self.model.gpt_neox.layers.0.input_layernorm.bias: {}\n output:\n input.4: {}\n attr:\n epsilon: 1.0e-05\n normalized_shape: ''\n aten::linear_16:\n type: InnerProduct\n input:\n input.4: {}\n self.model.gpt_neox.layers.0.attention.query_key_value.weight: {}\n self.model.gpt_neox.layers.0.attention.query_key_value.bias: {}\n output:\n qkv.1: {}\n aten::size_94:\n type: Shape\n input:\n qkv.1: {}\n output:\n '77': {}\n attr:\n start: 0\n end: 1\n aten::size_80:\n type: Shape\n input:\n qkv.1: {}\n output:\n '78': {}\n attr:\n start: 1\n end: 2\n prim::ListConstruct_45:\n type: ListConstruct\n input:\n '77': {}\n '78': {}\n output:\n '79': {}\n aten::view_123:\n type: View\n input:\n qkv.1: {}\n '79': {}\n output:\n qkv.2: {}\n attr:\n shape: -1,-1,32,240\n aten::slice_43:\n type: Slice\n input:\n qkv.2: {}\n output:\n '81': {}\n attr:\n 
axes: 3\n starts: 0\n ends: 80\n steps: 1\n aten::permute_23:\n type: Reorder\n input:\n '81': {}\n output:\n query.1: {}\n attr:\n src_perm: 0,1,2,3\n dst_perm: 0,2,1,3\n aten::slice_40:\n type: Slice\n input:\n qkv.2: {}\n output:\n '83': {}\n attr:\n axes: 3\n starts: 80\n ends: 160\n steps: 1\n aten::permute_24:\n type: Reorder\n input:\n '83': {}\n output:\n key.1: {}\n attr:\n src_perm: 0,1,2,3\n dst_perm: 0,2,1,3\n aten::slice_41:\n type: Slice\n input:\n qkv.2: {}\n output:\n '85': {}\n attr:\n axes: 3\n starts: 160\n ends: \n steps: 1\n aten::permute_25:\n type: Reorder\n input:\n '85': {}\n output:\n value.1: {}\n attr:\n src_perm: 0,1,2,3\n dst_perm: 0,2,1,3\n aten::slice_35:\n type: Slice\n input:\n query.1: {}\n output:\n q.1: {}\n attr:\n axes: 3\n starts: 0\n ends: 20\n steps: 1\n aten::slice_36:\n type: Slice\n input:\n query.1: {}\n output:\n query_pass.1: {}\n attr:\n axes: 3\n starts: 20\n ends: \n steps: 1\n aten::slice_37:\n type: Slice\n input:\n key.1: {}\n output:\n k.1: {}\n attr:\n axes: 3\n starts: 0\n ends: 20\n steps: 1\n aten::slice_38:\n type: Slice\n input:\n key.1: {}\n output:\n key_pass.1: {}\n attr:\n axes: 3\n starts: 20\n ends: \n steps: 1\n aten::size_63:\n type: Shape\n input:\n key.1: {}\n output:\n '91': {}\n attr:\n start: 2\n end: 3\n aten::size_64:\n type: Shape\n input:\n past_key.1: {}\n output:\n '93': {}\n attr:\n start: 2\n end: 3\n aten::add_92:\n type: Add\n input:\n '91': {}\n '93': {}\n output:\n seq_len0.1: {}\n aten::slice_7:\n type: Slice\n input:\n '13': {}\n seq_len0.1: {}\n output:\n '98': {}\n attr:\n axes: 0\n starts: 0\n ends: null\n steps: 1\n aten::slice_6:\n type: Slice\n input:\n '12': {}\n seq_len0.1: {}\n output:\n '100': {}\n attr:\n axes: 0\n starts: 0\n ends: null\n steps: 1\n aten::size_81:\n type: Shape\n input:\n '98': {}\n output:\n '106': {}\n attr:\n start: 1\n end: 2\n aten::size_54:\n type: Shape\n input:\n '98': {}\n output:\n '107': {}\n attr:\n start: 3\n end: 4\n 
prim::ListConstruct_82:\n type: ListConstruct\n input:\n '106': {}\n '107': {}\n output:\n '108': {}\n aten::repeat_122:\n type: Repeat\n input:\n '202': {}\n '108': {}\n output:\n gather_indices0.1: {}\n aten::size_95:\n type: Shape\n input:\n gather_indices0.1: {}\n output:\n '110': {}\n attr:\n start: 0\n end: 1\n prim::ListConstruct_83:\n type: ListConstruct\n input:\n '110': {}\n output:\n '111': {}\n aten::repeat_128:\n type: Repeat\n input:\n '98': {}\n '111': {}\n output:\n '112': {}\n aten::gather_65:\n type: Gather\n input:\n '112': {}\n gather_indices0.1: {}\n output:\n cos.2: {}\n aten::repeat_129:\n type: Repeat\n input:\n '100': {}\n '111': {}\n output:\n '114': {}\n aten::gather_66:\n type: Gather\n input:\n '114': {}\n gather_indices0.1: {}\n output:\n sin.2: {}\n aten::mul_124:\n type: Mul\n input:\n q.1: {}\n cos.2: {}\n output:\n '116': {}\n attr:\n algorithm: mul\n aten::size_55:\n type: Shape\n input:\n q.1: {}\n output:\n '117': {}\n attr:\n start: 3\n end: 4\n aten::floor_divide_8:\n type: Div\n input:\n '117': {}\n '19': {}\n output:\n '119': {}\n attr:\n algorithm: div\n aten::slice_56:\n type: Slice\n input:\n q.1: {}\n '119': {}\n output:\n x1.1: {}\n attr:\n axes: 3\n starts: 0\n ends: null\n steps: 1\n aten::slice_57:\n type: Slice\n input:\n q.1: {}\n '119': {}\n output:\n x2.1: {}\n attr:\n axes: 3\n starts: null\n ends: \n steps: 1\n aten::neg_133:\n type: Neg\n input:\n x2.1: {}\n aten::neg_133_mul_val: {}\n output:\n '123': {}\n attr:\n algorithm: mul\n prim::ListConstruct_132:\n type: ListConstruct\n input:\n '123': {}\n x1.1: {}\n output:\n '124': {}\n aten::cat_70:\n type: Concat\n input:\n '124': {}\n output:\n '125': {}\n attr:\n axis: -1\n aten::mul_130:\n type: Mul\n input:\n '125': {}\n sin.2: {}\n output:\n '126': {}\n attr:\n algorithm: mul\n aten::add_84:\n type: Add\n input:\n '116': {}\n '126': {}\n output:\n query0.1: {}\n aten::mul_126:\n type: Mul\n input:\n k.1: {}\n cos.2: {}\n output:\n '128': {}\n attr:\n 
algorithm: mul\n aten::size_58:\n type: Shape\n input:\n k.1: {}\n output:\n '129': {}\n attr:\n start: 3\n end: 4\n aten::floor_divide_9:\n type: Div\n input:\n '129': {}\n '19': {}\n output:\n '131': {}\n attr:\n algorithm: div\n aten::slice_59:\n type: Slice\n input:\n k.1: {}\n '131': {}\n output:\n x10.1: {}\n attr:\n axes: 3\n starts: 0\n ends: null\n steps: 1\n aten::slice_60:\n type: Slice\n input:\n k.1: {}\n '131': {}\n output:\n x20.1: {}\n attr:\n axes: 3\n starts: null\n ends: \n steps: 1\n aten::neg_135:\n type: Neg\n input:\n x20.1: {}\n aten::neg_135_mul_val: {}\n output:\n '135': {}\n attr:\n algorithm: mul\n prim::ListConstruct_134:\n type: ListConstruct\n input:\n '135': {}\n x10.1: {}\n output:\n '136': {}\n aten::cat_71:\n type: Concat\n input:\n '136': {}\n output:\n '137': {}\n attr:\n axis: -1\n aten::mul_131:\n type: Mul\n input:\n '137': {}\n sin.2: {}\n output:\n '138': {}\n attr:\n algorithm: mul\n aten::add_85:\n type: Add\n input:\n '128': {}\n '138': {}\n output:\n key0.1: {}\n prim::ListConstruct_125:\n type: ListConstruct\n input:\n query0.1: {}\n query_pass.1: {}\n output:\n '140': {}\n aten::cat_72:\n type: Concat\n input:\n '140': {}\n output:\n query1.1: {}\n attr:\n axis: -1\n prim::ListConstruct_127:\n type: ListConstruct\n input:\n key0.1: {}\n key_pass.1: {}\n output:\n '142': {}\n aten::cat_73:\n type: Concat\n input:\n '142': {}\n output:\n key1.1: {}\n attr:\n axis: -1\n prim::ListConstruct_120:\n type: ListConstruct\n input:\n past_key.1: {}\n key1.1: {}\n output:\n '144': {}\n aten::cat_76:\n type: Concat\n input:\n '144': {}\n output:\n key2.1: {}\n attr:\n axis: -2\n prim::ListConstruct_121:\n type: ListConstruct\n input:\n past_value.1: {}\n value.1: {}\n output:\n '146': {}\n aten::cat_77:\n type: Concat\n input:\n '146': {}\n output:\n value0.1: {}\n attr:\n axis: -2\n aten::size_96:\n type: Shape\n input:\n query1.1: {}\n output:\n '148': {}\n attr:\n start: 0\n end: 1\n aten::size_86:\n type: Shape\n input:\n 
query1.1: {}\n output:\n '150': {}\n attr:\n start: 1\n end: 2\n aten::size_67:\n type: Shape\n input:\n query1.1: {}\n output:\n '152': {}\n attr:\n start: 2\n end: 3\n aten::size_78:\n type: Shape\n input:\n key2.1: {}\n output:\n '155': {}\n attr:\n start: -2\n end: -1\n aten::sub_87:\n type: Sub\n input:\n '155': {}\n '152': {}\n output:\n '157': {}\n attr:\n algorithm: sub\n aten::slice_5:\n type: Slice\n input:\n '6': {}\n '157': {}\n '155': {}\n output:\n '159': {}\n attr:\n axes: 2\n starts: null\n ends: null\n steps: 1\n aten::slice_61:\n type: Slice\n input:\n '159': {}\n '155': {}\n output:\n causal_mask.1: {}\n attr:\n axes: 3\n starts: 0\n ends: null\n steps: 1\n aten::mul_139:\n type: Mul\n input:\n '148': {}\n '150': {}\n output:\n '161': {}\n attr:\n algorithm: mul\n prim::ListConstruct_140:\n type: ListConstruct\n input:\n '161': {}\n '152': {}\n '155': {}\n output:\n '167': {}\n aten::zeros_52:\n type: Zeros\n input:\n '167': {}\n output:\n attn_scores.1: {}\n aten::transpose_137:\n type: Reorder\n input:\n key2.1: {}\n output:\n '200': {}\n attr:\n transpose_dims: 2,3\n prim::mybaddbmm_20:\n type: Baddbmm\n input:\n attn_scores.1: {}\n query1.1: {}\n '200': {}\n output:\n '201': {}\n attr:\n beta: 1.0\n alpha: 0.\n aten::where_4:\n type: Where\n input:\n causal_mask.1: {}\n '201': {}\n '4': {}\n output:\n attn_scores2.1: {}\n attr:\n mask_value: -3.e+38\n aten::add_88:\n type: Add\n input:\n attn_scores2.1: {}\n '197': {}\n output:\n input.6: {}\n aten::softmax_74:\n type: Softmax\n input:\n input.6: {}\n output:\n attn_weights.1: {}\n attr:\n axis: -1\n aten::matmul_138:\n type: Matmul\n input:\n attn_weights.1: {}\n value0.1: {}\n output:\n tensor.1: {}\n aten::permute_26:\n type: Reorder\n input:\n tensor.1: {}\n output:\n '178': {}\n attr:\n src_perm: 0,1,2,3\n dst_perm: 0,2,1,3\n aten::size_97:\n type: Shape\n input:\n '178': {}\n output:\n '180': {}\n attr:\n start: 0\n end: 1\n aten::size_89:\n type: Shape\n input:\n '178': {}\n output:\n 
'181': {}\n attr:\n start: 1\n end: 2\n prim::ListConstruct_29:\n type: ListConstruct\n input:\n '180': {}\n '181': {}\n output:\n '182': {}\n aten::view_143:\n type: View\n input:\n '178': {}\n '182': {}\n output:\n input0.1: {}\n attr:\n shape: -1,-1,2560\n aten::linear_15:\n type: InnerProduct\n input:\n input0.1: {}\n self.model.gpt_neox.layers.0.attention.dense.weight: {}\n self.model.gpt_neox.layers.0.attention.dense.bias: {}\n output:\n '184': {}\n aten::layer_norm_14:\n type: LayerNorm\n input:\n '74': {}\n self.model.gpt_neox.layers.0.post_attention_layernorm.weight: {}\n self.model.gpt_neox.layers.0.post_attention_layernorm.bias: {}\n output:\n input.8: {}\n attr:\n epsilon: 1.0e-05\n normalized_shape: ''\n aten::gelu_49:\n type: MatMulWithBiasGelu\n input:\n input.8: {}\n self.model.gpt_neox.layers.0.mlp.dense_h_to_4h.weight: {}\n self.model.gpt_neox.layers.0.mlp.dense_h_to_4h.bias: {}\n output:\n '187': {}\n attr:\n append_op: gelu_tanh\n aten::add_90:\n type: MatMulWithBiasAdd\n input:\n '187': {}\n self.model.gpt_neox.layers.0.mlp.dense_4h_to_h.weight: {}\n self.model.gpt_neox.layers.0.mlp.dense_4h_to_h.bias: {}\n '184': {}\n output:\n '189': {}\n attr:\n append_op: sum\n aten::add_91:\n type: Add\n input:\n '189': {}\n '74': {}\n output:\n input.2: {}\n aten::layer_norm_11:\n type: LayerNorm\n input:\n input.2: {}\n self.model.gpt_neox.final_layer_norm.weight: {}\n self.model.gpt_neox.final_layer_norm.bias: {}\n output:\n input.1: {}\n attr:\n epsilon: 1.0e-05\n normalized_shape: ''\n aten::linear_10:\n type: InnerProduct\n input:\n input.1: {}\n self.model.embed_out.weight: {}\n output:\n '192': {}\n prim::TupleConstruct_136:\n type: TupleConstruct\n input:\n key2.1: {}\n value0.1: {}\n output:\n '193': {}\n prim::TupleConstruct_145:\n type: TupleConstruct\n input:\n '193': {}\n output:\n '194': {}\n prim::TupleConstruct_144:\n type: TupleConstruct\n input:\n '192': {}\n '194': {}\n output:\n '195': {}\n"
file = open('conf.yaml', 'w')
file.write(text)
file.close()
dollygraph = Graph()
dollygraph.graph_init('./conf.yaml')
dollygraph.framework_modeling_config['framework'] = 'torch'
for dest_op_name in dollygraph.nodes[0].output_tensors[7].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[1].data = np.zeros([7680, 2560], dtype=np.float32)
dest_node.input_tensors[2].data = np.zeros([7680, 2560], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[15].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[1].data = np.zeros([2560, 2560], dtype=np.float32)
dest_node.input_tensors[2].data = np.zeros([2560], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[20].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[1].data = np.zeros([10240, 2560], dtype=np.float32)
dest_node.input_tensors[2].data = np.zeros([10240], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[21].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[1].data = np.zeros([2560, 10240], dtype=np.float32)
dest_node.input_tensors[2].data = np.zeros([2560], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[25].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[1].data = np.zeros([50280, 2560], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[8].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[0].data = np.zeros([1, 1, 2048, 20], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[9].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[0].data = np.zeros([1, 1, 2048, 20], dtype=np.float32)
for dest_op_name in dollygraph.nodes[0].output_tensors[13].dest_op:
dest_node = dollygraph.get_node_by_name(dest_op_name)
dest_node.input_tensors[0].data = np.zeros([1, 1, 2048, 2048], dtype=np.float32)
oldlen = len(dollygraph.nodes)
p_fusion = PATTERNS['TorchUnpackBaddbmm']()
dollygraph = p_fusion(dollygraph)
newlen = len(dollygraph.nodes)
self.assertTrue((oldlen != newlen))
oldlen = len(dollygraph.nodes)
p_fusion = PATTERNS['RemoveZeros']()
dollygraph = p_fusion(dollygraph)
newlen = len(dollygraph.nodes)
self.assertTrue((oldlen != newlen))
oldlen = len(dollygraph.nodes)
p_fusion = PATTERNS['LowerAllTuples']()
dollygraph = p_fusion(dollygraph)
newlen = len(dollygraph.nodes)
self.assertTrue((oldlen != newlen))
p_fusion = PATTERNS['TorchEmbedding']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['RmsNorm']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['InnerproductReshapeFusion']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['InnerproductWithBiasGelu']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['InnerproductWithSwish']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['SliceMask']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['ArangewithReciprocal']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['InnerproductwithSlice']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['RoraryPosEmb']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['EinsumwithArange']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['RemoveSlice']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['RemoveRange']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['MatMulWithTransposeScaleAdd']()
dollygraph = p_fusion(dollygraph)
oldlen = len(dollygraph.nodes)
p_fusion = PATTERNS['NeoxReorderChange']()
dollygraph = p_fusion(dollygraph)
newlen = len(dollygraph.nodes)
self.assertTrue((oldlen != newlen))
oldlen = len(dollygraph.nodes)
p_fusion = PATTERNS['NeoxRoraryPosEmb']()
dollygraph = p_fusion(dollygraph)
newlen = len(dollygraph.nodes)
self.assertTrue((oldlen != newlen))
p_fusion = PATTERNS['InsertQuantNode']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['InsertBF16Node']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['TorchInsertBF16Node']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['QuantizeFusion']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['QKVMerge']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['ReshapeFusion']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['OperatorAdaptor']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['EmbeddingsTo2DBeforeInnerProduct']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['QuantGatherToBF16']()
dollygraph = p_fusion(dollygraph)
p_fusion = PATTERNS['MultiHeadAttention']()
dollygraph = p_fusion(dollygraph) |
class ParallelCompile(object):
    """Monkey-patches ``distutils`` so C/C++ sources compile in parallel.

    The thread count comes from (in order) the environment variable *envvar*
    (if set), the *default* argument, or ``cpu_count()`` when both resolve to
    0; *max* caps the autodetected value (0 = no cap). Usable either via
    :meth:`install` (permanent) or as a context manager (restores the
    previous compile function on exit).
    """
    __slots__ = ('envvar', 'default', 'max', 'old')

    def __init__(self, envvar=None, default=0, max=0):
        self.envvar = envvar
        self.default = default
        self.max = max
        # Stack of previously-installed compile functions (for nested use).
        self.old = []

    def function(self):
        """Return a drop-in replacement for ``CCompiler.compile``."""
        def compile_function(compiler, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
            (macros, objects, extra_postargs, pp_opts, build) = compiler._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
            cc_args = compiler._get_cc_args(pp_opts, debug, extra_preargs)
            threads = self.default
            if (self.envvar is not None):
                threads = int(os.environ.get(self.envvar, self.default))

            def _single_compile(obj):
                try:
                    (src, ext) = build[obj]
                except KeyError:
                    # Object not in the build map — nothing to compile
                    # (mirrors stock distutils behaviour).
                    return
                compiler._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
            try:
                import multiprocessing
                from multiprocessing.pool import ThreadPool
            except ImportError:
                threads = 1
            if (threads == 0):
                # Autodetect, clamped by self.max when set.
                try:
                    threads = multiprocessing.cpu_count()
                    threads = (self.max if (self.max and (self.max < threads)) else threads)
                except NotImplementedError:
                    threads = 1
            if (threads > 1):
                # Bug fix: use the pool as a context manager so its worker
                # threads are terminated and joined instead of being leaked.
                with ThreadPool(threads) as pool:
                    for _ in pool.imap_unordered(_single_compile, objects):
                        pass
            else:
                for ob in objects:
                    _single_compile(ob)
            return objects
        return compile_function

    def install(self):
        """Globally install the parallel compile function; returns ``self``."""
        distutils.ccompiler.CCompiler.compile = self.function()
        return self

    def __enter__(self):
        # Remember the current compile function so __exit__ can restore it.
        self.old.append(distutils.ccompiler.CCompiler.compile)
        return self.install()

    def __exit__(self, *args):
        distutils.ccompiler.CCompiler.compile = self.old.pop()
def _check_supported_json_output_versions(version):
    """Return True when *version* is a supported JSON output schema version."""
    supported = _SchemaVersions.ALL_VERSIONS
    return version in supported
class DenseGaussianVariable(object):
    """A dense latent Gaussian variable with a (optionally learned) prior.

    Maintains a diagonal-Gaussian (or point-estimate) posterior over
    ``n_variables`` latent dimensions, whose parameters are produced by
    ``Dense`` layers from encoder input. With ``update_form='highway'`` the
    new posterior parameters are gated mixtures of the previous posterior
    state and the fresh estimate (iterative inference).
    """
    def __init__(self, batch_size, n_variables, const_prior_var, n_input, update_form, posterior_form='gaussian', learn_prior=True):
        # n_input appears to be a pair: n_input[0] sizes the posterior
        # (encoder-side) layers, n_input[1] the prior (decoder-side) layers
        # — TODO confirm against callers.
        self.batch_size = batch_size
        self.n_variables = n_variables
        assert (update_form in ['direct', 'highway']), 'Latent variable update form not found.'
        self.update_form = update_form
        self.posterior_form = posterior_form
        self.learn_prior = learn_prior
        if self.learn_prior:
            self.prior_mean = Dense(n_input[1], self.n_variables)
            self.prior_log_var = None
            if (not const_prior_var):
                # Input-dependent prior variance.
                self.prior_log_var = Dense(n_input[1], self.n_variables)
        self.posterior_mean = Dense(n_input[0], self.n_variables)
        if (self.posterior_form == 'gaussian'):
            self.posterior_log_var = Dense(n_input[0], self.n_variables)
        if (self.update_form == 'highway'):
            # Sigmoid gates mixing old and new posterior parameters.
            self.posterior_mean_gate = Dense(n_input[0], self.n_variables, 'sigmoid')
            if (self.posterior_form == 'gaussian'):
                self.posterior_log_var_gate = Dense(n_input[0], self.n_variables, 'sigmoid')
        self.posterior = self.init_dist(self.posterior_form)
        self.prior = self.init_dist()
        if (self.learn_prior and const_prior_var):
            # Constant prior variance: learn it directly as a free parameter.
            self.prior.log_var_trainable()

    def init_dist(self, form='gaussian'):
        """Create a zero-initialized distribution of the requested form."""
        if (form == 'gaussian'):
            return DiagonalGaussian(self.n_variables, Variable(torch.zeros(self.batch_size, self.n_variables)), Variable(torch.zeros(self.batch_size, self.n_variables)))
        elif (form == 'point_estimate'):
            return PointEstimate(Variable(torch.zeros(self.batch_size, self.n_variables)))
        else:
            raise Exception('Distribution form not found.')

    def encode(self, input):
        """Update the posterior parameters from *input* and return a fresh sample."""
        mean = self.posterior_mean(input)
        if (self.posterior_form == 'gaussian'):
            # Clamp for numerical stability of exp(log_var).
            log_var = torch.clamp(self.posterior_log_var(input), (- 15.0), 15.0)
        if (self.update_form == 'highway'):
            # Gated (highway) update: interpolate between the detached previous
            # posterior parameters and the newly estimated ones.
            mean_gate = self.posterior_mean_gate(input)
            if (self.posterior_form == 'gaussian'):
                log_var_gate = self.posterior_log_var_gate(input)
            mean = ((mean_gate * self.posterior.mean.detach()) + ((1 - mean_gate) * mean))
            if (self.posterior_form == 'gaussian'):
                log_var = torch.clamp(((log_var_gate * self.posterior.log_var.detach()) + ((1 - log_var_gate) * log_var)), (- 15.0), 15.0)
        self.posterior.mean = mean
        # retain_grad so state_gradients() can read gradients on these
        # non-leaf parameter tensors.
        self.posterior.mean.retain_grad()
        if (self.posterior_form == 'gaussian'):
            self.posterior.log_var = log_var
            self.posterior.log_var.retain_grad()
        return self.posterior.sample(resample=True)

    def decode(self, input, n_samples, generate=False):
        """Update the prior from decoder *input* (if learned) and draw a sample.

        Samples from the prior when *generate* is True, otherwise from the
        posterior.
        """
        if self.learn_prior:
            # input presumably has shape (batch, sample, data) — flatten the
            # first two dims for the Dense layer, then restore them.
            batch_size = input.size()[0]
            sample_size = input.size()[1]
            data_size = input.size()[2]
            input = input.view((- 1), data_size)
            mean = self.prior_mean(input).view(batch_size, sample_size, (- 1))
            self.prior.mean = mean
            if (self.prior_log_var is not None):
                log_var = self.prior_log_var(input).view(batch_size, sample_size, (- 1))
                self.prior.log_var = log_var
        if generate:
            sample = self.prior.sample(n_samples=n_samples, resample=True)
        else:
            sample = self.posterior.sample(n_samples=n_samples, resample=True)
        return sample

    def error(self, averaged=True):
        """Posterior sample minus (detached) prior mean, optionally averaged over samples."""
        sample = self.posterior.sample()
        n_samples = sample.size()[1]
        prior_mean = self.prior.mean.detach()
        if (len(prior_mean.data.shape) == 2):
            # Broadcast a per-batch prior mean across the sample dimension.
            prior_mean = prior_mean.unsqueeze(1).repeat(1, n_samples, 1)
        if averaged:
            return (sample - prior_mean).mean(dim=1)
        else:
            return (sample - prior_mean)

    def norm_error(self, averaged=True):
        """Prior-normalized error: (sample - prior_mean) / exp(prior_log_var + eps)."""
        sample = self.posterior.sample()
        n_samples = sample.size()[1]
        prior_mean = self.prior.mean.detach()
        if (len(prior_mean.data.shape) == 2):
            prior_mean = prior_mean.unsqueeze(1).repeat(1, n_samples, 1)
        prior_log_var = self.prior.log_var.detach()
        if (len(prior_log_var.data.shape) == 2):
            prior_log_var = prior_log_var.unsqueeze(1).repeat(1, n_samples, 1)
        # NOTE(review): the 1e-07 epsilon is added INSIDE exp(), where it has
        # almost no effect; exp(prior_log_var) + 1e-07 may have been the
        # intent (to guard the division) — confirm.
        n_error = ((sample - prior_mean) / torch.exp((prior_log_var + 1e-07)))
        if averaged:
            n_error = n_error.mean(dim=1)
        return n_error

    def kl_divergence(self):
        """Monte-Carlo KL estimate: log q(z) - log p(z) at a posterior sample."""
        # NOTE(review): sample() is called twice; presumably it returns a
        # cached sample when resample is not requested — confirm.
        return (self.posterior.log_prob(self.posterior.sample()) - self.prior.log_prob(self.posterior.sample()))

    def analytical_kl(self):
        """Closed-form KL of the Gaussian posterior from a standard normal prior.

        NOTE(review): this formula assumes a N(0, I) prior — confirm it is
        only used in that configuration.
        """
        n_samples = self.posterior.sample().size()[1]
        kl = ((- 0.5) * (((1 + self.posterior.log_var) - torch.pow(self.posterior.mean, 2)) - torch.exp(self.posterior.log_var)))
        return kl.unsqueeze(1).repeat(1, n_samples, 1)

    def reset(self, mean=None, log_var=None, from_prior=True):
        """Reset posterior parameters, by default to the current prior's values."""
        if from_prior:
            mean = self.prior.mean.data.clone()
            log_var = self.prior.log_var.data.clone()
            if (len(mean.shape) == 3):
                # Collapse the sample dimension before seeding the posterior.
                mean = mean.mean(dim=1)
            if (len(log_var.shape) == 3):
                log_var = log_var.mean(dim=1)
        self.reset_mean(mean)
        if (self.posterior_form == 'gaussian'):
            self.reset_log_var(log_var)

    def reset_mean(self, value):
        """Reset the posterior mean to *value*."""
        self.posterior.reset_mean(value)

    def reset_log_var(self, value):
        """Reset the posterior log-variance to *value*."""
        self.posterior.reset_log_var(value)

    def trainable_mean(self):
        self.posterior.mean_trainable()

    def trainable_log_var(self):
        self.posterior.log_var_trainable()

    def not_trainable_mean(self):
        self.posterior.mean_not_trainable()

    def not_trainable_log_var(self):
        self.posterior.log_var_not_trainable()

    def eval(self):
        """Put every Dense sub-layer into eval mode."""
        if self.learn_prior:
            self.prior_mean.eval()
            if (self.prior_log_var is not None):
                self.prior_log_var.eval()
        self.posterior_mean.eval()
        if (self.posterior_form == 'gaussian'):
            self.posterior_log_var.eval()
        if (self.update_form == 'highway'):
            self.posterior_mean_gate.eval()
            if (self.posterior_form == 'gaussian'):
                self.posterior_log_var_gate.eval()

    def train(self):
        """Put every Dense sub-layer into train mode."""
        if self.learn_prior:
            self.prior_mean.train()
            if (self.prior_log_var is not None):
                self.prior_log_var.train()
        self.posterior_mean.train()
        if (self.posterior_form == 'gaussian'):
            self.posterior_log_var.train()
        if (self.update_form == 'highway'):
            self.posterior_mean_gate.train()
            if (self.posterior_form == 'gaussian'):
                self.posterior_log_var_gate.train()

    def cuda(self, device_id=0):
        """Move all sub-layers and both distributions to the given CUDA device."""
        if self.learn_prior:
            self.prior_mean.cuda(device_id)
            if (self.prior_log_var is not None):
                self.prior_log_var.cuda(device_id)
        self.posterior_mean.cuda(device_id)
        if (self.posterior_form == 'gaussian'):
            self.posterior_log_var.cuda(device_id)
        if (self.update_form == 'highway'):
            self.posterior_mean_gate.cuda(device_id)
            if (self.posterior_form == 'gaussian'):
                self.posterior_log_var_gate.cuda(device_id)
        self.prior.cuda(device_id)
        self.posterior.cuda(device_id)

    def parameters(self):
        """All trainable parameters (encoder + decoder sides)."""
        return (self.encoder_parameters() + self.decoder_parameters())

    def encoder_parameters(self):
        """Parameters of the posterior (inference) layers."""
        encoder_params = []
        encoder_params.extend(list(self.posterior_mean.parameters()))
        if (self.posterior_form == 'gaussian'):
            encoder_params.extend(list(self.posterior_log_var.parameters()))
        if (self.update_form == 'highway'):
            encoder_params.extend(list(self.posterior_mean_gate.parameters()))
            if (self.posterior_form == 'gaussian'):
                encoder_params.extend(list(self.posterior_log_var_gate.parameters()))
        return encoder_params

    def decoder_parameters(self):
        """Parameters of the prior (generative) layers, when the prior is learned."""
        decoder_params = []
        if self.learn_prior:
            decoder_params.extend(list(self.prior_mean.parameters()))
            if (self.prior_log_var is not None):
                decoder_params.extend(list(self.prior_log_var.parameters()))
            else:
                # Constant prior variance is itself the trainable parameter.
                decoder_params.append(self.prior.log_var)
        return decoder_params

    def state_parameters(self):
        """The posterior's own state tensors (mean / log-variance)."""
        return self.posterior.state_parameters()

    def state_gradients(self):
        """Gradients of the posterior state tensors (requires a prior backward pass)."""
        assert (self.posterior.mean.grad is not None), 'State gradients are None.'
        grads = [self.posterior.mean.grad.detach()]
        if (self.posterior_form == 'gaussian'):
            grads += [self.posterior.log_var.grad.detach()]
        for grad in grads:
            # Legacy torch.autograd.Variable flag — kept for the old API.
            grad.volatile = False
        return grads
def smplPvis():
    """Toggle the smplP button state, then sync the smplP visual to it."""
    smplPbu.switch()
    # Re-query the status after switching; 'Shide' hides, 'Sshow' shows.
    if smplPbu.status() == 'Shide':
        smplP.off()
    elif smplPbu.status() == 'Sshow':
        smplP.on()
class Attention(Model):
    """Transformer-encoder model over a stream of signature features.

    Stacks the per-step signatures along a new dimension, runs them through a
    stack of ``nn.TransformerEncoderLayer``s, and projects each position to
    ``out_channels`` with a final linear layer.
    """
    _compatible_windows = (window_module.Sliding, window_module.Expanding)

    def __init__(self, in_channels, out_channels, residual_channels, num_heads, hidden_channels, num_layers, dropout=0, activation='relu'):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.residual_channels = residual_channels
        self.num_heads = num_heads
        self.hidden_channels = hidden_channels
        self.num_layers = num_layers
        layer = nn.TransformerEncoderLayer(
            d_model=residual_channels,
            nhead=num_heads,
            dim_feedforward=hidden_channels,
            dropout=dropout,
            activation=activation,
        )
        self.transformer_encoder = nn.TransformerEncoder(layer, num_layers)
        self.linear = nn.Linear(residual_channels, out_channels)

    def forward(self, signatures):
        # Exactly one signature stream is supported.
        assert (len(signatures) == 1)
        stream = signatures[0]
        stacked = torch.stack(stream, dim=1)
        encoded = self.transformer_encoder(stacked)
        return self.linear(encoded)
def unfreeze_model(model):
    """Recursively unfix every ``QuantAct`` module reachable from *model*.

    ``QuantAct`` leaves get ``unfix()`` called; ``nn.Sequential`` containers
    recurse into their children; anything else recurses into every
    ``nn.Module`` attribute whose name does not contain 'norm'. Returns
    *model* for chaining.
    """
    # NOTE: exact type checks (not isinstance) are kept deliberately — the
    # original distinguishes these exact classes, not their subclasses.
    if type(model) == QuantAct:
        model.unfix()
    elif type(model) == nn.Sequential:
        # Fix: drop the unused `mods` accumulator and the unused name from
        # named_children(); only the child modules are needed.
        for child in model.children():
            unfreeze_model(child)
    else:
        for attr in dir(model):
            mod = getattr(model, attr)
            if isinstance(mod, nn.Module) and ('norm' not in attr):
                unfreeze_model(mod)
    return model
def _get_augmented_positions(s: str, spec: AugmentationSpec) -> List[int]:
    """Return the second element of each augmented-position pair for *s* under *spec*."""
    _, positions = replace_tokens_and_get_augmented_positions(s, spec)
    return [position[1] for position in positions]
def test_tuple_rvalue_getter():
    """Pass a large tuple to the binding to exercise its rvalue getter."""
    size = 1000
    values = tuple(range(size))
    m.tuple_rvalue_getter(values)
class SquadExample(object):
    """A single SQuAD question-answering example.

    Holds the question id/text, the tokenized document, and — for training
    examples — the original answer text and its start/end token positions.
    """

    def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = []
        s.append(('qas_id: %s' % tokenization.printable_text(self.qas_id)))
        s.append(('question_text: %s' % tokenization.printable_text(self.question_text)))
        s.append(('doc_tokens: [%s]' % ' '.join(self.doc_tokens)))
        # Bug fix: compare against None explicitly — position 0 is a valid
        # token index and was previously dropped by the truthiness test.
        if self.start_position is not None:
            s.append(('start_position: %d' % self.start_position))
        if self.end_position is not None:
            s.append(('end_position: %d' % self.end_position))
        return ', '.join(s)
def addLaserCalibration(laser_num, key, val):
    """Set *key* = *val* on the calibration record for laser *laser_num*.

    Updates the existing record in the module-level ``calibration`` dict when
    one exists; otherwise appends a new record containing only this key.
    """
    global calibration
    if (laser_num < len(calibration['lasers'])):
        calibration['lasers'][laser_num][key] = val
    else:
        # NOTE(review): this appends at the next free slot regardless of
        # laser_num, so a laser_num that skips ahead lands at a smaller
        # index — confirm callers always add lasers in order.
        calibration['lasers'].append({key: val})
def get_log(event_path, tag):
    """Extract the scalar series for *tag* from a TensorBoard event file.

    Returns a dict with two parallel lists: 'step' and *tag* (the scalar
    values). Returns None when the event file cannot be read.
    """
    try:
        a = {tag: [], 'step': []}
        for e in summary_iterator(event_path):
            for v in e.summary.value:
                if v.tag == tag:
                    a['step'].append(e.step)
                    a[tag].append(v.simple_value)
        return a
    # Bug fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception so interrupts still propagate.
    except Exception:
        print('Event file possibly corrupt: {}'.format(event_path))
        return None
class ColorJitter(object):
    """Color jitter that dispatches to an OpenCV or torchvision backend.

    numpy arrays are jittered with the cv transform; anything else (e.g. PIL
    images) goes through the torchvision transform. Both backends share the
    same brightness/contrast/saturation/hue settings.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.tv_F = tv_t.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)
        self.cv_F = cv_t.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)

    def __call__(self, img):
        # Exact type check kept from the original: only genuine ndarrays take
        # the OpenCV path.
        if type(img) == np.ndarray:
            return self.cv_F(img)
        return self.tv_F(img)

    def __repr__(self):
        return (
            f'{self.__class__.__name__}('
            f'brightness={self.brightness}, '
            f'contrast={self.contrast}, '
            f'saturation={self.saturation}, '
            f'hue={self.hue})'
        )
class FlaxMarianPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder: raises a helpful error when the 'flax' backend is missing."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError listing the missing backend.
        requires_backends(self, ['flax'])
# Fix: the decorator line was truncated to `.parametrize(...)`; restore the
# full pytest parametrize marker so the test collects and runs.
@pytest.mark.parametrize('input_data,expected', testdata)
def test_pascal(input_data, expected):
    """Check pascal() against each (input, expected) pair in testdata."""
    assert (pascal(*input_data) == expected)
def _compute_aspect_ratios_coco_dataset(dataset, indices=None):
if (indices is None):
indices = range(len(dataset))
aspect_ratios = []
for i in indices:
img_info = dataset.coco.imgs[dataset.ids[i]]
aspect_ratio = (float(img_info['width']) / float(img_info['height']))
aspect_ratios.append(aspect_ratio)
return aspect_ratios |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.