code stringlengths 101 5.91M |
|---|
def parse_arguments():
    """Parse the CLI: an optional ONNX input model path and a required output path.

    Returns:
        argparse.Namespace with `input_model` (default 'vgg16-12.onnx') and
        `output_model` (required).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input_model', type=str, required=False, default='vgg16-12.onnx')
    arg_parser.add_argument('--output_model', type=str, required=True)
    return arg_parser.parse_args()
def main():
    """Profile a model's FLOPs and parameter count (thop-style `profile`/`clever_format`).

    NOTE(review): `parser` is not defined in this function nor visibly in this
    chunk — either it is a module-level global or this was meant to call a
    local argument-parsing helper; confirm against the full file.
    """
    global args
    args = parser.parse_args()
    # Model class is looked up by name from the `models` namespace (torchvision-style).
    model = models.__dict__[args.arch]()
    print(model)
    # `input` shadows the builtin; kept as-is in this documentation-only pass.
    input = torch.randn(1, 3, args.input_size, args.input_size)
    model.train()
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = model.to(device)
    input = input.to(device)
    # Raw counts first, then human-readable strings via clever_format.
    (flops, params) = profile(model, inputs=(input,))
    print('flops = ', flops)
    print('params = ', params)
    (flops, params) = clever_format([flops, params], '%.3f')
    print('flops = ', flops)
    print('params = ', params)
class TFMobileViTConvLayer(tf.keras.layers.Layer):
    """Conv2D + optional BatchNorm + optional activation, with explicit zero
    padding computed from kernel size and dilation (so the conv runs 'VALID').

    `use_activation` may be True (fall back to `config.hidden_act`), an
    activation name string, or False for no activation.
    """

    def __init__(self, config: MobileViTConfig, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[(bool, str)]=True, **kwargs) -> None:
        super().__init__(**kwargs)
        # Runtime warning string kept verbatim (including the 'fine-tine' typo).
        logger.warning(f'''
{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish to train/fine-tine this model, you need a GPU or a TPU''')
        # 'same'-style padding amount for the given kernel/dilation.
        padding = (int(((kernel_size - 1) / 2)) * dilation)
        self.padding = tf.keras.layers.ZeroPadding2D(padding)
        if ((out_channels % groups) != 0):
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')
        self.convolution = tf.keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', dilation_rate=dilation, groups=groups, use_bias=bias, name='convolution')
        if use_normalization:
            self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, name='normalization')
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = get_tf_activation(use_activation)
            elif isinstance(config.hidden_act, str):
                self.activation = get_tf_activation(config.hidden_act)
            else:
                # config.hidden_act is already a callable at this point.
                self.activation = config.hidden_act
        else:
            self.activation = None

    def call(self, features: tf.Tensor, training: bool=False) -> tf.Tensor:
        # pad -> conv -> (batchnorm) -> (activation)
        padded_features = self.padding(features)
        features = self.convolution(padded_features)
        if (self.normalization is not None):
            features = self.normalization(features, training=training)
        if (self.activation is not None):
            features = self.activation(features)
        return features
def recall(y_true, y_pred):
    """Keras-backend recall metric: TP / (all actual positives + epsilon).

    Inputs are clipped to [0, 1] and rounded, so probabilistic predictions are
    binarized before counting; epsilon guards against division by zero.
    """
    from keras import backend as K
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())
def getPrediction(params):
    """Run a TFLite model over a range of matched frames and return encoded results.

    params keys used: 'model' (checkpoint name; falsy -> return []), 'path'
    (dataset subdir), 'indicator' (fixed command input, or None to read the
    per-frame value), 'start'/'end' (frame index range).
    Returns api.encode(...) of an (N, 2) int array of model outputs scaled by 255.
    """
    if (not params['model']):
        return []
    interpreter = utils.load_tflite(models_dir, params['model'], 'checkpoints', 'best.tflite')
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    real_path = (dataset_dir + params['path'])
    frames = associate_frames.read_file_list((real_path + '/sensor_data/matched_frame_ctrl_cmd.txt'))
    result = np.empty((0, 2), dtype=int)
    # Fixed indicator command (`or 0` maps None/falsy to 0) unless overridden per frame.
    cmd_input = np.array([[(params['indicator'] or 0)]], dtype=np.float32)
    start = params['start']
    end = params['end']
    keys = list(frames)[start:end]
    print(('get prediction from frame ' + str(start)))
    for frame in keys:
        (_, _, img, left, right, ind) = frames[frame]
        path = f'{real_path}/images/{img}_crop.jpeg'
        if (params['indicator'] is None):
            # No fixed indicator: use the recorded per-frame indicator value.
            cmd_input = np.array([[ind]], dtype=np.float32)
        img = utils.load_img(path, is_crop=False)
        img_input = np.expand_dims(img, axis=0)
        # Input order assumed: tensor 0 = command, tensor 1 = image — TODO confirm.
        interpreter.set_tensor(input_details[0]['index'], cmd_input)
        interpreter.set_tensor(input_details[1]['index'], img_input)
        interpreter.invoke()
        # Scale normalized outputs back to integer range.
        output = (interpreter.get_tensor(output_details[0]['index']) * 255)
        result = np.concatenate((result, output.astype(int)))
    return api.encode(result)
def parse_args():
    """Parse CLI options for CLIP feature extraction and echo the parsed values.

    Returns:
        argparse.Namespace with image list/dir, output dir, annotation path,
        visual-encoder name, model type/path, and a debug flag.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--image_list', help='Path of the image_list file', default=None)
    cli.add_argument('--image_dir', help='Root dir of the image path in image_list file', default=None)
    cli.add_argument('--output_dir', default=None)
    cli.add_argument('--anno', default=None)
    cli.add_argument('--ve_name', type=str, default=None, choices=['RN101', 'ViT-B/32', 'ViT-B/16', 'RN101_448', 'ViT-B/32_448'])
    cli.add_argument('--model_type_or_path', default='RN101', type=str, help='model type from original CLIP or model path offline')
    cli.add_argument('--debug', action='store_true')
    parsed = cli.parse_args()
    print('Called with args:')
    pprint(vars(parsed), indent=2)
    return parsed
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
def _check_input_dim(self, input):
return |
# NOTE(review): the bare `_model` line below looks like a decorator that lost
# its prefix in transcription (timm registers these factories with
# `@register_model`) — as written it is just a name expression; confirm upstream.
_model
def resnetv2_50x3_bitm(pretrained=False, **kwargs):
    """ResNetV2-50 with 3x width (Big Transfer / BiT-M weights).

    Delegates to _create_resnetv2 with the fixed-padding stem and the
    [3, 4, 6, 3] layer layout of a ResNet-50.
    """
    return _create_resnetv2('resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, stem_type='fixed', **kwargs)
def get_backbone_net(backbone='resnet101', output_stride=16, pretrained=True, norm_layer=nn.BatchNorm2d, bn_mom=0.01, root_beta=True):
    """Construct a ResNet-v1 backbone by name.

    Args:
        backbone: one of 'resnet50', 'resnet101', 'resnet152'.
        output_stride, pretrained, norm_layer, bn_mom, root_beta: forwarded
            to the resnet_v1 factory.
    Returns the constructed backbone module.
    """
    factories = {
        'resnet50': resnet_v1.resnet50,
        'resnet101': resnet_v1.resnet101,
        'resnet152': resnet_v1.resnet152,
    }
    assert (backbone in factories)
    if ('resnet' in backbone):
        return factories[backbone](output_stride=output_stride, pretrained=pretrained, norm_layer=norm_layer, bn_mom=bn_mom, root_beta=root_beta)
class Attention(Layer):
    """Trainable soft-attention pooling over the time axis of a 3D tensor.

    Given input of shape (batch, step_dim, features), learns a per-feature
    weight vector W (and an optional per-step bias b), scores each timestep
    with tanh(x·W + b), softmax-normalizes the scores (respecting an optional
    mask), and returns the attention-weighted sum over timesteps,
    shape (batch, features).
    """

    def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs):
        # step_dim: number of timesteps the layer attends over.
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0  # set for real in build()
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects (batch, steps, features).
        assert (len(input_shape) == 3)
        self.W = self.add_weight((input_shape[(- 1)],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint)
        self.features_dim = input_shape[(- 1)]
        if self.bias:
            # One bias per timestep (input_shape[1] == step_dim).
            self.b = self.add_weight((input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # The time axis is collapsed by this layer, so no mask is propagated.
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Score each timestep: (batch*steps, feat) . (feat, 1) -> (batch, steps).
        eij = K.reshape(K.dot(K.reshape(x, ((- 1), features_dim)), K.reshape(self.W, (features_dim, 1))), ((- 1), step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # Manual masked softmax; epsilon guards against an all-masked row.
        a = K.exp(eij)
        if (mask is not None):
            a *= K.cast(mask, K.floatx())
        a /= K.cast((K.sum(a, axis=1, keepdims=True) + K.epsilon()), K.floatx())
        a = K.expand_dims(a)
        weighted_input = (x * a)
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.features_dim)
def PrintDebugInfoForUtterance(ctm_edits_out_handle, split_lines_of_cur_utterance, segments_for_utterance, deleted_segments_for_utterance):
    """Write this utterance's ctm-edits lines, annotated with segment boundaries.

    Builds a time-sorted list of (time, annotation) pairs marking the start and
    end of every kept and deleted segment, then appends each annotation to the
    first ctm line whose end time reaches it. Output goes to
    ctm_edits_out_handle. Uses the module-level `args.frame_length`.
    """
    # (time, annotation-string) pairs for all segment boundaries.
    info_to_print = []
    for n in range(len(segments_for_utterance)):
        segment = segments_for_utterance[n]
        start_string = 'start-segment-{0}[{1}]'.format((n + 1), segment.DebugInfo())
        info_to_print.append((segment.StartTime(), start_string))
        end_string = 'end-segment-{}'.format((n + 1))
        info_to_print.append((segment.EndTime(), end_string))
    for n in range(len(deleted_segments_for_utterance)):
        segment = deleted_segments_for_utterance[n]
        start_string = 'start-deleted-segment-{0}[{1}]'.format((n + 1), segment.DebugInfo())
        info_to_print.append((segment.StartTime(), start_string))
        end_string = 'end-deleted-segment-{}'.format((n + 1))
        info_to_print.append((segment.EndTime(), end_string))
    # Sort by time so annotations can be consumed front-to-back below.
    info_to_print = sorted(info_to_print)
    for i in range(len(split_lines_of_cur_utterance)):
        split_line = split_lines_of_cur_utterance[i]
        # Tag the utterance-id field with the line index.
        split_line[0] += '[{}]'.format(i)
        start_time = float(split_line[2])
        end_time = (start_time + float(split_line[3]))
        split_line_copy = list(split_line)
        # Consume every boundary annotation falling at or before this line's end.
        while ((len(info_to_print) > 0) and (info_to_print[0][0] <= end_time)):
            (segment_start, string) = info_to_print[0]
            info_to_print = info_to_print[1:]
            split_line_copy.append(((string + '=') + TimeToString(segment_start, args.frame_length)))
        print(' '.join(split_line_copy), file=ctm_edits_out_handle)
def makedirs(path: str) -> None:
    """Create a directory at *path*, dispatching on the URI scheme.

    Supported schemes:
      - ``s3://``  : writes an empty object as a directory marker via boto3
        (credentials from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY env vars;
        KeyError if missing).
      - ``hdfs://``: delegates to ``file_utils.mkdirs``.
      - ``file://`` prefix or a bare local path: ``os.makedirs``.

    The local branch is idempotent (``exist_ok=True``) — the original raised
    FileExistsError on a second call, which made retries/re-runs fail.
    """
    if path.startswith('s3'):
        access_key_id = os.environ['AWS_ACCESS_KEY_ID']
        secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']
        import boto3  # lazy import: boto3 only needed for S3 paths
        s3_client = boto3.Session(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key).client('s3')
        bucket, _, key = path.split('://')[1].partition('/')
        # NOTE(review): S3 "folder" markers conventionally end with '/'; this
        # writes the bare key as-is — confirm downstream readers expect that.
        return s3_client.put_object(Bucket=bucket, Key=key, Body='')
    elif path.startswith('hdfs://'):
        return file_utils.mkdirs(path)
    else:
        if path.startswith('file://'):
            path = path[len('file://'):]
        os.makedirs(path, exist_ok=True)
class TestSmoothQuantTF(unittest.TestCase):
    """Smoke tests for a SmoothQuant post-training-quantization recipe on TF graphs.

    Each test builds a small TF v1 graph, freezes it to a GraphDef, runs `fit`
    with the smooth_quant recipe enabled, and asserts on the number of 'Mul'
    nodes the transform inserts into the output graph.

    NOTE(review): setUpClass/tearDownClass lack @classmethod, and the bare
    `_random()` lines before each test look like decorators that lost their
    '@' prefix in transcription (e.g. `@disable_random()`) — confirm upstream.
    """

    def setUpClass(self):
        pass

    def tearDownClass(self):
        pass

    _random()
    def test_conv_sq(self):
        # Graph: relu -> pad -> conv+BN, plus relu -> conv+BN, summed, relu/relu6.
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        normed = tf.compat.v1.layers.batch_normalization(conv)
        conv_weights2 = tf.compat.v1.get_variable('weight2', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding='SAME')
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        add = tf.raw_ops.Add(x=normed, y=normed2, name='addv2')
        relu = tf.nn.relu(add)
        relu6 = tf.nn.relu6(relu, name='op_to_store')
        out_name = relu6.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            # Freeze variables so the quantizer receives a constant graph.
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
        set_random_seed(9527)
        config = PostTrainingQuantConfig(quant_level=1, recipes={'smooth_quant': True, 'smooth_quant_args': {'alpha': 0.5}}, calibration_sampling_size=[500])
        from neural_compressor.data import Datasets
        dataset = Datasets('tensorflow')['dummy'](shape=(100, 56, 56, 16), label=True)
        dataloader = DataLoader(framework='tensorflow', dataset=dataset, batch_size=1)
        from neural_compressor import Metric
        top1 = Metric(name='topk', k=1)
        output_graph = fit(model=output_graph_def, conf=config, calib_dataloader=dataloader, eval_dataloader=dataloader, eval_metric=top1)
        # SmoothQuant inserts one scaling Mul per smoothed input; two convs here.
        mul_count = 0
        for i in output_graph.graph_def.node:
            if (i.op == 'Mul'):
                mul_count += 1
        self.assertEqual(mul_count, 2)

    _random()
    def test_sq_matmul(self):
        # Graph: matmul -> bias_add -> relu, built with the TF1 compat API.
        x_data = np.random.rand(1024, 1024).astype(np.float32)
        y_data = np.random.rand(1024, 1024).astype(np.float32)
        import tensorflow.compat.v1 as tf
        x = tf.placeholder(tf.float32, shape=[1024, 1024], name='x')
        y = tf.constant(y_data, dtype=tf.float32, shape=[1024, 1024])
        z = tf.matmul(x, y)
        bias = np.random.rand(1024).astype(np.float32)
        z = tf.nn.bias_add(z, bias)
        z = tf.nn.relu(z, name='op_to_store')
        with tf.Session() as sess:
            sess.run(z, feed_dict={x: x_data, y: y_data})
            output_graph_def = sess.graph.as_graph_def()
        set_random_seed(9527)
        config = PostTrainingQuantConfig(quant_level=1, recipes={'smooth_quant': True, 'smooth_quant_args': {'alpha': 0.5}}, calibration_sampling_size=[1024])
        from neural_compressor.data import Datasets
        dataset = Datasets('tensorflow')['dummy'](shape=(1024, 1024), label=True)
        dataloader = DataLoader(framework='tensorflow', dataset=dataset, batch_size=1024)
        from neural_compressor import Metric
        top1 = Metric(name='topk', k=1)
        output_graph = fit(model=output_graph_def, conf=config, calib_dataloader=dataloader, eval_dataloader=dataloader, eval_metric=top1)
        # One matmul -> exactly one smoothing Mul expected.
        mul_count = 0
        for i in output_graph.graph_def.node:
            if (i.op == 'Mul'):
                mul_count += 1
        self.assertEqual(mul_count, 1)

    _random()
    def test_sq_conv_matmul(self):
        # Graph mixing conv and matmul: conv1 -> reshape -> matmul -> reshape -> conv2.
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, 'CONSTANT')
        conv1_weights = tf.compat.v1.get_variable('weight_conv1', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv1 = tf.nn.conv2d(x_pad, conv1_weights, strides=[1, 2, 2, 1], padding='VALID')
        matmul_weights = tf.compat.v1.get_variable('weight_matmul', [((28 * 28) * 16), ((7 * 7) * 32)], initializer=tf.compat.v1.random_normal_initializer())
        conv1_reshaped = tf.reshape(conv1, shape=[(- 1), ((28 * 28) * 16)])
        matmul = tf.matmul(conv1_reshaped, matmul_weights)
        reshape = tf.reshape(matmul, (1, 7, 7, 32))
        conv2_weights = tf.compat.v1.get_variable('weight_conv2', [7, 7, 32, 1], initializer=tf.compat.v1.random_normal_initializer())
        conv2 = tf.nn.conv2d(reshape, conv2_weights, strides=[1, 2, 2, 1], padding='VALID')
        leaky_relu = tf.nn.leaky_relu(conv2, name='op_to_store')
        out_name = leaky_relu.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
        set_random_seed(9527)
        config = PostTrainingQuantConfig(quant_level=1, recipes={'smooth_quant': True, 'smooth_quant_args': {'alpha': 0.6}}, calibration_sampling_size=[500])
        from neural_compressor.data import Datasets
        dataset = Datasets('tensorflow')['dummy'](shape=(100, 56, 56, 16), label=True)
        dataloader = DataLoader(framework='tensorflow', dataset=dataset)
        from neural_compressor import Metric
        top1 = Metric(name='topk', k=1)
        output_graph = fit(model=output_graph_def, conf=config, calib_dataloader=dataloader, eval_dataloader=dataloader, eval_metric=top1)
        # Three quantizable ops (conv1, matmul, conv2) -> three smoothing Muls.
        mul_count = 0
        for i in output_graph.graph_def.node:
            if (i.op == 'Mul'):
                mul_count += 1
        self.assertEqual(mul_count, 3)
def get_data():
    """Load the 'nyc_taxi' public dataset and prepare train/val/test splits.

    Side effects: writes the test split to 'deployment_data.csv' in the CWD
    and fits the module-level `scaler` on the training split (mutates shared
    state — later transforms rely on that fit).
    Returns (tsdata_train, tsdata_val, tsdata_test).
    """
    from bigdl.chronos.data import get_public_dataset
    (tsdata_train, tsdata_val, tsdata_test) = get_public_dataset(name='nyc_taxi')
    tsdata_test.df.to_csv('deployment_data.csv', index=False)
    tsdata_train.scale(scaler, fit=True)
    return (tsdata_train, tsdata_val, tsdata_test)
def add_pll_clock_output(bel, ec, entry):
    """Wire a PLL clock output to its IO site and any matching global network.

    entry: (output_name, (io_x, io_y, io_z)) — the PLL output name and the IO
    tile location it drives. Uses the module-level `wire_names` lookup and
    `glbinfo` global-network table.
    NOTE(review): parameter `ec` is unused in this body — confirm it is
    required by the caller's signature.
    """
    (io_x, io_y, io_z) = entry[1]
    io_zs = 'io_{}/D_IN_0'.format(io_z)
    io_z = int(io_z)
    add_bel_output(bel, wire_names[(io_x, io_y, io_zs)], entry[0])
    # Also expose the output on every global network whose injection (gbin)
    # location matches this IO site.
    for (gidx, ginfo) in glbinfo.items():
        if ((ginfo['pi_gb_x'], ginfo['pi_gb_y'], ginfo['pi_gb_pio']) == (io_x, io_y, io_z)):
            add_bel_output(bel, wire_names[(io_x, io_y, ('glb_netwk_%d' % gidx))], (entry[0] + '_GLOBAL'))
def add_all_preds(df_county):
    """Back-fill per-method predictions for each of the last `ndays` days.

    For every method and every leave-t-days-out cut of the data, runs the
    fit-and-predict pipeline and stores the result in a dated column of
    df_county. Relies on module-level globals: methods, ndays, today, horizon,
    outcome, very_important_vars, tqdm, and the exponential_modeling /
    fit_and_predict helpers. Returns the mutated df_county.
    """
    for method in methods:
        for t in tqdm(range(1, (ndays + 1))):
            d = (today - timedelta(t))
            # The demographic model has no usable data before 2020-03-16.
            if ((d < date(2020, 3, 16)) and (method in ['demographic'])):
                continue
            use_df = exponential_modeling.leave_t_day_out(df_county, (0 + t))
            if ((method != 'ensemble') and (method != 'demographic')):
                use_df = fit_and_predict.fit_and_predict(use_df, target_day=np.arange(1, (horizon + 1)), outcome=outcome, method=method, mode='predict_future', output_key=f'predicted_{outcome}_{method}_{horizon}')
            elif (method == 'demographic'):
                # Demographic predictions use the shared_exponential model plus
                # the demographic covariates.
                use_df = fit_and_predict.fit_and_predict(use_df, target_day=np.arange(1, (horizon + 1)), outcome=outcome, method='shared_exponential', mode='predict_future', demographic_vars=very_important_vars, output_key=f'predicted_{outcome}_{method}_{horizon}')
            # NOTE(review): for method == 'ensemble' neither branch above runs,
            # so this reads a 'predicted_...' column that must already exist on
            # the leave-t-out frame — confirm that is intended.
            df_county[f'all_{outcome}_pred_{d.month}_{d.day}_{method}_{horizon}'] = use_df[f'predicted_{outcome}_{method}_{horizon}']
    return df_county
def read_basic_block(fname, data, verbose):
    """Extract the bytes between START_MARKER and END_MARKER from a binary file.

    Reads `fname`, locates the two module-level markers, hex-encodes the bytes
    between them, and forwards the result to `datum_of_code`.

    Raises:
        ValueError: with 'START MARKER NOT FOUND' / 'END MARKER NOT FOUND'
            when a marker is missing. (The original used bytes.index, which
            raises its own ValueError first — the explicit `== -1` checks and
            their messages were unreachable dead code; bytes.find makes them
            effective.)
    """
    with open(fname, 'rb') as f:
        code = f.read()  # read(-1) and read() are equivalent: whole file
    start_pos = code.find(START_MARKER)
    if (start_pos == (- 1)):
        raise ValueError('START MARKER NOT FOUND')
    end_pos = code.find(END_MARKER)
    if (end_pos == (- 1)):
        raise ValueError('END MARKER NOT FOUND')
    block_binary = code[(start_pos + len(START_MARKER)):end_pos]
    return datum_of_code(data, binascii.b2a_hex(block_binary), verbose)
def setup(rank: Optional[int]=None, world_size: Optional[int]=None):
if (rank is None):
rank = get_local_rank()
if (world_size is None):
world_size = get_world_size()
if (world_size <= 1):
return (rank, world_size)
if (not dist.is_initialized()):
if (sys.platform == 'win32'):
init_method = 'file:///f:/libtmp/dist-tmp'
dist.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=world_size)
elif torch.cuda.is_available():
dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
else:
dist.init_process_group(backend='gloo', rank=rank, world_size=world_size)
return (rank, world_size) |
def predictor_minstependgame_get():
    """Return continuation parameter #10 from PHCpack's settings.

    Reads the value via the low-level phcpy2c3 binding; per the function name
    this is presumably the minimum step size near the end of the path
    ("end game") — confirm against the PHCpack parameter table.
    """
    from phcpy.phcpy2c3 import py2c_get_value_of_continuation_parameter as get
    return get(10)
class DiscreteInverseModel(nn.Module):
    """Inverse-dynamics model for discrete actions.

    A two-hidden-layer MLP that maps a (state, next_state) pair to a
    Categorical distribution over actions.
    """

    def __init__(self, state_size, action_size, hidden_size, **kwargs):
        super().__init__()
        # Input is the concatenation of state and next_state.
        self.fc1 = nn.Linear(2 * state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.act_p = nn.Linear(hidden_size, action_size)
        self.apply(weight_init)

    def forward(self, state, next_state):
        hidden = torch.cat((state, next_state), dim=-1)
        hidden = F.relu(self.fc1(hidden))
        hidden = F.relu(self.fc2(hidden))
        logits = self.act_p(hidden)
        # Distribution over discrete actions, parameterised by logits.
        return pyd.categorical.Categorical(logits=logits)
class PyTorchFilters(object):
    """Filter registry seeded from the module-level PYTORCH_FILTERS mapping."""

    def __init__(self):
        # Copy the entries so mutating self.filters never touches the shared constant.
        self.filters = dict(PYTORCH_FILTERS)
def download_coco2014(root, phase):
    """Download and unpack COCO2014 images and annotations for `phase`
    ('train' or 'val'), then build the `{phase}_anno.json` per-image label
    list and `category.json` used by the dataloader.

    Uses the module-level `urls` mapping; restores the original CWD on exit.
    NOTE(review): `subprocess.call(..., shell=True)` with string concatenation
    is a command-injection risk if `urls` ever comes from untrusted input, and
    `filename` is unbound for any phase other than 'train'/'val' — confirm
    callers only pass those two values.
    """
    work_dir = os.getcwd()
    tmpdir = os.path.join(root, 'tmp/')
    if (not os.path.exists(root)):
        os.makedirs(root)
    if (not os.path.exists(tmpdir)):
        os.makedirs(tmpdir)
    if (phase == 'train'):
        filename = 'train2014.zip'
    elif (phase == 'val'):
        filename = 'val2014.zip'
    cached_file = os.path.join(tmpdir, filename)
    # Download the image archive only when not already cached.
    if (not os.path.exists(cached_file)):
        print('Downloading: "{}" to {}\n'.format(urls[(phase + '_img')], cached_file))
        os.chdir(tmpdir)
        subprocess.call(('wget ' + urls[(phase + '_img')]), shell=True)
        os.chdir(root)
    # Extract the image archive only when its target directory is missing.
    img_data = os.path.join(root, filename.split('.')[0])
    if (not os.path.exists(img_data)):
        print('[dataset] Extracting tar file {file} to {path}'.format(file=cached_file, path=root))
        command = 'unzip {} -d {}'.format(cached_file, root)
        os.system(command)
    print('[dataset] Done!')
    # Same download/extract dance for the annotation archive.
    cached_file = os.path.join(tmpdir, 'annotations_trainval2014.zip')
    if (not os.path.exists(cached_file)):
        print('Downloading: "{}" to {}\n'.format(urls['annotations'], cached_file))
        os.chdir(tmpdir)
        subprocess.call(('wget ' + urls['annotations']), shell=True)
        os.chdir(root)
    annotations_data = os.path.join(root, 'annotations')
    if (not os.path.exists(annotations_data)):
        print('[dataset] Extracting tar file {file} to {path}'.format(file=cached_file, path=root))
        command = 'unzip {} -d {}'.format(cached_file, root)
        os.system(command)
    print('[annotation] Done!')
    # Build {phase}_anno.json: one {file_name, labels} record per labeled image.
    annotations_data = os.path.join(root, 'annotations')
    anno = os.path.join(root, '{}_anno.json'.format(phase))
    img_id = {}
    annotations_id = {}
    if (not os.path.exists(anno)):
        annotations_file = json.load(open(os.path.join(annotations_data, 'instances_{}2014.json'.format(phase))))
        annotations = annotations_file['annotations']
        category = annotations_file['categories']
        category_id = {}
        for cat in category:
            category_id[cat['id']] = cat['name']
        # Map sorted category names to contiguous label indices.
        cat2idx = categoty_to_idx(sorted(category_id.values()))
        images = annotations_file['images']
        # Collect the set of label indices present on each image.
        for annotation in annotations:
            if (annotation['image_id'] not in annotations_id):
                annotations_id[annotation['image_id']] = set()
            annotations_id[annotation['image_id']].add(cat2idx[category_id[annotation['category_id']]])
        # Keep only images that actually have annotations.
        for img in images:
            if (img['id'] not in annotations_id):
                continue
            if (img['id'] not in img_id):
                img_id[img['id']] = {}
            img_id[img['id']]['file_name'] = img['file_name']
            img_id[img['id']]['labels'] = list(annotations_id[img['id']])
        anno_list = []
        for (k, v) in img_id.items():
            anno_list.append(v)
        json.dump(anno_list, open(anno, 'w'))
        if (not os.path.exists(os.path.join(root, 'category.json'))):
            json.dump(cat2idx, open(os.path.join(root, 'category.json'), 'w'))
        # Free the large intermediate structures before returning.
        del img_id
        del anno_list
        del images
        del annotations_id
        del annotations
        del category
        del category_id
    print('[json] Done!')
    os.chdir(work_dir)
def get_default_train_test_split(dataset_key) -> Optional[Tuple[(List[int], List[int])]]:
    """Return (train_indices, test_indices) for the dataset.

    Prefers a predefined split when one exists for `dataset_key`; otherwise
    falls back to a random split.
    """
    predefined = get_predefined_train_test_split(dataset_key)
    if predefined is None:
        return get_random_train_test_indices(dataset_key)
    return predefined
# NOTE(review): the line below looks like a decorator that lost its
# '@pytest.mark' prefix in transcription (pybind11's suite marks this xfail
# on PyPy) — as written it is not valid standalone syntax; confirm upstream.
.xfail(env.PYPY, reason="PyPy 7.3.7 doesn't clear this anymore", strict=False)
def test_to_python():
    """Exercise the buffer protocol of m.Matrix from the Python side:
    memoryview shape, element access both directions, raw struct reads,
    zero-copy NumPy wrapping, and constructor/destructor accounting."""
    mat = m.Matrix(5, 4)
    assert (memoryview(mat).shape == (5, 4))
    assert (mat[(2, 3)] == 0)
    mat[(2, 3)] = 4.0
    mat[(3, 2)] = 7.0
    assert (mat[(2, 3)] == 4)
    assert (mat[(3, 2)] == 7)
    # Raw float reads at row-major offsets confirm the underlying layout.
    assert (struct.unpack_from('f', mat, (((3 * 4) + 2) * 4)) == (7,))
    assert (struct.unpack_from('f', mat, (((2 * 4) + 3) * 4)) == (4,))
    # copy=False: NumPy array shares the matrix buffer.
    mat2 = np.array(mat, copy=False)
    assert (mat2.shape == (5, 4))
    assert (abs(mat2).sum() == 11)
    assert ((mat2[(2, 3)] == 4) and (mat2[(3, 2)] == 7))
    mat2[(2, 3)] = 5
    assert (mat2[(2, 3)] == 5)
    cstats = ConstructorStats.get(m.Matrix)
    assert (cstats.alive() == 1)
    # mat2 still holds the buffer, so the matrix stays alive after `del mat`.
    del mat
    pytest.gc_collect()
    assert (cstats.alive() == 1)
    del mat2
    pytest.gc_collect()
    assert (cstats.alive() == 0)
    assert (cstats.values() == ['5x4 matrix'])
    assert (cstats.copy_constructions == 0)
    assert (cstats.copy_assignments == 0)
    assert (cstats.move_assignments == 0)
class OpTuningConfig():
    """Tuning configuration for a single operator (name, type, quant mode).

    Resolves the activation/weight data types from the quant mode or the
    supplied kwargs, and selects the tuning-item list depending on whether
    the op is weight-only quantized.
    """

    def __init__(self, op_name, op_type, op_quant_mode, tuning_space, kwargs=None):
        # `kwargs=None` replaces the original mutable `{}` default, which was a
        # single dict shared by every instance constructed without the argument.
        self.op_name = op_name
        self.op_type = op_type
        self.op_name_type = (self.op_name, self.op_type)
        self.op_quant_mode = op_quant_mode
        self.kwargs = {} if kwargs is None else kwargs
        self.act_dtype = None
        self.weight_dtype = None
        # Whether this op has a weight tensor, per the tuning space registry.
        self.has_weight = (self.op_name_type in tuning_space.ops_attr['weight'])
        self._set_dtype()
        self.tune_list = (WEIGHT_ONLY_TUNING_ITEMS_LST if (self.op_quant_mode == 'weight_only') else TUNING_ITEMS_LST)

    def _set_dtype(self):
        """Derive act/weight dtypes from the quant mode or from kwargs."""
        if (self.op_quant_mode in PRECISION_LIST):
            # Pure-precision mode (e.g. fp32/bf16): both dtypes are the mode itself.
            (self.act_dtype, self.weight_dtype) = (self.op_quant_mode, self.op_quant_mode)
        else:
            self.act_dtype = self.kwargs.get('activation_dtype', None)
            # Weight dtype may arrive under a tuple key or a flat key.
            if (('weight', 'dtype') in self.kwargs):
                self.weight_dtype = self.kwargs[('weight', 'dtype')]
            else:
                self.weight_dtype = self.kwargs.get('weight_dtype', None)
        assert (self.act_dtype and isinstance(self.act_dtype, str)), (f"Didn't assign the activation data type for {(self.op_name, self.op_type)}", f'with quant_mode {self.op_quant_mode}')

    def __repr__(self) -> str:
        msg = f'''op name: {self.op_name}, op type : {self.op_type}
'''
        msg += f''' activation dtype: {self.act_dtype}
'''
        if (self.op_quant_mode != 'weight_only'):
            msg += (f''' weight dtype: {self.weight_dtype}
''' if self.has_weight else '')
        for (key, val) in self.kwargs.items():
            if (key in self.tune_list):
                msg += f''' {key[0]} {key[1]}: {val}
'''
        return msg

    def get_state(self):
        """Serialize to a plain dict: {'weight': {...}, 'activation': {...}, ...}."""
        result = {}
        if self.has_weight:
            result['weight'] = {'dtype': self.weight_dtype}
        result['activation'] = {'dtype': self.act_dtype, 'quant_mode': self.op_quant_mode}
        # Tuple keys (section, item) fan out into nested dict entries.
        for (key, val) in self.kwargs.items():
            if (key in self.tune_list):
                result[key[0]][key[1]] = val
        return result

    @classmethod
    def from_state(cls, config: Dict):
        # NOTE(review): the original neither carried @classmethod nor returned
        # the constructed object; both look like transcription losses (the
        # `cls` parameter is meaningless otherwise). Restored here.
        return cls(**config)
# NOTE(review): the line below looks like a decorator that lost its
# '@pytest.mark' prefix in transcription (pytest-console-scripts uses
# `@pytest.mark.script_launch_mode('subprocess')`) — confirm upstream.
.script_launch_mode('subprocess')
def test_training_3d_2class_single_channel_with_data_augmentation(download_functional_test_files, script_runner):
    """End-to-end check: build a 3D U-Net training config from the template,
    write it to the tmp dir, launch `ivadomed` as a subprocess, and assert it
    exits successfully."""
    file_config = os.path.join(__data_testing_dir__, 'automate_training_config.json')
    context = imed_config_manager.ConfigurationManager(file_config).get_config()
    # Switch from the default 2D model to the modified 3D U-Net.
    context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.IS_2D] = False
    context[ConfigKW.MODIFIED_3D_UNET] = {ModelParamsKW.APPLIED: True, ModelParamsKW.LENGTH_3D: [32, 32, 16], ModelParamsKW.STRIDE_3D: [32, 32, 16], ModelParamsKW.N_FILTERS: 4}
    # Two ground-truth classes, two contrasts, single channel.
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.TARGET_SUFFIX] = ['_lesion-manual', '_seg-manual']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.CONTRAST_PARAMS][ContrastParamsKW.TRAINING_VALIDATION] = ['T1w', 'T2w']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.CONTRAST_PARAMS][ContrastParamsKW.TESTING] = ['T1w', 'T2w']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.MULTICHANNEL] = False
    # Data augmentation under test: resample + centercrop + random affine.
    context[ConfigKW.TRANSFORMATION][TransformationKW.RESAMPLE] = {'wspace': 0.75, 'hspace': 0.75, 'dspace': 0.75}
    context[ConfigKW.TRANSFORMATION][TransformationKW.CENTERCROP] = {'size': [32, 32, 16]}
    context[ConfigKW.TRANSFORMATION][TransformationKW.RANDOM_AFFINE] = {'degrees': 10, 'scale': [0.03, 0.03, 0.03], 'translate': [0.8, 0.8, 0.8], 'applied_to': ['im', 'gt'], 'dataset_type': ['training']}
    file_config_updated = os.path.join(__tmp_dir__, 'data_functional_testing', 'config_3d_training.json')
    with Path(file_config_updated).open(mode='w') as fp:
        json.dump(context, fp, indent=4)
    __output_dir__ = Path(__tmp_dir__, 'results')
    ret = script_runner.run('ivadomed', '-c', f'{file_config_updated}', '--path-data', f'{__data_testing_dir__}', '--path-output', f'{__output_dir__}')
    logger.debug(f'{ret.stdout}')
    logger.debug(f'{ret.stderr}')
    assert ret.success
def main():
    """CLI entry point for cube.py: parse flags and launch the cube fit.

    Flags: -v enables periodic display, -discontinuous switches topology,
    and a positive decimal argument sets the resolution (required).
    """
    display_interval = 0
    discontinuous = False
    resolution = 0

    def usage():
        # Print help and terminate; reached on any bad/missing argument.
        print('Usage: python cube.py [-v] [-discontinuous] resolution')
        exit()

    for token in sys.argv[1:]:
        if token == '-v':
            display_interval = 100
        elif token == '-discontinuous':
            discontinuous = True
        elif token.isdecimal():
            resolution = int(token)
        else:
            usage()
    if resolution <= 0:
        usage()

    util.init_tf()
    mode_tag = 'd' if discontinuous else 'c'
    out_dir = ('out/cube_%s_%d' % (mode_tag, resolution))
    fit_cube(max_iter=5000, resolution=resolution, discontinuous=discontinuous,
             log_interval=10, display_interval=display_interval, out_dir=out_dir,
             log_fn='log.txt', imgsave_interval=1000, imgsave_fn='img_%06d.png')
    print('Done.')
def _ParseAndStripGTestFlags(argv):
    """Parse Google-Test flags from the environment and argv (mutating both).

    For each known flag in the module-level `_flag_map`: an upper-cased
    environment variable overrides the default, then a `--flag=value` entry in
    argv overrides that (and is stripped from argv in place). Guarded by the
    module-level `_gtest_flags_are_parsed` so it runs at most once.
    """
    global _gtest_flags_are_parsed
    if _gtest_flags_are_parsed:
        return
    _gtest_flags_are_parsed = True
    for flag in _flag_map:
        # Environment first: e.g. flag 'gtest_output' <- $GTEST_OUTPUT.
        if (flag.upper() in os.environ):
            _flag_map[flag] = os.environ[flag.upper()]
        # Then the command line; only the first occurrence is consumed,
        # and it is deleted from argv so later parsers never see it.
        i = 1
        while (i < len(argv)):
            prefix = (('--' + flag) + '=')
            if argv[i].startswith(prefix):
                _flag_map[flag] = argv[i][len(prefix):]
                del argv[i]
                break
            else:
                i += 1
def test_scene_ids():
    """The 100-item dataset should expose exactly ten scene ids, in order."""
    dataset = _construct_dataset(100)
    expected = [f'scene_id_{idx}' for idx in range(10)]
    assert dataset.scene_ids == expected
class ImageCoder(object):
    """One TF v1 session holding pre-built graphs for image transcoding.

    Building the decode/encode ops once and feeding bytes through placeholders
    avoids adding new graph nodes per image (the classic Inception
    preprocessing pattern).
    """

    def __init__(self):
        self._sess = tf.compat.v1.Session()
        # PNG bytes -> JPEG bytes.
        self._png_data = tf.compat.v1.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # CMYK JPEG bytes -> RGB JPEG bytes: decode with channels=0 (keep the
        # stored channels), then re-encode forcing 'rgb'.
        self._cmyk_data = tf.compat.v1.placeholder(dtype=tf.string)
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # JPEG bytes -> decoded HxWx3 array.
        self._decode_jpeg_data = tf.compat.v1.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        # image_data: raw PNG bytes; returns JPEG-encoded bytes.
        return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        # image_data: raw CMYK JPEG bytes; returns RGB JPEG-encoded bytes.
        return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        # image_data: raw JPEG bytes; returns a rank-3, 3-channel image array.
        image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})
        assert (len(image.shape) == 3)
        assert (image.shape[2] == 3)
        return image
def scitodeci(sci):
    """Convert a Mathematica-style scientific literal (e.g. '1.23*^-4') to float.

    Fixes two defects in the original pattern `(\\d+\\.?\\d+)`: it required at
    least two mantissa digits (so '2*^3' crashed with AttributeError on the
    failed match) and it rejected negative mantissas. The mantissa is now
    `-?\\d+\\.?\\d*`.
    """
    tmp = re.search('(-?\\d+\\.?\\d*)\\*\\^(-?\\d+)', sci)
    return (float(tmp.group(1)) * pow(10, float(tmp.group(2))))
class Inspector():
    """Read-only access point for inspecting a finished cross-validated run:
    the fitted model pipeline and the per-fold results.

    All arguments are stashed as-is; validation happens lazily inside
    model() / folds().
    """

    def __init__(self, scores: 'pd.DataFrame', model: Union[(str, 'PipelineCreator', List['PipelineCreator'], 'BaseEstimator', None)]=None, X: Optional[List[str]]=None, y: Optional[str]=None, groups: Optional[str]=None, cv: Optional[int]=None) -> None:
        self._scores = scores
        self._model = model
        self._X = X
        self._y = y
        self._groups = groups
        self._cv = cv

    def model(self) -> PipelineInspector:
        """Return a PipelineInspector; error out when no model was stored."""
        if self._model is None:
            raise_error('No model was provided. Cannot inspect the model.')
        return PipelineInspector(model=self._model)

    def folds(self) -> FoldsInspector:
        """Return a FoldsInspector; requires cv, X and y to be present."""
        # Check in the same order as before: cv, then X, then y.
        required = (
            (self._cv, 'No cv was provided. Cannot inspect the folds.'),
            (self._X, 'No X was provided. Cannot inspect the folds.'),
            (self._y, 'No y was provided. Cannot inspect the folds.'),
        )
        for value, message in required:
            if value is None:
                raise_error(message)
        return FoldsInspector(scores=self._scores, X=self._X, y=self._y, groups=self._groups, cv=self._cv)
def evaluate(model, g, features, labels, mask, loss_func):
    """Evaluate a graph model on the masked nodes without tracking gradients.

    Returns (loss, accuracy, micro_f1, macro_f1) as produced by `loss_func`
    and the module-level `score` helper.
    """
    model.eval()
    with torch.no_grad():
        logits = model(g, features)
        masked_logits = logits[mask]
        masked_labels = labels[mask]
        loss = loss_func(masked_logits, masked_labels)
        (accuracy, micro_f1, macro_f1) = score(masked_logits, masked_labels)
    return (loss, accuracy, micro_f1, macro_f1)
# NOTE(review): the line below appears to be a decorator that lost its
# '@register' prefix in transcription (fairseq-style code uses
# `@register_model_architecture('lra', 'flash_lra_pf32')`) — confirm upstream.
_model_architecture('lra', 'flash_lra_pf32')
def flash_lra_pf32(args):
    """Hyper-parameter preset for the FLASH layer on LRA (pf32 configuration).

    Each getattr(args, name, default) only fills fields the CLI did not set;
    remaining defaults are delegated to base_architecture().
    """
    args.apply_bert_init = getattr(args, 'apply_bert_init', False)
    args.layer_type = getattr(args, 'layer_type', 'flash')
    args.encoder_hidden_dim = getattr(args, 'encoder_hidden_dim', 384)
    args.z_dim = getattr(args, 'z_dim', 64)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 128)
    args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
    args.max_positions = getattr(args, 'max_positions', 1024)
    base_architecture(args)
def get_structural_features(tweet_id, context_tweet_id):
    """Locate a context tweet inside a thread's reply structure.

    Loads the thread structure for `tweet_id` and walks its nested ids;
    returns (persistence, depth) where persistence counts how many times
    `context_tweet_id` appears and depth is the level of its last occurrence
    (both 0 when absent).
    """
    structure = load_structure_json(tweet_id)
    persistence, depth = 0, 0
    for node_id, node_depth in list(looping_nested_dict(structure)):
        if node_id == context_tweet_id:
            persistence += 1
            depth = node_depth
    return (persistence, depth)
def test_double_viviani_at_series(vrblvl=0):
    """Run Newton's method at a series for the Viviani curve (double precision).

    Prints the computed series for x, y, z and returns the failure flag:
    True when the number of returned series is not three.
    """
    pols = ['2*t^2 - x;', 'x^2 + y^2 + z^2 - 4;', '(x-1)^2 + y^2 - 1;']
    lser = ['2*t^2;', '2*t;', '2;']
    nser = double_newton_at_series(pols, lser, maxdeg=12, nbr=8, vrblvl=vrblvl)
    for var, pol in zip(('x', 'y', 'z'), nser):
        print(var, '=', pol)
    return len(nser) != 3
def clean_pdf_file(filename):
    """Strip any bytes before '%PDF-' and after the final '%%EOF', in place.

    Uses a writable mmap so the file is edited without a full rewrite.
    No-op (with a debug log) when the file has no PDF header.
    """
    with open(filename, 'r+b') as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_WRITE) as mmfile:
        start = mmfile.find(b'%PDF-')
        if (start == (- 1)):
            LOGGER.debug('not a PDF file')
            return
        end = mmfile.rfind(b'%%EOF')
        offset = len(b'%%EOF')
        if (start > 0):
            # Leading junk: shift the PDF region to offset 0, then truncate.
            LOGGER.debug('moving and truncating')
            mmfile.move(0, start, ((end + offset) - start))
            mmfile.resize(((end + offset) - start))
            mmfile.flush()
        elif ((end > 0) and ((end + offset) != mmfile.size())):
            # Header already at offset 0 (start == 0): only drop trailing bytes.
            LOGGER.debug('truncating only')
            mmfile.resize(((end + offset) - start))
            mmfile.flush()
        # NOTE(review): if '%%EOF' is missing, end == -1 and the size
        # arithmetic above would corrupt the tail — confirm inputs always
        # contain the trailer before relying on this.
def random_jpeg_compression(img: torch.Tensor, q_min: int=50, q_max: int=100):
    """Degrade an image by JPEG round-trip at a random quality in [q_min, q_max).

    Args:
        img: uint8 CHW image tensor (as accepted by torchvision.io.encode_jpeg).
        q_min, q_max: inclusive-lower/exclusive-upper bounds of the quality draw.

    Bug fixed: the original computed ``rand * q_min + (q_max - q_min)``, which
    only coincides with the intended [50, 100) range for the default bounds
    and is wrong for any other (q_min, q_max). The quality is also converted
    to a plain int, since a float tensor is not a valid `quality` argument —
    TODO confirm against the torchvision version in use.
    """
    q = int(q_min + (torch.rand(1).item() * (q_max - q_min)))
    img = torchvision.io.encode_jpeg(img, quality=q)
    return torchvision.io.decode_image(img)
class StackedEmbedding(nn.Embedding):
    """Embedding that decodes `num_stacked` sub-tokens packed into one index.

    Token ids >= `offset` are treated as base-`vocab_size` numbers encoding a
    stack of sub-token ids; each digit is embedded separately and the
    concatenation is projected back to `embed_dim`. Ids below `offset`
    (special symbols) bypass the decomposition entirely.
    """

    def __init__(self, num_embeddings, embed_dim, padding_idx, num_stacked=1):
        super().__init__(num_embeddings, embed_dim, padding_idx)
        # fairseq-style init: N(0, embed_dim^-0.5) with a zeroed padding row.
        nn.init.normal_(self.weight, mean=0, std=(embed_dim ** (- 0.5)))
        nn.init.constant_(self.weight[padding_idx], 0)
        # Number of reserved special-symbol ids (presumably pad/bos/eos/unk —
        # TODO confirm against the dictionary used by callers).
        self.offset = 4
        self.vocab_size = (num_embeddings - self.offset)
        self.num_stacked = num_stacked
        if (self.num_stacked > 1):
            # Maps the concatenated stack (num_stacked * embed_dim) back to embed_dim.
            self.project_in_dim = Linear((embed_dim * num_stacked), embed_dim, bias=False)

    def forward(self, input):
        if (self.num_stacked == 1):
            # Plain embedding when nothing is stacked.
            return super().forward(input)
        # mask: True for regular tokens (id >= offset); specials pass through unchanged.
        mask = (input >= self.offset)
        stacked_input = []
        cum_input = input.new_zeros(input.shape)
        # Peel off base-vocab_size digits, least significant first.
        for i in range(1, (self.num_stacked + 1)):
            div = pow(self.vocab_size, i)
            next_input = torch.remainder(((input - self.offset) - cum_input), div)
            cum_input += next_input
            next_input = torch.floor_divide(next_input, (div // self.vocab_size))
            # Re-add the offset for real tokens; keep special ids as-is.
            stacked_input.append((((next_input + self.offset) * mask) + (input * (~ mask))))
        # Reverse so the most-significant digit comes first, then embed and
        # flatten to (batch, time, num_stacked * embed_dim).
        stacked_input = torch.stack(stacked_input[::(- 1)], dim=2)
        embed = super().forward(stacked_input).view(input.size(0), input.size(1), (- 1))
        embed = self.project_in_dim(embed)
        return embed
class EfficientNetPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Any attempt to instantiate it raises via `requires_backends`, telling
    the user which backend is missing.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
@dataclass
class ModelArguments:
    """Arguments selecting which model/config/processor to fine-tune from.

    Must be a dataclass: the `field(...)` defaults below are only honored
    by the dataclass machinery (and by HfArgumentParser); without the
    decorator each attribute would be a raw `Field` object.
    """
    model_name_or_path: str = field(default='microsoft/layoutlmv3-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    processor_name: Optional[str] = field(default=None, metadata={'help': 'Name or path to the processor files if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
class _GridSample2dForward(torch.autograd.Function):
def forward(ctx, input, grid):
assert (input.ndim == 4)
assert (grid.ndim == 4)
output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
ctx.save_for_backward(input, grid)
return output
def backward(ctx, grad_output):
(input, grid) = ctx.saved_tensors
(grad_input, grad_grid) = _GridSample2dBackward.apply(grad_output, input, grid)
return (grad_input, grad_grid) |
class FrameStack(gym.Wrapper):
    """Gym wrapper that returns the `k` most recent observations, stacked.

    Frames are concatenated along the last (channel) axis and handed back
    lazily through `LazyFrames`, so no copy is made until the caller needs
    an array.
    """

    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Widen the channel axis by a factor of k.
        stacked_shape = shp[:-1] + (shp[-1] * k,)
        self.observation_space = spaces.Box(
            low=0, high=255, shape=stacked_shape, dtype=env.observation_space.dtype)

    def reset(self):
        """Reset the env and pre-fill the buffer with k copies of the first frame."""
        first = self.env.reset()
        for _ in range(self.k):
            self.frames.append(first)
        return self._get_ob()

    def step(self, action):
        """Step the env, push the new frame, and return the stacked observation."""
        obs, reward, done, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # The deque is always full after reset(), so this should never fire.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class HubertForCTC(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Any attempt to instantiate it raises via `requires_backends`, telling
    the user which backend is missing.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class GeM(nn.Module):
    """Generalized-mean (GeM) pooling over spatial dimensions.

    Computes (mean(x^p))^(1/p) per channel; p = 1 is average pooling and
    p -> inf approaches max pooling. When `freeze_p` is False, p becomes a
    learnable parameter.
    """

    def __init__(self, p=3.0, eps=1e-06, freeze_p=True):
        super(GeM, self).__init__()
        if freeze_p:
            self.p = p
        else:
            self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        # Clamp keeps the fractional power well-defined for non-positive inputs.
        clamped = x.clamp(min=self.eps)
        pooled = F.adaptive_avg_pool2d(clamped.pow(self.p), (1, 1))
        return pooled.pow(1.0 / self.p)

    def __repr__(self):
        if isinstance(self.p, float):
            p = self.p
        else:
            p = self.p.data.tolist()[0]
        return '{}(p={:.4f}, eps={})'.format(self.__class__.__name__, p, self.eps)
def parse_primitives(primitive_completion):
    """Parse completion lines like 'pick_and_place("cup")' into pairs.

    Each non-empty line is split into its primitive name (with any
    'pick_and_' prefix stripped) and its quoted argument; parsing stops at
    the first empty line.

    Returns:
        list of [object, primitive] pairs.
    """
    parsed = []
    for raw in primitive_completion.strip().split('\n'):
        if len(raw) == 0:
            print('Warning: Stopping since newline was encountered')
            break
        action_part, arg_part = raw.split('(')
        action = action_part.strip().replace('pick_and_', '')
        target = arg_part.strip().replace(')', '').replace('"', '')
        parsed.append([target, action])
    return parsed
class PLMSSampler(object):
    """Pseudo Linear Multi-Step (PLMS) sampler for latent diffusion models.

    A deterministic DDIM-style sampler that replaces single-step epsilon
    estimates with an Adams-Bashforth-style multi-step combination of the
    last few noise predictions, allowing larger steps at similar quality.
    """
    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
    def register_buffer(self, name, attr):
        """Attach `attr` as a plain attribute, moving tensors to CUDA first.

        NOTE(review): unconditionally targets 'cuda'; assumes a GPU is
        available -- confirm before running CPU-only.
        """
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)
    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Precompute the DDIM timestep subsequence and alpha/sigma tables.

        PLMS is deterministic, so `ddim_eta` must be 0.
        """
        if (ddim_eta != 0):
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # Quantities derived from alphas_cumprod, used throughout sampling.
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        # Tables restricted to the selected DDIM timestep subsequence.
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
    # NOTE(review): the bare `_grad()` below (and its two later occurrences)
    # looks like a mangled `@torch.no_grad()` decorator from the original
    # source -- confirm; as written it would need a module-level `_grad`.
    _grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Entry point: build the schedule and run PLMS sampling.

        `S` is the number of DDIM steps, `shape` is (C, H, W); returns
        (samples, intermediates) from `plms_sampling`.
        """
        if (conditioning is not None):
            # Sanity-check that the conditioning batch matches batch_size.
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        if verbose:
            print(f'Data shape for PLMS sampling is {size}')
        (samples, intermediates) = self.plms_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, verbose=verbose)
        return (samples, intermediates)
    # NOTE(review): likely a mangled `@torch.no_grad()` -- see note above.
    _grad()
    def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, verbose=True):
        """Main PLMS loop: iterate timesteps in reverse, denoising `img`.

        Keeps the last (up to 4) epsilon predictions in `old_eps` for the
        multi-step update in `p_sample_plms`. Returns the final latent and
        a dict of intermediate latents/predictions.
        """
        device = self.model.betas.device
        b = shape[0]
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # Caller asked for a prefix of the DDIM schedule.
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = (list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        if verbose:
            print(f'Running PLMS Sampling with {total_steps} timesteps')
        old_eps = []
        for (i, step) in enumerate(time_range):
            index = ((total_steps - i) - 1)
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # PLMS also needs the *next* timestep for its corrector step.
            ts_next = torch.full((b,), time_range[min((i + 1), (len(time_range) - 1))], device=device, dtype=torch.long)
            if (mask is not None):
                # Inpainting: keep masked region pinned to the noised x0.
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next)
            (img, pred_x0, e_t) = outs
            old_eps.append(e_t)
            if (len(old_eps) >= 4):
                # Only the last 3 predictions are needed for the 4th-order step.
                old_eps.pop(0)
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)
    # NOTE(review): likely a mangled `@torch.no_grad()` -- see note above.
    _grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, old_eps=None, t_next=None):
        """Single PLMS update: estimate epsilon, combine with history, step x.

        Returns (x_prev, pred_x0, e_t) where e_t is the fresh (uncombined)
        epsilon prediction that the caller appends to `old_eps`.
        """
        (b, *_, device) = (*x.shape, x.device)
        def get_model_output(x, t):
            # Classifier-free guidance: run conditional and unconditional
            # passes in one batch and extrapolate between them.
            if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat(([x] * 2))
                t_in = torch.cat(([t] * 2))
                if isinstance(c, dict):
                    assert isinstance(unconditional_conditioning, dict)
                    c_in = dict()
                    for k in c:
                        if isinstance(c[k], list):
                            c_in[k] = [torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))]
                        else:
                            c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
                else:
                    c_in = torch.cat([unconditional_conditioning, c])
                (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
            if (score_corrector is not None):
                assert (self.model.parameterization == 'eps')
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
            return e_t
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
        def get_x_prev_and_pred_x0(e_t, index):
            # Standard DDIM update given an epsilon estimate.
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
            pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
            if quantize_denoised:
                (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
            dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
            noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
            if (noise_dropout > 0.0):
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
            return (x_prev, pred_x0)
        e_t = get_model_output(x, t)
        if (len(old_eps) == 0):
            # No history yet: predictor step plus one corrector evaluation
            # at t_next (Heun-style average).
            (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = ((e_t + e_t_next) / 2)
        elif (len(old_eps) == 1):
            # 2nd-order Adams-Bashforth coefficients.
            e_t_prime = (((3 * e_t) - old_eps[(- 1)]) / 2)
        elif (len(old_eps) == 2):
            # 3rd-order Adams-Bashforth coefficients.
            e_t_prime = ((((23 * e_t) - (16 * old_eps[(- 1)])) + (5 * old_eps[(- 2)])) / 12)
        elif (len(old_eps) >= 3):
            # 4th-order Adams-Bashforth coefficients.
            e_t_prime = (((((55 * e_t) - (59 * old_eps[(- 1)])) + (37 * old_eps[(- 2)])) - (9 * old_eps[(- 3)])) / 24)
        (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t_prime, index)
        return (x_prev, pred_x0, e_t)
def get_training_data(digits, fourX=True, idx=0):
    """Build flattened integer pixel vectors for the requested digits.

    Args:
        digits: iterable of digit labels to look up in the module-level
            `image_dict`.
        fourX: when True, quantize pixel values down to multiples of 4.
        idx: which sample image to take for each digit.

    Returns:
        list of flat per-digit pixel lists.
    """
    train_data = []
    for digit in digits:
        img = np.ndarray.astype(image_dict[digit][idx], int)
        if fourX:
            # Quantize to multiples of 4 (floor).
            img = (img // 4) * 4
        train_data.append(np.ndarray.flatten(img).tolist())
    return train_data
def _read_state_dict_from_shm(meta_dict, tensor_shm):
    """Reconstruct a state dict whose tensor data lives in shared memory.

    Walks `meta_dict` with `_traverse_state_dict`, materializing each leaf
    entry from `tensor_shm` via `_read_tensor_from_buf`.
    """
    def _load(entry):
        return _read_tensor_from_buf(entry, tensor_shm)
    return _traverse_state_dict(meta_dict, _load)
@dataclass
class NfCfg:
    """Architecture configuration for a Normalization-Free network.

    `depths`/`channels` give per-stage block counts and widths; the rest
    tune the stem, optional attention, width scaling and init behaviour.
    The @dataclass decorator is required: without it the annotated fields
    below are mere annotations and instances carry no attributes.
    """
    depths: Tuple[int, int, int, int]
    channels: Tuple[int, int, int, int]
    alpha: float = 0.2
    stem_type: str = '3x3'
    stem_chs: Optional[int] = None
    group_size: Optional[int] = None
    attn_layer: Optional[str] = None
    attn_kwargs: dict = None  # effectively Optional[dict]; None = no attn kwargs
    attn_gain: float = 2.0
    width_factor: float = 1.0
    bottle_ratio: float = 0.5
    num_features: int = 0
    ch_div: int = 8  # round channel counts to a multiple of this
    reg: bool = False
    extra_conv: bool = False
    gamma_in_act: bool = False
    same_padding: bool = False
    skipinit: bool = False
    zero_init_fc: bool = False
    act_layer: str = 'silu'
def _uniform_schedule(origin_distr, target_distr, i_estimator, total_estimator):
    """Balancing schedule that ignores progress and returns the target.

    Validates each argument against the expected types declared in
    `BALANCING_SCHEDULE_PARAMS_TYPE` and that the estimator index is in
    range, then hands back `target_distr` unchanged.
    """
    params = [origin_distr, target_distr, i_estimator, total_estimator]
    for param, (param_name, param_type) in zip(params, list(BALANCING_SCHEDULE_PARAMS_TYPE.items())):
        if not isinstance(param, param_type):
            raise TypeError(f"'{param_name}' must be `{param_type}`, got {type(param)}.")
    if i_estimator >= total_estimator:
        raise ValueError(f"'i_estimator' should < 'total_estimator', got 'i_estimator' = {i_estimator} >= 'total_estimator' = {total_estimator}.")
    return target_distr
def init_bias_lin_zero(model, logger=None):
    """Zero-initialize the bias of every nn.Linear module in `model`.

    Args:
        model: module whose linear-layer biases are reset in place.
        logger: optional logger; when provided, the number of layers
            touched is logged. (Previously `logger.info` was called
            unconditionally, crashing with the default `logger=None`.)
    """
    layers_initialized = 0
    for m in model.modules():
        if isinstance(m, nn.Linear) and (m.bias is not None):
            layers_initialized += 1
            m.bias.data.zero_()
    if logger is not None:
        logger.info((('Initialized ' + str(layers_initialized)) + ' bias linear layers using 0'))
class InplaceAbn(nn.Module):
    """Activated Batch Normalization (In-place ABN) module.

    Fuses batch normalization with an optional activation ('leaky_relu',
    'elu' or 'identity'), executed by the external `inplace_abn` kernel.
    `act_layer` may be given as a string or as one of the nn activation
    classes.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, apply_act=True, act_layer='leaky_relu', act_param=0.01, drop_block=None):
        super(InplaceAbn, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        # Resolve the activation to the canonical string the kernel expects.
        self.act_name = self._resolve_act_name(act_layer) if apply_act else 'identity'
        self.act_param = act_param
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    @staticmethod
    def _resolve_act_name(act_layer):
        # Accept either a name string or an nn activation class.
        if isinstance(act_layer, str):
            assert (act_layer in ('leaky_relu', 'elu', 'identity', ''))
            return act_layer if act_layer else 'identity'
        if act_layer == nn.ELU:
            return 'elu'
        if act_layer == nn.LeakyReLU:
            return 'leaky_relu'
        if act_layer == nn.Identity:
            return 'identity'
        assert False, f'Invalid act layer {act_layer.__name__} for IABN'

    def reset_parameters(self):
        """Reset running statistics and (when affine) the scale/shift."""
        nn.init.constant_(self.running_mean, 0)
        nn.init.constant_(self.running_var, 1)
        if self.affine:
            nn.init.constant_(self.weight, 1)
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        result = inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.act_name, self.act_param)
        # Some inplace_abn builds return (output, ...) tuples.
        return result[0] if isinstance(result, tuple) else result
class MixtureSIN(DeepConditional):
    """Stochastic inverse network whose posterior is a learned mixture.

    An encoder maps data to potentials; those potentials are combined with
    a parametric mixture prior to produce the posterior distribution.
    """

    def __init__(self, encoder: DeepConditional, mixture_params: MixtureParams):
        super().__init__()
        self.encoder = encoder
        self.mixture_params = mixture_params

    def predict(self, data) -> TorchDistribution:
        """Encode `data` and return the mixture posterior over latents."""
        potentials = self.encoder.predict(data)
        return self.mixture_params.get_distribution().posterior(potentials)
class MusdbTrainDataset(Dataset):
    """MUSDB training dataset with loudness-based augmentation (LimitAug).

    Randomly crops one segment per source, mixes them, then applies one of
    several loudness/limiter augmentation strategies selected by
    `limitaug_method`. Returns (mixture, target) waveform tensors.

    NOTE(review): `custom_limiter_attack_range`/`custom_limiter_release_range`
    use mutable list defaults (shared across instances) -- they are only
    read here, but consider tuples.
    """
    def __init__(self, target: str='vocals', root: str=None, seq_duration: Optional[float]=6.0, samples_per_track: int=64, source_augmentations: Optional[Callable]=(lambda audio: audio), sample_rate: int=44100, seed: int=42, limitaug_method: str='limitaug_then_loudnorm', limitaug_mode: str='normal_L', limitaug_custom_target_lufs: float=None, limitaug_custom_target_lufs_std: float=None, target_loudnorm_lufs: float=(- 14.0), custom_limiter_attack_range: list=[2.0, 2.0], custom_limiter_release_range: list=[200.0, 200.0], *args, **kwargs) -> None:
        self.seed = seed
        random.seed(seed)
        self.seq_duration = seq_duration
        self.target = target
        self.samples_per_track = samples_per_track
        self.source_augmentations = source_augmentations
        self.sample_rate = sample_rate
        self.root = root
        self.sources = ['vocals', 'bass', 'drums', 'other']
        self.train_list = glob(f'{self.root}/train/*')
        # Fixed validation tracks, excluded from the training list below.
        self.valid_list = ['ANiMAL - Rockshow', 'Actions - One Minute Smile', 'Alexander Ross - Goodbye Bolero', 'Clara Berry And Wooldog - Waltz For My Victims', 'Fergessen - Nos Palpitants', 'James May - On The Line', 'Johnny Lokke - Promises & Lies', 'Leaf - Summerghost', 'Meaxic - Take A Step', 'Patrick Talbot - A Reason To Leave', 'Skelpolu - Human Mistakes', 'Traffic Experiment - Sirens', 'Triviul - Angelsaint', 'Young Griffo - Pennies']
        self.train_list = [x for x in self.train_list if (os.path.basename(x) not in self.valid_list)]
        self.limitaug_method = limitaug_method
        self.limitaug_mode = limitaug_mode
        self.limitaug_custom_target_lufs = limitaug_custom_target_lufs
        self.limitaug_custom_target_lufs_std = limitaug_custom_target_lufs_std
        self.target_loudnorm_lufs = target_loudnorm_lufs
        self.meter = pyln.Meter(self.sample_rate)
        # Build the Pedalboard effect chain for the chosen augmentation method.
        # The board layout (index of gain/limiter stages) is relied upon in
        # get_limitaug_results, so the ordering here must not change.
        if (self.limitaug_method == 'linear_gain_increase'):
            print('using linear gain increasing!')
            self.board = Pedalboard([Gain(gain_db=0.0)])
        elif (self.limitaug_method == 'limitaug'):
            print('using limitaug!')
            self.board = Pedalboard([Gain(gain_db=0.0), Limiter(threshold_db=0.0, release_ms=100.0)])
        elif (self.limitaug_method == 'only_loudnorm'):
            print('using only loudness normalized inputs')
        elif (self.limitaug_method == 'limitaug_then_loudnorm'):
            print('using limitaug then loudness normalize!')
            self.board = Pedalboard([Gain(gain_db=0.0), Limiter(threshold_db=0.0, release_ms=100.0)])
        elif (self.limitaug_method == 'custom_limiter_limitaug'):
            print('using Custom limiter limitaug!')
            self.custom_limiter_attack_range = custom_limiter_attack_range
            self.custom_limiter_release_range = custom_limiter_release_range
            # Custom limiter = soft compressor + hard (1000:1) compressor
            # + make-up gain + clipper.
            self.board = Pedalboard([Gain(gain_db=0.0), Compressor(threshold_db=(- 10.0), ratio=4.0, attack_ms=2.0, release_ms=200.0), Compressor(threshold_db=0.0, ratio=1000.0, attack_ms=0.001, release_ms=100.0), Gain(gain_db=3.75), Clipping(threshold_db=0.0)])
        # Mean/std of target LUFS per mode; 'custom' uses caller-supplied values.
        self.limitaug_mode_statistics = {'normal': [(- 15.954), 1.264], 'normal_L': [(- 10.887), 1.191], 'normal_XL': [(- 8.608), 1.165], 'normal_short_term': [(- 17.317), 5.036], 'normal_L_short_term': [(- 12.303), 5.233], 'normal_XL_short_term': [(- 9.988), 5.518], 'custom': [limitaug_custom_target_lufs, limitaug_custom_target_lufs_std]}
    def sample_target_lufs(self):
        """Draw a target loudness (LUFS) for this sample from the mode's distribution."""
        if (self.limitaug_mode == 'uniform'):
            target_lufs = random.uniform((- 20), (- 5))
        else:
            target_lufs = random.gauss(self.limitaug_mode_statistics[self.limitaug_mode][0], self.limitaug_mode_statistics[self.limitaug_mode][1])
        return target_lufs
    def get_limitaug_results(self, mixture, target):
        """Apply the configured loudness augmentation to a (mixture, target) pair.

        The target is rescaled by the same per-sample gain the mixture
        received so mixture/target stay consistent.
        """
        if (self.limitaug_method == 'linear_gain_increase'):
            target_lufs = self.sample_target_lufs()
            (mixture, target) = apply_linear_gain_increase(mixture, target, self.board, self.meter, self.sample_rate, target_lufs=target_lufs)
        elif (self.limitaug_method == 'limitaug'):
            # Randomize the limiter release time per sample.
            self.board[1].release_ms = random.uniform(30.0, 200.0)
            mixture_orig = mixture.copy()
            target_lufs = self.sample_target_lufs()
            (mixture, _) = apply_limitaug(mixture, self.board, self.meter, self.sample_rate, target_lufs=target_lufs)
            # NOTE(review): leftover debug prints -- consider removing.
            print('mixture shape:', mixture.shape)
            print('target shape:', target.shape)
            # Scale target by the element-wise gain the limiter applied.
            target *= (mixture / (mixture_orig + 1e-08))
        elif (self.limitaug_method == 'only_loudnorm'):
            mixture_loudness = self.meter.integrated_loudness(mixture.T)
            if np.isinf(mixture_loudness):
                # Silent segment: loudness undefined, skip normalization.
                pass
            else:
                augmented_gain = (self.target_loudnorm_lufs - mixture_loudness)
                mixture = (mixture * db2linear(augmented_gain))
                target = (target * db2linear(augmented_gain))
        elif (self.limitaug_method == 'limitaug_then_loudnorm'):
            self.board[1].release_ms = random.uniform(30.0, 200.0)
            mixture_orig = mixture.copy()
            target_lufs = self.sample_target_lufs()
            (mixture, _) = apply_limitaug(mixture, self.board, self.meter, self.sample_rate, target_lufs=target_lufs, target_loudnorm_lufs=self.target_loudnorm_lufs)
            target *= (mixture / (mixture_orig + 1e-08))
        elif (self.limitaug_method == 'custom_limiter_limitaug'):
            # Randomize first compressor attack/release and second release.
            self.board[1].attack_ms = random.uniform(self.custom_limiter_attack_range[0], self.custom_limiter_attack_range[1])
            self.board[1].release_ms = random.uniform(self.custom_limiter_release_range[0], self.custom_limiter_release_range[1])
            self.board[2].release_ms = random.uniform(30.0, 200.0)
            mixture_orig = mixture.copy()
            target_lufs = self.sample_target_lufs()
            (mixture, _) = apply_limitaug(mixture, self.board, self.meter, self.sample_rate, target_lufs=target_lufs, target_loudnorm_lufs=self.target_loudnorm_lufs)
            target *= (mixture / (mixture_orig + 1e-08))
        return (mixture, target)
    def __getitem__(self, index):
        """Assemble one training example: random crops of each source, mixed.

        The target source is cropped from the track selected by `index`;
        the other sources come from random tracks (source mixing).
        """
        audio_sources = []
        target_ind = None
        for (k, source) in enumerate(self.sources):
            if (source == self.target):
                target_ind = k
                track_path = self.train_list[(index // self.samples_per_track)]
                audio_path = f'{track_path}/{source}.wav'
                audio = load_wav_arbitrary_position_stereo(audio_path, self.sample_rate, self.seq_duration)
            else:
                track_path = random.choice(self.train_list)
                audio_path = f'{track_path}/{source}.wav'
                audio = load_wav_arbitrary_position_stereo(audio_path, self.sample_rate, self.seq_duration)
            audio = self.source_augmentations(audio)
            audio_sources.append(audio)
        stems = np.stack(audio_sources, axis=0)
        # Mixture is the sum of all stems; target is the selected stem.
        x = stems.sum(0)
        y = stems[target_ind]
        (x, y) = self.get_limitaug_results(x, y)
        x = torch.as_tensor(x, dtype=torch.float32)
        y = torch.as_tensor(y, dtype=torch.float32)
        return (x, y)
    def __len__(self):
        # Each track is sampled `samples_per_track` times per epoch.
        return (len(self.train_list) * self.samples_per_track)
class SuperResK1KXK1(PlainNetSuperBlockClass):
    """Super-block of stacked 1x1 -> KxK -> 1x1 residual bottleneck pairs.

    Builds a textual network spec (ConvKX/BN/RELU/SE/ResBlock tokens) for
    `sub_layers` repetitions of two bottleneck blocks each, then parses it
    into concrete modules via `PlainNet.create_netblock_list_from_str`.
    Only the first block in the stack applies `stride` and a projection
    shortcut.
    """
    def __init__(self, in_channels=None, out_channels=None, stride=None, bottleneck_channels=None, sub_layers=None, kernel_size=None, no_create=False, no_reslink=False, no_BN=False, use_se=False, **kwargs):
        super(SuperResK1KXK1, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.bottleneck_channels = bottleneck_channels
        self.sub_layers = sub_layers
        self.kernel_size = kernel_size
        self.no_create = no_create
        self.no_reslink = no_reslink
        self.no_BN = no_BN
        self.use_se = use_se
        if self.use_se:
            print(('---debug use_se in ' + str(self)))
        full_str = ''
        last_channels = in_channels
        current_stride = stride
        for i in range(self.sub_layers):
            # First bottleneck of the pair: carries the (possibly strided)
            # spatial reduction and, on i == 0, the projection shortcut.
            inner_str = ''
            inner_str += 'ConvKX({},{},{},{})'.format(last_channels, self.bottleneck_channels, 1, 1)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.bottleneck_channels)
            inner_str += 'RELU({})'.format(self.bottleneck_channels)
            inner_str += 'ConvKX({},{},{},{})'.format(self.bottleneck_channels, self.bottleneck_channels, self.kernel_size, current_stride)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.bottleneck_channels)
            inner_str += 'RELU({})'.format(self.bottleneck_channels)
            if self.use_se:
                inner_str += 'SE({})'.format(bottleneck_channels)
            inner_str += 'ConvKX({},{},{},{})'.format(self.bottleneck_channels, self.out_channels, 1, 1)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.out_channels)
            if (not self.no_reslink):
                if (i == 0):
                    res_str = 'ResBlockProj({})RELU({})'.format(inner_str, out_channels)
                else:
                    res_str = 'ResBlock({})RELU({})'.format(inner_str, out_channels)
            else:
                res_str = '{}RELU({})'.format(inner_str, out_channels)
            full_str += res_str
            # Second bottleneck of the pair: stride 1, identity shortcut.
            inner_str = ''
            inner_str += 'ConvKX({},{},{},{})'.format(self.out_channels, self.bottleneck_channels, 1, 1)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.bottleneck_channels)
            inner_str += 'RELU({})'.format(self.bottleneck_channels)
            inner_str += 'ConvKX({},{},{},{})'.format(self.bottleneck_channels, self.bottleneck_channels, self.kernel_size, 1)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.bottleneck_channels)
            inner_str += 'RELU({})'.format(self.bottleneck_channels)
            if self.use_se:
                inner_str += 'SE({})'.format(bottleneck_channels)
            inner_str += 'ConvKX({},{},{},{})'.format(self.bottleneck_channels, self.out_channels, 1, 1)
            if (not self.no_BN):
                inner_str += 'BN({})'.format(self.out_channels)
            if (not self.no_reslink):
                res_str = 'ResBlock({})RELU({})'.format(inner_str, out_channels)
            else:
                res_str = '{}RELU({})'.format(inner_str, out_channels)
            full_str += res_str
            # From the second pair on: channels settled, stride exhausted.
            last_channels = out_channels
            current_stride = 1
            pass
        self.block_list = PlainNet.create_netblock_list_from_str(full_str, no_create=no_create, no_reslink=no_reslink, no_BN=no_BN, **kwargs)
        if (not no_create):
            self.module_list = nn.ModuleList(self.block_list)
        else:
            self.module_list = None
    def __str__(self):
        # Canonical spec string; parseable by create_from_str.
        return (type(self).__name__ + '({},{},{},{},{})'.format(self.in_channels, self.out_channels, self.stride, self.bottleneck_channels, self.sub_layers))
    def __repr__(self):
        return (type(self).__name__ + '({}|in={},out={},stride={},btl_channels={},sub_layers={},kernel_size={})'.format(self.block_name, self.in_channels, self.out_channels, self.stride, self.bottleneck_channels, self.sub_layers, self.kernel_size))
    def encode_structure(self):
        """Return the searchable structure parameters as a flat list."""
        return [self.out_channels, self.sub_layers, self.bottleneck_channels]
    def split(self, split_layer_threshold):
        """Split this super-block into two spec strings when deep enough.

        The first part keeps the stride; the second continues at stride 1
        with matching channels.
        """
        if (self.sub_layers >= split_layer_threshold):
            new_sublayers_1 = (split_layer_threshold // 2)
            new_sublayers_2 = (self.sub_layers - new_sublayers_1)
            new_block_str1 = (type(self).__name__ + '({},{},{},{},{})'.format(self.in_channels, self.out_channels, self.stride, self.bottleneck_channels, new_sublayers_1))
            new_block_str2 = (type(self).__name__ + '({},{},{},{},{})'.format(self.out_channels, self.out_channels, 1, self.bottleneck_channels, new_sublayers_2))
            return (new_block_str1 + new_block_str2)
        else:
            return str(self)
    def structure_scale(self, scale=1.0, channel_scale=None, sub_layer_scale=None):
        """Return a spec string with channels/depth scaled by the given factors."""
        if (channel_scale is None):
            channel_scale = scale
        if (sub_layer_scale is None):
            sub_layer_scale = scale
        new_out_channels = global_utils.smart_round((self.out_channels * channel_scale))
        new_bottleneck_channels = global_utils.smart_round((self.bottleneck_channels * channel_scale))
        new_sub_layers = max(1, round((self.sub_layers * sub_layer_scale)))
        return (type(self).__name__ + '({},{},{},{},{})'.format(self.in_channels, new_out_channels, self.stride, new_bottleneck_channels, new_sub_layers))
    # NOTE(review): takes `cls` but has no @classmethod decorator -- likely
    # lost in source conversion; callers must currently pass cls explicitly.
    def create_from_str(cls, s, **kwargs):
        """Parse a spec string (as produced by __str__) into an instance.

        Returns (instance, remainder_of_string).
        """
        assert cls.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert (idx is not None)
        param_str = s[len((cls.__name__ + '(')):idx]
        # Optional 'name|' prefix carries an explicit block name.
        tmp_idx = param_str.find('|')
        if (tmp_idx < 0):
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        param_str_split = param_str.split(',')
        in_channels = int(param_str_split[0])
        out_channels = int(param_str_split[1])
        stride = int(param_str_split[2])
        bottleneck_channels = int(param_str_split[3])
        sub_layers = int(param_str_split[4])
        return (cls(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck_channels=bottleneck_channels, sub_layers=sub_layers, block_name=tmp_block_name, **kwargs), s[(idx + 1):])
class TestCodeLlamaModel(unittest.TestCase):
    """Smoke test: code generation with a locally mirrored CodeLlama-7b model."""

    def setUp(self):
        return super().setUp()

    def tearDown(self) -> None:
        return super().tearDown()

    def test_code_gen(self):
        """Build a chatbot around the local checkpoint and check the completion."""
        config = PipelineConfig(model_name_or_path='/tf_dataset2/models/nlp_toolkit/CodeLlama-7b-hf')
        chatbot = build_chatbot(config=config)
        result = chatbot.predict('def print_hello_world():')
        print(result)
        # The model is expected to complete the stub with a Hello World body.
        self.assertIn('Hello World', str(result))
def json_to_numpy(in_file):
    """Load per-body-part landmarks from a JSON file into one (N, 3) array.

    Args:
        in_file: pathlib.Path to the landmarks JSON file.

    Returns:
        np.ndarray of shape (sum(LMKS.values()), 3). Body parts with no
        detected landmarks are padded with (-1, -1, -1) placeholder rows so
        the output shape is constant.
    """
    # Context manager guarantees the file is closed (the original leaked the handle).
    with open(in_file.as_posix(), 'r') as f:
        data = json.load(f)
    frame_landmarks = []
    for bp in LMKS.keys():
        bp_landmarks = [[float(n) for n in lm.split(',')[:3]] for lm in data[f'{bp}_landmarks']['landmarks']]
        if len(bp_landmarks) == 0:
            # Pad absent body parts to keep a fixed row count per part.
            bp_landmarks = ([[(- 1.0), (- 1.0), (- 1.0)]] * LMKS[bp])
        frame_landmarks += bp_landmarks
    return np.array(frame_landmarks)
class ResNet(nn.Module):
    """ResNet backbone with an auxiliary ROI classification head.

    In addition to the standard image-level classifier `fc`, a second head
    `fc_roi` classifies ROI-aligned features from the last feature map when
    `boxes` are passed to `forward`.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each element says whether to replace the 2x2 stride of that
            # stage with dilation instead.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Separate head for ROI features (used unless share_fc=True).
        self.fc_roi = nn.Linear((512 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual branch so blocks start as
            # identity mappings.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one ResNet stage of `blocks` residual blocks.

        When `dilate` is True, the stride is converted into dilation to
        preserve spatial resolution.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 projection shortcut to match shape across the first block.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x, boxes=None, share_fc=False):
        """Classify images, optionally adding ROI-level predictions.

        Args:
            x: input image batch (N, 3, H, W).
            boxes: optional (M, 4) boxes in input-pixel coordinates; when
                given, returns (image_logits, roi_logits).
            share_fc: when True, ROI features reuse the image head `fc`
                instead of `fc_roi`.
        """
        bs = x.shape[0]
        sz = x.shape[(- 1)]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        feat_map = x
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        if (boxes is not None):
            # Prepend the batch index column required by roi_align.
            index = torch.arange(bs).view((- 1), 1).to(x.device)
            boxes = torch.cat([index, boxes], 1)
            # Scale boxes from input pixels to feature-map coordinates.
            spatial_scale = (feat_map.shape[(- 1)] / sz)
            roi_feat = roi_align(feat_map, boxes, output_size=(1, 1), spatial_scale=spatial_scale, sampling_ratio=(- 1), aligned=True).squeeze()
            if share_fc:
                out_roi = self.fc(roi_feat)
            else:
                out_roi = self.fc_roi(roi_feat)
            return (x, out_roi)
        return x
_sentencepiece
_tokenizers
class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the slow (SentencePiece) and fast T5 tokenizers.

    Uses a small sample vocab for the structural tests and the pretrained
    't5-base' tokenizer for the integration-style tests below.
    """
    tokenizer_class = T5Tokenizer
    rust_tokenizer_class = T5TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        # Persist a sample-vocab tokenizer so get_tokenizer() can reload it.
        super().setUp()
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id conversion round-trips for a known special token."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[(- 1)], '<pad>')
        self.assertEqual(len(vocab_keys), 1101)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1100)

    def test_full_tokenizer(self):
        # Tokenize -> ids -> tokens; out-of-vocab pieces come back as <unk>.
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])

    # NOTE(review): `_property` below looks like a garbled decorator
    # (probably `@cached_property` / `@property`) -- confirm against upstream.
    _property
    def t5_base_tokenizer(self):
        return T5Tokenizer.from_pretrained('t5-base')

    # NOTE(review): same suspected garbled decorator as above.
    _property
    def t5_base_tokenizer_fast(self):
        return T5TokenizerFast.from_pretrained('t5-base')

    def get_tokenizer(self, **kwargs) -> T5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast:
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is false.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_eos_treatment(self):
        # Explicit trailing </s> must be equivalent to the auto-added EOS.
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])

    def test_prepare_batch(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if (FRAMEWORK != 'jax'):
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_empty_target_text(self):
        # Without target text, no decoder-side fields should be produced.
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = ['Summary of the text.', 'Another summary.']
        with tokenizer.as_target_tokenizer():
            targets = tokenizer(tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_outputs_not_longer_than_maxlen(self):
        # Truncation defaults to the model max length (512 for t5-base).
        tokenizer = self.t5_base_tokenizer
        batch = tokenizer([('I am a small frog' * 1000), 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1]
        expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1]
        batch = tokenizer(src_text)
        with tokenizer.as_target_tokenizer():
            targets = tokenizer(tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, targets['input_ids'][0])

    def test_token_type_ids(self):
        src_text_1 = ['A first paragraph for summarization.']
        src_text_2 = ['A second paragraph for summarization.']
        fast_token_type_ids = self.t5_base_tokenizer_fast(src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True).token_type_ids
        slow_token_type_ids = self.t5_base_tokenizer(src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True).token_type_ids
        self.assertEqual(slow_token_type_ids, fast_token_type_ids)
        self.assertEqual(len(slow_token_type_ids[0]), 18)

    def test_fast_and_slow_same_result(self):
        src_text = '<pad> Today is <unk> nice day </s>'
        tgt_ids = [0, 1960, 19, 2, 1245, 239, 1]
        tgt_text = '<pad> Today is<unk> nice day</s>'
        fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids
        slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids
        self.assertEqual(tgt_ids, fast_ids)
        self.assertEqual(tgt_ids, slow_ids)
        fast_text = self.t5_base_tokenizer_fast.decode(fast_ids)
        slow_text = self.t5_base_tokenizer.decode(fast_ids)
        self.assertEqual(tgt_text, fast_text)
        self.assertEqual(tgt_text, slow_text)

    def test_special_tokens_initialization(self):
        # Extra special tokens must survive slow, fast, and fast-from-slow init.
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                added_tokens = ([f'<extra_id_{i}>' for i in range(100)] + [AddedToken('<special>', lstrip=True)])
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                tokenizer_cr = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                p_output = tokenizer_p.encode('Hey this is a <special> token')
                r_output = tokenizer_r.encode('Hey this is a <special> token')
                cr_output = tokenizer_cr.encode('Hey this is a <special> token')
                special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
                self.assertEqual(p_output, r_output)
                self.assertEqual(cr_output, r_output)
                self.assertTrue((special_token_id in p_output))
                self.assertTrue((special_token_id in r_output))
                self.assertTrue((special_token_id in cr_output))

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        # Edit the serialized config/special-tokens files on disk, then check
        # that reloading picks up the extra special tokens correctly.
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for (tokenizer_class, tokenizer_utils) in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f'<extra_id_{i}>' for i in range(100)]
                special_tokens_map['additional_special_tokens'] = (added_tokens_extra_ids + ['an_additional_special_token'])
                tokenizer_config['additional_special_tokens'] = (added_tokens_extra_ids + ['an_additional_special_token'])
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn('an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])))
                new_added_tokens = (added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)])
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])))

    def test_tokenizer_integration(self):
        # Golden encodings for a pinned 't5-base' revision.
        expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='t5-base', revision='5a7ff2d8f5117c194c7e32ec1ccbf04642cca99b')
def compute_hessian(f, params):
    """Return the Hessian of scalar `f` w.r.t. `params` as a (P, P) tensor.

    Entries whose second derivative is undefined (tf.gradients returns None)
    are filled with 0.0. Assumes each element of `params` is scalar-valued,
    since the result is reshaped to (len(params), len(params)).
    """
    rows = []
    for p_i in params:
        row = []
        for p_j in params:
            # d^2 f / (dp_i dp_j) via two nested gradient calls.
            second = tf.gradients(tf.gradients(f, p_j)[0], p_i)[0]
            row.append([0.0] if second is None else second)
        rows.append(tf.convert_to_tensor(row))
    hessian = tf.convert_to_tensor(rows)
    return tf.reshape(hessian, (len(params), len(params)))
def mask_matrix_nms(masks, labels, scores, filter_thr=(- 1), nms_pre=(- 1), max_num=(- 1), kernel='gaussian', sigma=2.0, mask_area=None):
    """Matrix NMS for instance masks: decay scores instead of hard suppression.

    Args:
        masks: binary masks -- assumed (N, H, W) from the sum over dims (1, 2)
            and the shape[-2:] slicing below (TODO confirm at call sites).
        labels: per-mask category labels, shape (N,).
        scores: per-mask confidence scores, shape (N,).
        filter_thr: drop masks whose decayed score is below this (off if <= 0).
        nms_pre: keep at most this many top-scoring masks before NMS (off if <= 0).
        max_num: keep at most this many masks after NMS (off if <= 0).
        kernel: 'gaussian' or 'linear' decay kernel.
        sigma: gaussian kernel bandwidth.
        mask_area: optional precomputed per-mask pixel areas.

    Returns:
        (scores, labels, masks, keep_inds) of surviving masks, sorted by
        decayed score; keep_inds indexes into the original input order.
    """
    assert (len(labels) == len(masks) == len(scores))
    if (len(labels) == 0):
        # Empty input: return empty tensors with consistent trailing dims.
        return (scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(0, *masks.shape[(- 2):]), labels.new_zeros(0))
    if (mask_area is None):
        mask_area = masks.sum((1, 2)).float()
    else:
        assert (len(masks) == len(mask_area))
    # Work in descending-score order so row i always outranks row j for i < j.
    (scores, sort_inds) = torch.sort(scores, descending=True)
    keep_inds = sort_inds
    if ((nms_pre > 0) and (len(sort_inds) > nms_pre)):
        sort_inds = sort_inds[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[sort_inds]
    mask_area = mask_area[sort_inds]
    labels = labels[sort_inds]
    num_masks = len(labels)
    flatten_masks = masks.reshape(num_masks, (- 1)).float()
    # Pairwise intersections via inner products of flattened binary masks.
    inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
    expanded_mask_area = mask_area.expand(num_masks, num_masks)
    # IoU; upper triangle keeps only (higher-scoring i, lower-scoring j) pairs.
    iou_matrix = (inter_matrix / ((expanded_mask_area + expanded_mask_area.transpose(1, 0)) - inter_matrix)).triu(diagonal=1)
    expanded_labels = labels.expand(num_masks, num_masks)
    # Only same-class masks suppress each other.
    label_matrix = (expanded_labels == expanded_labels.transpose(1, 0)).triu(diagonal=1)
    # For each mask: the max IoU its would-be suppressors were themselves
    # suppressed by (compensation term of matrix NMS).
    (compensate_iou, _) = (iou_matrix * label_matrix).max(0)
    compensate_iou = compensate_iou.expand(num_masks, num_masks).transpose(1, 0)
    decay_iou = (iou_matrix * label_matrix)
    if (kernel == 'gaussian'):
        decay_matrix = torch.exp((((- 1) * sigma) * (decay_iou ** 2)))
        compensate_matrix = torch.exp((((- 1) * sigma) * (compensate_iou ** 2)))
        (decay_coefficient, _) = (decay_matrix / compensate_matrix).min(0)
    elif (kernel == 'linear'):
        decay_matrix = ((1 - decay_iou) / (1 - compensate_iou))
        (decay_coefficient, _) = decay_matrix.min(0)
    else:
        raise NotImplementedError(f'{kernel} kernel is not supported in matrix nms!')
    # Soft suppression: scale scores by the strongest decay they received.
    scores = (scores * decay_coefficient)
    if (filter_thr > 0):
        keep = (scores >= filter_thr)
        keep_inds = keep_inds[keep]
        if (not keep.any()):
            return (scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(0, *masks.shape[(- 2):]), labels.new_zeros(0))
        masks = masks[keep]
        scores = scores[keep]
        labels = labels[keep]
    # Re-sort by decayed score and apply the post-NMS cap.
    (scores, sort_inds) = torch.sort(scores, descending=True)
    keep_inds = keep_inds[sort_inds]
    if ((max_num > 0) and (len(sort_inds) > max_num)):
        sort_inds = sort_inds[:max_num]
        keep_inds = keep_inds[:max_num]
        scores = scores[:max_num]
    masks = masks[sort_inds]
    labels = labels[sort_inds]
    return (scores, labels, masks, keep_inds)
def get_mean_std(exp_name, root_path='/data/sls/scratch/yuangong/avbyol/egs/vggsound/exp/'):
    """Aggregate results over the three repeat runs of an experiment.

    Reads `<root_path><exp_name>-r{1,2,3}/result.csv`, scales values to
    percentages, and returns mean and std (over the repeats) of columns
    0, 3 and 6 of the last row.

    Args:
        exp_name: experiment directory name without the '-rN' suffix.
        root_path: directory holding the repeat folders; must end with a path
            separator. Parameterized (formerly hard-coded to a cluster path)
            so callers and tests can point elsewhere; default keeps the old
            behavior.

    Returns:
        (mean_c0, mean_c3, mean_c6, std_c0, std_c3, std_c6) for the last row.
    """
    runs = []
    for repeat in ('-r1', '-r2', '-r3'):
        # result.csv stores fractions; scale to percentages.
        runs.append(np.loadtxt(root_path + exp_name + repeat + '/result.csv', delimiter=',') * 100)
    runs = np.stack(runs)
    res_mean = np.mean(runs, axis=0)
    res_std = np.std(runs, axis=0)
    max_idx = -1  # use the final row (last epoch / best checkpoint position)
    res_mean = res_mean[max_idx, [0, 3, 6]]
    res_std = res_std[max_idx, [0, 3, 6]]
    return (res_mean[0], res_mean[1], res_mean[2], res_std[0], res_std[1], res_std[2])
class LayerConnection(message.Message):
    """Generated protocol-buffer message class for a layer connection.

    Boilerplate in the classic protobuf code-generation style: the metaclass
    builds the concrete message API from the compiled _LAYERCONNECTION
    descriptor; do not edit by hand.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERCONNECTION
def crop(task_string, override=False, num_threads=default_num_threads):
    """Crop all cases of a task to their nonzero bounding box.

    Output goes to `nnUNet_cropped_data/<task_string>`; when `override` is
    True any previous crop results are wiped first. The task's dataset.json
    is copied alongside the cropped data.
    """
    out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(out_dir)
    if override and isdir(out_dir):
        # Start from a clean directory when overriding earlier runs.
        shutil.rmtree(out_dir)
        maybe_mkdir_p(out_dir)
    raw_task_dir = join(nnUNet_raw_data, task_string)
    lists, _ = create_lists_from_splitted_dataset(raw_task_dir)
    cropper = ImageCropper(num_threads, out_dir)
    cropper.run_cropping(lists, overwrite_existing=override)
    shutil.copy(join(nnUNet_raw_data, task_string, 'dataset.json'), out_dir)
class LinearWarmup(BaseWarmup):
    """Warmup schedule that ramps the factor linearly to 1 over `warmup_period` steps."""

    def __init__(self, optimizer, warmup_period, last_step=(- 1)):
        # One warmup parameter set per optimizer parameter group.
        n_groups = len(optimizer.param_groups)
        params = get_warmup_params(warmup_period, n_groups)
        super(LinearWarmup, self).__init__(optimizer, params, last_step)

    def warmup_factor(self, step, warmup_period):
        """Linear ramp from 1/warmup_period to 1.0, then flat."""
        progress = (step + 1) / warmup_period
        return progress if progress < 1.0 else 1.0
def draw(geometry=None, title='Open3D', width=1024, height=768, actions=None, lookat=None, eye=None, up=None, field_of_view=60.0, intrinsic_matrix=None, extrinsic_matrix=None, bg_color=(1.0, 1.0, 1.0, 1.0), bg_image=None, ibl=None, ibl_intensity=None, show_skybox=None, show_ui=None, raw_mode=False, point_size=None, line_width=None, animation_time_step=1.0, animation_duration=None, rpc_interface=False, on_init=None, on_animation_frame=None, on_animation_tick=None, non_blocking_and_return_uid=False):
    """Create an O3DVisualizer window, populate it with `geometry`, apply the
    requested appearance/camera/animation options, and run the GUI loop.

    When `non_blocking_and_return_uid` is True the window uid is returned
    immediately instead of blocking in the application run loop.
    """
    gui.Application.instance.initialize()
    vis = O3DVisualizer(title, width, height)
    vis.set_background(bg_color, bg_image)
    if actions is not None:
        # Each action is a (name, callback) pair shown in the UI.
        for action in actions:
            vis.add_action(action[0], action[1])
    if point_size is not None:
        vis.point_size = point_size
    if line_width is not None:
        vis.line_width = line_width

    def _register(g, idx):
        # Dict entries carry their own name/material; others get a default name.
        if isinstance(g, dict):
            vis.add_geometry(g)
        else:
            vis.add_geometry('Object ' + str(idx), g)

    counter = 1
    if isinstance(geometry, list):
        for g in geometry:
            _register(g, counter)
            counter += 1
    elif geometry is not None:
        _register(geometry, counter)
    vis.reset_camera_to_default()
    # Explicit camera setup (either look-at or intrinsic/extrinsic) overrides
    # the default camera computed above.
    if lookat is not None and eye is not None and up is not None:
        vis.setup_camera(field_of_view, lookat, eye, up)
    elif intrinsic_matrix is not None and extrinsic_matrix is not None:
        vis.setup_camera(intrinsic_matrix, extrinsic_matrix, width, height)
    vis.animation_time_step = animation_time_step
    if animation_duration is not None:
        vis.animation_duration = animation_duration
    if show_ui is not None:
        vis.show_settings = show_ui
    if ibl is not None:
        vis.set_ibl(ibl)
    if ibl_intensity is not None:
        vis.set_ibl_intensity(ibl_intensity)
    if show_skybox is not None:
        vis.show_skybox(show_skybox)
    if rpc_interface:
        vis.start_rpc_interface(address='tcp://127.0.0.1:51454', timeout=10000)

        def _shutdown_rpc():
            vis.stop_rpc_interface()
            return True
        vis.set_on_close(_shutdown_rpc)
    if raw_mode:
        vis.enable_raw_mode(True)
    if on_init is not None:
        on_init(vis)
    if on_animation_frame is not None:
        vis.set_on_animation_frame(on_animation_frame)
    if on_animation_tick is not None:
        vis.set_on_animation_tick(on_animation_tick)
    gui.Application.instance.add_window(vis)
    if non_blocking_and_return_uid:
        return vis.uid
    gui.Application.instance.run()
def vggm(num_classes=1000, pretrained='imagenet'):
    """Build a VGG-M model, optionally loading pretrained weights.

    Args:
        num_classes: size of the classification head; must match the
            pretrained setting's class count when `pretrained` is given.
        pretrained: key into `pretrained_settings['vggm']` (e.g. 'imagenet'),
            or a falsy value for random initialization.

    Returns:
        The constructed VGGM module; when pretrained, the expected input
        metadata (space/size/range/mean/std) from the settings is attached.
    """
    if pretrained:
        settings = pretrained_settings['vggm'][pretrained]
        assert (num_classes == settings['num_classes']), 'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
        # Fix: build the head from `num_classes` instead of a hard-coded 1000
        # (equal here thanks to the assert above, but kept in sync so new
        # pretrained settings with other class counts work unchanged).
        model = VGGM(num_classes=num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    else:
        model = VGGM(num_classes=num_classes)
    return model
_auth
def filter_datasets(dfilter, project, url, auth_headers):
    """Return {dataset_id: name} for the project's datasets matching `dfilter`.

    Builds a `<field>__<pattern>=<regex>` query from the filter expression and
    walks the paginated /api/v1/datasets/ endpoint until `next` is null.
    Raises via `Response.raise_for_status` on any non-200 reply.
    """
    field, pattern, regex = parse_filter(dfilter)
    query = {'project': project, f'{field}__{pattern}': regex}
    matches = {}
    next_page = f'{url}/api/v1/datasets/'
    while next_page is not None:
        r = requests.get(next_page, headers=auth_headers, params=query)
        if r.status_code != 200:
            r.raise_for_status()
        payload = json.loads(r.text)
        for dataset in payload['results']:
            matches[dataset['id']] = dataset['name']
        next_page = payload['next']
    return matches
def del_field_tokens(task):
    """Delete cached `tokens` from the input fields of every instance in a task.

    Walks train/val/test splits and removes the `tokens` attribute of the
    'input1' and 'input2' fields where present (frees memory after indexing).
    """
    for split in (task.train_data, task.val_data, task.test_data):
        for instance in split:
            for field_name in ('input1', 'input2'):
                if field_name in instance.fields:
                    del instance.fields[field_name].tokens
class AsyncMultiHook(MultiHook):
    """A MultiHook that also tracks and finalizes asynchronous generators.

    While active, the process-wide asyncgen hooks are redirected so every
    async generator created is recorded; when one needs finalizing, a
    CoroutineHook driving its aclose() is queued and run through the normal
    session-run cycle.
    """

    def __init__(self, hooks=None):
        super().__init__(hooks)
        self._original_sys_asyncgen_hooks = sys.get_asyncgen_hooks()
        # Live async generators; weak refs so tracking does not keep them alive.
        self._alive_asyncgens = weakref.WeakSet()
        # Hooks created after begin(); drained into self._hooks lazily.
        self._new_hooks = collections.deque()

    def begin(self):
        # Take over asyncgen bookkeeping for the lifetime of this hook.
        self._original_sys_asyncgen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._alive_asyncgens.add, finalizer=self._finalize_asyncgen)
        super().begin()
        self._update_hook_list()

    def after_run(self, run_context, run_values):
        super().after_run(run_context, run_values)
        self._update_hook_list()

    def end(self, session):
        super().end(session)
        self._update_hook_list()
        self._remove_non_coroutine_hooks()
        self._finish_hooks(session)
        # Finalizing a generator can create new ones, so loop until none remain.
        while self._alive_asyncgens:
            for asyncgen in self._alive_asyncgens:
                self._finalize_asyncgen(asyncgen)
            self._update_hook_list()
            self._finish_hooks(session)
        if self._original_sys_asyncgen_hooks:
            # Restore whatever hooks were installed before begin().
            sys.set_asyncgen_hooks(*self._original_sys_asyncgen_hooks)
        logging.debug('Ending hook 6.')

    def _finish_hooks(self, session):
        """Drive the before_run/after_run cycle until all managed hooks finish."""
        run_context = tf.train.SessionRunContext((), session)
        while self._hooks:
            run_args = self.before_run(run_context)
            results = session.run(run_args.fetches, feed_dict=run_args.feed_dict, options=run_args.options)
            run_values = tf.train.SessionRunValues(results, run_args.options, tf.RunMetadata())
            self.after_run(run_context, run_values)

    def _finalize_asyncgen(self, asyncgen):
        # Close the generator through a hook rather than inline, so aclose()
        # runs inside the session-run machinery.
        self._new_hooks.append(CoroutineHook(coroutine=asyncgen.aclose()))

    def _update_hook_list(self):
        """Activate queued hooks and drop managed hooks that have finished."""
        try:
            while True:
                new_hook = self._new_hooks.popleft()
                new_hook.begin()
                self._hooks.append(new_hook)
        except IndexError:
            # Queue drained.
            pass
        for hook in list(self._hooks):
            if (self.is_managed_hook(hook) and hook.is_finished):
                self._hooks.remove(hook)

    def _remove_non_coroutine_hooks(self):
        for hook in list(self._hooks):
            if (not self.is_managed_hook(hook)):
                self._hooks.remove(hook)

    @staticmethod
    def is_managed_hook(hook):
        # Fix: this was declared without `self` yet called as
        # `self.is_managed_hook(hook)` above, which bound the instance to
        # `hook` and made the real argument an unexpected extra (TypeError).
        # @staticmethod makes both instance and class call styles work.
        return (isinstance(hook, CoroutineHook) or (isinstance(hook, _PeriodicHookWrapper) and isinstance(hook._hook, CoroutineHook)))
def SENet154(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=False, stride_size=2, init_filters=64, repetitions=(3, 8, 36, 3), **kwargs):
    """Instantiate the SENet-154 variant with the given build options."""
    base_options = dict(
        input_shape=input_shape,
        input_tensor=input_tensor,
        include_top=include_top,
        classes=classes,
        weights=weights,
        stride_size=stride_size,
        init_filters=init_filters,
        repetitions=repetitions,
    )
    # Duplicate keys in **kwargs still raise TypeError, as with direct passing.
    return SENet(MODELS_PARAMS['senet154'], **base_options, **kwargs)
def forward(_):
    """Widget callback: run the selected NLP pipeline on the text box value and print the result."""
    text = input.value
    if not text:
        return
    if task.value == 'ner':
        result = nlp_token_class(text)
    elif task.value == 'sentiment-analysis':
        result = nlp_sentence_classif(text)
    else:
        # Fill-mask pipeline: append a mask token if the user didn't type one.
        result = nlp_fill(text if '<mask>' in text else text + ' <mask>')
    print(result)
class PPONModel(BaseModel):
    """Training/eval wrapper for the PPON super-resolution network.

    Supports staged training selected by `args.which_model` ('structure' and
    'perceptual' stages freeze earlier sub-modules); losses are assembled from
    pixel, multi-scale structure, VGG feature, and relativistic GAN terms.
    """

    def __init__(self, args):
        super(PPONModel, self).__init__(args)
        self.netG = networks.define_G(args).cuda()
        if self.is_train:
            # Discriminator is only needed for the perceptual stage.
            if (args.which_model == 'perceptual'):
                self.netD = networks.define_D().cuda()
                self.netD.train()
            self.netG.train()
        self.load()
        if self.is_train:
            # ---- pixel loss ----
            if (args.pixel_weight > 0):
                l_pix_type = args.pixel_criterion
                if (l_pix_type == 'l1'):
                    self.cri_pix = nn.L1Loss().cuda()
                elif (l_pix_type == 'l2'):
                    self.cri_pix = nn.MSELoss().cuda()
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))
                self.l_pix_w = args.pixel_weight
            else:
                print('Remove pixel loss.')
                self.cri_pix = None
            # ---- structure losses: MS-SSIM + multi-scale L1 ----
            if (args.structure_weight > 0):
                self.cri_msssim = pytorch_msssim.MS_SSIM(data_range=args.rgb_range).cuda()
                self.cri_ml1 = MultiscaleL1Loss().cuda()
            else:
                print('Remove structure loss.')
                self.cri_msssim = None
                self.cri_ml1 = None
            # ---- VGG feature (perceptual) loss ----
            if (args.feature_weight > 0):
                l_fea_type = args.feature_criterion
                if (l_fea_type == 'l1'):
                    self.cri_fea = nn.L1Loss().cuda()
                elif (l_fea_type == 'l2'):
                    self.cri_fea = nn.MSELoss().cuda()
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))
                self.l_fea_w = args.feature_weight
            else:
                print('Remove feature loss.')
                self.cri_fea = None
            if self.cri_fea:
                # Frozen feature extractor for the perceptual loss.
                self.vgg = networks.define_F().cuda()
            # ---- GAN loss ----
            if (args.gan_weight > 0):
                self.cri_gan = GANLoss(args.gan_type, 1.0, 0.0).cuda()
                self.l_gan_w = args.gan_weight
            else:
                self.cri_gan = None
            # Freeze sub-modules trained in earlier stages.
            if (args.which_model == 'structure'):
                for param in self.netG.CFEM.parameters():
                    param.requires_grad = False
                for param in self.netG.CRM.parameters():
                    param.requires_grad = False
            if (args.which_model == 'perceptual'):
                for param in self.netG.CFEM.parameters():
                    param.requires_grad = False
                for param in self.netG.CRM.parameters():
                    param.requires_grad = False
                for param in self.netG.SFEM.parameters():
                    param.requires_grad = False
                for param in self.netG.SRM.parameters():
                    param.requires_grad = False
            # Optimize only the still-trainable generator parameters.
            optim_params = []
            for (k, v) in self.netG.named_parameters():
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    print('Warning: params [{:s}] will not optimize.'.format(k))
            self.optimizer_G = torch.optim.Adam(optim_params, lr=args.lr_G)
            self.optimizers.append(self.optimizer_G)
            if (args.which_model == 'perceptual'):
                self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=args.lr_D)
                self.optimizers.append(self.optimizer_D)
            if (args.lr_scheme == 'MultiStepLR'):
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, args.lr_steps, args.lr_gamma))
            else:
                raise NotImplementedError('MultiStepLR learning rate scheme is enough.')
            self.log_dict = OrderedDict()
        print(' Model initialized ')
        self.print_network()
        print('')

    def feed_data(self, data, need_HR=True):
        """Move an (LR, HR) batch to the GPU; HR is optional at inference."""
        self.var_L = data[0].cuda()
        if need_HR:
            self.var_H = data[1].detach().cuda()

    def optimize_parameters(self):
        """One optimization step: update G, then (perceptual stage) update D."""
        if (self.args.which_model == 'perceptual'):
            # Freeze D while updating G.
            for p in self.netD.parameters():
                p.requires_grad = False
        self.optimizer_G.zero_grad()
        self.fake_H = self.netG(self.var_L)
        l_g_total = 0
        if self.cri_pix:
            l_g_pix = (self.l_pix_w * self.cri_pix(self.fake_H, self.var_H))
            l_g_total += l_g_pix
        if self.cri_msssim:
            # MS-SSIM is a similarity; 1 - value turns it into a loss.
            l_g_mssim = (1.0 - self.cri_msssim(self.fake_H, self.var_H))
            l_g_total += l_g_mssim
        if self.cri_ml1:
            l_g_ml1 = self.cri_ml1(self.fake_H, self.var_H)
            l_g_total += l_g_ml1
        if self.cri_fea:
            real_fea = self.vgg(self.var_H).detach()
            fake_fea = self.vgg(self.fake_H)
            vgg_loss = self.cri_fea(fake_fea, real_fea)
            l_g_fea = (self.l_fea_w * vgg_loss)
            l_g_total += l_g_fea
        if self.cri_gan:
            # Relativistic average GAN generator loss.
            pred_g_fake = self.netD(self.fake_H)
            pred_g_real = self.netD(self.var_H)
            pred_g_real.detach_()
            l_g_gan = ((self.l_gan_w * (self.cri_gan((pred_g_real - torch.mean(pred_g_fake)), False) + self.cri_gan((pred_g_fake - torch.mean(pred_g_real)), True))) / 2)
            l_g_total += l_g_gan
        l_g_total.backward()
        self.optimizer_G.step()
        if (self.args.which_model == 'perceptual'):
            # Unfreeze D and take its step against the detached fake.
            for p in self.netD.parameters():
                p.requires_grad = True
            self.optimizer_D.zero_grad()
            pred_d_real = self.netD(self.var_H)
            pred_d_fake = self.netD(self.fake_H.detach())
            l_d_real = self.cri_gan((pred_d_real - torch.mean(pred_d_fake)), True)
            l_d_fake = self.cri_gan((pred_d_fake - torch.mean(pred_d_real)), False)
            l_d_total = ((l_d_real + l_d_fake) / 2)
            l_d_total.backward()
            self.optimizer_D.step()
        # Record scalar losses for logging.
        if self.cri_pix:
            self.log_dict['l_g_pix'] = l_g_pix.item()
        if (self.args.structure_weight > 0):
            self.log_dict['l_g_msl1'] = l_g_ml1.item()
            self.log_dict['l_g_msssim'] = l_g_mssim.item()
        if (self.args.which_model == 'perceptual'):
            if self.cri_fea:
                self.log_dict['l_g_fea'] = l_g_fea.item()
            if self.cri_gan:
                self.log_dict['l_g_gan'] = l_g_gan.item()
                self.log_dict['l_d_real'] = l_d_real.item()
                self.log_dict['l_d_fake'] = l_d_fake.item()
                self.log_dict['D_real'] = torch.mean(pred_d_real.detach())
                self.log_dict['D_fake'] = torch.mean(pred_d_fake.detach())

    def test(self):
        """Forward pass without gradients (toggles requires_grad around it)."""
        self.netG.eval()
        for (k, v) in self.netG.named_parameters():
            v.requires_grad = False
        self.fake_H = self.netG(self.var_L)
        for (k, v) in self.netG.named_parameters():
            v.requires_grad = True
        self.netG.train()

    def get_current_log(self):
        return self.log_dict

    def get_current_visuals(self, need_HR=True):
        """Return first-sample LR/SR (and optionally HR) tensors on CPU."""
        out_dict = OrderedDict()
        out_dict['LR'] = self.var_L.detach()[0].float().cpu()
        out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
        if need_HR:
            out_dict['HR'] = self.var_H.detach()[0].float().cpu()
        return out_dict

    def print_network(self):
        """Print parameter counts and, in training, dump descriptions to network.txt."""
        (s, n) = self.get_network_description(self.netG)
        print('Number of parameters in G: {:,d}'.format(n))
        if self.is_train:
            message = ((' Generator \n' + s) + '\n')
            network_path = os.path.join(self.save_dir, '../', 'network.txt')
            with open(network_path, 'w') as f:
                f.write(message)
            if (self.args.which_model == 'perceptual'):
                (s, n) = self.get_network_description(self.netD)
                print('Number of parameters in D: {:,d}'.format(n))
                message = (('\n\n\n Discriminator \n' + s) + '\n')
                with open(network_path, 'a') as f:
                    f.write(message)
                if self.cri_fea:
                    (s, n) = self.get_network_description(self.vgg)
                    print('Number of parameters in F: {:,d}'.format(n))
                    message = (('\n\n\n Perceptual Network \n' + s) + '\n')
                    with open(network_path, 'a') as f:
                        f.write(message)

    def load(self):
        """Load pretrained G (non-strict) and, when training, pretrained D."""
        load_path_G = self.args.pretrained_model_G
        if (load_path_G is not None):
            print('loading models for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=False)
        load_path_D = self.args.pretrained_model_D
        if (self.args.is_train and (load_path_D is not None)):
            print('loading models for D [{:s}] ...'.format(load_path_D))
            self.load_network(load_path_D, self.netD)

    def save(self, iter_label):
        # NOTE(review): self.cri_gan only exists when is_train -- this method
        # appears to assume it is called during training only.
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.cri_gan:
            self.save_network(self.save_dir, self.netD, 'D', iter_label)
class RasterizeGLContext():
    """Wraps an OpenGL rasterizer state bound to a CUDA device.

    In 'automatic' mode the underlying C++ wrapper makes the context current
    on demand; in 'manual' mode the caller must bracket work with
    set_context()/release_context().
    """

    def __init__(self, output_db=True, mode='automatic', device=None):
        # isinstance(x, bool) is equivalent to (x is True) or (x is False):
        # True and False are the only bool instances.
        assert isinstance(output_db, bool)
        assert mode in ['automatic', 'manual']
        self.output_db = output_db
        self.mode = mode
        if device is None:
            cuda_device_idx = torch.cuda.current_device()
        else:
            # Resolve the index of the requested device.
            with torch.cuda.device(device):
                cuda_device_idx = torch.cuda.current_device()
        self.cpp_wrapper = _get_plugin().RasterizeGLStateWrapper(output_db, mode == 'automatic', cuda_device_idx)
        self.active_depth_peeler = None  # presumably claimed by a depth-peeling helper; starts unset

    def set_context(self):
        """Make the context current (manual mode only)."""
        assert self.mode == 'manual'
        self.cpp_wrapper.set_context()

    def release_context(self):
        """Release the context (manual mode only)."""
        assert self.mode == 'manual'
        self.cpp_wrapper.release_context()
def resize_width(img: tf.Tensor, label: tf.Tensor, width, height, interpolation=tf.image.ResizeMethod.BILINEAR):
    """Letterbox-resize `img` to (height, width) and normalize pixels to [-1, 1].

    The label passes through unchanged, so this can be mapped over a
    (image, label) dataset.
    """
    resized = tf.image.resize_with_pad(img, target_height=height, target_width=width, method=interpolation)
    # Scale 0..255 -> 0..1, then shift/scale to -1..1.
    scaled = resized / 255
    normalized = (scaled - 0.5) / 0.5
    return (normalized, label)
class CTRLTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
control_codes = CONTROL_CODES
def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
    """Load the vocabulary and BPE merge table from disk."""
    super().__init__(unk_token=unk_token, **kwargs)
    with open(vocab_file, encoding='utf-8') as vocab_handle:
        self.encoder = json.load(vocab_handle)
    # Reverse mapping for id -> token lookups.
    self.decoder = {idx: tok for (tok, idx) in self.encoder.items()}
    with open(merges_file, encoding='utf-8') as merges_handle:
        # First line is a version header; the trailing empty line is dropped.
        merge_lines = merges_handle.read().split('\n')[1:(- 1)]
    # Earlier merges have lower rank (= higher priority).
    self.bpe_ranks = {tuple(line.split()): rank for (rank, line) in enumerate(merge_lines)}
    self.cache = {}
def vocab_size(self):
    # Number of entries in the base vocabulary (excludes added tokens).
    # NOTE(review): upstream tokenizers expose vocab_size as a @property; the
    # decorator may have been lost here -- confirm against callers before use.
    return len(self.encoder)
def get_vocab(self):
    """Return the full token->id mapping, base vocab plus added tokens."""
    vocab = dict(self.encoder)
    vocab.update(self.added_tokens_encoder)
    return vocab
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = tuple(token)
word = tuple((list(word[:(- 1)]) + [(word[(- 1)] + '</w>')]))
pairs = get_pairs(word)
if (not pairs):
return token
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
word = word[:(- 4)]
self.cache[token] = word
return word
def _tokenize(self, text):
split_tokens = []
words = re.findall('\\S+\\n?', text)
for token in words:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
return split_tokens
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ', '').strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
return
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
index = token_index
writer.write((' '.join(bpe_tokens) + '\n'))
index += 1
return (vocab_file, merge_file) |
def import_resolve(tex, path):
    """Recursively inline \\input, \\subimport, \\import and \\include files.

    Args:
        tex: LaTeX source (string or file object) to parse.
        path: path the source came from; relative includes are resolved
            against its directory.

    Returns:
        A TexSoup document in which every include-style command has been
        replaced by the (recursively resolved) contents of its target file.
    """
    soup = TexSoup(tex)
    dir_path = (os.path.dirname(path) + '/')

    def _inline(node, target):
        # Authors commonly omit the '.tex' suffix; fall back to it when the
        # bare path does not exist.
        if not os.path.exists(target):
            target = target + '.tex'
        # Read eagerly inside a context manager so the handle is closed
        # (the previous version leaked one open file per include).
        with open(target) as handle:
            resolved = import_resolve(handle.read(), dir_path)
        node.replace(*resolved.contents)

    for _input in soup.find_all('input'):
        _inline(_input, os.path.join(dir_path, _input.args[0]))
    for subimport in soup.find_all('subimport'):
        # \subimport{dir/}{file}: the two arguments concatenate into one path.
        _inline(subimport, os.path.join(dir_path, subimport.args[0] + subimport.args[1]))
    for _import in soup.find_all('import'):
        _inline(_import, os.path.join(dir_path, _import.args[0]))
    for include in soup.find_all('include'):
        _inline(include, os.path.join(dir_path, include.args[0]))
    return soup
def log_metrics(step, metrics):
    """Log *metrics* locally; forward to the tracker from the main process only."""
    logger.info(f'Step {step}: {metrics}')
    if not accelerator.is_main_process:
        return
    accelerator.log(metrics, step)
class inp_syncbatchnorm_(Function):
    """Autograd Function implementing in-place synchronized batch norm.

    Per-GPU moments (E[x], E[x^2]) are reduced across devices through a
    master/worker queue protocol, then normalization plus an optional fused
    activation is applied in place on the input tensor.

    NOTE(review): decorators appear to have been stripped from this source —
    upstream implementations mark `forward` as @classmethod, `backward` as
    @staticmethod + @once_differentiable (the stray `_differentiable` line
    below is the residue of that decorator), and `_parse_extra` as
    @staticmethod. Confirm against the original file.
    """

    def forward(cls, ctx, x, gamma, beta, running_mean, running_var, extra, sync=True, training=True, momentum=0.1, eps=1e-05, activation='none', slope=0.01):
        # Stash cross-GPU queue handles and hyper-parameters on the context.
        cls._parse_extra(ctx, extra)
        ctx.sync = sync
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        x = x.contiguous()
        gamma = gamma.contiguous()
        beta = beta.contiguous()
        if ctx.training:
            if x.is_cuda:
                # Local first and second moments: E[x], E[x^2].
                (_ex, _exs) = src.gpu.expectation_forward(x)
            else:
                # NOTE(review): should be `raise NotImplementedError` — raising
                # the NotImplemented singleton is a TypeError at runtime.
                raise NotImplemented
            if ctx.sync:
                if ctx.is_master:
                    # Master gathers every worker's moments, averages them, and
                    # broadcasts the global moments back to all devices.
                    (_ex, _exs) = ([_ex.unsqueeze(0)], [_exs.unsqueeze(0)])
                    for _ in range(ctx.master_queue.maxsize):
                        (_ex_w, _exs_w) = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _ex.append(_ex_w.unsqueeze(0))
                        _exs.append(_exs_w.unsqueeze(0))
                    _ex = comm.gather(_ex).mean(0)
                    _exs = comm.gather(_exs).mean(0)
                    tensors = comm.broadcast_coalesced((_ex, _exs), ([_ex.get_device()] + ctx.worker_ids))
                    for (ts, queue) in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    # Workers send local moments, then block for the global ones.
                    ctx.master_queue.put((_ex, _exs))
                    (_ex, _exs) = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()
            # Var[x] = E[x^2] - E[x]^2; update running stats with momentum.
            _var = (_exs - (_ex ** 2))
            running_mean.mul_((1 - ctx.momentum)).add_((ctx.momentum * _ex))
            running_var.mul_((1 - ctx.momentum)).add_((ctx.momentum * _var))
            ctx.mark_dirty(x, running_mean, running_var)
        else:
            # Inference: normalize with the stored running statistics.
            (_ex, _var) = (running_mean.contiguous(), running_var.contiguous())
            _exs = (_var + (_ex ** 2))
            ctx.mark_dirty(x)
        if x.is_cuda:
            # Normalizes, scales and shifts x in place.
            src.gpu.batchnorm_inp_forward(x, _ex, _exs, gamma, beta, ctx.eps)
        else:
            raise NotImplemented
        # Optional fused activation, also applied in place.
        _act_forward(ctx, x)
        ctx.save_for_backward(x, _ex, _exs, gamma, beta)
        return x

    # NOTE(review): bare residue of a stripped `@once_differentiable` decorator;
    # as written this is a NameError at class-definition time.
    _differentiable
    def backward(ctx, dz):
        # z is the post-activation output saved in forward (in-place BN).
        (z, _ex, _exs, gamma, beta) = ctx.saved_tensors
        dz = dz.contiguous()
        # Undo the activation's contribution to the incoming gradient in place.
        _act_backward(ctx, z, dz)
        if dz.is_cuda:
            (dx, _dex, _dexs, dgamma, dbeta) = src.gpu.batchnorm_inp_backward(dz, z, _ex, _exs, gamma, beta, ctx.eps)
        else:
            raise NotImplemented
        if ctx.training:
            if ctx.sync:
                if ctx.is_master:
                    # Mirror of the forward reduction, for the moment gradients.
                    (_dex, _dexs) = ([_dex.unsqueeze(0)], [_dexs.unsqueeze(0)])
                    for _ in range(ctx.master_queue.maxsize):
                        (_dex_w, _dexs_w) = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _dex.append(_dex_w.unsqueeze(0))
                        _dexs.append(_dexs_w.unsqueeze(0))
                    _dex = comm.gather(_dex).mean(0)
                    _dexs = comm.gather(_dexs).mean(0)
                    tensors = comm.broadcast_coalesced((_dex, _dexs), ([_dex.get_device()] + ctx.worker_ids))
                    for (ts, queue) in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    ctx.master_queue.put((_dex, _dexs))
                    (_dex, _dexs) = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()
            if z.is_cuda:
                # Fold the statistics' gradient contribution into dx in place.
                src.gpu.expectation_inp_backward(dx, z, _dex, _dexs, _ex, _exs, gamma, beta, ctx.eps)
            else:
                raise NotImplemented
        # One gradient slot per forward argument; non-tensor args get None.
        return (dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None)

    def _parse_extra(ctx, extra):
        # Unpack the cross-GPU queue handles prepared by the module wrapper.
        ctx.is_master = extra['is_master']
        if ctx.is_master:
            ctx.master_queue = extra['master_queue']
            ctx.worker_queues = extra['worker_queues']
            ctx.worker_ids = extra['worker_ids']
        else:
            ctx.master_queue = extra['master_queue']
            ctx.worker_queue = extra['worker_queue']
@torch.inference_mode()
def score_sequences_with_huggingface_given_model(model: nn.Module, tokenizer: transformers.PreTrainedTokenizer, sequences: Sequence[str], per_device_batch_size=20, max_instances=sys.maxsize, mixed_precision: Optional[str]=None, tf32=False, divide_work=True):
    """Score text sequences with a reward model, optionally sharding batches across ranks.

    NOTE(review): the decorator line was corrupted in the source (bare
    `_mode()`); restored to `@torch.inference_mode()`, matching the residue
    pattern and the pure-inference body below.

    Args:
        model: reward model whose forward returns an object with `.rewards`.
        tokenizer: tokenizer used to batch-encode the sequences.
        sequences: strings to score; truncated to `max_instances`. Assumes at
            least one sequence — empty input fails on the padding step (confirm).
        per_device_batch_size: batch size per rank.
        max_instances: cap on the number of sequences scored.
        mixed_precision: optional AMP mode for the model's forward.
        tf32: enable TF32 matmul/cudnn kernels.
        divide_work: split each global batch across ranks when world_size > 1.

    Returns:
        List of float rewards, one per input sequence, in original order.
    """
    torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = tf32
    (local_rank, world_size) = distributed_utils.setup()
    device = (torch.device('cuda', local_rank) if torch.cuda.is_available() else torch.device('cpu'))
    model.forward = common.cast_with_native_amp(model.forward, mixed_precision=mixed_precision)
    logger.warning(f'mixed_precision = {mixed_precision}')
    sequences = sequences[:max_instances]
    ori_data_size = len(sequences)
    if ((world_size > 1) and divide_work):
        batch_size = (per_device_batch_size * world_size)
    else:
        batch_size = per_device_batch_size
    # Pad the data to a multiple of batch_size by repeating the last sequence;
    # padding scores are sliced off before returning.
    new_data_size = (batch_size * int(math.ceil((ori_data_size / batch_size))))
    new_sequences = (list(sequences) + ([sequences[(- 1)]] * (new_data_size - ori_data_size)))
    return_rewards = []
    for (batch_idx, start_idx) in tqdm.tqdm(enumerate(range(0, new_data_size, batch_size)), desc='evaluating rewards for batches', total=(new_data_size // batch_size), disable=(not distributed_utils.is_main_process())):
        batch = new_sequences[start_idx:(start_idx + batch_size)]
        if ((world_size > 1) and divide_work):
            # Each rank scores its contiguous slice of the global batch.
            local_batch = batch[(local_rank * per_device_batch_size):((local_rank + 1) * per_device_batch_size)]
        else:
            local_batch = batch
        source = tokenizer(local_batch, return_tensors='pt', padding='max_length', max_length=tokenizer.model_max_length, truncation=True)
        source = common.prepare_inputs(source, device=device)
        rewards = model(input_ids=source.input_ids, attention_mask=source.attention_mask).rewards
        if ((world_size > 1) and divide_work):
            # Reassemble the full batch's rewards in rank order.
            rewards = distributed_utils.all_gather_and_cat(rewards, dim=0)
        return_rewards.extend(rewards.tolist())
    return return_rewards[:ori_data_size]
def simulated_data():
    """Draw a small two-view latent-variable dataset (20 samples, fixed seed)."""
    generator = LatentVariableData(view_features=[4, 5], latent_dimensions=2, random_state=1)
    # sample() returns the (X, Y) view pair directly.
    return generator.sample(20)
def get_clean_rec_list(result_csv, n=100, k=20):
    """Map each of the first *n* names to its cleaned recommendation list.

    Note: *k* is accepted for interface compatibility but is not used here.
    """
    return {result_csv['name'][i]: clean(result_csv['Result'][i]) for i in range(n)}
class LocallyConnected1D(ZooKerasLayer):
    """Keras-style 1D locally-connected layer (unshared convolution weights).

    Only border_mode='valid' is supported; any other value is rejected.
    """

    def __init__(self, nb_filter, filter_length, activation=None, border_mode='valid', subsample_length=1, W_regularizer=None, b_regularizer=None, bias=True, input_shape=None, **kwargs):
        if border_mode != 'valid':
            invalidInputError(False, "For LocallyConnected1D, only border_mode='valid' is supported for now")
        shape = list(input_shape) if input_shape else None
        super(LocallyConnected1D, self).__init__(None, nb_filter, filter_length, activation, subsample_length, W_regularizer, b_regularizer, bias, shape, **kwargs)
def get_multi_gpu_models(config, emb_mat=None):
    """Build one Model replica per GPU, sharing variables across replicas.

    The first replica (gpu 0) creates the variables; subsequent replicas
    reuse them via the enclosing variable scope.
    """
    replicas = []
    with tf.variable_scope(tf.get_variable_scope()):
        for idx in range(config.num_gpus):
            device = '/{}:{}'.format(config.device_type, idx)
            with tf.name_scope('model_{}'.format(idx)) as scope, tf.device(device):
                if idx > 0:
                    tf.get_variable_scope().reuse_variables()
                replicas.append(Model(config, scope, emb_mat, rep=(idx == 0)))
    return replicas
def plot_main():
    """Plot smoothed median return curves for each experiment group and save a PNG.

    Relies on module-level helpers/globals visible elsewhere in this file:
    reload_data, get_group_selectors, custom_series_splitter, get_shaded_curve,
    sliding_mean, dict_leg2col, dict_xshift, filter_save_name and save_path.
    """
    data_path = '../sac/data/mengxiong'
    plot_key = 'return-average'
    (exps_data, plottable_keys, distinct_params) = reload_data(data_path)
    (group_selectors, group_legends) = get_group_selectors(exps_data, custom_series_splitter)
    (fig, ax) = plt.subplots(figsize=(8, 5))
    for (idx, (selector, legend)) in enumerate(zip(group_selectors, group_legends)):
        # Fixed color per legend entry so colors stay stable across plots.
        color = core.color_defaults[dict_leg2col[legend]]
        # Only the median curve is plotted; the shading bounds are unused here.
        (y, y_lower, y_upper) = get_shaded_curve(selector, plot_key, shade_type='median')
        x = np.array(range(len(y)))
        # Per-series horizontal offset — presumably to align differing warmups; confirm.
        x += dict_xshift[legend]
        # Smooth with a 5-point sliding mean before plotting.
        y = sliding_mean(y, 5)
        ax.plot(x, y, color=color, label=legend, linewidth=2.0)
    def y_fmt(x, y):
        # Tick formatter: render timestep counts in thousands, e.g. 250 -> '250K'.
        return (str(int(np.round(x))) + 'K')
    ax.xaxis.set_major_formatter(tick.FuncFormatter(y_fmt))
    ax.grid(True)
    ax.set_xlabel('Timesteps')
    ax.set_ylabel('Average-return')
    loc = 'best'
    leg = ax.legend(loc=loc, prop={'size': 20}, ncol=1, labels=group_legends)
    # Thicken the legend line samples for readability.
    for legobj in leg.legendHandles:
        legobj.set_linewidth(3.0)
    save_name = filter_save_name('plots.png')
    plt.savefig(osp.join(save_path, save_name), bbox_inches='tight')
@op_registry(op_types='Mod')
class BinaryDirect8BitOperator(Operator):
    """Quantization handler for binary ops (Mod) computed directly in 8 bit.

    NOTE(review): the registration line was corrupted in the source (bare
    `_registry(op_types='Mod')`); restored to the `@op_registry` decorator
    used to dispatch handlers by ONNX op type.
    """

    def __init__(self, onnx_quantizer, onnx_node):
        super(BinaryDirect8BitOperator, self).__init__(onnx_quantizer, onnx_node)

    def quantize_check(self):
        """Return True when output qparams exist and every input is quantizable."""
        node = self.node
        (data_found, _, _, _, _) = self.quantizer._get_quantization_params(node.output[0])
        if (not data_found):
            return False
        if (not all([self.quantizer.is_valid_quantize_weight(i) for i in node.input])):
            return False
        return True

    def quantize(self):
        """Quantize inputs (and outputs unless QDQ output is disabled); mark the node."""
        node = self.node
        self.quantizer.quantize_inputs(node, initializer_use_weight_qType=False)
        if ((not self.disable_qdq_for_node_output) or (self.quantizer.mode != 'qdq')):
            self.quantizer.quantize_outputs(node)
        # The '_quant' suffix flags this node as processed for convert_check().
        node.name = (node.name + '_quant')

    def convert_check(self, convert_format):
        """Only 'static' conversion is supported, and only for quantized nodes with children."""
        node = self.node
        assert (convert_format in ['static']), "convert format for {} should be in ['static']".format(node.op_type)
        children = self.quantizer.model.get_children(node)
        if ((len(children) == 0) or (not node.name.endswith('_quant'))):
            return False
        return True

    def convert(self, convert_format):
        """Fold surrounding DequantizeLinear/QuantizeLinear pairs into the node itself."""
        node = self.node
        parents = self.quantizer.model.get_parents(node)
        children = self.quantizer.model.get_children(node)
        if (any([(i.op_type == 'DequantizeLinear') for i in parents]) and any([(i.op_type == 'QuantizeLinear') for i in children])):
            for (idx, parent) in enumerate(parents):
                if (parent.op_type == 'DequantizeLinear'):
                    # Rewire to the DQ node's quantized input and drop the DQ.
                    self.node.input[idx] = parent.input[0]
                    self.quantizer.remove_nodes.append(parent)
            for child in children:
                if (child.op_type == 'QuantizeLinear'):
                    self.quantizer.remove_nodes.append(child)
                    self.quantizer.model.replace_input_of_all_nodes(child.output[0], (node.output[0] + '_quantized'))
            node.output[0] = (node.output[0] + '_quantized')
class TransformT(object):
    """A named image transform applied stochastically at a given magnitude."""

    def __init__(self, name, xform_fn):
        self.name = name
        self.xform = xform_fn

    def transformer(self, probability, magnitude):
        """Wrap the transform as a TransformFunction that fires with *probability*."""
        def apply(img, label_img_pool):
            applied = random.random() < probability
            extra = []
            if applied:
                (img, extra) = self.xform(img, label_img_pool, magnitude)
            # ((image, extra-state), whether-the-transform-ran)
            return ((img, extra), applied)
        tag = self.name + '({:.1f},{})'.format(probability, magnitude)
        return TransformFunction(apply, tag)

    def do_transform(self, img, label_img_pool, magnitude):
        """Apply the transform unconditionally (probability = PARAMETER_MAX)."""
        return self.transformer(PARAMETER_MAX, magnitude)(img, label_img_pool)
class SumOfSquaresPolynomialBijection(Bijection):
    """Sum-of-squares polynomial autoregressive flow wrapped as a Bijection."""

    def __init__(self, num_input_channels, hidden_channels, activation, num_polynomials, polynomial_degree):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        # One parameter vector of (degree + 1) coefficients per polynomial.
        n_params = (polynomial_degree + 1) * num_polynomials
        arn = AutoRegressiveNN(input_dim=int(num_input_channels), hidden_dims=hidden_channels, param_dims=[n_params], nonlinearity=activation())
        self.flow = Polynomial(autoregressive_nn=arn, input_dim=int(num_input_channels), count_degree=polynomial_degree, count_sum=num_polynomials)

    def _x_to_z(self, x):
        z = self.flow._call(x)
        # The flow caches log|det J| during _call; its arguments are unused here.
        log_jac = self.flow.log_abs_det_jacobian(None, None).view(x.shape[0], 1)
        return {'z': z, 'log-jac': log_jac}
@pytest.mark.parametrize('kwargs', [{}, {'cell_type': 'GRU'}, dict(data_loader_kwargs=dict(target_normalizer=GroupNormalizer(groups=['agency', 'sku'], center=False))), dict(data_loader_kwargs=dict(lags={'volume': [2, 5]}, target='volume', time_varying_unknown_reals=['volume'], min_encoder_length=2)), dict(data_loader_kwargs=dict(time_varying_unknown_reals=['volume', 'discount'], target=['volume', 'discount'], lags={'volume': [2], 'discount': [2]}))])
def test_integration(data_with_covariates, tmp_path, kwargs):
    """Smoke-test the model end-to-end across several data-loader configurations.

    NOTE(review): the decorator line was corrupted in the source (it began with
    a bare `.parametrize`); restored to `@pytest.mark.parametrize`.
    """
    _integration(data_with_covariates, tmp_path, **kwargs)
class Factor(ModelBase):
    """Model/record type for a 'factor'; every field defaults to None until populated.

    NOTE(review): the abbreviated field names mirror an external schema not
    visible here — confirm their meanings against that schema before relying
    on any interpretation.
    """
    c = None
    id0 = None
    m = None
    nm = None
    num = None
    op = None
    sub = None
    v = None
class DehnenCoreSphericalPotential(DehnenSphericalPotential):
    """Cored Dehnen sphere: the alpha=0 special case of the Dehnen potential."""

    def __init__(self, amp=1.0, a=1.0, normalize=False, ro=None, vo=None):
        # Delegate to the general Dehnen potential with the inner slope fixed to 0.
        DehnenSphericalPotential.__init__(self, amp=amp, a=a, alpha=0, normalize=normalize, ro=ro, vo=vo)
        # C implementations exist for the potential, its derivatives, and the density.
        self.hasC = True
        self.hasC_dxdv = True
        self.hasC_dens = True
        return None

    def _evaluate(self, R, z, phi=0.0, t=0.0):
        """Potential at (R, z); phi and t are ignored (spherical, static)."""
        r = numpy.sqrt(R**2.0 + z**2.0)
        return -(1.0 - 1.0 / (1.0 + self.a / r)**2.0) / (6.0 * self.a)

    def _Rforce(self, R, z, phi=0.0, t=0.0):
        """Cylindrical radial force."""
        return -R / numpy.power(numpy.sqrt(R**2.0 + z**2.0) + self.a, 3.0) / 3.0

    def _rforce_jax(self, r):
        """Spherical-radius force with the amplitude included (JAX-friendly form)."""
        return -self._amp * r / (r + self.a)**3.0 / 3.0

    def _R2deriv(self, R, z, phi=0.0, t=0.0):
        """Second radial derivative of the potential."""
        r = numpy.sqrt(R**2.0 + z**2.0)
        return -((2.0 * R**2.0 - z**2.0 - self.a * r) / (3.0 * r * numpy.power(r + self.a, 4.0)))

    def _zforce(self, R, z, phi=0.0, t=0.0):
        """Vertical force."""
        r = numpy.sqrt(R**2.0 + z**2.0)
        return -z / numpy.power(self.a + r, 3.0) / 3.0

    def _z2deriv(self, R, z, phi=0.0, t=0.0):
        """Second vertical derivative; by spherical symmetry, _R2deriv with R and z swapped."""
        return self._R2deriv(z, R, phi=phi, t=t)

    def _Rzderiv(self, R, z, phi=0.0, t=0.0):
        """Mixed R,z second derivative."""
        a = self.a
        r = numpy.sqrt(R**2.0 + z**2.0)
        return -(R * z / r / numpy.power(a + r, 4.0))

    def _dens(self, R, z, phi=0.0, t=0.0):
        """Mass density at (R, z)."""
        r = numpy.sqrt(R**2.0 + z**2.0)
        return 1.0 / (1.0 + r / self.a)**4.0 / 4.0 / numpy.pi / self.a**3.0

    def _mass(self, R, z=None, t=0.0):
        """Mass enclosed within spherical radius R; z must be None (spherical masses only)."""
        if z is not None:
            raise AttributeError
        return 1.0 / (1.0 + self.a / R)**3.0 / 3.0
class FloatProblem(Problem[FloatSolution], ABC):
    """Base class for continuous (float-variable) optimization problems."""

    def __init__(self):
        super(FloatProblem, self).__init__()
        # Per-variable box constraints; subclasses fill these in.
        self.lower_bound = []
        self.upper_bound = []

    def number_of_variables(self) -> int:
        """Number of decision variables, derived from the bound vectors."""
        return len(self.lower_bound)

    def create_solution(self) -> FloatSolution:
        """Sample a fresh solution uniformly at random within the box bounds."""
        solution = FloatSolution(self.lower_bound, self.upper_bound, self.number_of_objectives(), self.number_of_constraints())
        solution.variables = [random.uniform(self.lower_bound[i] * 1.0, self.upper_bound[i] * 1.0) for i in range(self.number_of_variables())]
        return solution
class Categorical(nn.Module):
    """Linear head producing a categorical action distribution over logits."""

    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.num_outputs = num_outputs

        # Orthogonal weights with small gain and zero bias keep the initial
        # action distribution close to uniform.
        def init_(m):
            return init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), gain=0.01)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x):
        return FixedCategorical(logits=self.linear(x))
class LRAEncoder(FairseqEncoder):
    """Encoder for Long Range Arena tasks, dispatching on ``args.layer_type``.

    Supported backbones: 'transformer', 'lstm', 'flash', 'mega', and Luna
    (the fallback branch). Text inputs use a sparse embedding over the task
    dictionary with padding; other input types use a linear projection with
    no padding index.
    """

    def __init__(self, args, task):
        if (args.input_type == 'text'):
            dictionary = task.dictionary
            vocab_size = len(dictionary)
            padding_idx = dictionary.pad_index
            offset_positions_by_padding = True
            embedding_type = 'sparse'
        else:
            # Non-text inputs require mean-pooled sentence reps or the LSTM backbone.
            assert ((args.sen_rep_type == 'mp') or (args.layer_type == 'lstm'))
            dictionary = None
            vocab_size = None
            padding_idx = None
            offset_positions_by_padding = False
            embedding_type = 'linear'
        super().__init__(dictionary)
        self.args = args
        if (args.layer_type == 'transformer'):
            self.encoder = TransformerLRAEncoder(tie_layer_weights=getattr(args, 'tie_layer_weights', False), padding_idx=padding_idx, vocab_size=vocab_size, num_encoder_layers=args.encoder_layers, embedding_type=embedding_type, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=args.max_positions, use_position_embeddings=True, offset_positions_by_padding=offset_positions_by_padding, encoder_normalize_before=getattr(args, 'encoder_normalize_before', False), apply_bert_init=getattr(args, 'apply_bert_init', False), activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, sen_rep_type=getattr(args, 'sen_rep_type', 'cls'))
        elif (args.layer_type == 'lstm'):
            self.encoder = LSTMLRAEncoder(padding_idx=padding_idx, vocab_size=vocab_size, num_layers=args.encoder_layers, bidirectional=True, embedding_type=embedding_type, embedding_dim=args.encoder_embed_dim, hidden_dim=args.encoder_ffn_embed_dim, input_dropout=args.dropout, output_dropout=args.act_dropout, max_seq_len=args.max_positions, sen_rep_type=getattr(args, 'sen_rep_type', 'cls'))
        elif (args.layer_type == 'flash'):
            self.encoder = FlashLRAEncoder(padding_idx=padding_idx, vocab_size=vocab_size, num_encoder_layers=args.encoder_layers, embedding_type=embedding_type, embedding_dim=args.encoder_embed_dim, hidden_dim=args.encoder_hidden_dim, z_dim=args.z_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, hidden_dropout=args.act_dropout, max_seq_len=args.max_positions, sen_rep_type=getattr(args, 'sen_rep_type', 'cls'))
        elif (args.layer_type == 'mega'):
            self.encoder = MegaLRAEncoder(padding_idx=padding_idx, vocab_size=vocab_size, num_encoder_layers=args.encoder_layers, embedding_type=embedding_type, embedding_dim=args.encoder_embed_dim, hidden_dim=args.encoder_hidden_dim, ffn_hidden_dim=args.encoder_ffn_embed_dim, z_dim=args.z_dim, n_dim=args.n_dim, attention_activation=args.attention_activation_fn, dropout=args.dropout, attention_dropout=args.attention_dropout, hidden_dropout=args.act_dropout, efficient_attn=args.efficient_attention, chunk_size=getattr(args, 'chunk_size', (- 1)), moving_layer=args.moving_layer, moving_act=args.moving_act, truncation=getattr(args, 'truncation_length', None), norm_type=args.norm_type, norm_num_groups=args.norm_num_groups, norm_affine=(not args.no_affine_norm), norm_eps=args.norm_eps, rel_pos_bias=args.rel_pos_bias, max_seq_len=args.max_positions, embed_max_norm=args.embedding_max_norm, sen_rep_type=getattr(args, 'sen_rep_type', 'mp'), layer_scale=args.layer_scale, init_mode=args.init_mode)
        else:
            # Fallback: Luna (linear unified nested attention) encoder.
            self.encoder = LunaLRAEncoder(tie_layer_weights=getattr(args, 'tie_layer_weights', False), projection_length=args.encoder_projection_length, padding_idx=padding_idx, vocab_size=vocab_size, num_encoder_layers=args.encoder_layers, embedding_type=embedding_type, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, num_projected_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=args.max_positions, use_position_embeddings=True, offset_positions_by_padding=offset_positions_by_padding, layernorm_embedding=getattr(args, 'encoder_normalize_before', False), normalize_before=False, apply_bert_init=getattr(args, 'apply_bert_init', False), tie_kv=getattr(args, 'tie_kv', False), activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, embed_scale=None, sen_rep_type=getattr(args, 'sen_rep_type', 'cls'))

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        """Encode *src_tokens*; only the final state is requested from the backbone."""
        return self.encoder(src_tokens, src_lengths, last_state_only=True)
class BilinearFlatSim(nn.Module):
    """Bilinear similarity x^T W y between a sequence x and a single vector y.

    Produces one score per sequence position; masked positions are set to
    -inf so that a downstream softmax assigns them zero probability.
    """

    # NOTE: the mutable default `opt={}` is kept for interface compatibility;
    # it is only read, never mutated, so the usual aliasing hazard does not bite.
    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(BilinearFlatSim, self).__init__()
        self.opt = opt
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.linear = nn.Linear(y_size, x_size)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if (dropout is None):
            # BUG FIX: this previously read `self.prefix`, which is never set
            # and raised AttributeError; the constructor parameter is intended.
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """Score each position of x against y.

        Args:
            x: (batch, len, x_size) sequence.
            y: (batch, y_size) query vector.
            x_mask: (batch, len) boolean mask; True marks positions to exclude.

        Returns:
            (batch, len) scores with masked positions filled with -inf.
        """
        x = self.dropout(x)
        y = self.dropout(y)
        Wy = self.linear(y)
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, (- float('inf')))
        return xWy
def state_dict_to_cpu(state_dict: OrderedDict) -> OrderedDict:
    """Return a copy of *state_dict* with 'module.' stripped from keys and tensors on CPU.

    Useful for saving checkpoints from a DataParallel-wrapped model so they
    load into an unwrapped model on any device.
    """
    return OrderedDict(
        (key.replace('module.', ''), value.cpu())
        for key, value in state_dict.items()
    )
def test_deprecated_api_warning():
    """deprecated_api_warning should remap old kwarg names and reject conflicts.

    NOTE(review): the decorator line was corrupted in the source (it read
    bare `_api_warning(...)`); restored to the `@deprecated_api_warning`
    decorator this test clearly exercises.
    """
    @deprecated_api_warning(name_dict=dict(old_key='new_key'))
    def dummy_func(new_key=1):
        return new_key

    # The old keyword is transparently forwarded to the new one.
    assert (dummy_func(old_key=2) == 2)

    # Supplying both the old and the new name is ambiguous and must fail.
    with pytest.raises(AssertionError):
        dummy_func(old_key=1, new_key=2)
def read_model(path, ext=''):
    """Load a COLMAP model (cameras, images, points3D) from *path*.

    When *ext* is empty the on-disk format is auto-detected, preferring
    '.bin' over '.txt'. Returns (cameras, images, points3D), or None when
    the format cannot be determined.
    """
    if ext == '':
        if detect_model_format(path, '.bin'):
            ext = '.bin'
        elif detect_model_format(path, '.txt'):
            ext = '.txt'
        else:
            print("Provide model format: '.bin' or '.txt'")
            return
    if ext == '.txt':
        readers = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3D_binary)
    read_cams, read_imgs, read_pts = readers
    cameras = read_cams(os.path.join(path, 'cameras' + ext))
    images = read_imgs(os.path.join(path, 'images' + ext))
    points3D = read_pts(os.path.join(path, 'points3D' + ext))
    return (cameras, images, points3D)
class TrexNerLoader():
    """Loader for T-REx-style NER/relation data stored as JSON splits.

    Accumulates a label set seeded with the BIO tags ('B', 'I', 'O') and
    extended with every predicate URI encountered while loading splits.
    """

    def __init__(self):
        # Start from plain BIO tags; predicate URIs are added during loading.
        self.label_set = set()
        self.label_set.add('B')
        self.label_set.add('I')
        self.label_set.add('O')

    def _load(self, path):
        """Load one JSON split and register each triple's predicate URI as a label."""
        dataset = load_json(path)
        for data in dataset:
            triples = data['triples']
            for triple in triples:
                self.label_set.add(triple['predicate']['uri'])
        return dataset

    def load(self, path):
        """Load the train/dev/test splits from *path*.

        NOTE(review): uses '.txt' filenames while load_train below uses
        'train.json' — the two are inconsistent; confirm which naming the
        dataset on disk actually uses.
        """
        train_set = self._load(os.path.join(path, 'train.txt'))
        dev_set = self._load(os.path.join(path, 'dev.txt'))
        test_set = self._load(os.path.join(path, 'test.txt'))
        return (train_set, dev_set, test_set)

    def load_all(self, path):
        """Concatenate every JSON file in *path*, skipping 'vocabulary.txt'."""
        datasets = []
        for f in os.listdir(path):
            if (f == 'vocabulary.txt'):
                continue
            dataset = load_json(os.path.join(path, f))
            for data in dataset:
                entities = data['entities']
                for entity in entities:
                    # NOTE(review): bare `entity` is a no-op; this loop only
                    # forces a KeyError when 'entities' is missing — likely
                    # residue of removed per-entity processing. Confirm intent.
                    entity
            datasets.extend(dataset)
        return datasets

    def load_train(self, path):
        """Load only the training split (JSON file naming)."""
        train_set = self._load(os.path.join(path, 'train.json'))
        return train_set

    def load_data(self, file):
        """Load a single split from an explicit file path."""
        data_set = self._load(file)
        return data_set

    def get_labels(self):
        """Return the accumulated labels, sorted for deterministic ordering."""
        labels = list(self.label_set)
        labels.sort()
        return labels
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.