code stringlengths 101 5.91M |
|---|
def test_conf_raises_for_unaccessible_arguments():
    # NOTE(review): this looks like a sacred-style ConfigScope test — the body
    # locals of `conf_scope` (e.g. `answer`) are captured by the framework, so
    # they must not be renamed; confirm a ConfigScope wrapper is applied
    # elsewhere, since a plain call with preset=/fallback= would not match
    # this signature.
    def conf_scope(a, b, c):
        answer = 42
    # `c` is supplied neither by the preset nor the fallback, so resolving the
    # scope's arguments is expected to fail with KeyError.
    with pytest.raises(KeyError):
        conf_scope(preset={'a': 1}, fallback={'b': 2})
def inference(file, inputs, outputs):
    """Run an ONNX model with ONNX Runtime and optionally verify its outputs.

    Parameters
    ----------
    file : str
        Path to the serialized ONNX model.
    inputs : nested structure of tensors
        Model inputs; flattened before being fed to the session.
    outputs : nested structure of tensors or None
        Expected outputs. When not None, each flattened expected output is
        compared against the corresponding ONNX Runtime result.
    """
    inputs_flatten = flatten(inputs)
    inputs_flatten = update_flatten_list(inputs_flatten, [])
    outputs_flatten = flatten(outputs)
    outputs_flatten = update_flatten_list(outputs_flatten, [])
    sess = onnxruntime.InferenceSession(file)
    # Map each session input name to its numpy tensor (dict comprehension
    # instead of dict(generator); `inp` avoids shadowing the builtin `input`).
    ort_inputs = {sess.get_inputs()[i].name: to_numpy(inp)
                  for i, inp in enumerate(inputs_flatten)}
    res = sess.run(None, ort_inputs)
    if outputs is not None:
        print('== Checking model output ==')
        # Plain loop, not a throwaway list comprehension, for the
        # side-effecting assertions.
        for i, output in enumerate(outputs_flatten):
            np.testing.assert_allclose(to_numpy(output), res[i], rtol=0.001, atol=1e-05)
        print('== Done ==')
@pytest.mark.parametrize('return_dataframe', [True, False])
@pytest.mark.parametrize('embed_continuous', [True, False])
def test_bayesian_mlp_models(return_dataframe, embed_continuous):
    """Tab2Vec output width must equal the total categorical embedding dim
    plus the continuous dim (embedded or raw) for a BayesianTabMlp."""
    tab_preprocessor = TabPreprocessor(cat_embed_cols=embed_cols, continuous_cols=cont_cols)
    # Fit for its side effect: populates column_idx / cat_embed_input.
    tab_preprocessor.fit_transform(df_init)
    model = BayesianTabMlp(
        column_idx=tab_preprocessor.column_idx,
        cat_embed_input=tab_preprocessor.cat_embed_input,
        continuous_cols=tab_preprocessor.continuous_cols,
        embed_continuous=embed_continuous,
        mlp_hidden_dims=[8, 4],
    )
    t2v = Tab2Vec(model, tab_preprocessor, return_dataframe=return_dataframe)
    t2v_out, _ = t2v.fit_transform(df_t2v, target_col='target')
    # Third element of each cat_embed_input tuple is the embedding dim.
    embed_dim = sum(el[2] for el in tab_preprocessor.cat_embed_input)
    n_cont_cols = len(tab_preprocessor.continuous_cols)
    cont_dim = n_cont_cols * model.cont_embed_dim if embed_continuous else n_cont_cols
    assert t2v_out.shape[1] == embed_dim + cont_dim
def run(cmd, quit_on_error=True, shell=False):
    """Run *cmd* with subprocess, capturing stdout.

    Parameters
    ----------
    cmd : list[str] or str
        Command to execute (a string only makes sense with shell=True).
    quit_on_error : bool
        When True, terminate the interpreter with the child's return code
        if the command fails.
    shell : bool
        Forwarded to subprocess.run.

    Returns
    -------
    subprocess.CompletedProcess
        The finished process, with stdout captured as bytes.
    """
    import sys  # local import so the fix needs no file-level changes

    p = subprocess.run(cmd, shell=shell, stdout=subprocess.PIPE)
    if quit_on_error and p.returncode != 0:
        # quit() is a site.py convenience meant for interactive sessions;
        # sys.exit is the supported way to terminate with a status code.
        sys.exit(p.returncode)
    return p
class DraggableCubePolygon(DraggablePolygon):
    """Draggable canvas polygon that mirrors one cube in the simulation.

    Dragging writes the new pose back to the environment (guarded by the
    GUI's environment mutex) and triggers a canvas refresh.
    """

    def __init__(self, canvas, cube_id):
        half_width = VectorEnv.CUBE_WIDTH / 2
        unit_square = np.array([[-1, 1], [1, 1], [1, -1], [-1, -1]])
        corners = (half_width * unit_square).tolist()
        super().__init__(canvas, Polygon(corners, True, color=VectorEnv.CUBE_COLOR))
        main_window = self.canvas.main_window
        self.env = main_window.env
        self.env_mutex = main_window.env_mutex
        self.cube_id = cube_id

    def on_new_pose(self):
        """Push the dragged pose into the environment if it is not busy."""
        if not self.env_mutex.tryLock():
            return
        x, y = self.position[0], self.position[1]
        self.env.reset_cube_pose(self.cube_id, x, y, self.heading)
        self.env.step_simulation()
        self.canvas.main_window.refresh()
        self.env_mutex.unlock()
class TFRobertaForQuestionAnswering:
    """Placeholder object that fails with a helpful message when TensorFlow
    is not installed (dummy-object pattern)."""

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError-style message if TF is missing.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class SimulationActorClientDynamics(AbstractDynamics):
    """Dynamics adapter forwarding arm commands to a simulated robot."""

    def __init__(self, world, robot):
        self.world = world
        self.robot = robot

    def apply(self, state, action):
        """Send the action's arm command to the robot; produces no new state."""
        arm_cmd = action.arm_cmd
        arm_mode = action.arm_mode
        self.robot.arm(arm_cmd, arm_mode)
        return None
def order_data(data, param_list, wpgen, planner, maps, classic, quantity):
    """Annotate *data* with columns parsed from its string index, then
    re-index by '<wpgen>_<planner>' and sort.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame whose index strings embed the tokens to extract.
    param_list : dict
        Exactly two parameter names mapped to their candidate value tokens;
        one key is *quantity*, the other is detected automatically.
    wpgen, planner, maps : iterable of str
        Candidate tokens for the waypoint-generator/planner/map columns.
    classic : container of str
        Planners whose presence relabels the wpgen column as 'classic'.
    quantity : str
        The primary parameter name (a key of *param_list*).

    Returns
    -------
    pandas.DataFrame
        The annotated frame, sorted by the new index.
    """
    # The "other" parameter is simply the one key that is not `quantity`
    # (no need for the numpy boolean-mask detour).
    other_quantity = next(key for key in param_list if key != quantity)
    indices = list(data.index)
    params = [quantity, other_quantity]
    wpgen_col = ['none'] * len(indices)
    planner_col = ['none'] * len(indices)
    map_col = ['none'] * len(indices)
    parameter_col = {param: ['none'] * len(indices) for param in params}
    for i, index in enumerate(indices):
        for wp in wpgen:
            if wp in index:
                wpgen_col[i] = wp
                break
        for p in planner:
            if p in index:
                planner_col[i] = p
                # Classic planners override the waypoint-generator label.
                if p in classic:
                    wpgen_col[i] = 'classic'
                break
        for map_name in maps:  # renamed: don't shadow the builtin `map`
            if map_name in index:
                map_col[i] = map_name
                break
        for param in params:
            for p in param_list[param]:
                if p in index:
                    parameter_col[param][i] = p
                    break
    data['wpgen'] = wpgen_col
    data['planner'] = planner_col
    data['map'] = map_col
    for param in params:
        data[param] = parameter_col[param]
    # Build the new index from the freshly computed columns directly,
    # avoiding chained positional lookups on the DataFrame.
    data.index = ['{0}_{1}'.format(w, p) for w, p in zip(wpgen_col, planner_col)]
    return data.sort_index()
class PixelNormalize(FeatureTransformer):
    """Feature transformer that normalizes pixels by per-channel means."""

    def __init__(self, means, bigdl_type='float'):
        # Forward the means straight to the backend transformer.
        super().__init__(bigdl_type, means)
class TestNuScenesLidarseg(unittest.TestCase):
    """Sanity checks on the nuScenes-lidarseg class and colormap tables."""

    def setUp(self):
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        data_root = os.environ['NUSCENES']
        self.nusc = NuScenes(version='v1.0-mini', dataroot=data_root, verbose=False)

    def test_num_classes(self) -> None:
        """There are exactly 32 lidarseg classes."""
        self.assertEqual(len(self.nusc.lidarseg_idx2name_mapping), 32)

    def test_num_colors(self) -> None:
        """Every class has exactly one colormap entry."""
        self.assertEqual(len(self.nusc.colormap),
                         len(self.nusc.lidarseg_idx2name_mapping))

    def test_classes(self) -> None:
        """Colormap keys are ordered consistently with the class indices."""
        colormap_classes = list(self.nusc.colormap.keys())
        for name, idx in self.nusc.lidarseg_name2idx_mapping.items():
            self.assertEqual(name, colormap_classes[idx])
class GroundtruthFilterTest(tf.test.TestCase):
    """Tests for ops.retain_groundtruth: only the rows selected by
    `valid_indices` must survive in every groundtruth field.

    Fix: np.float / np.bool / np.string_ are deprecated aliases that were
    removed in NumPy 1.24+/2.0 — replaced with float / bool / np.bytes_
    (byte-for-byte equivalent dtypes).
    """

    def test_filter_groundtruth(self):
        """Full field set is filtered down to the single valid index."""
        input_image = tf.placeholder(tf.float32, shape=(None, None, 3))
        input_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        input_classes = tf.placeholder(tf.int32, shape=(None,))
        input_is_crowd = tf.placeholder(tf.bool, shape=(None,))
        input_area = tf.placeholder(tf.float32, shape=(None,))
        input_difficult = tf.placeholder(tf.float32, shape=(None,))
        input_label_types = tf.placeholder(tf.string, shape=(None,))
        valid_indices = tf.placeholder(tf.int32, shape=(None,))
        input_tensors = {
            fields.InputDataFields.image: input_image,
            fields.InputDataFields.groundtruth_boxes: input_boxes,
            fields.InputDataFields.groundtruth_classes: input_classes,
            fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
            fields.InputDataFields.groundtruth_area: input_area,
            fields.InputDataFields.groundtruth_difficult: input_difficult,
            fields.InputDataFields.groundtruth_label_types: input_label_types,
        }
        output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
        image_tensor = np.random.rand(224, 224, 3)
        feed_dict = {
            input_image: image_tensor,
            input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=float),
            input_classes: np.array([1, 2], dtype=np.int32),
            input_is_crowd: np.array([False, True], dtype=bool),
            input_area: np.array([32, 48], dtype=np.float32),
            input_difficult: np.array([True, False], dtype=bool),
            input_label_types: np.array(['APPROPRIATE', 'INCORRECT'], dtype=np.bytes_),
            valid_indices: np.array([0], dtype=np.int32),
        }
        expected_tensors = {
            fields.InputDataFields.image: image_tensor,
            fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
            fields.InputDataFields.groundtruth_classes: [1],
            fields.InputDataFields.groundtruth_is_crowd: [False],
            fields.InputDataFields.groundtruth_area: [32],
            fields.InputDataFields.groundtruth_difficult: [True],
            fields.InputDataFields.groundtruth_label_types: ['APPROPRIATE'],
        }
        with self.test_session() as sess:
            output_tensors = sess.run(output_tensors, feed_dict=feed_dict)
            # Float-valued fields use approximate comparison.
            for key in [fields.InputDataFields.image,
                        fields.InputDataFields.groundtruth_boxes,
                        fields.InputDataFields.groundtruth_area]:
                self.assertAllClose(expected_tensors[key], output_tensors[key])
            for key in [fields.InputDataFields.groundtruth_classes,
                        fields.InputDataFields.groundtruth_is_crowd,
                        fields.InputDataFields.groundtruth_label_types]:
                self.assertAllEqual(expected_tensors[key], output_tensors[key])

    def test_filter_with_missing_fields(self):
        """Filtering works when only a subset of groundtruth fields exists."""
        input_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        input_classes = tf.placeholder(tf.int32, shape=(None,))
        input_tensors = {
            fields.InputDataFields.groundtruth_boxes: input_boxes,
            fields.InputDataFields.groundtruth_classes: input_classes,
        }
        valid_indices = tf.placeholder(tf.int32, shape=(None,))
        feed_dict = {
            input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=float),
            input_classes: np.array([1, 2], dtype=np.int32),
            valid_indices: np.array([0], dtype=np.int32),
        }
        expected_tensors = {
            fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
            fields.InputDataFields.groundtruth_classes: [1],
        }
        output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
        with self.test_session() as sess:
            output_tensors = sess.run(output_tensors, feed_dict=feed_dict)
            for key in [fields.InputDataFields.groundtruth_boxes]:
                self.assertAllClose(expected_tensors[key], output_tensors[key])
            for key in [fields.InputDataFields.groundtruth_classes]:
                self.assertAllEqual(expected_tensors[key], output_tensors[key])

    def test_filter_with_empty_fields(self):
        """Empty (zero-length) fields stay empty after filtering."""
        input_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        input_classes = tf.placeholder(tf.int32, shape=(None,))
        input_is_crowd = tf.placeholder(tf.bool, shape=(None,))
        input_area = tf.placeholder(tf.float32, shape=(None,))
        input_difficult = tf.placeholder(tf.float32, shape=(None,))
        valid_indices = tf.placeholder(tf.int32, shape=(None,))
        input_tensors = {
            fields.InputDataFields.groundtruth_boxes: input_boxes,
            fields.InputDataFields.groundtruth_classes: input_classes,
            fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
            fields.InputDataFields.groundtruth_area: input_area,
            fields.InputDataFields.groundtruth_difficult: input_difficult,
        }
        output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
        feed_dict = {
            input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=float),
            input_classes: np.array([1, 2], dtype=np.int32),
            input_is_crowd: np.array([False, True], dtype=bool),
            input_area: np.array([], dtype=np.float32),
            input_difficult: np.array([], dtype=np.float32),
            valid_indices: np.array([0], dtype=np.int32),
        }
        expected_tensors = {
            fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
            fields.InputDataFields.groundtruth_classes: [1],
            fields.InputDataFields.groundtruth_is_crowd: [False],
            fields.InputDataFields.groundtruth_area: [],
            fields.InputDataFields.groundtruth_difficult: [],
        }
        with self.test_session() as sess:
            output_tensors = sess.run(output_tensors, feed_dict=feed_dict)
            for key in [fields.InputDataFields.groundtruth_boxes,
                        fields.InputDataFields.groundtruth_area]:
                self.assertAllClose(expected_tensors[key], output_tensors[key])
            for key in [fields.InputDataFields.groundtruth_classes,
                        fields.InputDataFields.groundtruth_is_crowd]:
                self.assertAllEqual(expected_tensors[key], output_tensors[key])

    def test_filter_with_empty_groundtruth_boxes(self):
        """With no groundtruth at all, all outputs keep zero-length shapes."""
        input_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        input_classes = tf.placeholder(tf.int32, shape=(None,))
        input_is_crowd = tf.placeholder(tf.bool, shape=(None,))
        input_area = tf.placeholder(tf.float32, shape=(None,))
        input_difficult = tf.placeholder(tf.float32, shape=(None,))
        valid_indices = tf.placeholder(tf.int32, shape=(None,))
        input_tensors = {
            fields.InputDataFields.groundtruth_boxes: input_boxes,
            fields.InputDataFields.groundtruth_classes: input_classes,
            fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
            fields.InputDataFields.groundtruth_area: input_area,
            fields.InputDataFields.groundtruth_difficult: input_difficult,
        }
        output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
        feed_dict = {
            input_boxes: np.array([], dtype=float).reshape(0, 4),
            input_classes: np.array([], dtype=np.int32),
            input_is_crowd: np.array([], dtype=bool),
            input_area: np.array([], dtype=np.float32),
            input_difficult: np.array([], dtype=np.float32),
            valid_indices: np.array([], dtype=np.int32),
        }
        with self.test_session() as sess:
            output_tensors = sess.run(output_tensors, feed_dict=feed_dict)
            for key in input_tensors:
                if (key == fields.InputDataFields.groundtruth_boxes):
                    self.assertAllEqual([0, 4], output_tensors[key].shape)
                else:
                    self.assertAllEqual([0], output_tensors[key].shape)
def ThresholdSumOther4(array):
    """Scale *array* with scaling4, zero every entry above the per-column
    median, and return per-row sums of what remains."""
    scaled = scaling4(array)
    column_medians = np.median(scaled, axis=0)
    # NOTE(review): the original called this mask "lower_threshold_indices",
    # but it selects entries strictly ABOVE the median before zeroing.
    above_median = scaled > column_medians
    scaled[above_median] = 0
    # Summing axis 0 of the transpose == summing each original row (2-D case).
    return np.sum(scaled.T, axis=0)
def get_parser():
    """Build the MeTRAbs command-line argument parser.

    Groups (in order): run control, augmentation, dataset splits, training
    schedule/optimizer, batching, model architecture, and loss weighting.
    Fix: the --force-grad-accum help text was missing spaces between
    sentences ("=1.Useful", "offand").

    Returns
    -------
    argparse.ArgumentParser
        The fully configured parser (abbreviations disabled).
    """
    parser = argparse.ArgumentParser(description='MeTRAbs 3D Human Pose Estimator', allow_abbrev=False)
    parser.add_argument('--comment', type=str, default=None)
    parser.add_argument('--seed', type=int, default=1, help='Seed for the random number generators')
    parser.add_argument('--wandb-project', type=str, default='metrabs')
    parser.add_argument('--workers', type=int, default=None, help='Number of parallel workers to run. Default is min(12, num_cpus)')
    parser.add_argument('--multi-gpu', action=spu.argparse.BoolAction)
    parser.add_argument('--train', action=spu.argparse.BoolAction, help='Train the model.')
    parser.add_argument('--predict', action=spu.argparse.BoolAction, help='Test the model.')
    parser.add_argument('--export-file', type=str, help='Export filename.')
    parser.add_argument('--pred-path', type=str, default=None)
    parser.add_argument('--viz', action=spu.argparse.BoolAction, help='Create graphical user interface for visualization.')
    parser.add_argument('--load-path', type=str, default=None, help='Path of model checkpoint to load in the beginning.')
    parser.add_argument('--checkpoint-dir', type=str, default=None, help='Directory path of model checkpoints.')
    parser.add_argument('--init-path', type=str, default=None, help='Path of the pretrained checkpoint to initialize from once\n at the very start of training (i.e. not when resuming!).\n To restore for resuming an existing training use the --load-path option.')
    parser.add_argument('--proc-side', type=int, default=256, help='Side length of image as processed by network.')
    # --- augmentation options ---
    parser.add_argument('--geom-aug', action=spu.argparse.BoolAction, default=True, help='Training data augmentations such as rotation, scaling, translation etc.')
    parser.add_argument('--test-aug', action=spu.argparse.BoolAction, help='Apply augmentations to test images.')
    parser.add_argument('--rot-aug', type=float, help='Rotation augmentation in degrees.', default=20)
    parser.add_argument('--scale-aug-up', type=float, help='Scale augmentation in percent.', default=25)
    parser.add_argument('--scale-aug-down', type=float, help='Scale augmentation in percent.', default=25)
    parser.add_argument('--shift-aug', type=float, help='Shift augmentation in percent.', default=10)
    parser.add_argument('--partial-visibility-prob', type=float, default=0)
    parser.add_argument('--occlude-aug-prob', type=float, default=0.5)
    parser.add_argument('--occlude-aug-prob-2d', type=float, default=0.7)
    parser.add_argument('--occlude-aug-scale', type=float, default=1)
    parser.add_argument('--background-aug-prob', type=float, default=0.7)
    parser.add_argument('--color-aug', action=spu.argparse.BoolAction, default=True)
    parser.add_argument('--test-time-mirror-aug', action=spu.argparse.BoolAction)
    parser.add_argument('--full-rot-aug-prob', type=float, default=0)
    parser.add_argument('--antialias-train', type=int, default=1)
    parser.add_argument('--antialias-test', type=int, default=1)
    parser.add_argument('--image-interpolation-train', type=str, default='linear')
    parser.add_argument('--image-interpolation-test', type=str, default='linear')
    # --- dataset splits ---
    parser.add_argument('--test-subjects', type=str, default=None, help='Test subjects.')
    parser.add_argument('--valid-subjects', type=str, default=None, help='Validation subjects.')
    parser.add_argument('--train-subjects', type=str, default=None, help='Training subjects.')
    parser.add_argument('--train-on', type=str, default='train', help='Training part.')
    parser.add_argument('--validate-on', type=str, default='valid', help='Validation part.')
    parser.add_argument('--test-on', type=str, default='test', help='Test part.')
    # --- training schedule and optimizer ---
    parser.add_argument('--dtype', type=str, default='float16', choices=['float16', 'float32', 'bfloat16'], help='The floating point type to use for computations.')
    parser.add_argument('--validate-period', type=int, default=None, help='Periodically validate during training, every this many steps.')
    parser.add_argument('--checkpoint-period', type=int, default=2000)
    parser.add_argument('--training-steps', type=int)
    parser.add_argument('--weight-decay', type=float, default=0.003)
    parser.add_argument('--base-learning-rate', type=float, default=0.0002121, help='Learning rate of the optimizer.')
    parser.add_argument('--dual-finetune-lr', action=spu.argparse.BoolAction)
    parser.add_argument('--loss-scale', type=float, default=128)
    parser.add_argument('--dynamic-loss-scale', action=spu.argparse.BoolAction)
    parser.add_argument('--ema-momentum', type=float, default=1, help='The momentum of the exponential moving average in Polyak averaging.')
    parser.add_argument('--grad-accum-steps', type=int, default=1)
    parser.add_argument('--force-grad-accum', action=spu.argparse.BoolAction, help='Use a GradientAccumulationOptimizer even when grad-accum-steps=1. Useful for being able to switch gradient accumulation on and off and have the checkpoints still work.')
    parser.add_argument('--finetune-in-inference-mode', type=int, default=0)
    parser.add_argument('--constrain-kernel-norm', type=float, default=20)
    # --- batching ---
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--batch-size-test', type=int, default=150)
    parser.add_argument('--batch-size-2d', type=int, default=32)
    parser.add_argument('--ghost-bn', type=str, default='')
    parser.add_argument('--data-format', type=str, default='NHWC', choices=['NHWC', 'NCHW'], help='Data format used internally.')
    parser.add_argument('--stride-train', type=int, default=32)
    parser.add_argument('--stride-test', type=int, default=32)
    parser.add_argument('--centered-stride', action=spu.argparse.BoolAction, default=True)
    # --- datasets and model architecture ---
    parser.add_argument('--dataset2d', type=str, default='mpii', action=spu.argparse.HyphenToUnderscoreAction)
    parser.add_argument('--dataset3d', type=str, default='h36m', action=spu.argparse.HyphenToUnderscoreAction)
    parser.add_argument('--image-barecat-path', type=str)
    parser.add_argument('--model-joints', type=str, default=None, action=spu.argparse.HyphenToUnderscoreAction)
    parser.add_argument('--output-joints', type=str, default=None, action=spu.argparse.HyphenToUnderscoreAction)
    parser.add_argument('--bone-length-dataset', type=str)
    parser.add_argument('--model-class', type=str, default='Metrabs')
    parser.add_argument('--backbone', type=str, default='efficientnetv2-s', help='Backbone of the predictor network.')
    parser.add_argument('--depth', type=int, default=8, help='Number of voxels along the z axis for volumetric prediction')
    parser.add_argument('--box-size-mm', type=float, default=2200)
    parser.add_argument('--universal-skeleton', action=spu.argparse.BoolAction)
    parser.add_argument('--weak-perspective', action=spu.argparse.BoolAction)
    parser.add_argument('--mix-3d-inside-fov', type=float, default=0.5)
    parser.add_argument('--mean-relative', action=spu.argparse.BoolAction, default=True)
    # --- loss weighting ---
    parser.add_argument('--loss2d-factor', type=float, default=0.2)
    parser.add_argument('--absloss-factor', type=float, default=0.1)
    parser.add_argument('--absloss-start-step', type=int, default=5000)
    parser.add_argument('--affine-weights', type=str)
    parser.add_argument('--load-backbone-from', type=str)
    parser.add_argument('--regularize-to-manifold', action=spu.argparse.BoolAction)
    parser.add_argument('--loss-manif-factor', type=float, default=1)
    parser.add_argument('--loss-manif-factor2d', type=float, default=1)
    parser.add_argument('--transform-coords', action=spu.argparse.BoolAction)
    parser.add_argument('--predict-all-and-latents', action=spu.argparse.BoolAction)
    parser.add_argument('--teacher-loss-factor', type=float, default=1)
    parser.add_argument('--teacher-start-step', type=int, default=5000)
    parser.add_argument('--allhead-aegt-loss-factor', type=float, default=1)
    parser.add_argument('--stop-gradient-latent', action=spu.argparse.BoolAction, default=True)
    return parser
def np2pil(arr: ty.A, /) -> Image:
    """Convert a numpy array to a PIL Image.

    uint8 arrays are wrapped as-is; other dtypes are assumed to hold values
    in [0, 1] and are rescaled to uint8.
    """
    if arr.dtype == np.uint8:
        return Image.fromarray(arr)
    assert arr.max() <= 1
    rescaled = (arr * 255).astype(np.uint8)
    return Image.fromarray(rescaled)
def DirectedEdgeDetect(alpha=0, direction=(0.0, 1.0), name=None, deterministic=False, random_state=None):
    """Create an augmenter that blends a directed edge-detection result into images.

    Parameters
    ----------
    alpha : number or tuple or list
        Blend factor between the unchanged image (0.0) and the edge image
        (1.0); tuples become uniform ranges, lists become choices.
    direction : number or tuple or list
        Edge direction as a fraction of a full circle (multiplied by 360
        below); same stochastic-parameter handling as `alpha`.
    name : str or None
        Augmenter name; auto-generated from the caller when None.
    deterministic, random_state
        Standard augmenter bookkeeping, forwarded to Convolve.

    Returns
    -------
    Convolve
        Augmenter that applies a per-image, per-channel 3x3 kernel.
    """
    alpha_param = iap.handle_continuous_param(alpha, 'alpha', value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
    direction_param = iap.handle_continuous_param(direction, 'direction', value_range=None, tuple_to_uniform=True, list_to_choice=True)
    def create_matrices(_image, nb_channels, random_state_func):
        # Sample this image's blend factor and direction.
        alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert((0 <= alpha_sample <= 1.0))
        direction_sample = direction_param.draw_sample(random_state=random_state_func)
        # Map the direction fraction to an integer angle, then to a unit
        # vector; the -pi/2 shift makes direction 0.0 point "up".
        deg = (int((direction_sample * 360)) % 360)
        rad = np.deg2rad(deg)
        x = np.cos((rad - (0.5 * np.pi)))
        y = np.sin((rad - (0.5 * np.pi)))
        direction_vector = np.array([x, y])
        # Weight each of the 8 neighbors by its angular similarity to the
        # sampled direction (center cell stays 0 for now).
        matrix_effect = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float32)
        for x in [(- 1), 0, 1]:
            for y in [(- 1), 0, 1]:
                if ((x, y) != (0, 0)):
                    cell_vector = np.array([x, y])
                    distance_deg = np.rad2deg(ia.angle_between_vectors(cell_vector, direction_vector))
                    distance = (distance_deg / 180)
                    # Quartic falloff: only near-aligned neighbors get weight.
                    similarity = ((1 - distance) ** 4)
                    matrix_effect[((y + 1), (x + 1))] = similarity
        # Normalize the neighbor weights, negate them, and set the center to
        # +1 so the kernel responds to intensity change along the direction.
        matrix_effect = (matrix_effect / np.sum(matrix_effect))
        matrix_effect = (matrix_effect * (- 1))
        matrix_effect[(1, 1)] = 1
        matrix_nochange = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
        # Blend identity kernel and edge kernel by the sampled alpha.
        matrix = (((1 - alpha_sample) * matrix_nochange) + (alpha_sample * matrix_effect))
        # One identical kernel per channel.
        return ([matrix] * nb_channels)
    if (name is None):
        name = ('Unnamed%s' % (ia.caller_name(),))
    return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state)
def bn(channel):
    """Create a BatchNorm2d layer with weight initialized to 1 and bias to 0.

    Parameters
    ----------
    channel : int
        Number of feature channels for the batch-norm layer.

    Returns
    -------
    nn.BatchNorm2d
        The initialized layer.
    """
    layer = nn.BatchNorm2d(channel)
    # nn.init.constant is the deprecated alias; constant_ is the in-place API.
    nn.init.constant_(layer.weight, 1)
    nn.init.constant_(layer.bias, 0)
    return layer
def main():
    """Entry point: load a GLUE-finetuned model, compute attention-head
    importance on the dev set, and optionally mask/prune heads.

    Flow: parse args -> optional remote debugger -> device/distributed
    setup -> logging/seed -> task and model loading -> eval dataloader ->
    head-importance computation and optional masking/pruning.
    """
    # ---- argument parsing ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument('--task_name', default=None, type=str, required=True, help=('The name of the task to train selected in the list: ' + ', '.join(glue_processors.keys())))
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path')
    parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path')
    parser.add_argument('--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co')
    parser.add_argument('--data_subset', type=int, default=(- 1), help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers")
    parser.add_argument('--dont_normalize_global_importance', action='store_true', help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument('--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.')
    parser.add_argument('--masking_threshold', default=0.9, type=float, help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).')
    parser.add_argument('--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, sequences shorter padded.')
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    # ---- optional remote debugger (blocks until the debugger attaches) ----
    if (args.server_ip and args.server_port):
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # ---- device / distributed setup ----
    # local_rank == -1 means single-process (possibly multi-GPU via DataParallel).
    if ((args.local_rank == (- 1)) or args.no_cuda):
        args.device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        # Distributed: one process per GPU, NCCL backend.
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    # ---- logging and seeding (verbose only on the main process) ----
    logging.basicConfig(level=(logging.INFO if (args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool((args.local_rank != (- 1)))))
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    set_seed(args.seed)
    # ---- task setup ----
    args.task_name = args.task_name.lower()
    if (args.task_name not in glue_processors):
        raise ValueError(('Task not found: %s' % args.task_name))
    processor = glue_processors[args.task_name]()
    args.output_mode = glue_output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # ---- model loading (attentions enabled for head-importance analysis) ----
    config = AutoConfig.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), num_labels=num_labels, finetuning_task=args.task_name, output_attentions=True, cache_dir=args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_name_or_path), cache_dir=args.cache_dir)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=args.cache_dir)
    model.to(args.device)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif (args.n_gpu > 1):
        model = nn.DataParallel(model)
    # ---- persist run configuration ----
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
    # ---- eval dataloader (dev split, optionally subsampled) ----
    eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode='dev')
    if (args.data_subset > 0):
        eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset)))))
    eval_sampler = (SequentialSampler(eval_dataset) if (args.local_rank == (- 1)) else DistributedSampler(eval_dataset))
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator)
    # ---- head importance, then optional masking and pruning ----
    compute_heads_importance(args, model, eval_dataloader)
    if (args.try_masking and (args.masking_threshold > 0.0) and (args.masking_threshold < 1.0)):
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
def test_brace_initialization():
    """Members set via C++ brace-initialization round-trip to Python."""
    brace_obj = m.BraceInitialization(123, 'test')
    assert brace_obj.field1 == 123
    assert brace_obj.field2 == 'test'
    no_brace_obj = m.NoBraceInitialization([123, 456])
    assert no_brace_obj.vec == [123, 456]
class MaxIteration(JavaValue):
    """Stop criterion that ends training after *max* iterations."""

    def __init__(self, max, bigdl_type='float'):
        # `max` shadows the builtin, but it is part of the public signature
        # (callers may pass it by keyword), so it must keep its name.
        super().__init__(None, bigdl_type, max)
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Return (lower, upper) percentile bounds of a 1-D tensor.

    Both bounds are k-th order statistics via torch.kthvalue; the lower one
    is taken on the negated tensor so the same primitive serves both ends.
    A lower_percentile of 0 yields a zero lower bound. With
    output_tensor=False the bounds are returned as Python numbers.
    """
    n = input.shape[0]
    upper_index = round(n * upper_percentile * 0.01)
    upper_bound = torch.kthvalue(input, k=upper_index).values
    if lower_percentile == 0:
        lower_bound = upper_bound * 0
    else:
        lower_index = round(n * (1 - lower_percentile * 0.01))
        lower_bound = -torch.kthvalue(-input, k=lower_index).values
    if output_tensor:
        return (lower_bound, upper_bound)
    return (lower_bound.item(), upper_bound.item())
def build_inference_based_loader(cfg: CfgNode, dataset_cfg: CfgNode, model: torch.nn.Module, embedder: Optional[torch.nn.Module]=None) -> InferenceBasedLoader:
    """Build an InferenceBasedLoader that feeds images from the dataset's
    image loader through *model* to bootstrap training data."""
    dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER)
    meta = MetadataCatalog.get(dataset_cfg.DATASET)
    sampler = TrainingSampler(len(dataset))
    image_loader_cfg = dataset_cfg.IMAGE_LOADER
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=image_loader_cfg.BATCH_SIZE,
        sampler=sampler,
        num_workers=image_loader_cfg.NUM_WORKERS,
        collate_fn=trivial_batch_collator,
        worker_init_fn=worker_init_reset_seed,
    )
    inference_cfg = dataset_cfg.INFERENCE
    return InferenceBasedLoader(
        model,
        data_loader=data_loader,
        data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder),
        data_filter=build_data_filter(dataset_cfg.FILTER),
        shuffle=True,
        batch_size=inference_cfg.OUTPUT_BATCH_SIZE,
        inference_batch_size=inference_cfg.INPUT_BATCH_SIZE,
        category_to_class_mapping=meta.category_to_class_mapping,
    )
def _get_creation_string() -> str:
argv = sys.argv
argv[0] = argv[0].split('/')[(- 1)]
return ('python ' + ' '.join(argv)) |
def get_model_loader(filename):
    """Return a session-init object restoring model weights from *filename*.

    .npy files are treated as pickled parameter dicts (ParamRestore);
    anything else is assumed to be a TF checkpoint (SaverRestore).
    """
    if filename.endswith('.npy'):
        assert os.path.isfile(filename), filename
        # allow_pickle=True is required since NumPy 1.16.3: the .npy holds a
        # pickled dict and np.load now refuses pickles by default.
        param_dict = np.load(filename, encoding='latin1', allow_pickle=True).item()
        return ParamRestore(param_dict)
    else:
        return SaverRestore(filename)
class ConvE(torch.nn.Module):
    """Convolutional knowledge-graph embedding scorer (ConvE-style).

    Entity and relation embeddings are reshaped to 1x10x20 maps, stacked
    into a 1x20x20 input, convolved, projected back to entity space and
    scored against every entity with a sigmoid.

    Parameters
    ----------
    d : object
        Dataset exposing `entities` and `relations` sequences.
    d1 : int
        Entity embedding dim (must be 200 to match the 10x20 reshape).
    d2 : int
        Relation embedding dim (must be 200 likewise).
    **kwargs
        in_channels, out_channels, filt_h, filt_w, input_dropout,
        hidden_dropout, feature_map_dropout.
    """

    def __init__(self, d, d1, d2, **kwargs):
        super(ConvE, self).__init__()
        self.in_channels = kwargs['in_channels']
        self.out_channels = kwargs['out_channels']
        self.filt_h = kwargs['filt_h']
        self.filt_w = kwargs['filt_w']
        self.E = torch.nn.Embedding(len(d.entities), d1, padding_idx=0)
        self.R = torch.nn.Embedding(len(d.relations), d2, padding_idx=0)
        self.inp_drop = torch.nn.Dropout(kwargs['input_dropout'])
        self.hidden_drop = torch.nn.Dropout(kwargs['hidden_dropout'])
        self.feature_map_drop = torch.nn.Dropout2d(kwargs['feature_map_dropout'])
        self.loss = torch.nn.BCELoss()
        self.conv1 = torch.nn.Conv2d(self.in_channels, self.out_channels, (self.filt_h, self.filt_w), 1, 0, bias=True)
        self.bn0 = torch.nn.BatchNorm2d(self.in_channels)
        self.bn1 = torch.nn.BatchNorm2d(self.out_channels)
        self.bn2 = torch.nn.BatchNorm1d(d1)
        # Per-entity score bias.
        self.register_parameter('b', Parameter(torch.zeros(len(d.entities))))
        # Valid-convolution output of the 20x20 stacked map, flattened.
        fc_length = ((((20 - self.filt_h) + 1) * ((20 - self.filt_w) + 1)) * self.out_channels)
        self.fc = torch.nn.Linear(fc_length, d1)

    def init(self):
        """Xavier-initialize the embedding tables (call once before training)."""
        xavier_normal_(self.E.weight.data)
        xavier_normal_(self.R.weight.data)

    def forward(self, e1_idx, r_idx):
        """Score every entity as the object of (e1, r); returns (batch, n_entities) probabilities."""
        e1 = self.E(e1_idx).view((- 1), 1, 10, 20)
        r = self.R(r_idx).view((- 1), 1, 10, 20)
        # Stack subject and relation maps vertically: (batch, 1, 20, 20).
        x = torch.cat([e1, r], 2)
        x = self.bn0(x)
        x = self.inp_drop(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.feature_map_drop(x)
        x = x.view(e1.size(0), (- 1))
        x = self.fc(x)
        x = self.hidden_drop(x)
        x = self.bn2(x)
        x = F.relu(x)
        # Score against all entity embeddings at once.
        x = torch.mm(x, self.E.weight.transpose(1, 0))
        x += self.b.expand_as(x)
        # F.sigmoid is deprecated; torch.sigmoid is the supported API.
        pred = torch.sigmoid(x)
        return pred
_task('speech_text_joint_to_text')
class SpeechTextJointToTextTask(SpeechToTextTask):
    """Fairseq task that trains speech-to-text translation jointly with
    parallel text data, mixing the two modalities across updates.

    NOTE(review): ``_task(...)`` above looks like a stripped ``@register_task``
    decorator, and ``add_args``/``setup_task`` (which take ``cls``) as well as
    ``target_dictionary``/``source_dictionary`` appear to have lost their
    ``@classmethod``/``@property`` decorators — confirm against the upstream
    fairseq source before editing.
    """

    def add_args(cls, parser):
        """Add joint speech/text CLI options on top of SpeechToTextTask's."""
        super(SpeechTextJointToTextTask, cls).add_args(parser)
        parser.add_argument('--parallel-text-data', default='', help='path to parallel text data directory')
        parser.add_argument('--max-tokens-text', type=int, metavar='N', help='maximum tokens for encoder text input ')
        parser.add_argument('--max-positions-text', type=int, metavar='N', default=400, help='maximum tokens for per encoder text input ')
        parser.add_argument('--langpairs', default=None, metavar='S', help='language pairs for text training, separated with ","')
        parser.add_argument('--speech-sample-ratio', default=1, type=float, metavar='N', help='Multiple Ratio for speech dataset with transcripts ')
        parser.add_argument('--text-sample-ratio', default=1, type=float, metavar='N', help='Multiple Ratio for text set ')
        parser.add_argument('--update-mix-data', action='store_true', help='use mixed data in one update when update-freq > 1')
        parser.add_argument('--load-speech-only', action='store_true', help='load speech data only')
        parser.add_argument('--mask-text-ratio', type=float, metavar='V', default=0.0, help='mask V source tokens for text only mode')
        parser.add_argument('--mask-text-type', default='random', choices=['random', 'tail'], help='mask text typed')
        parser.add_argument('--noise-token', default='', help='noise token for masking src text tokens if mask-text-ratio > 0')
        parser.add_argument('--infer-target-lang', default='', metavar='S', help='target language for inference')

    def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None):
        super().__init__(args, tgt_dict)
        self.src_dict = src_dict
        self.data_cfg = S2TJointDataConfig((Path(args.data) / args.config_yaml))
        # Mixed-modality batching requires the two vocabularies to agree on
        # the pad and eos symbol ids.
        assert (self.tgt_dict.pad() == self.src_dict.pad())
        assert (self.tgt_dict.eos() == self.src_dict.eos())
        self.speech_only = args.load_speech_only
        # Optional lang-tag id used as BOS during inference (see setup_task).
        self._infer_tgt_lang_id = infer_tgt_lang_id

    def setup_task(cls, args, **kwargs):
        """Load source/target dictionaries from the data config and build the task."""
        data_cfg = S2TJointDataConfig((Path(args.data) / args.config_yaml))
        tgt_dict_path = (Path(args.data) / data_cfg.vocab_filename)
        src_dict_path = (Path(args.data) / data_cfg.src_vocab_filename)
        if ((not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path))):
            raise FileNotFoundError('Dict not found: {}'.format(args.data))
        src_dict = Dictionary.load(src_dict_path.as_posix())
        tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
        print('| src dictionary: {} types'.format(len(src_dict)))
        print('| tgt dictionary: {} types'.format(len(tgt_dict)))
        if (args.parallel_text_data != ''):
            # Resolve the parallel-text directory relative to the data root.
            if (not os.path.isabs(args.parallel_text_data)):
                args.parallel_text_data = os.path.join(args.data, args.parallel_text_data)
            if (args.langpairs is None):
                raise Exception('Could not infer language pair, please provide it explicitly')
        infer_tgt_lang_id = None
        if ((args.infer_target_lang != '') and data_cfg.prepend_tgt_lang_tag_no_change):
            # Resolve the target-language tag to a dictionary id for decoding.
            tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format(args.infer_target_lang)
            infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag)
            assert (infer_tgt_lang_id != tgt_dict.unk())
        return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id)

    def load_langpair_dataset(self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0):
        """Build the parallel-text training dataset for all --langpairs,
        optionally resampling pairs by size with temperature ``sampling_alpha``."""
        lang_pairs = []
        text_dataset = None
        split = 'train'
        for lp in self.args.langpairs.split(','):
            (src, tgt) = lp.split('-')
            text_dataset = load_langpair_dataset(self.args.parallel_text_data, split, src, self.src_dict, tgt, self.tgt_dict, combine=True, dataset_impl=None, upsample_primary=1, left_pad_source=False, left_pad_target=False, max_source_positions=self.args.max_positions_text, max_target_positions=self.args.max_target_positions, load_alignments=False, truncate_source=False)
            if prepend_tgt_lang_tag:
                # Replace target BOS with the language tag so the decoder is
                # conditioned on the target language.
                text_dataset = TransformEosLangPairDataset(text_dataset, src_eos=self.src_dict.eos(), tgt_bos=self.tgt_dict.eos(), new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)))
            lang_pairs.append(text_dataset)
        if (len(lang_pairs) > 1):
            if (sampling_alpha != 1.0):
                size_ratios = SpeechToTextDatasetCreator.get_size_ratios(self.args.langpairs.split(','), [len(s) for s in lang_pairs], alpha=sampling_alpha)
                lang_pairs = [ResamplingDataset(d, size_ratio=r, epoch=epoch, replace=(r >= 1.0)) for (d, r) in zip(lang_pairs, size_ratios)]
            return ConcatDataset(lang_pairs)
        return text_dataset

    def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
        """Generate translations; uses the inferred target-lang id as BOS if set."""
        with torch.no_grad():
            return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=self._infer_tgt_lang_id)

    def build_src_tokenizer(self, args):
        """Build the pre-tokenizer for source-side transcripts."""
        logger.info(f'src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}')
        return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer))

    def build_src_bpe(self, args):
        """Build the subword (BPE) tokenizer for source-side transcripts."""
        logger.info(f'tokenizer: {self.data_cfg.src_bpe_tokenizer}')
        return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load the speech dataset for ``split``; for training splits also load
        parallel text (if configured) and wrap both in a MultiModalityDataset."""
        is_train_split = split.startswith('train')
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        src_pre_tokenizer = self.build_src_tokenizer(self.args)
        src_bpe_tokenizer = self.build_src_bpe(self.args)
        ast_dataset = SpeechToTextJointDatasetCreator.from_tsv(self.args.data, self.data_cfg, split, self.tgt_dict, src_dict=(None if self.speech_only else self.src_dict), pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, src_pre_tokenizer=src_pre_tokenizer, src_bpe_tokenizer=src_bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed)
        noise_token_id = (- 1)
        text_dataset = None
        if ((self.args.parallel_text_data != '') and is_train_split):
            text_dataset = self.load_langpair_dataset(self.data_cfg.prepend_tgt_lang_tag_no_change, 1.0, epoch=epoch)
            if (self.args.mask_text_ratio > 0):
                # Mask a fraction of source tokens in the text-only stream;
                # fall back to <unk> when no explicit noise token is given.
                noise_token_id = (self.src_dict.unk() if (self.args.noise_token == '') else self.src_dict.index(self.args.noise_token))
                text_dataset = LangPairMaskDataset(text_dataset, src_bos=self.src_dict.bos(), src_eos=self.src_dict.eos(), noise_id=noise_token_id, mask_ratio=self.args.mask_text_ratio, mask_type=self.args.mask_text_type)
        if (text_dataset is not None):
            mdsets = [ModalityDatasetItem('sup_speech', ast_dataset, (self.args.max_source_positions, self.args.max_target_positions), self.args.max_tokens, self.args.batch_size), ModalityDatasetItem('text', text_dataset, (self.args.max_positions_text, self.args.max_target_positions), (self.args.max_tokens_text if (self.args.max_tokens_text is not None) else self.args.max_tokens), self.args.batch_size)]
            ast_dataset = MultiModalityDataset(mdsets)
        self.datasets[split] = ast_dataset

    def target_dictionary(self):
        """Target-side vocabulary."""
        return self.tgt_dict

    def source_dictionary(self):
        """Source-side vocabulary, or None when only speech data is loaded."""
        return (None if self.speech_only else self.src_dict)

    def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False):
        """Use the parent iterator for pure speech datasets; for mixed-modality
        datasets build a GroupedEpochBatchIterator that interleaves speech and
        text batches according to the configured sample ratios."""
        if (not isinstance(dataset, MultiModalityDataset)):
            return super(SpeechTextJointToTextTask, self).get_batch_iterator(dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch, data_buffer_size, disable_iterator_cache, skip_remainder_batch=skip_remainder_batch, update_epoch_batch_itr=update_epoch_batch_itr)
        mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio]
        assert (len(dataset.datasets) == 2)
        dataset.set_epoch(epoch)
        batch_samplers = dataset.get_batch_samplers(mult_ratio, required_batch_size_multiple, seed)
        epoch_iter = GroupedEpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_samplers=batch_samplers, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, mult_rate=(1 if self.args.update_mix_data else max(self.args.update_freq)), buffer_size=data_buffer_size, skip_remainder_batch=skip_remainder_batch)
        # Do not cache this iterator: the mixed sampling is rebuilt per epoch.
        self.dataset_to_epoch_iter[dataset] = {}
        return epoch_iter
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel-attention module.

    The bottleneck ("squeeze") width is resolved with this precedence:
    explicit ``reduction_channels`` > ``reduction_ratio`` (scaled) >
    ``channels // reduction``; the derived widths are rounded via
    ``make_divisible``.

    Args:
        channels: number of input/output channels.
        reduction: divisor used when neither ``reduction_channels`` nor
            ``reduction_ratio`` is given.
        act_layer: activation class used between the two 1x1 convolutions.
        gate_layer: name of the gating activation (resolved by
            ``create_act_layer``).
        reduction_ratio: optional multiplicative ratio for the bottleneck width.
        reduction_channels: optional explicit bottleneck width (wins over the
            other two options).
        min_channels, divisor: rounding constraints for ``make_divisible``.
    """

    def __init__(self, channels, reduction=16, act_layer=nn.ReLU, gate_layer='sigmoid', reduction_ratio=None, reduction_channels=None, min_channels=8, divisor=1):
        super(SEModule, self).__init__()
        # Resolve the bottleneck width. (The original code carried a no-op
        # ``reduction_channels = reduction_channels`` branch here.)
        if reduction_channels is None:
            if reduction_ratio is not None:
                reduction_channels = make_divisible((channels * reduction_ratio), divisor, min_channels)
            else:
                reduction_channels = make_divisible((channels // reduction), divisor, min_channels)
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, bias=True)
        self.act = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, bias=True)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # Squeeze: global average pool over the spatial dimensions.
        x_se = x.mean((2, 3), keepdim=True)
        # Excite: bottleneck MLP implemented with 1x1 convolutions.
        x_se = self.fc1(x_se)
        x_se = self.act(x_se)
        x_se = self.fc2(x_se)
        # Rescale the input channel-wise by the gated attention.
        return (x * self.gate(x_se))
def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event, auto_collation, collate_fn, drop_last, seed, init_fn, worker_id, num_workers):
    """DataLoader worker-process main loop.

    Repeatedly pulls ``(idx, index)`` requests from ``index_queue``, fetches
    the corresponding samples, and pushes ``(idx, data)`` results (or wrapped
    exceptions) onto ``data_queue`` until it receives the ``None`` sentinel or
    the parent process dies.
    """
    try:
        # Re-install signal handlers so the worker terminates cleanly.
        signal_handling._set_worker_signal_handlers()
        # Keep each worker single-threaded to avoid thread oversubscription.
        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)
        global _worker_info
        _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers, seed=seed, dataset=dataset)
        from torch.utils.data import _DatasetKind
        init_exception = None
        try:
            if (init_fn is not None):
                init_fn(worker_id)
            fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
        except Exception:
            # Defer init failures: the wrapped exception is delivered through
            # the data queue on the first fetch request instead of crashing
            # the worker silently.
            init_exception = ExceptionWrapper(where='in DataLoader worker process {}'.format(worker_id))
        iteration_end = False
        # Watchdog notices a dead parent (manager) process so we can exit.
        watchdog = ManagerWatchdog()
        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue
            if (r is None):
                # Shutdown sentinel from the main process.
                assert (done_event.is_set() or iteration_end)
                break
            elif (done_event.is_set() or iteration_end):
                # Shutting down / epoch exhausted: drain remaining requests.
                continue
            (idx, index) = r
            if (init_exception is not None):
                # Report the deferred init failure exactly once.
                data = init_exception
                init_exception = None
            else:
                try:
                    data = fetcher.fetch(index)
                except Exception as e:
                    if (isinstance(e, StopIteration) and (dataset_kind == _DatasetKind.Iterable)):
                        # Iterable datasets signal per-worker exhaustion.
                        data = _IterableDatasetStopIteration(worker_id)
                        iteration_end = True
                    else:
                        # Ship the traceback to the main process for re-raising.
                        data = ExceptionWrapper(where='in DataLoader worker process {}'.format(worker_id))
            data_queue.put((idx, data))
            # Drop references promptly to keep worker memory low.
            del data, idx, index, r
    except KeyboardInterrupt:
        # SIGINT is handled by the main process; workers exit quietly.
        pass
    if done_event.is_set():
        data_queue.cancel_join_thread()
        data_queue.close()
class ModifiedVGG16Model(torch.nn.Module):
    """VGG16 feature extractor with a freshly initialized 2-class head.

    Args:
        model: optional backbone exposing a ``features`` module. The original
            implementation accepted this argument but unconditionally
            overwrote it with a pretrained VGG16; it is now honored, and a
            pretrained VGG16 is loaded only when no model is supplied
            (backward compatible with the previous default behavior).
    """

    def __init__(self, model=None):
        super(ModifiedVGG16Model, self).__init__()
        if model is None:
            # Default backbone: ImageNet-pretrained VGG16 from torchvision.
            model = models.vgg16(pretrained=True)
        self.features = model.features
        # New classifier head for binary classification; 25088 = 512 * 7 * 7,
        # the flattened VGG16 feature-map size for 224x224 inputs.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(25088, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 2),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), (- 1))  # flatten to (batch, 25088)
        x = self.classifier(x)
        return x
class ConditionalMLP(MLP):
    """MLP whose input is concatenated with flattened conditioning variables.

    ``condition_dims`` maps condition names to their (flattened) sizes; the
    parent MLP is built over ``input_dim`` plus the total conditioning size.
    """

    def __init__(self, input_dim, condition_dims, *args, verbose=False, **kwargs):
        self._condition_dims = condition_dims
        self._condition_keys = sorted(condition_dims.keys())
        total_condition_dim = sum(condition_dims.values())
        super(ConditionalMLP, self).__init__(input_dim + total_condition_dim, *args, **kwargs)
        if verbose:
            print('[ conditional mlp ] Conditioning keys: {}'.format(self._condition_keys))
            print(self)

    def forward(self, x, condition_dict):
        """Run the MLP on ``x`` concatenated with the flattened conditions;
        ``condition_dict`` may be None for unconditioned inputs."""
        if condition_dict is None:
            return self._layers(x)
        condition = flatten(condition_dict)
        return self._layers(torch.cat([x, condition], dim=(- 1)))

    def __repr__(self):
        return '[ {} : {} parameters | {} ] {}'.format(self.name, self.num_parameters, self._condition_dims, super(MLP, self).__repr__())
class HourGlassResidual(nn.Module):
    """Bottleneck residual block used in stacked-hourglass networks.

    The main path is a 1x1 -> 3x3 -> 1x1 bottleneck (half-width in the
    middle); the skip path is the identity when channel counts match, else a
    1x1 convolution + batch norm projection.
    """

    def __init__(self, in_channels, out_channels):
        super(HourGlassResidual, self).__init__()
        mid_channels = out_channels // 2
        if in_channels == out_channels:
            self.skip_layer = Identity()
        else:
            # Project the skip connection to the target channel count.
            self.skip_layer = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),
                nn.BatchNorm2d(out_channels),
            )
        self.model = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=True),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, mid_channels, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=1, bias=True),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        return self.model(x) + self.skip_layer(x)
class RDB(nn.Module):
    """Residual dense block: a cascade of densely connected layers whose
    accumulated channels are fused back to ``in_channels`` by a 1x1
    convolution, with a residual connection around the whole block."""

    def __init__(self, in_channels, growthRate, num_layer):
        super(RDB, self).__init__()
        layers = []
        channels = in_channels
        for _ in range(num_layer):
            layers.append(dense_layer(channels, growthRate))
            channels += growthRate  # each dense layer appends growthRate channels
        self.dense_layers = nn.Sequential(*layers)
        # Fuse the accumulated channels back down to the input width.
        self.conv1x1 = conv1x1(channels, in_channels)

    def forward(self, x):
        out = self.conv1x1(self.dense_layers(x))
        out += x  # residual connection (in-place add, as in the original)
        return out
def search_raw_array_pytorch(res, xb, xq, k, D=None, I=None, metric=faiss.METRIC_L2):
    """Brute-force k-nearest-neighbor search with FAISS on GPU, reading the
    database and query tensors directly from PyTorch memory via SWIG pointers
    (zero copy).

    Args:
        res: FAISS GPU resources object.
        xb: (nb, d) database tensor; must be row- or column-contiguous.
        xq: (nq, d) query tensor on the same device as ``xb``.
        k: number of neighbors to return per query.
        D: optional preallocated (nq, k) float32 tensor for distances.
        I: optional preallocated (nq, k) int64 tensor for indices.
        metric: FAISS metric constant (default ``METRIC_L2``).

    Returns:
        Tuple ``(D, I)`` of distances and neighbor indices, shape (nq, k).

    Raises:
        TypeError: if a matrix is neither row- nor column-contiguous.
    """
    assert (xb.device == xq.device)
    (nq, d) = xq.size()
    # FAISS must be told the memory layout explicitly; accept either
    # row-major (contiguous) or column-major (transpose is contiguous).
    if xq.is_contiguous():
        xq_row_major = True
    elif xq.t().is_contiguous():
        xq = xq.t()
        xq_row_major = False
    else:
        raise TypeError('matrix should be row or column-major')
    xq_ptr = swig_ptr_from_FloatTensor(xq)
    (nb, d2) = xb.size()
    assert (d2 == d)
    if xb.is_contiguous():
        xb_row_major = True
    elif xb.t().is_contiguous():
        xb = xb.t()
        xb_row_major = False
    else:
        raise TypeError('matrix should be row or column-major')
    xb_ptr = swig_ptr_from_FloatTensor(xb)
    # Allocate (or validate) the output buffers on the database device.
    if (D is None):
        D = torch.empty(nq, k, device=xb.device, dtype=torch.float32)
    else:
        assert (D.shape == (nq, k))
        assert (D.device == xb.device)
    if (I is None):
        I = torch.empty(nq, k, device=xb.device, dtype=torch.int64)
    else:
        assert (I.shape == (nq, k))
        assert (I.device == xb.device)
    D_ptr = swig_ptr_from_FloatTensor(D)
    I_ptr = swig_ptr_from_LongTensor(I)
    # Fill the FAISS parameter struct and run the brute-force search in place.
    args = faiss.GpuDistanceParams()
    args.metric = metric
    args.k = k
    args.dims = d
    args.vectors = xb_ptr
    args.vectorsRowMajor = xb_row_major
    args.numVectors = nb
    args.queries = xq_ptr
    args.queriesRowMajor = xq_row_major
    args.numQueries = nq
    args.outDistances = D_ptr
    args.outIndices = I_ptr
    faiss.bfKnn(res, args)
    return (D, I)
def has_cl_indicator(span):
    """Return 1 if any token in ``span`` is a claim-indicator word, else 0.

    The indicator lexicon is selected by the module-level ``lang`` setting:
    ``de_claim_indicators`` when ``lang == 'de'``, otherwise
    ``claim_indicators``.
    """
    indicators = de_claim_indicators if lang == 'de' else claim_indicators
    return int(any(token.text in indicators for token in span))
def train():
    """Run the training loop until ``config.max_steps`` optimization steps.

    Loops forever over ``train_loader``; every ``config.log_frequency`` steps
    it prints/logs the mean loss, every 32 steps it optionally steps the LR
    scheduler, and every ``config.anom_val_frequency`` steps it runs anomaly
    validation. Saves the model and returns at ``config.max_steps``.

    NOTE(review): relies on module-level globals (``config``, ``train_loader``,
    ``add_noise``, ``train_step``, ``lr_scheduler``, ``evaluate``,
    ``small_testloader``, ``anom_val_step``, ``save_model``, ``model``).
    """
    print(f'Starting training {config.name}...')
    train_losses = []
    t_start = time()
    while True:
        for input in train_loader:
            config.step += 1
            input = input.to(config.device)
            # noise_tensor is unused here; train_step consumes the noisy input.
            (noisy_input, noise_tensor) = add_noise(input)
            loss = train_step(input, noisy_input)
            train_losses.append(loss)
            if ((config.step % config.log_frequency) == 0):
                log_msg = f'Iteration {config.step} - '
                log_msg += f'train loss: {np.mean(train_losses):.4f}'
                log_msg += f' - time: {(time() - t_start):.2f}s'
                print(log_msg)
                log({'train/loss': np.mean(train_losses)}, config)
                train_losses = []  # reset the running-loss window after logging
            if (((config.step % 32) == 0) and config.lr_schedule):
                lr_scheduler.step()
            if ((config.step % config.anom_val_frequency) == 0):
                evaluate(config, small_testloader, anom_val_step)
            if (config.step >= config.max_steps):
                save_model(model, config)
                print(f'Reached {config.max_steps} iterations. Finished training {config.name}.')
                return
def evaluate(model):
    """Evaluate a TensorFlow SavedModel on ImageNet-style data.

    Builds a dataloader from the module-level ``args``, runs inference through
    the model's ``serving_default`` signature, and returns top-k accuracy.
    In benchmark/performance mode it also prints latency and throughput.
    """
    infer = model.signatures['serving_default']
    output_dict_keys = infer.structured_outputs.keys()
    # Assumes the signature exposes a single output tensor; take its name.
    output_name = list(output_dict_keys)[0]
    from neural_compressor import METRICS
    metrics = METRICS('tensorflow')
    metric = metrics['topk']()

    def eval_func(dataloader, metric):
        # Returns the mean per-image latency; accuracy accumulates in `metric`.
        warmup = 5
        iteration = None
        latency_list = []
        if (args.benchmark and (args.mode == 'performance')):
            iteration = args.iters
        for (idx, (inputs, labels)) in enumerate(dataloader):
            inputs = np.array(inputs)
            input_tensor = tf.constant(inputs)
            start = time.time()
            predictions = infer(input_tensor)[output_name]
            end = time.time()
            predictions = predictions.numpy()
            metric.update(predictions, labels)
            latency_list.append((end - start))
            # NOTE(review): with `idx >= iteration` this runs iteration + 1
            # batches — confirm whether that off-by-one is intended.
            if (iteration and (idx >= iteration)):
                break
        # The first `warmup` timings are discarded. NOTE(review):
        # `eval_dataloader` is resolved from the enclosing scope at call time
        # (it is defined below), not from the `dataloader` parameter.
        latency = (np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size)
        return latency
    from neural_compressor.utils.create_obj_from_config import create_dataloader
    dataloader_args = {'batch_size': args.batch_size, 'dataset': {'ImageRecord': {'root': args.dataset_location}}, 'transform': {'BilinearImagenet': {'height': 224, 'width': 224}}, 'filter': None}
    eval_dataloader = create_dataloader('tensorflow', dataloader_args)
    latency = eval_func(eval_dataloader, metric)
    if (args.benchmark and (args.mode == 'performance')):
        print('Batch size = {}'.format(eval_dataloader.batch_size))
        print('Latency: {:.3f} ms'.format((latency * 1000)))
        print('Throughput: {:.3f} images/sec'.format((1.0 / latency)))
    acc = metric.result()
    return acc
def load_task_with_labels(x, y, labels):
    """Select the samples of ``x``/``y`` whose label is in ``labels``.

    Results are grouped by label in the order the labels are given (matching
    the original per-label concatenation), not in dataset order.
    """
    per_label_indices = [np.where(y == label)[0] for label in labels]
    idx = np.concatenate(per_label_indices, axis=None)
    return (x[idx], y[idx])
def require_fairscale(test_case):
    """Decorator: skip ``test_case`` unless fairscale is installed."""
    skip_unless_available = unittest.skipUnless(is_fairscale_available(), 'test requires fairscale')
    return skip_unless_available(test_case)
class TargetNode(Node):
    """Build-graph node describing a compilable target: its sources, headers,
    dependencies and compiler options."""

    def __init__(self, srcs, hdrs, deps, copts, name=None):
        super().__init__(name)
        self.copts = copts
        self.deps = deps
        self.hdrs = hdrs
        self.srcs = srcs

    def _eval(self, executor):
        """No-op: target nodes only carry metadata; nothing to execute."""
        pass
class ModelArguments():
    """Command-line arguments selecting the text/vision model pair to load.

    NOTE(review): the fields use ``dataclasses.field`` but no ``@dataclass``
    decorator is visible here — it was likely stripped during extraction;
    confirm against the original source.
    """
    # Required: checkpoint (hub id or path) for the text encoder.
    text_model_name_or_path: str = field(metadata={'help': "The text model checkpoint for weights initialization.Don't set if you want to train a model from scratch."})
    # Required: checkpoint (hub id or path) for the vision encoder.
    vision_model_name_or_path: str = field(metadata={'help': "The vision model checkpoint for weights initialization.Don't set if you want to train a model from scratch."})
    # Load both sub-models from PyTorch checkpoints (True) or native weights.
    from_pt: bool = field(default=True, metadata={'help': 'whether to load the text and vision model using PyTorch checkpoints.'})
    # Optional overrides when config/tokenizer differ from the model name.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    # Download cache directory for pretrained artifacts.
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    # Weight/compute precision for initialization and training.
    dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'})
def prototype_twitter_VHRED_StandardBias():
    """Hyper-parameter preset: Twitter VHRED with standard ('all') decoder
    bias conditioning. Starts from ``prototype_state()`` and overrides the
    dataset paths, architecture sizes and latent-variable settings."""
    state = prototype_state()
    state.update({
        # Dataset locations.
        'train_dialogues': '../TwitterData/Training.dialogues.pkl',
        'test_dialogues': '../TwitterData/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterData/Validation.dialogues.pkl',
        'dictionary': '../TwitterData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Optimization / bookkeeping.
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder / decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_input': True,
        'deep_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': False,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent-variable (VHRED) settings.
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': (1.0 / 60000.0),
        # Word-dropout regularization on the decoder input.
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
    })
    return state
def main():
if ((int(os.environ.get('LOCAL_RANK', (- 1))) != (- 1)) and ('--no_cuda' in sys.argv)):
from intel_extension_for_transformers.transformers.utils.utility import distributed_init
distributed_init()
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args, optim_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args, optim_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'''
distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'''))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, field='data', cache_dir=model_args.cache_dir)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at to find the model types that meet this requirement')
if training_args.do_train:
column_names = raw_datasets['train'].column_names
elif training_args.do_eval:
column_names = raw_datasets['validation'].column_names
else:
column_names = raw_datasets['test'].column_names
question_column_name = ('question' if ('question' in column_names) else column_names[0])
context_column_name = ('context' if ('context' in column_names) else column_names[1])
answer_column_name = ('answers' if ('answers' in column_names) else column_names[2])
pad_on_right = (tokenizer.padding_side == 'right')
if (data_args.max_seq_length > tokenizer.model_max_length):
logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def prepare_train_features(examples):
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
offset_mapping = tokenized_examples.pop('offset_mapping')
tokenized_examples['start_positions'] = []
tokenized_examples['end_positions'] = []
for (i, offsets) in enumerate(offset_mapping):
input_ids = tokenized_examples['input_ids'][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
sequence_ids = tokenized_examples.sequence_ids(i)
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
if (len(answers['answer_start']) == 0):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
start_char = answers['answer_start'][0]
end_char = (start_char + len(answers['text'][0]))
token_start_index = 0
while (sequence_ids[token_start_index] != (1 if pad_on_right else 0)):
token_start_index += 1
token_end_index = (len(input_ids) - 1)
while (sequence_ids[token_end_index] != (1 if pad_on_right else 0)):
token_end_index -= 1
if (not ((offsets[token_start_index][0] <= start_char) and (offsets[token_end_index][1] >= end_char))):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
while ((token_start_index < len(offsets)) and (offsets[token_start_index][0] <= start_char)):
token_start_index += 1
tokenized_examples['start_positions'].append((token_start_index - 1))
while (offsets[token_end_index][1] >= end_char):
token_end_index -= 1
tokenized_examples['end_positions'].append((token_end_index + 1))
return tokenized_examples
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
def prepare_validation_features(examples):
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
tokenized_examples['example_id'] = []
for i in range(len(tokenized_examples['input_ids'])):
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = (1 if pad_on_right else 0)
sample_index = sample_mapping[i]
tokenized_examples['example_id'].append(examples['id'][sample_index])
tokenized_examples['offset_mapping'][i] = [(o if (sequence_ids[k] == context_index) else None) for (k, o) in enumerate(tokenized_examples['offset_mapping'][i])]
return tokenized_examples
if training_args.do_eval:
if ('validation' not in raw_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_examples = raw_datasets['validation']
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
eval_examples = eval_examples.select(range(max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
if training_args.do_predict:
if ('test' not in raw_datasets):
raise ValueError('--do_predict requires a test dataset')
predict_examples = raw_datasets['test']
if (data_args.max_predict_samples is not None):
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
predict_dataset = predict_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
if (data_args.max_predict_samples is not None):
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
data_collator = (default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None)))
def post_processing_function(examples, features, predictions, stage='eval'):
predictions = postprocess_qa_predictions(examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, log_level=log_level, prefix=stage)
if data_args.version_2_with_negative:
formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for (k, v) in predictions.items()]
else:
formatted_predictions = [{'id': k, 'prediction_text': v} for (k, v) in predictions.items()]
references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric(('squad_v2' if data_args.version_2_with_negative else 'squad'))
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
metric_name = optim_args.metric_name
training_args.metric_for_best_model = metric_name
trainer = QuestionAnsweringTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), eval_examples=(eval_examples if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics)
calib_dataloader = trainer.get_eval_dataloader()
if optim_args.tune:
if (not training_args.do_eval):
raise ValueError('do_eval must be set to True for quantization.')
if (optim_args.quantization_approach != 'PostTrainingDynamic'):
if (not training_args.do_train):
raise ValueError('do_train must be set to True for static and aware training quantization.')
if (optim_args.quantization_approach == 'QuantizationAwareTraining'):
early_stopping_patience = 6
early_stopping_threshold = 0.001
trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, early_stopping_threshold))
tune_metric = metrics.Metric(name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol)
quantization_config = QuantizationConfig(approach=optim_args.quantization_approach, max_trials=200, metrics=[tune_metric], sampling_size=(len(train_dataset) // 20))
if (optim_args.strategy == 'mse_v2'):
quantization_config.strategy = 'mse_v2'
if (optim_args.framework == 'ipex'):
quantization_config.framework = 'pytorch_ipex'
trainer.calib_dataloader = calib_dataloader
model = trainer.quantize(quant_config=quantization_config)
if optim_args.benchmark_only:
if optim_args.int8:
model_path = training_args.output_dir
else:
model_path = model_args.model_name_or_path
trainer.benchmark(model_path, backend=('ipex' if (optim_args.framework == 'ipex') else 'torch'), batch_size=training_args.per_device_eval_batch_size, cores_per_instance=optim_args.cores_per_instance, num_of_instance=optim_args.num_of_instance)
if (optim_args.benchmark or optim_args.accuracy_only):
if optim_args.int8:
model = OptimizedModel.from_pretrained(training_args.output_dir, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
trainer.model = model
start_time = timeit.default_timer()
results = trainer.evaluate()
evalTime = (timeit.default_timer() - start_time)
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
eval_samples = min(max_eval_samples, len(eval_dataset))
samples = ((eval_samples - (eval_samples % training_args.per_device_eval_batch_size)) if training_args.dataloader_drop_last else eval_samples)
logger.info('metrics keys: {}'.format(results.keys()))
bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', 'eval_pearson', 'eval_mcc', 'eval_spearmanr']
ret = False
for key in bert_task_acc_keys:
if (key in results.keys()):
ret = True
print('Batch size = ', training_args.per_device_eval_batch_size)
print('Finally Eval {} Accuracy: {}'.format(key, results[key]))
print('Latency: {:.5f} ms'.format(((evalTime / samples) * 1000)))
print('Throughput: {:.5f} samples/sec'.format((samples / evalTime)))
break
assert ret, 'No metric returned, Please check inference metric!' |
def test_batched_attribution_consistency_decoder_only(saliency_gpt2_model):
    """Attributing one example alone and inside a batch must give (nearly) the same scores."""
    single_texts, single_refs = EXAMPLES['short_texts_decoder'][0]
    batch_texts, batch_refs = EXAMPLES['short_texts_decoder'][1]
    single_out = saliency_gpt2_model.attribute(single_texts, single_refs, show_progress=False, device=get_default_device())
    batch_out = saliency_gpt2_model.attribute(batch_texts, batch_refs, show_progress=False, device=get_default_device())
    # Loose tolerance: padding in the batched pass perturbs saliency slightly.
    assert torch.allclose(
        single_out.sequence_attributions[0].target_attributions,
        batch_out.sequence_attributions[0].target_attributions,
        atol=0.08,
        equal_nan=True,
    )
def deeplabv3_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """DeepLabv3 segmentation model with a ResNet-D-50b backbone (COCO, 21 classes by default)."""
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, multi_output=True).features
    # Drop the final stage of the backbone before attaching the segmentation head.
    del features[-1]
    return get_deeplabv3(backbone=features, num_classes=num_classes, aux=aux, model_name='deeplabv3_resnetd50b_coco', **kwargs)
class GaussianLinearMean(nn.Module):
    """Gaussian observation likelihood with learnable noise variance.

    The noise is stored as an unconstrained parameter (mapped through
    ``positive_transform`` / ``inverse_positive_transform`` — presumably a
    softplus-like bijection, confirm in the project utils) and is either
    shared across all ``out_dim`` outputs or learned per output.
    """
    def __init__(self, out_dim: int, noise_init: float, noise_is_shared: bool):
        """
        Args:
            out_dim: number of output dimensions.
            noise_init: initial noise variance, pushed through the inverse
                positive transform to the unconstrained scale.
            noise_is_shared: share one noise parameter across outputs (True)
                or keep one per output (False).
        """
        super(GaussianLinearMean, self).__init__()
        self.out_dim = out_dim
        self.noise_is_shared = noise_is_shared
        if noise_is_shared:
            # Single (1, 1) parameter broadcast to all outputs on use.
            log_var_noise = nn.Parameter((torch.ones(1, 1, dtype=cg.dtype) * inverse_positive_transform(torch.tensor(noise_init, dtype=cg.dtype))))
        else:
            # One unconstrained noise parameter per output dimension.
            log_var_noise = nn.Parameter((torch.ones(out_dim, 1, dtype=cg.dtype) * inverse_positive_transform(torch.tensor(noise_init, dtype=cg.dtype))))
        self.log_var_noise = log_var_noise
    def sample_from_output(self, f: torch.Tensor, i: int, **kwargs) -> torch.Tensor:
        """Draw a sample y ~ N(f, noise_var_i) for output dimension ``i``.

        Note: despite the name/original annotation, this returns a *sample*
        (tensor), not the distribution object — annotation corrected.
        """
        if self.noise_is_shared:
            log_var_noise = self.log_var_noise.expand(self.out_dim, 1)
        else:
            log_var_noise = self.log_var_noise
        var = positive_transform(log_var_noise[i])
        dist = td.Normal(f, (torch.ones_like(f) * torch.sqrt(var)))
        return dist.sample()
    def expected_log_prob(self, Y, gauss_mean, gauss_cov, **kwargs):
        """Expected log-likelihood E_q[log p(Y | f)] under a diagonal Gaussian q(f).

        Assumes Y / gauss_mean / gauss_cov are (out_dim, N) — TODO confirm
        against callers.
        """
        if self.noise_is_shared:
            log_var_noise = self.log_var_noise.expand(self.out_dim, 1)
        else:
            log_var_noise = self.log_var_noise
        N = Y.size(1)
        # Inverse noise variance, expanded across the N observations.
        C_y_inv = (1.0 / positive_transform(log_var_noise)).expand((- 1), N)
        log_p_y = batched_log_Gaussian(obs=Y, mean=gauss_mean, cov=C_y_inv, diagonal=True, cov_is_inverse=True)
        # Trace correction term -0.5 * tr(C_y^{-1} S) from the Gaussian ELL.
        trace = ((- 0.5) * torch.sum(torch.mul(C_y_inv, gauss_cov), 1))
        ELL = (log_p_y + trace)
        return ELL
    def marginal_moments(self, gauss_mean, gauss_cov, diagonal, **kwargs):
        """Moments of p(y) = ∫ N(y|f, C_Y) q(f) df: mean unchanged, covariance
        is q's covariance plus the observation noise."""
        N = gauss_mean.size(1)
        if self.noise_is_shared:
            log_var_noise = self.log_var_noise.expand(self.out_dim, 1)
        else:
            log_var_noise = self.log_var_noise
        C_Y = positive_transform(log_var_noise).expand((- 1), N)
        if (not diagonal):
            # Promote the per-point noise vector to a diagonal matrix before
            # adding the full covariance.
            C_Y = torch.diag_embed(C_Y)
        C_Y = (C_Y + gauss_cov)
        mu_Y = gauss_mean.clone()
        return (mu_Y, C_Y)
    def log_marginal(self, Y, gauss_mean, gauss_cov, **kwargs):
        """Exact log marginal likelihood log N(Y; mx, Kxx) via a Cholesky solve."""
        N = Y.size(1)
        Dy = self.out_dim
        (mx, Kxx) = self.marginal_moments(gauss_mean, gauss_cov, diagonal=False)
        mx = mx.view(Dy, N, 1)
        Y = Y.view(Dy, N, 1)
        Y_mx = (Y - mx)
        # Jittered Cholesky for numerical stability of the solve below.
        Lxx = psd_safe_cholesky(Kxx, upper=False, jitter=cg.global_jitter)
        rhs = torch.cholesky_solve(Y_mx, Lxx, upper=False)
        # Quadratic form (Y-m)^T K^{-1} (Y-m).
        data_fit_term = torch.matmul(Y_mx.transpose(1, 2), rhs)
        # log|K| = 2 * sum(log diag(L)).
        complexity_term = (2 * torch.log(torch.diagonal(Lxx, dim1=1, dim2=2)).sum(1))
        cte = (((- N) / 2.0) * torch.log((2 * cg.pi)))
        return (((- 0.5) * (data_fit_term + complexity_term)) + cte)
def test_captured_utf8_3byte_offset1(capsys):
    """3-byte UTF-8 chars at a 1-byte offset must survive output capture intact."""
    filler = '\uffff'
    # One ASCII byte up front shifts the 3-byte sequences off alignment.
    payload = '1' + filler * (1024 // len(filler) + 1)
    m.captured_output_default(payload)
    captured = capsys.readouterr()
    assert captured.out == payload
    assert captured.err == ''
def IGA_hyper(sample):
    """Hyperparameter space for IGA.

    When ``sample`` is truthy, the penalty weight is drawn log-uniformly in
    [10^1, 10^5] from the supplied RNG; otherwise it is the fixed default 10.0.
    Values are lambdas taking a ``random.Random``-like object.
    """
    if not sample:
        return {'penalty_weight': (lambda r: 10.0)}
    return {'penalty_weight': (lambda r: (10 ** r.uniform(1, 5)))}
class RobertaTokenizerFast(metaclass=DummyObject):
    """Dummy stand-in for the real ``RobertaTokenizerFast``.

    Used when the ``tokenizers`` backend is not installed; instantiation
    delegates to ``requires_backends`` (which presumably raises an
    informative error — see the project's dummy-object machinery).
    """
    # Backends checked by requires_backends on instantiation.
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def verify_blender_scene(blender_scene_name: str = 'Scene') -> bpy.types.Scene:
    """Look up a Blender scene by name (falling back to the first scene),
    make it the active scene on the current window, and return it."""
    scene = bpy.data.scenes.get(blender_scene_name)
    if scene is None:
        # Requested scene is missing; fall back to whatever exists first.
        log.debug(f'Could not find scene {blender_scene_name}')
        scene = bpy.data.scenes[0]
    log.debug(f'Setting scene to {scene.name}')
    bpy.context.window.scene = scene
    return scene
def get_image_net(worker, enc_net, ref_net, init_net_path=None):
    """Build an ImageNet wrapper from the combined encoder/refiner network
    and the worker's verified test set."""
    combined_net = get_net(enc_net, ref_net, init_net_path=init_net_path)
    dataset = _verify_and_get_test_set(worker)
    return ImageNet(combined_net, dataset)
class MSDeAOT(AOT):
    """Multi-scale DeAOT: AOT variant with a multi-scale dual-branch GPM as
    its LSTT module and an 'fpn2' decoder that consumes intermediate LSTT
    embeddings at multiple resolutions."""
    def __init__(self, cfg, encoder='mobilenetv2', decoder='fpn'):
        super().__init__(cfg, encoder, decoder)
        # Replace the base LSTT with the multi-scale dual-branch propagation module.
        self.LSTT = MSDualBranchGPM(cfg.MODEL_LSTT_NUM, cfg.MODEL_ENCODER_EMBEDDING_DIM, cfg.MODEL_SELF_HEADS, cfg.MODEL_ATT_HEADS, emb_dropout=cfg.TRAIN_LSTT_EMB_DROPOUT, droppath=cfg.TRAIN_LSTT_DROPPATH, lt_dropout=cfg.TRAIN_LSTT_LT_DROPOUT, st_dropout=cfg.TRAIN_LSTT_ST_DROPOUT, droppath_lst=cfg.TRAIN_LSTT_DROPPATH_LST, droppath_scaling=cfg.TRAIN_LSTT_DROPPATH_SCALING, intermediate_norm=cfg.MODEL_DECODER_INTERMEDIATE_LSTT, return_intermediate=True, encoder_dim=cfg.MODEL_ENCODER_DIM)
        # Decoder input width: all (2*num_layers) intermediate embeddings plus
        # the encoder embedding when intermediate outputs are used, else 2x.
        decoder_indim = ((cfg.MODEL_ENCODER_EMBEDDING_DIM * ((cfg.MODEL_LSTT_NUM * 2) + 1)) if cfg.MODEL_DECODER_INTERMEDIATE_LSTT else (cfg.MODEL_ENCODER_EMBEDDING_DIM * 2))
        self.decoder = build_decoder('fpn2', in_dim=decoder_indim, out_dim=(cfg.MODEL_MAX_OBJ_NUM + 1), decode_intermediate_input=cfg.MODEL_DECODER_INTERMEDIATE_LSTT, hidden_dim=cfg.MODEL_ENCODER_EMBEDDING_DIM, shortcut_dims=cfg.MODEL_ENCODER_DIM, align_corners=cfg.MODEL_ALIGN_CORNERS)
        self.id_norm = nn.LayerNorm(cfg.MODEL_ENCODER_EMBEDDING_DIM)
        self._init_weight()
    def decode_id_logits(self, lstt_emb, shortcuts, step=None):
        """Decode per-object logits from LSTT embeddings and encoder shortcuts.

        All but the last LSTT embedding are reshaped to the resolution of the
        deepest shortcut; the last embedding is reshaped to shortcuts[-4]
        (a shallower, higher-resolution scale). NOTE(review): assumes each
        lstt_emb entry is an (h*w, n, c) sequence — confirm against
        MSDualBranchGPM's output layout.
        """
        (n, c, h, w) = shortcuts[(- 1)].size()
        decoder_inputs = [shortcuts[(- 1)]]
        for i in range((len(lstt_emb) - 1)):
            emb = lstt_emb[i]
            # (h*w, n, c) -> (n, c, h, w) for the convolutional decoder.
            decoder_inputs.append(emb.view(h, w, n, (- 1)).permute(2, 3, 0, 1))
        # Last embedding lives at the larger shortcuts[-4] resolution.
        (n, c, h, w) = shortcuts[(- 4)].size()
        decoder_inputs.append(lstt_emb[(- 1)].view(h, w, n, (- 1)).permute(2, 3, 0, 1))
        pred_logit = self.decoder(decoder_inputs, shortcuts)
        return pred_logit
    def get_id_emb(self, x):
        """Compute the identity embedding for mask ``x``: patch-wise ID bank,
        then LayerNorm over the channel dim, then dropout."""
        id_emb = self.patch_wise_id_bank(x)
        # Permute to channels-last for LayerNorm, then back to NCHW.
        id_emb = self.id_norm(id_emb.permute(2, 3, 0, 1)).permute(2, 3, 0, 1)
        id_emb = self.id_dropout(id_emb)
        return id_emb
class RagRetriever():
    """Retriever used by RAG models.

    Wraps a document index (legacy FAISS-based or HF-datasets-based,
    selected by ``config.index_name``), retrieves the top ``n_docs``
    documents for a batch of question embeddings, and formats them with
    the question strings as generator inputs.
    """
    # When True, the index is initialized eagerly in __init__.
    _init_retrieval = True

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer):
        super().__init__()
        # Choose the index backend based on the config.
        self.index = (LegacyIndex(config.retrieval_vector_size, (config.index_path or LEGACY_INDEX_PATH)) if (config.index_name == 'legacy') else HFIndex(config.dataset, config.dataset_split, config.index_name, config.retrieval_vector_size, config.index_path, config.use_dummy_dataset))
        self.generator_tokenizer = generator_tokenizer
        self.question_encoder_tokenizer = question_encoder_tokenizer
        self.n_docs = config.n_docs
        self.batch_size = config.retrieval_batch_size
        self.config = config
        if self._init_retrieval:
            self.init_retrieval()

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, **kwargs):
        """Instantiate a retriever (config + both tokenizers) from a pretrained name/path.

        Fix: decorated as a classmethod — the original declared ``cls`` as its
        first parameter but lacked the decorator, so calling
        ``RagRetriever.from_pretrained(path)`` bound ``path`` to ``cls``.
        """
        config = RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        return cls(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer)

    def save_pretrained(self, save_directory):
        """Persist the retriever config and both tokenizers to ``save_directory``."""
        self.config.save_pretrained(save_directory)
        rag_tokenizer = RagTokenizer(question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer)
        rag_tokenizer.save_pretrained(save_directory)

    def init_retrieval(self):
        """Load/initialize the underlying document index."""
        logger.info('initializing retrieval')
        self.index.init_index()

    def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
        """Concatenate each retrieved doc with its question string and tokenize
        for the generator.

        Returns:
            (input_ids, attention_mask) for batch_size * n_docs combined
            strings, padded/truncated to ``config.max_combined_length``.
        """
        def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
            # Strip surrounding quotes that some titles carry in the source data.
            if doc_title.startswith('"'):
                doc_title = doc_title[1:]
            if doc_title.endswith('"'):
                doc_title = doc_title[:(- 1)]
            if (prefix is None):
                prefix = ''
            # Fix: collapse double spaces — the original called
            # .replace(' ', ' '), a no-op single-space replacement.
            out = (((((prefix + doc_title) + self.config.title_sep) + doc_text) + self.config.doc_sep) + input_string).replace('  ', ' ')
            return out
        rag_input_strings = [cat_input_and_doc(docs[i]['title'][j], docs[i]['text'][j], input_strings[i], prefix) for i in range(len(docs)) for j in range(n_docs)]
        contextualized_inputs = self.generator_tokenizer.batch_encode_plus(rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding='max_length', truncation=True)
        return (contextualized_inputs['input_ids'], contextualized_inputs['attention_mask'])

    def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
        """Split ``t`` into consecutive chunks of at most ``chunk_size``."""
        return [t[i:(i + chunk_size)] for i in range(0, len(t), chunk_size)]

    def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[(np.ndarray, np.ndarray)]:
        """Query the index in batches of ``self.batch_size``; return stacked
        (doc ids, doc embedding vectors)."""
        question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
        ids_batched = []
        vectors_batched = []
        for question_hidden_states in question_hidden_states_batched:
            start_time = time.time()
            (ids, vectors) = self.index.get_top_docs(question_hidden_states, n_docs)
            logger.debug('index search time: {} sec, batch size {}'.format((time.time() - start_time), question_hidden_states.shape))
            ids_batched.extend(ids)
            vectors_batched.extend(vectors)
        return (np.array(ids_batched), np.array(vectors_batched))

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[(np.ndarray, np.ndarray, List[dict])]:
        """Retrieve the top ``n_docs`` documents for each question embedding.

        Returns a 3-tuple (retrieved_doc_embeds, doc_ids, doc_dicts) —
        annotation corrected; the original claimed a 2-tuple while the body
        (and ``__call__``) produce/consume three values.
        """
        (doc_ids, retrieved_doc_embeds) = self._main_retrieve(question_hidden_states, n_docs)
        return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))

    def __call__(self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None) -> BatchEncoding:
        """Retrieve documents for the questions and return generator-ready inputs
        plus the retrieved embeddings/ids as a ``BatchEncoding``."""
        n_docs = (n_docs if (n_docs is not None) else self.n_docs)
        prefix = (prefix if (prefix is not None) else self.config.generator.prefix)
        (retrieved_doc_embeds, doc_ids, docs) = self.retrieve(question_hidden_states, n_docs)
        # Decode the questions back to strings so they can be concatenated with docs.
        input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
        (context_input_ids, context_attention_mask) = self.postprocess_docs(docs, input_strings, prefix, n_docs, return_tensors=return_tensors)
        return BatchEncoding({'context_input_ids': context_input_ids, 'context_attention_mask': context_attention_mask, 'retrieved_doc_embeds': retrieved_doc_embeds, 'doc_ids': doc_ids}, tensor_type=return_tensors)
class KaldiWriter(BaseWriter):
    """Writer that stores arrays in Kaldi ark/scp format via kaldiio.

    Optionally compresses matrices and records per-utterance frame counts
    through a secondary writer.
    """
    def __init__(self, wspecifier, write_num_frames=None, compress=False, compression_method=2):
        # Only pass a compression method when compression is requested.
        helper_kwargs = {'compression_method': compression_method} if compress else {}
        self.writer = kaldiio.WriteHelper(wspecifier, **helper_kwargs)
        self.writer_scp = None
        self.writer_nframe = (get_num_frames_writer(write_num_frames) if write_num_frames is not None else None)
    def __setitem__(self, key, value):
        self.writer[key] = value
        if self.writer_nframe is not None:
            # One "<key> <num_frames>" line per utterance.
            self.writer_nframe.write(f'{key} {len(value)}\n')
def action_step(state, action_1, action_2, step, sample_len, opt, dataset):
    """Advance the (lp, mp, rp) segment state by ``step`` frames.

    The left/right segment lengths are rescaled by the two actions and
    clamped to [4, sample_len / val_opt['min_cycles']]. Returns
    (new_state, done_flag, fail_flag). NOTE(review): ``opt`` and
    ``dataset`` are unused and the clamp reads the module-level
    ``val_opt`` — confirm that is intended.
    """
    lp, mp, rp = state
    max_seg = sample_len / val_opt['min_cycles']
    left_len = min(max(4, (mp - lp + 1) * action_1), max_seg)
    right_len = min(max(4, (rp - mp) * action_2), max_seg)
    mp = int(mp + step)
    lp = int(mp - left_len + 1)
    rp = int(mp + right_len)
    new_state = (lp, mp, rp)
    done_flag = mp >= sample_len
    fail_flag = (mp - lp + 1) < 4 or (rp - mp) < 4
    return (new_state, done_flag, fail_flag)
class BottleNeckUpSampling(nn.Module):
    """Bottleneck upsampling block: two 3x3 projection convs (BN + PReLU)
    followed by an expansion block and light dropout."""
    def __init__(self, in_dim, projectionFactor, out_dim, dtype=torch.float32):
        super(BottleNeckUpSampling, self).__init__()
        mid_dim = int(in_dim / projectionFactor)
        self.conv0 = nn.Conv2d(in_dim, mid_dim, kernel_size=3, padding=1)
        self.bn0 = nn.BatchNorm2d(mid_dim)
        self.PReLU0 = nn.PReLU()
        self.conv1 = nn.Conv2d(mid_dim, mid_dim, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(mid_dim)
        self.PReLU1 = nn.PReLU()
        self.block2 = conv_block_1(mid_dim, out_dim)
        self.do = nn.Dropout(p=0.01)
        # NOTE(review): PReLU3 is constructed but never used in forward() —
        # kept for state-dict compatibility; confirm whether it was intended.
        self.PReLU3 = nn.PReLU()
    def forward(self, input):
        out = self.PReLU0(self.bn0(self.conv0(input)))
        out = self.PReLU1(self.bn1(self.conv1(out)))
        return self.do(self.block2(out))
def download_data():
    """Download the Cornell Movie-Dialogs corpus zip (skipping the download
    if the archive already exists), extract it into the raw data directory,
    and rename the extracted folder to ``extracted_dir``.

    Relies on module-level ``config``, ``zipfile_path`` and ``extracted_dir``.
    """
    if not os.path.exists(zipfile_path):
        print(f'Downloading {config.download_url} to {zipfile_path}')
        urlretrieve(config.download_url, zipfile_path)
        print(f'Successfully downloaded {zipfile_path}')
    # Context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked it on failure).
    with ZipFile(zipfile_path, 'r') as zip_ref:
        zip_ref.extractall(config.raw_data_dir)
    os.rename(f'{config.raw_data_dir}/cornell movie-dialogs corpus', extracted_dir)
def main():
    """Fine-tune a Kandinsky prior with LoRA attention processors.

    Loads frozen CLIP encoders and a prior transformer, attaches trainable
    LoRA layers to the prior's attention processors, trains them on an
    image/caption dataset with Accelerate, periodically checkpoints and
    validates, and finally saves the LoRA weights (and optionally pushes
    to the Hub).
    """
    args = parse_args()
    # --- Accelerator / logging setup -----------------------------------
    logging_dir = Path(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir)
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config)
    if (args.report_to == 'wandb'):
        if (not is_wandb_available()):
            raise ImportError('Make sure to install wandb if you want to use it for logging during training.')
        import wandb
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # Keep library logs quiet on non-main processes.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    if accelerator.is_main_process:
        if (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
        if args.push_to_hub:
            repo_id = create_repo(repo_id=(args.hub_model_id or Path(args.output_dir).name), exist_ok=True, token=args.hub_token).repo_id
    # --- Load scheduler, tokenizers and frozen CLIP/prior models -------
    noise_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', prediction_type='sample')
    image_processor = CLIPImageProcessor.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder='image_processor')
    tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder='tokenizer')
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder='image_encoder')
    text_encoder = CLIPTextModelWithProjection.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder='text_encoder')
    prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder='prior')
    # Freeze everything; only the LoRA layers added below will train.
    image_encoder.requires_grad_(False)
    prior.requires_grad_(False)
    text_encoder.requires_grad_(False)
    # Dtype for the frozen models under mixed precision.
    weight_dtype = torch.float32
    if (accelerator.mixed_precision == 'fp16'):
        weight_dtype = torch.float16
    elif (accelerator.mixed_precision == 'bf16'):
        weight_dtype = torch.bfloat16
    prior.to(accelerator.device, dtype=weight_dtype)
    image_encoder.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device, dtype=weight_dtype)
    # --- Attach LoRA attention processors (the only trainable params) --
    lora_attn_procs = {}
    for name in prior.attn_processors.keys():
        # NOTE(review): hidden_size is hard-coded to 2048 for every attention
        # processor — presumably matches this prior's width; confirm.
        lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=2048, rank=args.rank)
    prior.set_attn_processor(lora_attn_procs)
    lora_layers = AttnProcsLayers(prior.attn_processors)
    if args.allow_tf32:
        # TF32 matmuls speed up training on Ampere+ GPUs.
        torch.backends.cuda.matmul.allow_tf32 = True
    # --- Optimizer (optionally 8-bit Adam) ------------------------------
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError('Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`')
        optimizer_cls = bnb.optim.AdamW8bit
    else:
        optimizer_cls = torch.optim.AdamW
    optimizer = optimizer_cls(lora_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
    # --- Dataset loading and column resolution --------------------------
    if (args.dataset_name is not None):
        dataset = load_dataset(args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir)
    else:
        data_files = {}
        if (args.train_data_dir is not None):
            data_files['train'] = os.path.join(args.train_data_dir, '**')
        dataset = load_dataset('imagefolder', data_files=data_files, cache_dir=args.cache_dir)
    column_names = dataset['train'].column_names
    dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
    if (args.image_column is None):
        image_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
    else:
        image_column = args.image_column
        if (image_column not in column_names):
            raise ValueError(f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}")
    if (args.caption_column is None):
        caption_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
    else:
        caption_column = args.caption_column
        if (caption_column not in column_names):
            raise ValueError(f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}")
    def tokenize_captions(examples, is_train=True):
        """Tokenize captions; lists of captions are sampled randomly in training."""
        captions = []
        for caption in examples[caption_column]:
            if isinstance(caption, str):
                captions.append(caption)
            elif isinstance(caption, (list, np.ndarray)):
                captions.append((random.choice(caption) if is_train else caption[0]))
            else:
                raise ValueError(f'Caption column `{caption_column}` should contain either strings or lists of strings.')
        inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding='max_length', truncation=True, return_tensors='pt')
        text_input_ids = inputs.input_ids
        text_mask = inputs.attention_mask.bool()
        return (text_input_ids, text_mask)
    def preprocess_train(examples):
        """Per-batch transform: CLIP pixel values + tokenized captions."""
        images = [image.convert('RGB') for image in examples[image_column]]
        examples['clip_pixel_values'] = image_processor(images, return_tensors='pt').pixel_values
        (examples['text_input_ids'], examples['text_mask']) = tokenize_captions(examples)
        return examples
    with accelerator.main_process_first():
        if (args.max_train_samples is not None):
            dataset['train'] = dataset['train'].shuffle(seed=args.seed).select(range(args.max_train_samples))
        train_dataset = dataset['train'].with_transform(preprocess_train)
    def collate_fn(examples):
        """Stack per-example tensors into a training batch."""
        clip_pixel_values = torch.stack([example['clip_pixel_values'] for example in examples])
        clip_pixel_values = clip_pixel_values.to(memory_format=torch.contiguous_format).float()
        text_input_ids = torch.stack([example['text_input_ids'] for example in examples])
        text_mask = torch.stack([example['text_mask'] for example in examples])
        return {'clip_pixel_values': clip_pixel_values, 'text_input_ids': text_input_ids, 'text_mask': text_mask}
    train_dataloader = torch.utils.data.DataLoader(train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers)
    # --- Step/epoch accounting and LR scheduler -------------------------
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=(args.lr_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps))
    # Clone CLIP normalization stats before accelerator.prepare wraps the prior.
    clip_mean = prior.clip_mean.clone()
    clip_std = prior.clip_std.clone()
    (lora_layers, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(lora_layers, optimizer, train_dataloader, lr_scheduler)
    # Recompute after prepare(): the dataloader length may change under DDP sharding.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if overrode_max_train_steps:
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    if accelerator.is_main_process:
        accelerator.init_trackers('text2image-fine-tune', config=vars(args))
    total_batch_size = ((args.train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f'  Num examples = {len(train_dataset)}')
    logger.info(f'  Num Epochs = {args.num_train_epochs}')
    logger.info(f'  Instantaneous batch size per device = {args.train_batch_size}')
    logger.info(f'  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f'  Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f'  Total optimization steps = {args.max_train_steps}')
    global_step = 0
    first_epoch = 0
    # --- Optionally resume from a saved checkpoint ----------------------
    if args.resume_from_checkpoint:
        if (args.resume_from_checkpoint != 'latest'):
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Pick the newest "checkpoint-<step>" directory by step number.
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith('checkpoint')]
            dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
            path = (dirs[(- 1)] if (len(dirs) > 0) else None)
        if (path is None):
            accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f'Resuming from checkpoint {path}')
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split('-')[1])
            initial_global_step = global_step
            first_epoch = (global_step // num_update_steps_per_epoch)
    else:
        initial_global_step = 0
    progress_bar = tqdm(range(0, args.max_train_steps), initial=initial_global_step, desc='Steps', disable=(not accelerator.is_local_main_process))
    clip_mean = clip_mean.to(weight_dtype).to(accelerator.device)
    clip_std = clip_std.to(weight_dtype).to(accelerator.device)
    # --- Training loop ---------------------------------------------------
    for epoch in range(first_epoch, args.num_train_epochs):
        prior.train()
        train_loss = 0.0
        for (step, batch) in enumerate(train_dataloader):
            with accelerator.accumulate(prior):
                (text_input_ids, text_mask, clip_images) = (batch['text_input_ids'], batch['text_mask'], batch['clip_pixel_values'].to(weight_dtype))
                # Frozen encoders: no gradients needed for the conditioning.
                with torch.no_grad():
                    text_encoder_output = text_encoder(text_input_ids)
                    prompt_embeds = text_encoder_output.text_embeds
                    text_encoder_hidden_states = text_encoder_output.last_hidden_state
                    image_embeds = image_encoder(clip_images).image_embeds
                noise = torch.randn_like(image_embeds)
                bsz = image_embeds.shape[0]
                # Random diffusion timestep per sample.
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=image_embeds.device)
                timesteps = timesteps.long()
                # Normalize image embeddings with the prior's CLIP statistics.
                image_embeds = ((image_embeds - clip_mean) / clip_std)
                noisy_latents = noise_scheduler.add_noise(image_embeds, noise, timesteps)
                # 'sample' prediction type: the prior regresses the clean embedding.
                target = image_embeds
                model_pred = prior(noisy_latents, timestep=timesteps, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask).predicted_image_embedding
                if (args.snr_gamma is None):
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
                else:
                    # Min-SNR weighting: clamp per-timestep loss weights at snr_gamma.
                    snr = compute_snr(noise_scheduler, timesteps)
                    if (noise_scheduler.config.prediction_type == 'v_prediction'):
                        snr = (snr + 1)
                    mse_loss_weights = (torch.stack([snr, (args.snr_gamma * torch.ones_like(timesteps))], dim=1).min(dim=1)[0] / snr)
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction='none')
                    loss = (loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights)
                    loss = loss.mean()
                # Average the loss across processes for logging.
                avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                train_loss += (avg_loss.item() / args.gradient_accumulation_steps)
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(lora_layers.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # One optimizer update completed (all accumulation micro-steps done).
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                accelerator.log({'train_loss': train_loss}, step=global_step)
                train_loss = 0.0
                # --- Periodic checkpointing (main process only) ---------
                if ((global_step % args.checkpointing_steps) == 0):
                    if accelerator.is_main_process:
                        if (args.checkpoints_total_limit is not None):
                            # Prune oldest checkpoints to stay under the limit.
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith('checkpoint')]
                            checkpoints = sorted(checkpoints, key=(lambda x: int(x.split('-')[1])))
                            if (len(checkpoints) >= args.checkpoints_total_limit):
                                num_to_remove = ((len(checkpoints) - args.checkpoints_total_limit) + 1)
                                removing_checkpoints = checkpoints[0:num_to_remove]
                                logger.info(f'{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints')
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)
                        save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
                        accelerator.save_state(save_path)
                        logger.info(f'Saved state to {save_path}')
            logs = {'step_loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            if (global_step >= args.max_train_steps):
                break
        # --- Periodic validation: sample images with the current LoRA ---
        if accelerator.is_main_process:
            if ((args.validation_prompt is not None) and ((epoch % args.validation_epochs) == 0)):
                logger.info(f'''Running validation...
 Generating {args.num_validation_images} images with prompt: {args.validation_prompt}.''')
                pipeline = AutoPipelineForText2Image.from_pretrained(args.pretrained_decoder_model_name_or_path, prior_prior=accelerator.unwrap_model(prior), torch_dtype=weight_dtype)
                pipeline = pipeline.to(accelerator.device)
                pipeline.set_progress_bar_config(disable=True)
                generator = torch.Generator(device=accelerator.device)
                if (args.seed is not None):
                    generator = generator.manual_seed(args.seed)
                images = []
                for _ in range(args.num_validation_images):
                    images.append(pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0])
                for tracker in accelerator.trackers:
                    if (tracker.name == 'tensorboard'):
                        np_images = np.stack([np.asarray(img) for img in images])
                        tracker.writer.add_images('validation', np_images, epoch, dataformats='NHWC')
                    if (tracker.name == 'wandb'):
                        tracker.log({'validation': [wandb.Image(image, caption=f'{i}: {args.validation_prompt}') for (i, image) in enumerate(images)]})
                del pipeline
                torch.cuda.empty_cache()
    # --- Save LoRA weights and run a final test generation ---------------
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        prior = prior.to(torch.float32)
        prior.save_attn_procs(args.output_dir)
        if args.push_to_hub:
            save_model_card(repo_id, images=images, base_model=args.pretrained_prior_model_name_or_path, dataset_name=args.dataset_name, repo_folder=args.output_dir)
            upload_folder(repo_id=repo_id, folder_path=args.output_dir, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'])
        # Reload the saved LoRA weights into a fresh pipeline for a final check.
        pipeline = AutoPipelineForText2Image.from_pretrained(args.pretrained_decoder_model_name_or_path, torch_dtype=weight_dtype)
        pipeline = pipeline.to(accelerator.device)
        pipeline.prior_prior.load_attn_procs(args.output_dir)
        generator = torch.Generator(device=accelerator.device)
        if (args.seed is not None):
            generator = generator.manual_seed(args.seed)
        images = []
        for _ in range(args.num_validation_images):
            images.append(pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0])
    if accelerator.is_main_process:
        for tracker in accelerator.trackers:
            if (len(images) != 0):
                if (tracker.name == 'tensorboard'):
                    np_images = np.stack([np.asarray(img) for img in images])
                    tracker.writer.add_images('test', np_images, epoch, dataformats='NHWC')
                if (tracker.name == 'wandb'):
                    tracker.log({'test': [wandb.Image(image, caption=f'{i}: {args.validation_prompt}') for (i, image) in enumerate(images)]})
    accelerator.end_training()
def launch_experiment(create_runner_fn, create_agent_fn):
    """Load gin configs from FLAGS, build the runner, and run the experiment."""
    run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
    create_runner_fn(
        FLAGS.base_dir,
        create_agent_fn,
        FLAGS.random_seed,
        FLAGS.agent_name,
        FLAGS.game_name,
        FLAGS.num_iterations,
    ).run_experiment()
def jload_twofiles_custom(f1, f2, mode='r'):
    """Load two parallel JSON files and zip them into comparison entries.

    Each entry takes its instruction (prompt) and ``output_2`` from the first
    file and ``output_1`` from the second file — note the cross-assignment of
    the outputs. Extra items in the longer file are ignored (zip semantics).
    """
    handle1 = _make_r_io_base(f1, mode)
    data1 = json.load(handle1)
    handle2 = _make_r_io_base(f2, mode)
    data2 = json.load(handle2)
    handle1.close()
    handle2.close()
    return [
        {
            'instruction': item1['prompt'],
            'input': '',
            'output_1': item2['output'],
            'output_2': item1['output'],
        }
        for item1, item2 in zip(data1, data2)
    ]
class AdapterT5BlockOutput():
    """Container for the adapter outputs of a single T5 block.

    NOTE(review): these are class-level annotated attributes defaulting to
    None — this looks like it was written as a @dataclass whose decorator
    is not visible here; confirm upstream before instantiating with
    keyword arguments.
    """
    # Adapter output of the feed-forward sub-layer.
    feed_forward: AdapterOutput = None
    # Adapter output of the self-attention sub-layer.
    self_attention: AdapterOutput = None
    # Adapter output of the cross-attention sub-layer (decoder blocks only).
    cross_attention: AdapterOutput = None
def test_small_request() -> None:
    """A small date-range request returns a non-empty frame with the expected column count."""
    start_dt, end_dt = sanitize_date_range('2019-06-01', None)
    frame = _small_request(start_dt, end_dt)
    assert frame is not None
    assert not frame.empty
    assert len(frame.columns) == CURRENT_SC_COLUMNS
    assert len(frame) > 0
class Conv1d_layer(nn.Module):
    """1-D convolution block: (de)convolution + optional batch-norm + activation.

    Modes:
        'conv':   ReflectionPad + Conv1d ('SAME' preserves the input length).
        'deconv': ConvTranspose1d with symmetric padding.
        'alias_free_up' / 'alias_free_down': anti-aliased resampling around a
            stride-1 convolution (requires torchaudio).

    Args:
        in_channels, out_channels, kernel_size, stride, dilation, bias:
            forwarded to the underlying convolution.
        padding: 'SAME' or 'VALID' (used by 'conv' and 'alias_free' modes).
        norm: 'batch' adds BatchNorm1d; any other value adds no normalization.
        activation: 'relu' or 'lrelu' (ignored for 'alias_free' modes, which
            hard-wire a LeakyReLU between the resampling stages).
        mode: 'conv', 'deconv', or a string containing 'alias_free'.

    Raises:
        ValueError: for an unknown ``padding`` (in 'conv'/'alias_free' modes)
            or an 'alias_free' mode naming neither 'up' nor 'down'.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, bias=True, norm='batch', activation='relu', mode='conv'):
        super(Conv1d_layer, self).__init__()
        self.conv1d = nn.Sequential()
        if (mode == 'deconv'):
            padding = int((dilation * (kernel_size - 1)) / 2)
            out_padding = (0 if (stride == 1) else 1)
        elif ((mode == 'conv') or ('alias_free' in mode)):
            if (padding == 'SAME'):
                # Asymmetric split of the total padding keeps the length unchanged.
                pad = int((kernel_size - 1) * dilation)
                l_pad = int(pad // 2)
                r_pad = (pad - l_pad)
                padding_area = (l_pad, r_pad)
            elif (padding == 'VALID'):
                padding_area = (0, 0)
            else:
                # BUG FIX: previously fell through silently and crashed later
                # with a NameError on ``padding_area``; fail fast instead.
                raise ValueError("padding must be 'SAME' or 'VALID', got {!r}".format(padding))
        # --- convolutional layer ---
        if (mode == 'deconv'):
            self.conv1d.add_module('deconv1d', nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=out_padding, dilation=dilation, bias=bias))
        elif (mode == 'conv'):
            self.conv1d.add_module(f'{mode}1d_pad', nn.ReflectionPad1d(padding_area))
            self.conv1d.add_module(f'{mode}1d', nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=0, dilation=dilation, bias=bias))
        elif ('alias_free' in mode):
            if ('up' in mode):
                up_factor = (stride * 2)
                down_factor = 2
            elif ('down' in mode):
                up_factor = 2
                down_factor = (stride * 2)
            else:
                raise ValueError("choose alias-free method : 'up' or 'down'")
            self.conv1d.add_module(f'{mode}1d_pad', nn.ReflectionPad1d(padding_area))
            self.conv1d.add_module(f'{mode}1d', nn.Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=dilation, bias=bias))
            # Upsample -> nonlinearity -> downsample implements the
            # anti-aliased activation; the net rate change comes from stride.
            self.conv1d.add_module(f'{mode}upsample', torchaudio.transforms.Resample(orig_freq=1, new_freq=up_factor))
            self.conv1d.add_module(f'{mode}lrelu', nn.LeakyReLU())
            self.conv1d.add_module(f'{mode}downsample', torchaudio.transforms.Resample(orig_freq=down_factor, new_freq=1))
        # --- normalization ---
        if (norm == 'batch'):
            self.conv1d.add_module('batch_norm', nn.BatchNorm1d(out_channels))
        # --- activation (alias_free modes embed their own LeakyReLU above) ---
        if ('alias_free' not in mode):
            if (activation == 'relu'):
                self.conv1d.add_module('relu', nn.ReLU())
            elif (activation == 'lrelu'):
                self.conv1d.add_module('lrelu', nn.LeakyReLU())
    def forward(self, input):
        """Apply the configured pad/conv/norm/activation stack."""
        output = self.conv1d(input)
        return output
def model_info(model, verbose=False):
    """Print a summary of *model*: parameter/gradient counts and, when
    ``thop`` is installed, an estimated GFLOPS figure.

    Args:
        model: a ``torch.nn.Module``.
        verbose: if True, also print per-parameter statistics.
    """
    n_p = sum(x.numel() for x in model.parameters())  # total parameter count
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # trainable
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for (i, (name, p)) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:
        from thop import profile
        (macs, _) = profile(model, inputs=(torch.zeros(1, 3, 480, 640),), verbose=False)
        # FLOPs ~= 2 * MACs; report in GFLOPS.  BUG FIX: the original divided
        # by ``.0`` (i.e. zero), which always raised and silently disabled
        # this report via the except-branch below.
        fs = ', %.1f GFLOPS' % (macs * 2 / 1e9)
    except Exception:
        # thop missing, or the dummy input shape does not fit the model.
        fs = ''
    print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
class _BaseMetric(ABC):
    """Abstract base class for tracker-evaluation metrics.

    Subclasses declare their result fields in ``__init__`` (scalar vs.
    per-threshold array, int vs. float) and implement the evaluation /
    combination hooks; this base supplies table printing and summarising.

    NOTE(review): several decorators appear to have been stripped during
    extraction — the bare ``_timing.time`` expression below reads like a
    ``@_timing.time`` decorator, ``get_name`` takes ``cls`` (presumably
    ``@classmethod``), and ``_combine_sum`` / ``_combine_weighted_av`` /
    ``_row_print`` take no ``self`` (presumably ``@staticmethod`` — note
    that ``print_table`` calling ``self._row_print(...)`` only formats
    correctly if ``_row_print`` is indeed static).  Confirm upstream.
    """
    def __init__(self):
        self.plottable = False            # True if the metric can plot per-tracker results
        self.integer_fields = []          # scalar integer result fields
        self.float_fields = []            # scalar float result fields
        self.array_labels = []            # threshold labels for the *_array_fields
        self.integer_array_fields = []    # per-threshold integer fields
        self.float_array_fields = []      # per-threshold float fields
        self.fields = []                  # all fields this metric produces
        self.summary_fields = []          # subset of fields shown in summaries
        self.registered = False
    # NOTE(review): bare expression — likely a stripped decorator (see class docstring).
    _timing.time
    def eval_sequence(self, data):
        """Evaluate the metric on a single sequence (abstract hook)."""
        ...
    def combine_sequences(self, all_res):
        """Combine per-sequence results into one result (abstract hook)."""
        ...
    def combine_classes_class_averaged(self, all_res):
        """Combine per-class results, averaging over classes (abstract hook)."""
        ...
    def combine_classes_det_averaged(self, all_res):
        """Combine per-class results, averaging over detections (abstract hook)."""
        ...
    def plot_single_tracker_results(self, all_res, tracker, output_folder, cls):
        """Plot results for one tracker; plottable subclasses must override."""
        if self.plottable:
            raise NotImplementedError(('plot_results is not implemented for metric %s' % self.get_name()))
        else:
            # Non-plottable metrics deliberately do nothing.
            pass
    def get_name(cls):
        """Return the metric's class name (used as its display name)."""
        return cls.__name__
    def _combine_sum(all_res, field):
        """Sum *field* over all result dicts in *all_res*."""
        return sum([all_res[k][field] for k in all_res.keys()])
    def _combine_weighted_av(all_res, field, comb_res, weight_field):
        """Weighted average of *field* using *weight_field* as the weight."""
        return (sum([(all_res[k][field] * all_res[k][weight_field]) for k in all_res.keys()]) / np.maximum(1.0, comb_res[weight_field]))
    def print_table(self, table_res, tracker, cls):
        """Print a per-sequence summary table followed by the combined row."""
        print('')
        metric_name = self.get_name()
        self._row_print(([((((metric_name + ': ') + tracker) + '-') + cls)] + self.summary_fields))
        for (seq, results) in sorted(table_res.items()):
            if (seq == 'COMBINED_SEQ'):
                continue
            summary_res = self._summary_row(results)
            self._row_print(([seq] + summary_res))
        summary_res = self._summary_row(table_res['COMBINED_SEQ'])
        self._row_print((['COMBINED'] + summary_res))
    def _summary_row(self, results_):
        """Format one summary row: array fields averaged, floats scaled x100."""
        vals = []
        for h in self.summary_fields:
            if (h in self.float_array_fields):
                vals.append('{0:1.5g}'.format((100 * np.mean(results_[h]))))
            elif (h in self.float_fields):
                vals.append('{0:1.5g}'.format((100 * float(results_[h]))))
            elif (h in self.integer_fields):
                vals.append('{0:d}'.format(int(results_[h])))
            else:
                raise NotImplementedError('Summary function not implemented for this field type.')
        return vals
    def _row_print(*argv):
        """Print a row with fixed-width columns (first column 35 chars wide)."""
        # Accept either a single list argument or varargs.
        if (len(argv) == 1):
            argv = argv[0]
        to_print = ('%-35s' % argv[0])
        for v in argv[1:]:
            to_print += ('%-10s' % str(v))
        print(to_print)
    def summary_results(self, table_res):
        """Return {summary_field: formatted value} for the combined result."""
        return dict(zip(self.summary_fields, self._summary_row(table_res['COMBINED_SEQ'])))
    def detailed_results(self, table_res):
        """Return {seq: {field: value}}, expanding each array field into one
        entry per threshold plus an ``___AUC`` (mean over thresholds) entry."""
        detailed_fields = (self.float_fields + self.integer_fields)
        for h in (self.float_array_fields + self.integer_array_fields):
            for alpha in [int((100 * x)) for x in self.array_labels]:
                detailed_fields.append(((h + '___') + str(alpha)))
            detailed_fields.append((h + '___AUC'))
        detailed_results = {}
        for (seq, res) in table_res.items():
            detailed_row = self._detailed_row(res)
            if (len(detailed_row) != len(detailed_fields)):
                raise TrackEvalException(('Field names and data have different sizes (%i and %i)' % (len(detailed_row), len(detailed_fields))))
            detailed_results[seq] = dict(zip(detailed_fields, detailed_row))
        return detailed_results
    def _detailed_row(self, res):
        """Flatten one result dict into the field order used by detailed_results."""
        detailed_row = []
        for h in (self.float_fields + self.integer_fields):
            detailed_row.append(res[h])
        for h in (self.float_array_fields + self.integer_array_fields):
            for (i, alpha) in enumerate([int((100 * x)) for x in self.array_labels]):
                detailed_row.append(res[h][i])
            detailed_row.append(np.mean(res[h]))
        return detailed_row
class DetrForSegmentation(metaclass=DummyObject):
    """Placeholder for the real ``DetrForSegmentation`` model.

    Dummy object used when the ``torch`` backend is unavailable;
    instantiation delegates the backend check to ``requires_backends``.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def main():
    """Train a DQN agent on CartPoleBulletEnv and save the learned policy."""
    environment = CartPoleBulletEnv(renders=False)
    q_network = deepq.models.mlp([64])
    policy = deepq.learn(
        environment,
        q_func=q_network,
        lr=0.001,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback,
    )
    print('Saving model to cartpole_model.pkl')
    policy.save('cartpole_model.pkl')
# NOTE(review): bare ``_task_action`` looks like a stripped decorator
# (e.g. ``@registry.register_task_action``); confirm against the original.
_task_action
class GoTowardPoint(TeleportAction):
    """Teleport the agent toward a point given in agent-relative polar
    coordinates (r, theta), clamped to navigable space.

    If the collision-filtered target is not navigable it is snapped to the
    navmesh; if still invalid, the agent stays in place.  When
    ``rotate_agent`` is configured, the agent is additionally rotated to
    face the requested point.
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Whether to turn the agent toward the target after moving.
        self._rotate_agent = self._config.rotate_agent
    def step(self, *args: Any, r: float, theta: float, **kwargs: Any) -> Observations:
        # Optional vertical offset of the target point.
        y_delta = (kwargs['y_delta'] if ('y_delta' in kwargs) else 0.0)
        pos = rtheta_to_global_coordinates(self._sim, r, theta, y_delta=y_delta, dimensionality=3)
        agent_pos = self._sim.get_agent_state().position
        # Slide toward the target respecting collisions.
        new_pos = np.array(self._sim.step_filter(agent_pos, pos))
        new_rot = self._sim.get_agent_state().rotation
        if (np.any(np.isnan(new_pos)) or (not self._sim.is_navigable(new_pos))):
            # Invalid step: stay put (optionally still turning toward the target).
            new_pos = agent_pos
            if self._rotate_agent:
                (new_rot, _) = compute_heading_to(agent_pos, pos)
        else:
            # Snap onto the navmesh; fall back to staying put if that fails.
            new_pos = np.array(self._sim.pathfinder.snap_point(new_pos))
            if (np.any(np.isnan(new_pos)) or (not self._sim.is_navigable(new_pos))):
                new_pos = agent_pos
            if self._rotate_agent:
                (new_rot, _) = compute_heading_to(agent_pos, pos)
        assert np.all(np.isfinite(new_pos))
        return self._sim.get_observations_at(position=new_pos, rotation=new_rot, keep_agent_at_new_pose=True)
    def action_space(self) -> spaces.Dict:
        """Box spaces for r (bounded by the space diagonal) and theta in [0, 2*pi].

        NOTE(review): reads like a stripped ``@property``; confirm upstream.
        """
        coord_range = (self.COORDINATE_MAX - self.COORDINATE_MIN)
        # BUG FIX: ``np.float`` (a deprecated alias of builtin float) was
        # removed in NumPy 1.24; use ``np.float64``, which is the dtype the
        # old alias resolved to.
        return spaces.Dict({'r': spaces.Box(low=np.array([0.0]), high=np.array([np.sqrt((2 * (coord_range ** 2)))]), dtype=np.float64), 'theta': spaces.Box(low=np.array([0.0]), high=np.array([(2 * np.pi)]), dtype=np.float64)})
def evaluate(args, model, corpus_dev, corpus_dev_cnt, dev_batches):
    """Run the topic model over the dev batches and report loss / KL / ppl.

    Accumulates reconstruction loss, KL divergence and per-document
    perplexity over all dev batches, prints one summary line and returns
    the (pre-exponentiation) perplexity value.

    NOTE(review): legacy (pre-0.4) PyTorch style — ``acc_loss[0]`` indexes
    the tensor returned by ``torch.sum`` (modern torch would need
    ``.item()``), and ``device`` is read from module scope.
    """
    model.eval()
    acc_loss = 0
    acc_kl_loss = 0
    acc_real_ppl = 0
    word_cnt = 0
    doc_cnt = 0
    start_time = time.time()
    ntokens = 2000  # vocabulary size passed to fetch_data when expanding a batch
    for (idx, batch) in enumerate(dev_batches):
        (data_batch, count_batch, mask) = fetch_data(corpus_dev, corpus_dev_cnt, batch, ntokens)
        data_batch = torch.FloatTensor(data_batch).to(device)
        mask = torch.FloatTensor(mask).to(device)
        (recon_loss, kld, _) = model(data_batch, mask)
        count_batch = torch.FloatTensor(count_batch).to(device)
        # Per-document perplexity contribution; masked documents contribute 0.
        real_ppl = (torch.div((recon_loss + kld).data, count_batch) * mask.data)
        for n in real_ppl:
            if (n == n):  # NaN check: NaN != NaN, so this skips NaN entries
                acc_real_ppl += n
        acc_loss += torch.sum(recon_loss).data
        acc_kl_loss += torch.sum((kld.data * torch.sum(mask.data)))
        count_batch = (count_batch + 1e-12)  # guard against division by zero below
        word_cnt += torch.sum(count_batch)
        doc_cnt += torch.sum(mask.data)
    cur_loss = (acc_loss[0] / word_cnt)
    cur_kl = (acc_kl_loss / doc_cnt)
    print_ppl = (acc_real_ppl / doc_cnt)
    elapsed = (time.time() - start_time)  # NOTE(review): computed but never used
    print('loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format(cur_loss, cur_kl, np.exp(print_ppl)))
    return print_ppl
def multiprecision_track(target, start, sols, gamma=0, pwt=2, decimals=80):
    """Track solution paths from *start* to *target* in arbitrary precision.

    Args:
        target: target polynomial system.
        start: start system with known solutions.
        sols: start solutions of the start system.
        gamma: homotopy gamma constant; 0 selects a random gamma.
        pwt: power of the continuation parameter t (used when gamma != 0).
        decimals: number of decimal places of working precision.

    Returns:
        The list of solutions of the target system after path tracking.
    """
    from phcpy.phcpy2c3 import py2c_copy_multprec_container_to_target_system
    from phcpy.phcpy2c3 import py2c_copy_multprec_container_to_start_system
    from phcpy.phcpy2c3 import py2c_copy_multprec_container_to_start_solutions
    from phcpy.phcpy2c3 import py2c_create_multprec_homotopy
    from phcpy.phcpy2c3 import py2c_create_multprec_homotopy_with_gamma
    from phcpy.phcpy2c3 import py2c_solve_by_multprec_homotopy_continuation
    from phcpy.phcpy2c3 import py2c_solcon_clear_multprec_solutions
    from phcpy.phcpy2c3 import py2c_copy_multprec_target_solutions_to_container
    from phcpy.interface import store_multprec_system
    from phcpy.interface import store_multprec_solutions
    from phcpy.interface import load_multprec_solutions
    store_multprec_system(target, decimals)
    py2c_copy_multprec_container_to_target_system()
    store_multprec_system(start, decimals)
    py2c_copy_multprec_container_to_start_system()
    if (gamma == 0):
        py2c_create_multprec_homotopy()
    else:
        # BUG FIX: the caller-supplied ``pwt`` was ignored (hard-coded to 2).
        py2c_create_multprec_homotopy_with_gamma(gamma.real, gamma.imag, pwt=pwt)
    dim = len(start)
    store_multprec_solutions(dim, sols)
    py2c_copy_multprec_container_to_start_solutions()
    py2c_solve_by_multprec_homotopy_continuation(decimals)
    py2c_solcon_clear_multprec_solutions()
    py2c_copy_multprec_target_solutions_to_container()
    return load_multprec_solutions()
class VarPropagationLayer(Layer):
    """Wraps a Keras layer and propagates variance through it.

    Subclasses implement ``_call_full_cov`` / ``_call_diag_cov``;
    ``call`` dispatches between them based on ``use_cov``.
    """
    def __init__(self, layer, use_cov=False, **kwargs):
        self.layer = layer
        self.use_cov = use_cov
        super(VarPropagationLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        super(VarPropagationLayer, self).build(input_shape)
    def call(self, x):
        # Dispatch to the covariance-structure-specific implementation.
        handler = self._call_full_cov if self.use_cov else self._call_diag_cov
        return handler(x)
    def _call_full_cov(self, x):
        # Full-covariance propagation: subclasses must override.
        raise NotImplementedError
    def _call_diag_cov(self, x):
        # Diagonal-covariance propagation: subclasses must override.
        raise NotImplementedError
    def compute_output_shape(self, input_shape):
        # The wrapped layer determines the output shape.
        return self.layer.compute_output_shape(input_shape)
def nfsp_default_log_filter(result: ResultDict) -> bool:
    """Log when exploitability was measured, or on every 100th iteration."""
    if 'avg_policy_exploitability' in result:
        return True
    return result['training_iteration'] % 100 == 0
def resnet50_landscape(pretrained=False, progress=True, **kwargs):
    """Build the ResNet-50 'landscape' variant via ``_resnet_landscape``.

    Args:
        pretrained: load pretrained weights if available.
        progress: show a download progress bar.
        **kwargs: forwarded to the underlying constructor.
    """
    return _resnet_landscape('resnet50_landscape', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def test_tinydb_observer_artifact_event(tinydb_obs, sample_run):
    """An artifact added via ``artifact_event`` is stored on the observer's
    filesystem and referenced (name and readable content) in the run record."""
    tinydb_obs.started_event(**sample_run)
    artifact_file = 'setup.py'
    artifact_name = 'mysetup'
    tinydb_obs.artifact_event(artifact_name, artifact_file)
    assert tinydb_obs.fs.exists(artifact_file)
    db_run = tinydb_obs.runs.get(eid=1)
    assert db_run['artifacts'][0][0] == artifact_name
    with open(artifact_file, 'rb') as fh:
        expected_bytes = fh.read()
    assert db_run['artifacts'][0][3].read() == expected_bytes
class Elementwise(nn.ModuleList):
    """Apply one module per feature slice along dim 2 and merge the results.

    The input is split into width-1 slices along dimension 2 and the i-th
    module is applied to the squeezed i-th slice.  ``merge`` selects how the
    per-slice outputs are combined: ``'first'`` keeps only the first,
    ``'concat'``/``'mlp'`` concatenate along dim 2, ``'sum'`` adds them,
    and ``None`` returns the list of outputs unchanged.
    """
    def __init__(self, merge=None, *args):
        assert merge in [None, 'first', 'concat', 'sum', 'mlp']
        self.merge = merge
        super(Elementwise, self).__init__(*args)
    def forward(self, input):
        slices = [chunk.squeeze(2) for chunk in input.split(1, dim=2)]
        assert len(self) == len(slices)
        results = [module(piece) for (module, piece) in zip(self, slices)]
        if self.merge == 'first':
            return results[0]
        if self.merge in ('concat', 'mlp'):
            return torch.cat(results, 2)
        if self.merge == 'sum':
            return sum(results)
        return results
class MLP(nn.Module):
    """Single-hidden-layer perceptron with a softmax output.

    The forward pass flattens the input to (N, dim_in) — the flattening
    formula assumes a 4-d (N, C, H, W) input so that
    ``shape[1] * shape[-2] * shape[-1] == C*H*W`` — then applies
    Linear -> Dropout -> ReLU -> Linear -> Softmax.
    """
    def __init__(self, dim_in, dim_hidden, dim_out):
        super(MLP, self).__init__()
        self.layer_input = nn.Linear(dim_in, dim_hidden)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.layer_hidden = nn.Linear(dim_hidden, dim_out)
        self.softmax = nn.Softmax(dim=1)
    def forward(self, x):
        flat_dim = x.shape[1] * x.shape[-2] * x.shape[-1]
        out = x.view(-1, flat_dim)
        out = self.layer_input(out)
        out = self.dropout(out)
        out = self.relu(out)
        out = self.layer_hidden(out)
        return self.softmax(out)
def add_forbidden(conf_space, pipeline, matches, dataset_properties, include, exclude):
    """Add forbidden-clause constraints to *conf_space* for incompatible
    combinations of pipeline component choices.

    ``matches`` is an array with one axis per pipeline node where a
    zero-sum slice means that combination of component choices is invalid.
    For every maximal run of consecutive "choice" nodes, all sub-chains of
    length >= 2 are scanned and each all-zero combination becomes a
    ``ForbiddenAndConjunction``, skipping combinations already covered by a
    shorter (subsuming) constraint.

    Args:
        conf_space: ConfigurationSpace to extend (mutated and returned).
        pipeline: list of (node_name, node) pairs.
        matches: compatibility array over all choice combinations.
        dataset_properties: forwarded to ``get_available_components``.
        include / exclude: optional {node_name: components} filters.

    Returns:
        The (mutated) ``conf_space``.
    """
    node_i_is_choice = []
    node_i_choices_names = []
    node_i_choices = []
    all_nodes = []
    for (node_name, node) in pipeline:
        all_nodes.append(node)
        # A node is a "choice" if it exposes selectable sub-components.
        is_choice = hasattr(node, 'get_available_components')
        node_i_is_choice.append(is_choice)
        node_include = (include.get(node_name) if (include is not None) else None)
        node_exclude = (exclude.get(node_name) if (exclude is not None) else None)
        if is_choice:
            node_i_choices_names.append(node.get_available_components(dataset_properties, include=node_include, exclude=node_exclude).keys())
            node_i_choices.append(node.get_available_components(dataset_properties, include=node_include, exclude=node_exclude).values())
        else:
            # Non-choice nodes contribute themselves as the only "choice".
            node_i_choices_names.append([node_name])
            node_i_choices.append([node])
    # Find maximal runs of consecutive choice nodes as (start, stop) pairs.
    choices_chains = []
    idx = 0
    while (idx < len(pipeline)):
        if node_i_is_choice[idx]:
            chain_start = idx
            idx += 1
            while ((idx < len(pipeline)) and node_i_is_choice[idx]):
                idx += 1
            chain_stop = idx
            choices_chains.append((chain_start, chain_stop))
        idx += 1
    for choices_chain in choices_chains:
        constraints = set()
        chain_start = choices_chain[0]
        chain_stop = choices_chain[1]
        chain_length = (chain_stop - chain_start)
        # Examine every contiguous sub-chain of length >= 2 inside this run.
        for sub_chain_length in range(2, (chain_length + 1)):
            for start_idx in range(chain_start, ((chain_stop - sub_chain_length) + 1)):
                indices = range(start_idx, (start_idx + sub_chain_length))
                node_names = [pipeline[idx][0] for idx in indices]
                num_node_choices = []
                node_choice_names = []
                skip_array_shape = []
                for idx in indices:
                    node = all_nodes[idx]
                    available_components = node.get_available_components(dataset_properties, include=node_i_choices_names[idx])
                    assert (len(available_components) > 0), len(available_components)
                    skip_array_shape.append(len(available_components))
                    num_node_choices.append(range(len(available_components)))
                    node_choice_names.append([name for name in available_components])
                # Pre-mark combinations where a single choice already rules
                # out everything, so the main loop below can skip them.
                skip_array = np.zeros(skip_array_shape)
                for product in itertools.product(*num_node_choices):
                    for (node_idx, choice_idx) in enumerate(product):
                        node_idx += start_idx
                        slices_ = tuple(((slice(None) if (idx != node_idx) else slice(choice_idx, (choice_idx + 1))) for idx in range(len(matches.shape))))
                        if (np.sum(matches[slices_]) == 0):
                            skip_array[product] = 1
                for product in itertools.product(*num_node_choices):
                    if skip_array[product]:
                        continue
                    slices = tuple(((slice(None) if (idx not in indices) else slice(product[(idx - start_idx)], (product[(idx - start_idx)] + 1))) for idx in range(len(matches.shape))))
                    if (np.sum(matches[slices]) == 0):
                        constraint = tuple([(node_names[i], node_choice_names[i][product[i]]) for i in range(len(product))])
                        # Skip if a shorter sub-constraint already forbids this.
                        continue_ = False
                        for constraint_length in range(2, len(constraint)):
                            constr_starts = ((len(constraint) - constraint_length) + 1)
                            for constraint_start_idx in range(constr_starts):
                                constraint_end_idx = (constraint_start_idx + constraint_length)
                                sub_constraint = constraint[constraint_start_idx:constraint_end_idx]
                                if (sub_constraint in constraints):
                                    continue_ = True
                                    break
                            if continue_:
                                break
                        if continue_:
                            continue
                        constraints.add(constraint)
                        forbiddens = []
                        for i in range(len(product)):
                            forbiddens.append(ForbiddenEqualsClause(conf_space.get_hyperparameter((node_names[i] + ':__choice__')), node_choice_names[i][product[i]]))
                        forbidden = ForbiddenAndConjunction(*forbiddens)
                        conf_space.add_forbidden_clause(forbidden)
    return conf_space
# NOTE(review): bare ``.skip()`` is not valid Python on its own — it looks
# like a stripped decorator (e.g. ``@pytest.mark.skip()``); confirm upstream.
.skip()
def test_redwood_indoor_office2():
    """Check the RedwoodIndoorOffice2 dataset: directories exist, the point
    cloud loads, and both the clean and noisy RGB-D sequences have 2538 frames."""
    gt_prefix = 'RedwoodIndoorOffice2'
    (_, gt_download_dir, gt_extract_dir) = get_test_data_dirs(gt_prefix)
    dataset = o3d.data.RedwoodIndoorOffice2()
    assert Path(gt_download_dir).is_dir()
    assert Path(gt_extract_dir).is_dir()
    pcd = o3d.io.read_point_cloud(dataset.point_cloud_path)
    # Pair every color frame with its (clean) depth frame.
    im_rgbds = []
    for (color_path, depth_path) in zip(dataset.color_paths, dataset.depth_paths):
        im_color = o3d.io.read_image(color_path)
        im_depth = o3d.io.read_image(depth_path)
        im_rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(im_color, im_depth)
        im_rgbds.append(im_rgbd)
    assert (len(im_rgbds) == 2538)
    # Same pairing using the noisy depth maps.
    im_noisy_rgbds = []
    for (color_path, depth_path) in zip(dataset.color_paths, dataset.noisy_depth_paths):
        im_color = o3d.io.read_image(color_path)
        im_depth = o3d.io.read_image(depth_path)
        im_rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(im_color, im_depth)
        im_noisy_rgbds.append(im_rgbd)
    assert (len(im_noisy_rgbds) == 2538)
def load_embeddings(embfile):
    """Load word vectors from *embfile*.

    Each line is expected to hold a token (anything after the first ``_``
    is discarded) followed by space-separated vector components.

    Args:
        embfile: path to the embeddings file; a falsy value aborts the
            program with an error message (legacy behaviour kept).

    Returns:
        tuple: ({word: row_index}, 2-D numpy matrix of vectors).
    """
    print('Loading embeddings... ', end='')
    sys.stdout.flush()
    if (not embfile):
        print()
        sys.stderr.write("No clusters specified. Please add line 'clusters[path]' to data config file!\n")
        sys.exit(1)
    # Read the file once; the original opened it twice and never closed
    # either handle.
    with open(embfile) as fh:
        lines = fh.readlines()
    words = [line.split(' ', 1)[0].split('_')[0] for line in lines]
    w2id = {word: word_id for (word_id, word) in enumerate(words)}
    # np.loadtxt accepts an iterable of row strings (vector part of each line).
    emb_matrix = np.loadtxt(line.split(' ', 1)[1] for line in lines)
    sys.stdout.flush()
    for _sym in SPECIAL_SYMBOLS.keys():
        assert (_sym in w2id.keys()), 'Required special symbol {} not found in provided vectors file {}'.format(_sym, embfile)
    print('Done!')
    return (w2id, emb_matrix)
def _get_stanford2d3d_pairs(folder, fold, mode='train'):
    """Collect (rgb, semantic) panorama path pairs for a Stanford2D3D fold.

    Args:
        folder: dataset root containing ``<area>/pano/rgb/*_rgb.png``.
        fold: fold identifier used to look up area ids in ``__FOLD__``.
        mode: 'train' or 'val' (fold-specific split) or 'trainval'.

    Returns:
        (img_paths, mask_paths): sorted RGB paths and the corresponding
        semantic-mask paths.  Every 'rgb' occurrence in a path is swapped
        for 'semantic', i.e. both the directory and the ``_rgb`` filename
        suffix change.

    Raises:
        NotImplementedError: for any other ``mode``.
    """
    if mode in ('train', 'val'):
        # The original had two identical branches; both key on '<fold>_<mode>'.
        area_ids = __FOLD__['{}_{}'.format(fold, mode)]
    elif mode == 'trainval':
        area_ids = __FOLD__[mode]
    else:
        raise NotImplementedError
    img_paths = []
    for area in area_ids:
        img_paths += glob.glob(os.path.join(folder, '{}/pano/rgb/*_rgb.png'.format(area)))
    img_paths = sorted(img_paths)
    mask_paths = [imgpath.replace('rgb', 'semantic') for imgpath in img_paths]
    return (img_paths, mask_paths)
class MLP(Layer):
    """Lazily-built TensorFlow MLP layer.

    On first construction (``self._params is None``) the graph is created
    and its trainable variables are captured by scope-relative name;
    later constructions reuse the stored parameters via ``forward_mlp``.
    """
    def __init__(self, *args, **kwargs):
        Serializable.quick_init(self, locals())
        Layer.__init__(self, *args, **kwargs)
        self.build_graph()
    def build_graph(self):
        """Create (or re-wire) the MLP ops inside this layer's variable scope."""
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            if (self._params is None):
                # First build: create variables and remember them keyed by
                # scope-relative name so they can be re-applied later.
                (self.input_var, self.output_var) = create_mlp(output_dim=self.output_dim, hidden_sizes=self.hidden_sizes, hidden_nonlinearity=self.hidden_nonlinearity, output_nonlinearity=self.output_nonlinearity, input_dim=(None, self.input_dim), input_var=self.input_var, batch_normalization=self.batch_normalization)
                current_scope = tf.get_default_graph().get_name_scope()
                trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=current_scope)
                self._params = OrderedDict([(remove_scope_from_name(var.name, current_scope), var) for var in trainable_vars])
            else:
                # Rebuild the forward pass with the previously captured params.
                (self.input_var, self.output_var) = forward_mlp(output_dim=self.output_dim, hidden_sizes=self.hidden_sizes, hidden_nonlinearity=self.hidden_nonlinearity, output_nonlinearity=self.output_nonlinearity, input_var=self.input_var, mlp_params=self._params)
def _get_file_md5sum(file_name):
hash_obj = hashlib.md5()
with open(file_name, 'r') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest() |
def state_divergence_loss(prior, posterior, config, reduce=True, balance=0.2):
    """KL-balancing divergence between prior and posterior latent states.

    Mixes KL(post || sg(prior)) and KL(sg(post) || prior) with weight
    ``balance`` (sg = stop-gradient via ``detach``), averaging each KL over
    the last axis.  Returns the mean when ``reduce`` is True, otherwise the
    per-element divergence.
    """
    prior_dist = reshape_dist(prior, config)
    post_dist = reshape_dist(posterior, config)
    # KL toward a frozen prior (trains the posterior) ...
    kl_post = kl_div_categorical(post_dist, prior_dist.detach()).mean(-1)
    # ... and toward a frozen posterior (trains the prior).
    kl_prior = kl_div_categorical(post_dist.detach(), prior_dist).mean(-1)
    kl_div = balance * kl_post + (1 - balance) * kl_prior
    return kl_div.mean() if reduce else kl_div
def train(train_loader, model, criterion, optimizer, epoch):
    """Train *model* for one epoch over *train_loader*.

    Tracks batch/data timing, loss and top-1/top-5 accuracy via
    AverageMeters and prints progress every ``args.print_freq`` batches.

    NOTE(review): legacy (pre-0.4) PyTorch style — ``Variable`` wrappers
    and ``loss.data[0]`` (modern torch needs ``.item()``).  The progress
    format string contains bare ``'\\ {top1...'`` sequences where metric
    labels (e.g. 'Prec@1') appear to have been lost — confirm upstream.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        # Per-iteration learning-rate schedule.
        lr = adjust_learning_rate(optimizer, epoch, args.epochs, args.lr, iteration=i, iterations_per_epoch=len(train_loader), method=args.lr_policy)
        data_time.update((time.time() - end))
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = criterion(output, target_var)
        (acc1, acc5) = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\ {top1.val:.3f} ({top1.avg:.3f})\ {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
def train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    """One training epoch for the re-identification model.

    Combines cross-entropy on the classifier outputs with triplet loss on
    the features (triplet only when ``args.htri_only``) and prints the
    running loss every ``args.print_freq`` batches.

    NOTE(review): legacy (pre-0.4) PyTorch style (``Variable``,
    ``loss.data[0]``) — modern torch needs ``loss.item()``.
    """
    model.train()
    losses = AverageMeter()
    for (batch_idx, (imgs, pids, _)) in enumerate(trainloader):
        if use_gpu:
            (imgs, pids) = (imgs.cuda(), pids.cuda())
        (imgs, pids) = (Variable(imgs), Variable(pids))
        (outputs, features) = model(imgs)
        if args.htri_only:
            # Triplet loss only.
            loss = criterion_htri(features, pids)
        else:
            # Classification + triplet loss.
            xent_loss = criterion_xent(outputs, pids)
            htri_loss = criterion_htri(features, pids)
            loss = (xent_loss + htri_loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.data[0], pids.size(0))
        if (((batch_idx + 1) % args.print_freq) == 0):
            print('Batch {}/{}\t Loss {:.6f} ({:.6f})'.format((batch_idx + 1), len(trainloader), losses.val, losses.avg))
def flops_per_step(n_blocks, dim, batch_size, model_type, seq_len=SEQ_LEN):
    """Estimate training FLOPs per optimizer step.

    Starts from the 6 * N(params) FLOPs-per-token rule and adds an
    attention term; diffusion models pay the attention term twice plus a
    fixed overhead factor.

    Args:
        n_blocks, dim: transformer depth and width (passed to ``params``).
        batch_size: sequences per step.
        model_type: 'autoregressive' or 'diffusion'.
        seq_len: tokens per sequence.

    Raises:
        ValueError: for an unknown ``model_type`` (previously a bare,
            message-less ``Exception``).
    """
    flops_per_token = 6 * params(n_blocks, dim)
    if (model_type == 'autoregressive'):
        flops_per_token += (n_blocks * seq_len) * dim
    elif (model_type == 'diffusion'):
        flops_per_token += 2 * ((n_blocks * seq_len) * dim)
        # Extra constant overhead factor applied to diffusion models only.
        flops_per_token *= 1.0 + (0.33 * 0.25)
    else:
        raise ValueError('unknown model_type: {!r}'.format(model_type))
    tokens_per_step = seq_len * batch_size
    return flops_per_token * tokens_per_step
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Evaluate a trained spherical CNN on the SHREC'17 perturbed split.

    Loads ``model.py``/``state.pkl`` from *log_dir*, runs inference over
    the dataset (summing predictions over *augmentation* random rotations
    per mesh), writes one retrieval file per object, then downloads and
    runs the official Node.js evaluator and copies its summary CSV back
    into *log_dir*.

    NOTE(review): the ``url = '`` literal below is truncated (unterminated
    string) — the evaluator download URL was lost in extraction and must be
    restored before this function can run.
    """
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # Cache spherical projections; each sample yields `augmentation`
    # rotated copies stacked into one tensor.
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    transform = KeepName(transform)
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # Load the model class from the run directory and restore its weights.
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    # Fresh results directory for this dataset split.
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            data = data[0]
        (file_names, data) = data
        (batch_size, rep) = data.size()[:2]
        # Fold the rotation axis into the batch, predict, then sum the
        # per-rotation logits back per object.
        data = data.view((- 1), *data.size()[2:])
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)
        predictions.append(pred.cpu().numpy())
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # For each object, write all objects predicted to share its class,
    # ranked by the score of that class (descending).
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    url = '
    file_path = 'evaluator.zip'
    # Stream the evaluator archive to disk in 16 MiB chunks.
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
# NOTE(review): bare ``(num_cpus=0)`` is not valid Python on its own — this
# looks like a stripped ``@ray.remote(num_cpus=0)`` decorator; confirm.
(num_cpus=0)
class RemoteLinearParameterScheduler(object):
    """Linearly anneals a value from ``start_val`` to ``end_val`` over
    ``timesteps_annealing`` steps, then holds it at ``end_val``."""
    def __init__(self, start_val: float, end_val: float, timesteps_annealing: int):
        self._start_val = start_val
        self._end_val = end_val
        assert (timesteps_annealing >= 0), timesteps_annealing
        self._timesteps_annealing = timesteps_annealing
        # Schedule starts at the initial value.
        self._curr_val = self._start_val
    def _calculate_new_val(self, timesteps: float):
        """Recompute the annealed value for the given elapsed timesteps."""
        assert (timesteps >= 0), timesteps
        if (timesteps >= self._timesteps_annealing):
            self._curr_val = self._end_val
        else:
            fraction_done = (timesteps / self._timesteps_annealing)
            # Tolerate float round-off at the interval edges.
            assert ((0.0 - 1e-09) <= fraction_done <= (1.0 + 1e-09))
            self._curr_val = ((self._start_val * (1.0 - fraction_done)) + (self._end_val * fraction_done))
    def update_value(self, timesteps: int):
        """Advance the schedule to *timesteps* elapsed steps."""
        self._calculate_new_val(timesteps=timesteps)
    def get_value(self):
        """Return the most recently computed value."""
        return self._curr_val
class MobileViTOutput(nn.Module):
    """Output stage of a MobileViT FFN block: projection, dropout, residual add."""
    def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
        super().__init__()
        self.dense = nn.Linear(intermediate_size, hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project back to the hidden size, regularise, then add the residual.
        projected = self.dropout(self.dense(hidden_states))
        return projected + input_tensor
class CustomHFIndex(HFIndexBase):
    """RAG retrieval index backed by a user-provided HF dataset + FAISS file.

    The FAISS index is loaded lazily from ``index_path`` on first use.
    """
    def __init__(self, vector_size: int, dataset, index_path=None):
        # With no index_path the dataset is treated as already indexed.
        super().__init__(vector_size, dataset, index_initialized=(index_path is None))
        self.index_path = index_path
    # NOTE(review): takes ``cls`` — presumably a stripped ``@classmethod``.
    def load_from_disk(cls, vector_size, dataset_path, index_path):
        """Alternate constructor: load the dataset and FAISS index from disk."""
        logger.info(f'Loading passages from {dataset_path}')
        if ((dataset_path is None) or (index_path is None)):
            raise ValueError("Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` and `dataset.get_index('embeddings').save(index_path)`.")
        dataset = load_from_disk(dataset_path)
        return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
    def init_index(self):
        """Attach the FAISS index to the dataset if not already initialized."""
        if (not self.is_initialized()):
            logger.info(f'Loading index from {self.index_path}')
            self.dataset.load_faiss_index('embeddings', file=self.index_path)
            self._index_initialized = True
class AWACDataset(Dataset):
    """Offline dataset combining the AWAC off-policy and demo trajectories.

    Downloads (cached) the AWAC data archive, concatenates the 'awac_off'
    and 'awac_demo' trajectory files for *env_name*, and hands the flat
    transition arrays to the ``Dataset`` base class.

    NOTE(review): the ``url = '`` literal below is truncated (the download
    URL was lost in extraction).  Also the cache directory is spelled
    'avac' — possibly a typo for 'awac'; confirm before changing, since
    existing caches would use the current path.
    """
    def __init__(self, env_name: str, clip_to_eps: bool=True, eps: float=1e-05):
        dataset_path = os.path.join(d4rl.offline_env.DATASET_PATH, 'avac')
        zip_path = os.path.join(dataset_path, 'all.zip')
        url = '
        gdown.cached_download(url, zip_path, postprocess=gdown.extractall)
        observations = []
        actions = []
        rewards = []
        terminals = []
        dones_float = []
        next_observations = []
        env = gym.make(env_name)
        for dataset_name in ['awac_off', 'awac_demo']:
            file_name = ENV_NAME_TO_FILE[env_name][dataset_name]
            dataset = np.load(os.path.join(dataset_path, file_name), allow_pickle=True)
            for trajectory in dataset:
                # Episodes exactly at the step limit are presumably time-limit
                # truncations, not true terminals — TODO confirm.
                if (len(trajectory['observations']) == env._max_episode_steps):
                    trajectory['terminals'][(- 1)] = False
                observations.append(trajectory['observations'])
                actions.append(trajectory['actions'])
                rewards.append(trajectory['rewards'])
                terminals.append(trajectory['terminals'])
                # Mark the last transition of every trajectory as done.
                done_float = np.zeros_like(trajectory['rewards'])
                done_float[(- 1)] = 1.0
                dones_float.append(done_float)
                next_observations.append(trajectory['next_observations'])
        observations = np.concatenate(observations, 0)
        actions = np.concatenate(actions, 0)
        rewards = np.concatenate(rewards, 0)
        terminals = np.concatenate(terminals, 0)
        dones_float = np.concatenate(dones_float, 0)
        next_observations = np.concatenate(next_observations, 0)
        if clip_to_eps:
            # Clip actions slightly inside [-1, 1].
            lim = (1 - eps)
            actions = np.clip(actions, (- lim), lim)
        super().__init__(observations=observations.astype(np.float32), actions=actions.astype(np.float32), rewards=rewards.astype(np.float32), masks=(1.0 - terminals.astype(np.float32)), dones_float=dones_float.astype(np.float32), next_observations=next_observations.astype(np.float32), size=len(observations))
class TestConvTBC(unittest.TestCase):
    """ConvTBC (time x batch x channel layout) must match nn.Conv1d exactly."""
    def test_convtbc(self):
        # Mirror a Conv1d's parameters into ConvTBC (weight axes 0<->2 swapped).
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)
        # Input is (time=7, batch=2, channels=4); Conv1d wants (B, C, T).
        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True
        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)
        # Forward outputs must agree (up to layout).
        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
        # Backprop the same gradient through both layouts.
        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)
        # Weight, bias and input gradients must agree as well.
        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
    def assertAlmostEqual(self, t1, t2):
        """Tensor-aware override: equal sizes and max abs difference < 1e-4."""
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
def save_model(path, optimizer, ema, epoch, H):
    """Checkpoint the optimizer state and EMA parameters, and snapshot the log.

    Saves two Flax checkpoints (``path`` for (optimizer, epoch) and
    ``path + '_ema'`` for the EMA parameters), then copies the run's
    ``log.jsonl`` next to the checkpoint.
    """
    import shutil  # local import: only needed here; keeps module imports unchanged
    (optimizer, ema) = jax_utils.unreplicate((optimizer, ema))
    checkpoints.save_checkpoint(path, (optimizer, epoch), optimizer.state.step)
    checkpoints.save_checkpoint((path + '_ema'), ema, optimizer.state.step)
    from_log = os.path.join(H.save_dir, 'log.jsonl')
    to_log = f'{os.path.dirname(path)}/{os.path.basename(path)}-log.jsonl'
    # shutil.copy is portable and in-process; the original spawned an
    # external 'cp', which fails on Windows and forks a process per save.
    shutil.copy(from_log, to_log)
class Leaf(F):
    """Terminal formula node wrapping a single variable value."""
    def __init__(self, val):
        self.val = val
    def is_leaf(self):
        """Leaves are, by definition, leaves."""
        return True
    def get_vals(self):
        """Return the wrapped value as a one-element list."""
        return [self.val]
    def __len__(self):
        # A leaf counts as a single node.
        return 1
    def __str__(self):
        return str(self.val)
    def __repr__(self):
        return 'Leaf({})'.format(str(self))
    def __hash__(self):
        # Hash on the string form, so leaves that print equally collide.
        return hash(str(self))
    def to_str(self, namer, sort=False):
        """Render via *namer*; ``sort`` is accepted for API compatibility only."""
        return namer(self.val)
    def to_expr(self, namer=(lambda x: x)):
        """Convert to a pyeda expression variable, renaming via *namer*."""
        return pyeda.boolalg.expr.exprvar(namer(self.val))
class GenericAntisymmetrize(Module):
    """Flax module that antisymmetrizes ``fn_to_antisymmetrize`` over
    particle permutations of every input leaf.

    For each leaf of the input PyTree, all permutations along the particle
    axis (-2) are generated together with their signs; the wrapped function
    is evaluated on every combination of per-leaf permutations and the
    signed results are summed, yielding an output antisymmetric under
    particle exchange within each leaf.

    NOTE(review): the bare ``.compact`` line below looks like a stripped
    ``@nn.compact`` decorator for ``__call__``; confirm upstream.
    """
    # Function evaluated on each permuted input configuration.
    fn_to_antisymmetrize: Callable[([Array], Array)]
    # If False, return the raw signed sum; otherwise return signed-log form.
    logabs: bool = True
    def setup(self):
        # Alias kept so the function is accessible as a submodule attribute.
        self._fn_to_antisymmetrize = self.fn_to_antisymmetrize
    def _get_single_leaf_perm(self, x: Array) -> Tuple[(Array, Array)]:
        """Return (permutations, signs) over the particle axis (-2) of *x*."""
        n = x.shape[(- 2)]
        return ParallelPermutations(n)(x)
    .compact
    def __call__(self, xs: PyTree) -> Union[(Array, SLArray)]:
        perms_and_signs = jax.tree_map(self._get_single_leaf_perm, xs)
        (perms_and_signs_leaves, _) = jax.tree_util.tree_flatten(perms_and_signs, is_tuple_of_arrays)
        nleaves = len(perms_and_signs_leaves)
        nperms_per_leaf = [leaf[0].shape[(- 3)] for leaf in perms_and_signs_leaves]
        broadcasted_perms = []
        reshaped_signs = []
        for (i, (leaf_perms, leaf_signs)) in enumerate(perms_and_signs_leaves):
            # Give each leaf its own broadcast axis so every combination of
            # per-leaf permutations is formed when the leaves are combined.
            ith_factorial = ((((1,) * i) + leaf_signs.shape[0:1]) + ((1,) * ((nleaves - i) - 1)))
            sign_shape = (ith_factorial + (1,))
            leaf_signs = jnp.reshape(leaf_signs, sign_shape)
            reshaped_signs.append(leaf_signs)
            reshape_x_shape = ((leaf_perms.shape[:(- 3)] + ith_factorial) + leaf_perms.shape[(- 2):])
            broadcast_x_shape = ((leaf_perms.shape[:(- 3)] + tuple(nperms_per_leaf)) + leaf_perms.shape[(- 2):])
            leaf_perms = jnp.reshape(leaf_perms, reshape_x_shape)
            leaf_perms = jnp.broadcast_to(leaf_perms, broadcast_x_shape)
            # Flatten the particle/feature axes so leaves can be concatenated.
            flat_leaf_perms = jnp.reshape(leaf_perms, (leaf_perms.shape[:(- 2)] + ((- 1),)))
            broadcasted_perms.append(flat_leaf_perms)
        concat_perms = jnp.concatenate(broadcasted_perms, axis=(- 1))
        all_perms_out = self._fn_to_antisymmetrize(concat_perms)
        # Multiply by every leaf's permutation sign, then sum over all
        # permutation axes (plus the trailing singleton).
        signed_perms_out = _reduce_prod_over_leaves([all_perms_out, reshaped_signs])
        antisymmetrized_out = jnp.sum(signed_perms_out, axis=tuple(((- i) for i in range(1, (nleaves + 2)))))
        if (not self.logabs):
            return antisymmetrized_out
        return array_to_slog(antisymmetrized_out)
def quantize(input_path: str, output_path: str, model_family: str, dtype: str='q4_0'):
    """Quantize a converted model binary with the bundled native quantizer.

    Args:
        input_path: Path to the source model file (must exist).
        output_path: Existing directory; the quantized file
            ``bigdl_llm_<family>_<dtype>.bin`` is written inside it.
        model_family: One of 'llama', 'bloom', 'gptneox', 'starcoder'.
        dtype: Quantization type key understood by the model family
            (defaults to 'q4_0').

    Returns:
        The full path of the quantized output file as a string.

    Raises:
        Whatever ``invalidInputError`` raises on invalid arguments or a
        non-zero quantizer exit status.
    """
    invalidInputError((model_family in ['llama', 'bloom', 'gptneox', 'starcoder']), "Now we only support quantization of model family('llama', 'bloom', 'gptneox', 'starcoder')", '{} is not in the list.'.format(model_family))
    invalidInputError(os.path.isfile(input_path), 'The file {} is not found'.format(input_path))
    invalidInputError(os.path.isdir(output_path), 'The output_path {} is not a directory'.format(output_path))
    quantize_type_map = _quantize_type[model_family]
    output_filename = 'bigdl_llm_{}_{}.bin'.format(model_family, dtype.lower())
    output_path = os.path.join(output_path, output_filename)
    invalidInputError((dtype.lower() in quantize_type_map), '{0} model just accept {1} now, but you pass in {2}.'.format(model_family, list(quantize_type_map.keys()), dtype))
    quantize_type = quantize_type_map[dtype]
    # Windows binaries carry an .exe suffix.
    if platform.platform().startswith('Windows'):
        suffix = '.exe'
    else:
        suffix = ''
    # Build argv as a list so paths containing spaces survive intact
    # (the previous str.format + split() broke on such paths).
    quantize_args = [
        '{0}/libs/quantize-{1}{2}'.format(libs_dirname, model_family, suffix),
        input_path,
        output_path,
        str(quantize_type),
    ]
    p = subprocess.run(quantize_args, capture_output=True)
    # p.stderr is bytes under capture_output=True; decode so the error
    # message is readable instead of a b'...' repr.
    error_message = p.stderr.decode(errors='replace')
    invalidInputError((not p.returncode), 'Fail to quantize {}, error message is {}.'.format(str(input_path), error_message))
    return str(output_path)
def request_data_key_plaintext(ip, port, encrypted_primary_key, encrypted_data_key):
    """Ask the KMS at ip:port to decrypt a data key and return the plaintext."""
    payload = {
        'keyid': encrypted_primary_key,
        'ciphertext': encrypted_data_key,
        'aad': 'test',
    }
    response = post_request(ip, port, 'Decrypt', payload)
    return response['plaintext']
def dict_to_tf_example(data, dataset_directory, label_map_dict, ignore_difficult_instances=False, image_subdirectory='JPEGImages'):
    """Convert one parsed PASCAL-VOC record into a ``tf.train.Example``.

    Args:
        data: dict holding the fields of a single PASCAL XML annotation.
        dataset_directory: root directory of the dataset on disk.
        label_map_dict: maps class names to integer label ids.
        ignore_difficult_instances: when True, skip objects marked difficult.
        image_subdirectory: image folder under ``data['folder']``.

    Returns:
        A populated ``tf.train.Example`` for the image and its boxes.

    Raises:
        ValueError: if the image on disk is not a JPEG.
    """
    full_path = os.path.join(dataset_directory, data['folder'], image_subdirectory, data['filename'])
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    image = PIL.Image.open(io.BytesIO(encoded_jpg))
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    key = hashlib.sha256(encoded_jpg).hexdigest()

    width = int(data['size']['width'])
    height = int(data['size']['height'])

    # Parallel per-object annotation lists, kept aligned by construction.
    xmin, ymin, xmax, ymax = [], [], [], []
    classes, classes_text = [], []
    truncated, poses, difficult_obj = [], [], []
    for obj in data['object']:
        difficult = bool(int(obj['difficult']))
        if ignore_difficult_instances and difficult:
            continue
        difficult_obj.append(int(difficult))
        box = obj['bndbox']
        # Normalize corners to [0, 1] relative coordinates.
        xmin.append(float(box['xmin']) / width)
        ymin.append(float(box['ymin']) / height)
        xmax.append(float(box['xmax']) / width)
        ymax.append(float(box['ymax']) / height)
        classes_text.append(obj['name'].encode('utf8'))
        classes.append(label_map_dict[obj['name']])
        truncated.append(int(obj['truncated']))
        poses.append(obj['pose'].encode('utf8'))

    filename_bytes = data['filename'].encode('utf8')
    feature = {
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename_bytes),
        'image/source_id': dataset_util.bytes_feature(filename_bytes),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
class BERT_MLP_CA(BERT_MLP):
    """Context-aware BERT classifier: the target text is encoded by BERT
    while the parent (context) text goes through an LSTM over word
    embeddings; both representations are concatenated before the dense head.
    """

    def __init__(self, max_length=128, word_embedding_size=200, **kwargs):
        """
        :param max_length: max token length of the padded parent sequence.
        :param word_embedding_size: dimensionality of the parent Embedding layer.
        :param kwargs: forwarded to BERT_MLP (batch size, epochs, ...).
        """
        super(BERT_MLP_CA, self).__init__(**kwargs)
        self.name = f"{'CA'}-b{self.batch_size}.e{self.epochs}.len{self.max_seq_length}.bert"
        self.parent_tokenizer = Tokenizer()
        self.max_length = max_length
        self.word_embedding_size = word_embedding_size

    def load_embeddings(self, pretrained_dict):
        """Build an embedding matrix for the parent vocabulary from a
        word -> vector dict; rows for unknown words stay zero.

        NOTE(review): the matrix is hard-coded to 100 columns, which only
        matches ``word_embedding_size`` when that is 100, and the matrix is
        never passed to the Embedding layer in build() — presumably the
        pretrained weights were meant to initialise it; confirm intent.
        """
        self.embedding_matrix = np.zeros(((self.vocab_size + 2), 100))
        for (word, index) in self.parent_tokenizer.word_index.items():
            embedding_vector = pretrained_dict.get(word)
            if (embedding_vector is not None):
                # +1 offset keeps row 0 reserved for the padding index.
                self.embedding_matrix[(index + 1)] = embedding_vector

    def build(self, bias=0):
        """Assemble and compile the two-branch model.

        :param bias: initial bias of the output sigmoid (log-odds prior).
        """
        target_input = [Input(shape=(self.max_seq_length,), name='input_ids'), Input(shape=(self.max_seq_length,), name='input_masks'), Input(shape=(self.max_seq_length,), name='segment_ids')]
        target_output = BERT(n_fine_tune_top_layers=self.trainable_layers)(target_input)
        parent_input = Input(shape=(self.max_length,), name='parent_input')
        parent_emb = Embedding((self.vocab_size + 2), self.word_embedding_size, mask_zero=True)(parent_input)
        parent_rnn = LSTM(128)(parent_emb)
        x = concatenate([target_output, parent_rnn])
        fnn = tf.keras.layers.Dense(128, activation='tanh')(x)
        fnn = Dense(1, activation='sigmoid', bias_initializer=tf.keras.initializers.Constant(bias))(fnn)
        self.model = tf.keras.models.Model(inputs=(target_input + [parent_input]), outputs=fnn)
        self.model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr), metrics=METRICS)

    def fit(self, train, dev, pretrained_embeddings, bert_weights=None, batch_size=32, class_weights=None):
        """Train on ``train``, validating on ``dev``.

        :param class_weights: per-class weights; ``None`` (the default) means
            balanced ``{0: 1, 1: 1}``. The previous mutable-dict default was
            replaced with a ``None`` sentinel to avoid the shared
            mutable-default-argument pitfall; behavior is unchanged.
        """
        if class_weights is None:
            class_weights = {0: 1, 1: 1}
        self.parent_tokenizer.fit_on_texts(train.text)
        self.vocab_size = (len(self.parent_tokenizer.word_index) + 1)
        (train_input, train_labels) = self.to_bert_input(train)
        (dev_input, dev_labels) = self.to_bert_input(dev)
        parent_input = self.text_process(train.parent)
        parent_dev_input = self.text_process(dev.parent)
        self.load_embeddings(pretrained_embeddings)
        print(f'OLD-SCHOOL LOG: Building {self.name}...')
        # Initialise the output bias to the log-odds of the positive class so
        # the sigmoid starts at the base rate.
        pos = sum(train_labels)
        neg = (len(train_labels) - pos)
        bias = np.log((pos / neg))
        print('BIAS:', bias)
        self.build(bias=bias)
        if (bert_weights is not None):
            self.model.load_weights(bert_weights)
        self.model_show()
        print(f'OLD-SCHOOL LOG: Training {self.name}...')
        self.initialise_vars()
        self.history = self.model.fit((list(train_input) + [parent_input]), train_labels, validation_data=((list(dev_input) + [parent_dev_input]), dev_labels), epochs=self.epochs, callbacks=[self.earlystop], batch_size=batch_size, class_weight=class_weights)

    def text_process(self, texts):
        """Tokenize parent texts and pad them to ``self.max_length``."""
        x = self.parent_tokenizer.texts_to_sequences(texts.to_numpy())
        x = sequence.pad_sequences(x, maxlen=self.max_length)
        return x

    def predict(self, val_pd):
        """Predict on a validation DataFrame, print ROC AUC, and return the
        raw sigmoid predictions (optionally saving the evaluation set)."""
        (val_input, val_labels) = self.to_bert_input(val_pd)
        parent_val_input = self.text_process(val_pd.parent)
        predictions = self.model.predict((list(val_input) + [parent_val_input]))
        score = roc_auc_score(val_labels, predictions)
        print('ROC AUC: {:.4f}'.format(score))
        print('Stopped epoch: ', self.earlystop.stopped_epoch)
        if self.save_predictions:
            self.save_evaluation_set(val_labels, predictions)
        return predictions
class FactorizationSupportedNeuralNetworkModel(torch.nn.Module):
    """Factorization-machine Supported Neural Network (FNN).

    Embeds every categorical field, flattens the concatenated embeddings,
    and feeds them through an MLP ending in a single sigmoid output.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # Flattened width of all field embeddings concatenated together.
        self.embed_output_dim = embed_dim * len(field_dims)
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)

    def forward(self, x):
        # x: integer field indices of shape (batch_size, num_fields).
        flat_embeddings = self.embedding(x).view(-1, self.embed_output_dim)
        logit = self.mlp(flat_embeddings)
        return torch.sigmoid(logit.squeeze(1))
class TestJSDDiv(unittest.TestCase):
    """Checks that the JSD loss is symmetric in its arguments and matches
    its KL-divergence decomposition for every reduction mode."""

    def setUp(self) -> None:
        self.shape = (10, 5, 224, 224)
        self.logit = torch.randn(*self.shape, requires_grad=True)
        self.pred = F.softmax(self.logit, 1)
        # Random hard labels over every axis except the class axis (dim 1).
        target_size = [d for (axis, d) in enumerate(self.shape) if axis != 1]
        self.target = torch.randint(low=0, high=self.shape[1], size=target_size)
        self.target_oh = class2one_hot(self.target, C=self.shape[1]).float()

    def test_jsd(self):
        for reduction in ('sum', 'mean', 'none'):
            self._test_jsd(reduction=reduction)

    def _test_jsd(self, reduction='none'):
        jsd_criterion = loss.JSD_div(reduction=reduction)
        # JSD is symmetric: swapping the arguments must not change the loss.
        forward = jsd_criterion(self.pred, self.target_oh)
        backward = jsd_criterion(self.target_oh, self.pred)
        assert torch.allclose(forward, backward)
        # JSD(p, q) == 0.5 * (KL(m || p) + KL(m || q)) with m the midpoint.
        kl_criterion = loss.KL_div(reduction=reduction)
        midpoint = iter_average([self.target_oh.detach(), self.pred.detach()])
        midpoint.requires_grad = True
        decomposition = 0.5 * (
            kl_criterion(midpoint, self.target_oh.detach())
            + kl_criterion(midpoint, self.pred.detach())
        )
        assert torch.allclose(forward, decomposition)
def main():
    """Subsample every .pck file in DIR (in sorted order), writing results
    under FINAL_DIR and reporting progress after each file."""
    # exist_ok replaces the racy exists()-then-makedirs check.
    os.makedirs(FINAL_DIR, exist_ok=True)
    files = sorted(f for f in os.listdir(DIR) if f.endswith('.pck'))
    num_files = len(files)
    # start=1 fixes the off-by-one progress report: the first completed file
    # previously printed "Done with 0 of N".
    for (i, f) in enumerate(files, start=1):
        subsample_file(f)
        print('Done with {} of {}'.format(i, num_files))
# NOTE: the three lines below are non-code residue from a dataset-viewer page
# ("Subsets and Splits" UI text) captured during extraction; commented out so
# they no longer break the file's syntax.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.