def main():
    parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # Parse all arguments from a single JSON file instead of the command line.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detect a previous checkpoint so training can resume instead of clobbering the output dir.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, '
                'change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Setup logging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')

    set_seed(training_args.seed)

    datasets = load_dataset(
        os.path.abspath(LiLTfinetune.data.datasets.xfun.__file__),
        f'xfun.{data_args.lang}',
        additional_langs=data_args.additional_langs,
        keep_in_memory=True,
    )

    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['validation'].column_names
        features = datasets['validation'].features
    text_column_name = 'input_ids'
    label_column_name = 'labels'
    remove_columns = column_names

    def get_label_list(labels):
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list

    if isinstance(features[label_column_name].feature, ClassLabel):
        label_list = features[label_column_name].feature.names
        # Labels are already integer ids.
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for i, l in enumerate(label_list)}
    num_labels = len(label_list)

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            'This example script only works for models that have a fast tokenizer. Checkout the big table of models '
            'at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this '
            'requirement'
        )

    padding = 'max_length' if data_args.pad_to_max_length else False

    if training_args.do_train:
        if 'train' not in datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if 'validation' not in datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets['validation']
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    if training_args.do_predict:
        if 'test' not in datasets:
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets['test']
        if data_args.max_test_samples is not None:
            test_dataset = test_dataset.select(range(data_args.max_test_samples))

    data_collator = DataCollatorForKeyValueExtraction(
        tokenizer,
        pad_to_multiple_of=8 if training_args.fp16 else None,
        padding=padding,
        max_length=512,
    )

    metric = load_metric('seqeval')

    def compute_metrics(p):
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)
        # Remove the ignored index (-100, used for special tokens).
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Unpack nested dictionaries.
            final_results = {}
            for key, value in results.items():
                if isinstance(value, dict):
                    for n, v in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {
                'precision': results['overall_precision'],
                'recall': results['overall_recall'],
                'f1': results['overall_f1'],
                'accuracy': results['overall_accuracy'],
            }

    trainer = XfunSerTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    if training_args.do_train:
        checkpoint = last_checkpoint if last_checkpoint else None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(test_dataset)
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        trainer.log_metrics('test', metrics)
        trainer.save_metrics('test', metrics)
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                for prediction in true_predictions:
                    writer.write(' '.join(prediction) + '\n')
def _mp_fn(index):
    main()
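# Hypothetical launch sketch for main() above (model name, output dir, and language are
# illustrative placeholders, not values taken from the source):
#   python run_xfun_ser.py \
#       --model_name_or_path lilt-roberta-en-base \
#       --lang zh --do_train --do_eval \
#       --output_dir ./xfun_ser_out --overwrite_output_dir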
def parse_xml(path):
    tree = ET.parse(path)
    img_name = path.split('/')[-1][:-4]
    height = tree.findtext('./size/height')
    width = tree.findtext('./size/width')
    objects = [img_name, width, height]
    for obj in tree.findall('object'):
        difficult = obj.find('difficult').text
        if difficult == '1':
            continue
        name = obj.find('name').text
        bbox = obj.find('bndbox')
        xmin = bbox.find('xmin').text
        ymin = bbox.find('ymin').text
        xmax = bbox.find('xmax').text
        ymax = bbox.find('ymax').text
        name = str(names_dict[name])
        objects.extend([name, xmin, ymin, xmax, ymax])
    # `objects` always starts with [img_name, width, height] (length 3), so the original
    # `len(objects) > 1` test could never fail; compare against 3 so images without any
    # usable object are skipped as evidently intended.
    if len(objects) > 3:
        return objects
    else:
        return None
def gen_test_txt(txt_path):
    global test_cnt
    with open(txt_path, 'w') as f:
        for i, path in enumerate(test_path):
            with open(path, 'r') as name_file:
                img_names = name_file.readlines()
            for img_name in img_names:
                img_name = img_name.strip()
                xml_path = anno_path[i] + '/' + img_name + '.xml'
                objects = parse_xml(xml_path)
                if objects:
                    objects[0] = img_path[i] + '/' + img_name + '.jpg'
                    if os.path.exists(objects[0]):
                        objects.insert(0, str(test_cnt))
                        test_cnt += 1
                        f.write(' '.join(objects) + '\n')
def gen_train_txt(txt_path):
    global train_cnt
    with open(txt_path, 'w') as f:
        for i, path in enumerate(trainval_path):
            with open(path, 'r') as name_file:
                img_names = name_file.readlines()
            for img_name in img_names:
                img_name = img_name.strip()
                xml_path = anno_path[i] + '/' + img_name + '.xml'
                objects = parse_xml(xml_path)
                if objects:
                    objects[0] = img_path[i] + '/' + img_name + '.jpg'
                    if os.path.exists(objects[0]):
                        objects.insert(0, str(train_cnt))
                        train_cnt += 1
                        f.write(' '.join(objects) + '\n')
def conv2d(inputs, filters, kernel_size, strides=1):
    def _fixed_padding(inputs, kernel_size):
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        padded_inputs = tf.pad(
            inputs,
            [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]],
            mode='CONSTANT',
        )
        return padded_inputs

    if strides > 1:
        # Pad explicitly so strided convolutions are independent of input size parity.
        inputs = _fixed_padding(inputs, kernel_size)
    inputs = slim.conv2d(inputs, filters, kernel_size, stride=strides,
                         padding=('SAME' if strides == 1 else 'VALID'))
    return inputs
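# Quick sanity check of the fixed-padding arithmetic above (plain Python, no TF needed;
# the 416x416 input size is an illustrative assumption):
def _out_size(in_size, kernel_size, strides):
    pad_total = kernel_size - 1  # same formula as _fixed_padding
    return (in_size + pad_total - kernel_size) // strides + 1

assert _out_size(416, kernel_size=3, strides=2) == 208  # strided convs halve the map exactly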
def darknet53_body(inputs):
    def res_block(inputs, filters):
        shortcut = inputs
        net = conv2d(inputs, filters * 1, 1)
        net = conv2d(net, filters * 2, 3)
        net = net + shortcut
        return net

    # first two conv2d layers
    net = conv2d(inputs, 32, 3, strides=1)
    net = conv2d(net, 64, 3, strides=2)
    # res_block * 1
    net = res_block(net, 32)

    net = conv2d(net, 128, 3, strides=2)
    # res_block * 2
    for i in range(2):
        net = res_block(net, 64)

    net = conv2d(net, 256, 3, strides=2)
    # res_block * 8
    for i in range(8):
        net = res_block(net, 128)
    route_1 = net

    net = conv2d(net, 512, 3, strides=2)
    # res_block * 8
    for i in range(8):
        net = res_block(net, 256)
    route_2 = net

    net = conv2d(net, 1024, 3, strides=2)
    # res_block * 4
    for i in range(4):
        net = res_block(net, 512)
    route_3 = net

    return route_1, route_2, route_3
def yolo_block(inputs, filters):
    net = conv2d(inputs, filters * 1, 1)
    net = conv2d(net, filters * 2, 3)
    net = conv2d(net, filters * 1, 1)
    net = conv2d(net, filters * 2, 3)
    net = conv2d(net, filters * 1, 1)
    route = net
    net = conv2d(net, filters * 2, 3)
    return route, net
def upsample_layer(inputs, out_shape):
    new_height, new_width = out_shape[1], out_shape[2]
    inputs = tf.image.resize_nearest_neighbor(inputs, (new_height, new_width), name='upsampled')
    return inputs
class MeshPly:
    def __init__(self, filename, color=[0.0, 0.0, 0.0]):
        self.vertices = []
        self.colors = []
        self.indices = []
        self.normals = []
        vertex_mode = False
        face_mode = False
        nb_vertices = 0
        nb_faces = 0
        idx = 0
        with open(filename, 'r') as open_file_object:
            for line in open_file_object:
                elements = line.split()
                if vertex_mode:
                    # Vertex records: position, normal, optional per-vertex color.
                    self.vertices.append([float(i) for i in elements[:3]])
                    self.normals.append([float(i) for i in elements[3:6]])
                    if elements[6:9]:
                        self.colors.append([float(i) / 255.0 for i in elements[6:9]])
                    else:
                        self.colors.append([float(i) / 255.0 for i in color])
                    idx += 1
                    if idx == nb_vertices:
                        vertex_mode = False
                        face_mode = True
                        idx = 0
                elif face_mode:
                    self.indices.append([float(i) for i in elements[1:4]])
                    idx += 1
                    if idx == nb_faces:
                        face_mode = False
                elif elements[0] == 'element':
                    if elements[1] == 'vertex':
                        nb_vertices = int(elements[2])
                    elif elements[1] == 'face':
                        nb_faces = int(elements[2])
                elif elements[0] == 'end_header':
                    vertex_mode = True
def get_color_table(class_num, seed=2):
    # Deterministic per-class colors so the same class is always drawn in the same color.
    random.seed(seed)
    color_table = {}
    for i in range(class_num):
        color_table[i] = [random.randint(0, 255) for _ in range(3)]
    return color_table
def plot_one_box(img, coord, label=None, color=None, line_thickness=None):
    """
    coord: [x_min, y_min, x_max, y_max] format coordinates.
    img: img to plot on.
    label: str. The label name.
    color: list of 3 ints. BGR color of the box; random if None.
    line_thickness: int. rectangle line thickness.
    """
    tl = line_thickness or int(round(0.002 * max(img.shape[0:2])))  # line thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(coord[0]), int(coord[1])), (int(coord[2]), int(coord[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=float(tl) / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled label background
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],
                    thickness=tf, lineType=cv2.LINE_AA)
    return img
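# Usage sketch for the two helpers above (the blank image, box, and class label are made
# up for illustration; assumes numpy and cv2 from the surrounding imports):
demo_img = np.zeros((416, 416, 3), dtype=np.uint8)
color_table = get_color_table(class_num=3)
demo_img = plot_one_box(demo_img, [50, 60, 200, 220], label='car 0.91', color=color_table[0])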
def draw_demo_img(img, projectpts, color=(0, 255, 0)):
    # Point 0 is only drawn as a dot; points 1..8 are connected into a box wireframe.
    vertices = []
    for i in range(9):
        x = projectpts[i][0]
        y = projectpts[i][1]
        coordinates = (int(x), int(y))
        vertices.append(coordinates)
        cv2.circle(img, coordinates, 1, (0, 255, 255), -1)
    cv2.line(img, vertices[1], vertices[2], color, 2)
    cv2.line(img, vertices[1], vertices[3], color, 2)
    cv2.line(img, vertices[1], vertices[5], color, 2)
    cv2.line(img, vertices[2], vertices[6], color, 2)
    cv2.line(img, vertices[2], vertices[4], color, 2)
    cv2.line(img, vertices[3], vertices[4], color, 2)
    cv2.line(img, vertices[3], vertices[7], color, 2)
    cv2.line(img, vertices[4], vertices[8], color, 2)
    cv2.line(img, vertices[5], vertices[6], color, 2)
    cv2.line(img, vertices[5], vertices[7], color, 2)
    cv2.line(img, vertices[6], vertices[8], color, 2)
    cv2.line(img, vertices[7], vertices[8], color, 2)
    return img
def draw_demo_img_corners(img, projectpts, color=(0, 255, 0), nV=9, thickness=2):
    vertices = []
    for i in range(nV):
        x = projectpts[i][0]
        y = projectpts[i][1]
        coordinates = (int(x), int(y))
        vertices.append(coordinates)
        cv2.circle(img, coordinates, 2, color, -1)
    # Connect the 8 corners (0..7) into a box wireframe.
    cv2.line(img, vertices[0], vertices[1], color, thickness=thickness)
    cv2.line(img, vertices[0], vertices[2], color, thickness=thickness)
    cv2.line(img, vertices[0], vertices[4], color, thickness=thickness)
    cv2.line(img, vertices[1], vertices[5], color, thickness=thickness)
    cv2.line(img, vertices[1], vertices[3], color, thickness=thickness)
    cv2.line(img, vertices[2], vertices[3], color, thickness=thickness)
    cv2.line(img, vertices[2], vertices[6], color, thickness=thickness)
    cv2.line(img, vertices[3], vertices[7], color, thickness=thickness)
    cv2.line(img, vertices[4], vertices[5], color, thickness=thickness)
    cv2.line(img, vertices[4], vertices[6], color, thickness=thickness)
    cv2.line(img, vertices[5], vertices[7], color, thickness=thickness)
    cv2.line(img, vertices[6], vertices[7], color, thickness=thickness)
    return img
def hist(latencies, labels=[], title='', filename='hist', bins=500, xlabel='Latency (cycles)'):
    plt.figure(figsize=(10, 5))
    for i in range(0, len(labels)):
        d = latencies[i]
        labels[i] += f' (μ={int(np.mean(d))}, σ={int(np.std(d))})'
    plt.hist(latencies, label=labels, bins=bins, histtype='stepfilled', alpha=0.5,
             range=[LATENCY_MIN, LATENCY_MAX])
    font = font_manager.FontProperties(family='monospace', size=10)
    if len(labels) > 0:
        plt.legend(prop=font)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.title(title)
    print(f".. writing histogram '{title}' to '{filename}.pdf'")
    plt.savefig(filename + '.pdf', bbox_inches='tight')
    plt.show()
def reject_outliers(data, m=2):
    # Keep only samples within m standard deviations of the mean.
    stdev = np.std(data)
    mean = np.mean(data)
    mask_min = mean - stdev * m
    mask_max = mean + stdev * m
    outliers = [d for d in data if d < mask_min or d > mask_max]
    print(f'Warning: removing {len(outliers)} outliers:')
    print(outliers)
    return [d for d in data if mask_min <= d <= mask_max]
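# Usage sketch for reject_outliers (synthetic latencies; the numbers are illustrative only):
samples = np.concatenate([np.random.normal(1000, 50, 10000), [1000000]])
clean = reject_outliers(samples, m=2)  # strips the huge spike plus any other >2-sigma points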
def load_data(file, col):
    print(f".. loading data from '{file}'")
    d = pd.read_csv(file)
    data = d[col]
    print('---------------------------------------------------------------------------')
    s = pd.Series(data)
    print(s.describe())
    print(f'med {int(np.median(data))}')
    print('---------------------------------------------------------------------------')
    return data
def hist(latencies, labels=[], title='', filename='hist', xlabel='Latency (cycles)', legend_loc='best'):
    plt.figure(figsize=(10, 5))
    for i in range(0, len(labels)):
        d = latencies[i]
        labels[i] += f' (μ={int(np.mean(d))}, σ={int(np.std(d))})'
    plt.hist(latencies, label=labels, bins=500, histtype='stepfilled', alpha=0.5,
             range=[LATENCY_MIN, LATENCY_MAX])
    font = font_manager.FontProperties(family='monospace', size=10)
    if len(labels) > 0:
        plt.legend(prop=font, loc=legend_loc)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.title(title)
    print(f".. writing histogram '{title}' to '{filename}.pdf'")
    plt.savefig(filename + '.pdf', bbox_inches='tight')
    plt.show()
def reject_outliers(data, m=3):
    stdev = np.std(data)
    mean = np.mean(data)
    mask_min = mean - stdev * m
    mask_max = mean + stdev * m
    outliers = [d for d in data if d < mask_min or d > mask_max]
    print(f'Warning: removing {len(outliers)} outliers:')
    print(outliers)
    return [d for d in data if mask_min <= d <= mask_max]
def reject_syscall_inc_outliers(data):
    # Zero out counts above 1e6 rather than dropping them, so indices stay aligned.
    for i in range(0, len(data)):
        if data[i] > 1000000:
            print(f'Warning: removing outlier: {data[i]}')
            data[i] = 0
    return data
def load_data(file, col):
    print(f".. loading data from '{file}'")
    d = pd.read_csv(file)
    data = d[col]
    if file == 'logs/icx/deadline_results_syscall.txt' and col == 'inc_count':
        data = reject_syscall_inc_outliers(data)
    print('---------------------------------------------------------------------------')
    s = pd.Series(data)
    print(s.describe())
    print(f'med {int(np.median(data))}')
    print('---------------------------------------------------------------------------')
    return data
def get_sym_addr(name, symtab):
    # Look up the first matching symbol in the ELF symbol table and return its address.
    return symtab.get_symbol_by_name(name)[0]['st_value']
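# Usage sketch with pyelftools (binary path and symbol name are illustrative assumptions):
from elftools.elf.elffile import ELFFile

with open('./victim_binary', 'rb') as f:
    elf = ELFFile(f)
    symtab = elf.get_section_by_name('.symtab')
    main_addr = get_sym_addr('main', symtab)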
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, text, label = sample['image'], sample['text'], sample['label']
        return {'image': torch.from_numpy(image.astype(np.float32)),
                'text': text,
                'label': torch.from_numpy(label.astype(np.float32)),
                'textlen': sample['textlen']}
class Normalize(object):
    """Input image cleaning."""

    def __init__(self, mean_vector, std_devs):
        self.mean_vector, self.std_devs = mean_vector, std_devs

    def __call__(self, sample):
        image = sample['image']
        return {'image': self._normalize(image, self.mean_vector, self.std_devs),
                'text': sample['text'],
                'label': sample['label'],
                'textlen': sample['textlen']}

    def _normalize(self, tensor, mean, std):
        """Normalize a tensor image with mean and standard deviation.
        See ``Normalize`` for more details.
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
            mean (sequence): Sequence of means for each channel.
            std (sequence): Sequence of standard deviations for each channel.
        Returns:
            Tensor: Normalized Tensor image.
        """
        if not self._is_tensor_image(tensor):
            print(tensor.size())
            raise TypeError('tensor is not a torch image. Its size is {}.'.format(tensor.size()))
        for t, m, s in zip(tensor, mean, std):
            t.sub_(m).div_(s)
        return tensor

    def _is_tensor_image(self, img):
        return torch.is_tensor(img) and img.ndimension() == 3
class RandomModalityMuting(object):
    """Randomly turn a mode off."""

    def __init__(self, p_muting=0.1):
        self.p_muting = p_muting

    # Fixed: the original defined `__call_` (single trailing underscore), which a
    # transform pipeline would never invoke.
    def __call__(self, sample):
        rval = random.random()
        im = sample['image']
        au = sample['text']
        if rval <= self.p_muting:
            # Mute exactly one modality, chosen with a fair coin flip.
            vval = random.random()
            if vval <= 0.5:
                im = sample['image'] * 0
            else:
                au = sample['text'] * 0
        return {'image': im, 'text': au, 'label': sample['label'], 'textlen': sample['textlen']}
class MM_IMDB(Dataset):
    def __init__(self, root_dir='', transform=None, stage='train', feat_dim=100, average_text=False):
        """
        Args:
            root_dir (string): Directory where data is.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        if stage == 'train':
            self.len_data = 15552
        elif stage == 'test':
            self.len_data = 7799
        elif stage == 'dev':
            self.len_data = 2608
        self.transform = transform
        self.root_dir = root_dir
        self.stage = stage
        self.average_text = average_text
        global fdim
        fdim = feat_dim

    def __len__(self):
        return self.len_data

    def __getitem__(self, idx):
        imagepath = os.path.join(self.root_dir, self.stage, 'image_{:06}.npy'.format(idx))
        labelpath = os.path.join(self.root_dir, self.stage, 'label_{:06}.npy'.format(idx))
        textpath = os.path.join(self.root_dir, self.stage, 'text_{:06}.npy'.format(idx))
        image = np.load(imagepath)
        label = np.load(labelpath)
        text = np.load(textpath)
        if self.average_text:
            text = text.mean(0)
        textlen = text.shape[0]
        sample = {'image': image, 'text': text, 'label': label, 'textlen': textlen}
        if self.transform:
            sample = self.transform(sample)
        return sample
def collate_imdb(list_samples):
    global fdim
    # Find the longest text sequence in the batch.
    max_text_len = 0
    for sample in list_samples:
        L = len(sample['text'])
        if max_text_len < L:
            max_text_len = L
    list_images = len(list_samples) * [None]
    list_text = len(list_samples) * [None]
    list_labels = len(list_samples) * [None]
    list_textlen = len(list_samples) * [None]
    for i, sample in enumerate(list_samples):
        text_sample_len = len(sample['text'])
        text_i = sample['text'].astype(np.float32)
        # Pad shorter sequences with a sentinel value of -10.0 per feature.
        padding = np.asarray([fdim * [-10.0]] * (max_text_len - text_sample_len), np.float32)
        list_images[i] = sample['image']
        list_labels[i] = sample['label']
        if padding.shape[0] > 0:
            list_text[i] = torch.from_numpy(np.concatenate((text_i, padding), 0))
        else:
            list_text[i] = torch.from_numpy(text_i)
        list_textlen[i] = sample['textlen']
    images = torch.transpose(torch.stack(list_images), 1, 3)
    text = torch.stack(list_text)
    labels = torch.stack(list_labels)
    return {'image': images, 'text': text, 'label': labels, 'textlen': list_textlen}
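# Usage sketch: batch variable-length text with the custom collate function. The root_dir
# and feat_dim values are placeholders; ToTensor must be applied so 'image' and 'label'
# are tensors before torch.stack inside collate_imdb.
from torch.utils.data import DataLoader

imdb_train = MM_IMDB(root_dir='/path/to/mmimdb_features', stage='train', feat_dim=300,
                     transform=ToTensor())
imdb_loader = DataLoader(imdb_train, batch_size=32, shuffle=True, collate_fn=collate_imdb)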
def parse_args():
    parser = argparse.ArgumentParser(description='Modality optimization.')
    parser.add_argument('--checkpointdir', type=str, help='output base dir',
                        default='/home/juanma/Documents/Checkpoints/NTU/')
    parser.add_argument('--datadir', type=str, help='data directory',
                        default='/home/juanma/Documents/Data/ROSE_Action/')
    parser.add_argument('--ske_cp', type=str,
                        help='Skeleton net checkpoint (assuming is contained in checkpointdir)',
                        default='skeleton_32frames_85.24.checkpoint')
    parser.add_argument('--rgb_cp', type=str,
                        help='RGB net checkpoint (assuming is contained in checkpointdir)',
                        default='rgb_8frames_83.91.checkpoint')
    parser.add_argument('--test_cp', type=str,
                        help='Full net checkpoint (assuming is contained in checkpointdir)',
                        default='')
    parser.add_argument('--num_outputs', type=int, help='output dimension', default=60)
    parser.add_argument('--batchsize', type=int, help='batch size', default=20)
    parser.add_argument('--inner_representation_size', type=int,
                        help='output size of mixing linear layers', default=256)
    parser.add_argument('--epochs', type=int, help='training epochs', default=70)
    parser.add_argument('--eta_max', type=float, help='eta max', default=0.001)
    parser.add_argument('--eta_min', type=float, help='eta min', default=1e-06)
    parser.add_argument('--Ti', type=int, help='epochs Ti', default=5)
    parser.add_argument('--Tm', type=int, help='epochs multiplier Tm', default=2)
    parser.add_argument('--use_dataparallel', help='Use several GPUs', action='store_true',
                        dest='use_dataparallel', default=False)
    parser.add_argument('--j', dest='num_workers', type=int, help='Dataloader CPUS', default=16)
    parser.add_argument('--modality', type=str, help='', default='both')
    parser.add_argument('--no-verbose', help='verbose', action='store_false', dest='verbose', default=True)
    parser.add_argument('--weightsharing', help='Weight sharing', action='store_true', default=False)
    parser.add_argument('--no-multitask', dest='multitask', help='Multitask loss',
                        action='store_false', default=True)
    parser.add_argument('--alphas', help='Use alphas', action='store_true', default=False)
    parser.add_argument('--batchnorm', help='Use batch norm', action='store_true',
                        dest='batchnorm', default=False)
    parser.add_argument('--vid_dim', action='store', default=256, dest='vid_dim',
                        help='frame side dimension (square image assumed) ')
    parser.add_argument('--vid_fr', action='store', default=30, dest='vi_fr', help='video frame rate')
    parser.add_argument('--vid_len', action='store', default=(8, 32), dest='vid_len', type=int, nargs='+',
                        help='length of video, as a tuple of two lengths, (rgb len, skel len)')
    parser.add_argument('--drpt', action='store', default=0.4, dest='drpt', type=float, help='dropout')
    parser.add_argument('--no_bad_skel', action='store_true',
                        help='Remove the 300 bad samples, espec. useful to evaluate', default=False)
    parser.add_argument('--no_norm', action='store_true', default=False, dest='no_norm',
                        help='Not normalizing the skeleton')
    parser.add_argument('--conf', type=int, help='conf to train', default=1)
    return parser.parse_args()
def get_dataloaders(args):
    import torchvision.transforms as transforms
    from datasets import ntu as d
    from torch.utils.data import DataLoader
    transformer_val = transforms.Compose([d.NormalizeLen(args.vid_len), d.ToTensor()])
    transformer_tra = transforms.Compose([d.AugCrop(), d.NormalizeLen(args.vid_len), d.ToTensor()])
    dataset_training = d.NTU(args.datadir, transform=transformer_tra, stage='train', args=args)
    dataset_testing = d.NTU(args.datadir, transform=transformer_val, stage='test', args=args)
    dataset_validation = d.NTU(args.datadir, transform=transformer_val, stage='dev', args=args)
    datasets = {'train': dataset_training, 'dev': dataset_validation, 'test': dataset_testing}
    dataloaders = {x: DataLoader(datasets[x], batch_size=args.batchsize, shuffle=True,
                                 num_workers=args.num_workers, drop_last=False, pin_memory=True)
                   for x in ['train', 'dev', 'test']}
    return dataloaders
def train_model(rmode, configuration, dataloaders, args, device):
    dataset_sizes = {x: len(dataloaders[x].dataset) for x in ['train', 'test', 'dev']}
    if args.test_cp == '':
        num_batches_per_epoch = dataset_sizes['train'] / args.batchsize
        criteria = [torch.nn.CrossEntropyLoss(), torch.nn.CrossEntropyLoss(), torch.nn.CrossEntropyLoss()]
        # Load the pretrained unimodal backbones.
        skemodel_filename = os.path.join(args.checkpointdir, args.ske_cp)
        rgbmodel_filename = os.path.join(args.checkpointdir, args.rgb_cp)
        rmode.skenet.load_state_dict(torch.load(skemodel_filename))
        rmode.rgbnet.load_state_dict(torch.load(rgbmodel_filename))
        # First phase: train only the central (fusion) weights for one epoch.
        params = rmode.central_params()
        optimizer = op.Adam(params, lr=args.eta_max / 10, weight_decay=0.0001)
        scheduler = sc.LRCosineAnnealingScheduler(args.eta_max, args.eta_min, args.Ti, args.Tm,
                                                  num_batches_per_epoch)
        if torch.cuda.device_count() > 1 and args.use_dataparallel:
            rmode = torch.nn.DataParallel(rmode)
        rmode.to(device)
        if args.verbose:
            print('Pretraining central weights: ')
            print(configuration)
        interm_model_acc = tr.train_ntu_track_acc(rmode, criteria, optimizer, scheduler, dataloaders,
                                                  dataset_sizes, device=device, num_epochs=1,
                                                  verbose=args.verbose, multitask=args.multitask)
        if args.verbose:
            print('Intermediate val accuracy: ' + str(interm_model_acc))
        # Second phase: fine-tune all parameters.
        if torch.cuda.device_count() > 1 and args.use_dataparallel:
            params = rmode.module.parameters()
        else:
            params = rmode.parameters()
        optimizer = op.Adam(params, lr=args.eta_max, weight_decay=0.0001)
        scheduler = sc.LRCosineAnnealingScheduler(args.eta_max, args.eta_min, args.Ti, args.Tm,
                                                  num_batches_per_epoch)
        best_model_acc = tr.train_ntu_track_acc(rmode, criteria, optimizer, scheduler, dataloaders,
                                                dataset_sizes, device=device, num_epochs=args.epochs,
                                                verbose=args.verbose, multitask=args.multitask)
        if args.verbose:
            print('Final val accuracy: ' + str(best_model_acc))
    else:
        # Evaluation only: load a full checkpoint.
        fullmodel_filename = os.path.join(args.checkpointdir, args.test_cp)
        rmode.load_state_dict(torch.load(fullmodel_filename))
        if torch.cuda.device_count() > 1 and args.use_dataparallel:
            rmode = torch.nn.DataParallel(rmode)
        rmode.to(device)
    test_model_acc = tr.test_ntu_track_acc(rmode, dataloaders, dataset_sizes, device=device,
                                           multitask=args.multitask)
    if args.verbose:
        print('Final test accuracy: ' + str(test_model_acc))
    return test_model_acc
def parse_args():
    parser = argparse.ArgumentParser(description='Modality optimization.')
    parser.add_argument('--checkpointdir', type=str, help='output base dir',
                        default='/home/juanma/Documents/Checkpoints/NTU/')
    parser.add_argument('--datadir', type=str, help='data directory',
                        default='/home/juanma/Documents/Data/ROSE_Action/')
    parser.add_argument('--ske_cp', type=str,
                        help='Skeleton net checkpoint (assuming is contained in checkpointdir)',
                        default='skeleton_32frames_83.42')
    parser.add_argument('--rgb_cp', type=str,
                        help='RGB net checkpoint (assuming is contained in checkpointdir)',
                        default='rgb_8frames_82.14')
    parser.add_argument('--num_outputs', type=int, help='output dimension', default=60)
    parser.add_argument('--batchsize', type=int, help='batch size', default=20)
    parser.add_argument('--inner_representation_size', type=int,
                        help='output size of mixing linear layers', default=16)
    parser.add_argument('--epochs', type=int, help='training epochs', default=3)
    parser.add_argument('--lr_surrogate', type=float, help='learning rate surrogate', default=0.001)
    parser.add_argument('--epochs_surrogate', type=int, help='num of epochs for surrogate', default=50)
    parser.add_argument('--eta_max', type=float, help='eta max', default=0.001)
    parser.add_argument('--eta_min', type=float, help='eta min', default=1e-06)
    parser.add_argument('--Ti', type=int, help='epochs Ti', default=1)
    parser.add_argument('--Tm', type=int, help='epochs multiplier Tm', default=2)
    parser.add_argument('--use_dataparallel', help='Use several GPUs', action='store_true', default=False)
    parser.add_argument('--num_workers', type=int, help='Dataloader CPUS', default=16)
    parser.add_argument('--modality', type=str, help='', default='both')
    parser.add_argument('--max_fusions', type=int, dest='max_progression_levels',
                        help='max fusions', default=4)
    parser.add_argument('--search_iterations', type=int, help='epnas iterations', default=3)
    parser.add_argument('--num_samples', type=int,
                        help='number of samples to train at each explo step (K)', default=15)
    parser.add_argument('--initial_temperature', type=float, help='initial sampling temperature',
                        default=10.0)
    parser.add_argument('--final_temperature', type=float, help='final sampling temperature',
                        default=0.2)
    parser.add_argument('--temperature_decay', type=float, help='temperature decay (sigma)',
                        default=4.0)
    parser.add_argument('--no-verbose', help='verbose', dest='verbose', action='store_false', default=True)
    parser.add_argument('--weightsharing', help='Weight sharing', action='store_true', default=False)
    parser.add_argument('--alphas', help='Use alphas', action='store_true', default=False)
    parser.add_argument('--batchnorm', help='Use batch norm', action='store_true', default=False)
    parser.add_argument('--multitask', help='Multitask loss', action='store_true', default=False)
    parser.add_argument('--vid_dim', action='store', default=256, dest='vid_dim',
                        help='frame side dimension (square image assumed) ')
    parser.add_argument('--vid_fr', action='store', default=30, dest='vi_fr', help='video frame rate')
    parser.add_argument('--vid_len', action='store', default=(8, 32), dest='vid_len', type=int, nargs='+',
                        help='length of video, as a tuple of two lengths, (rgb len, skel len)')
    parser.add_argument('--drpt', action='store', default=0.5, dest='drpt', type=float, help='dropout')
    parser.add_argument('--no_bad_skel', action='store_true',
                        help='Remove the 300 bad samples, espec. useful to evaluate', default=False)
    parser.add_argument('--no_norm', action='store_true', default=False, dest='no_norm',
                        help='Not normalizing the skeleton')
    return parser.parse_args()
def inflated_resnet(**kwargs):
    # ResNet-50 layout, with every bottleneck inflated to 3D.
    list_block = [Bottleneck3D, Bottleneck3D, Bottleneck3D, Bottleneck3D]
    list_layers = [3, 4, 6, 3]
    model = ResNet(list_block, list_layers, **kwargs)
    # Inflate pretrained 2D ImageNet weights into the 3D kernels.
    load_pretrained_2D_weights('resnet50', model, inflation='center')
    return model
class Bottleneck3D(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck3D, self).__init__()
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1,
                               bias=False, dilation=(1, dilation, dilation))
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.input_dim = 5
        self.dilation = dilation

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    def __init__(self, list_block, layers, **kwargs):
        self.inplanes = 64
        self.input_dim = 4
        super(ResNet, self).__init__()
        self._first_conv()
        self.relu = nn.ReLU(inplace=True)
        self.list_channels = [64, 128, 256, 512]
        self.layer1 = self._make_layer(list_block[0], self.list_channels[0], layers[0])
        self.layer2 = self._make_layer(list_block[1], self.list_channels[1], layers[1], stride=2)
        self.layer3 = self._make_layer(list_block[2], self.list_channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(list_block[3], self.list_channels[3], layers[3], stride=2)
        self.out_dim = 5
        # Kaiming-style init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _first_conv(self):
        self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(64)
        self.input_dim = 4

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        downsample = None
        stride = (1, stride, stride)
        if stride != 1 or self.inplanes != planes * block.expansion:
            conv, batchnorm = nn.Conv3d, nn.BatchNorm3d
            downsample = nn.Sequential(
                conv(self.inplanes, planes * block.expansion, kernel_size=1,
                     stride=stride, bias=False, dilation=dilation),
                batchnorm(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def get_feature_maps(self, x):
        B, C, T, W, H = x.size()
        # The 2D stem runs on folded frames; each layer folds/unfolds as needed.
        x = transform_input(x, self.input_dim, T=T)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = transform_input(x, self.layer1[0].input_dim, T=T)
        x = self.layer1(x)
        fm1 = x
        x = transform_input(x, self.layer2[0].input_dim, T=T)
        x = self.layer2(x)
        fm2 = x
        x = transform_input(x, self.layer3[0].input_dim, T=T)
        x = self.layer3(x)
        fm3 = x
        x = transform_input(x, self.layer4[0].input_dim, T=T)
        x = self.layer4(x)
        final_fm = transform_input(x, self.out_dim, T=T)
        return fm1, fm2, fm3, final_fm
def transform_input(x, dim, T=12):
    # Fold/unfold the time axis so the same tensor can feed 2D and 3D layers.
    diff = len(x.size()) - dim
    if diff > 0:
        # 5D (B, C, T, W, H) -> 4D (B*T, C, W, H)
        B, C, T, W, H = x.size()
        x = x.transpose(1, 2)
        x = x.contiguous()
        x = x.view(-1, C, W, H)
    elif diff < 0:
        # 4D (B*T, C, W, H) -> 5D (B, C, T, W, H)
        _, C, W, H = x.size()
        x = x.view(-1, T, C, W, H)
        x = x.transpose(1, 2)
    return x
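# Shape sanity check for transform_input (tensor sizes are illustrative assumptions):
x5 = torch.randn(2, 3, 12, 56, 56)          # (B, C, T, W, H) video batch
x4 = transform_input(x5, dim=4)             # folded to (B*T, C, W, H) = (24, 3, 56, 56)
assert tuple(x4.shape) == (24, 3, 56, 56)
x5_back = transform_input(x4, dim=5, T=12)  # unfolded back to (2, 3, 12, 56, 56)
assert tuple(x5_back.shape) == (2, 3, 12, 56, 56)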
class LRCosineAnnealingScheduler():
    def __init__(self, eta_max, eta_min, Ti, Tmultiplier, num_batches_per_epoch):
        self.eta_min = eta_min
        self.eta_max = eta_max
        self.Ti = Ti
        self.Tcur = 0.0
        self.nbpe = num_batches_per_epoch
        self.iteration_counter = 0.0
        self.eta = eta_max
        self.Tm = Tmultiplier

    def _compute_rule(self):
        self.eta = self.eta_min + 0.5 * (self.eta_max - self.eta_min) * (1 + np.cos(np.pi * self.Tcur / self.Ti))
        return self.eta

    def step(self):
        self.Tcur = self.iteration_counter / self.nbpe
        self.iteration_counter = self.iteration_counter + 1.0
        eta = self._compute_rule()
        # Warm restart: when eta bottoms out, reset the cycle and lengthen it by Tm.
        if eta <= (self.eta_min + 1e-10):
            self.Tcur = 0
            self.Ti = self.Ti * self.Tm
            self.iteration_counter = 0
        return eta

    def update_optimizer(self, optimizer):
        state_dict = optimizer.state_dict()
        for param_group in state_dict['param_groups']:
            param_group['lr'] = self.eta
        optimizer.load_state_dict(state_dict)
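# Minimal runnable sketch of the warm-restart schedule (the dummy parameter/optimizer and
# all hyperparameter values are assumptions for illustration):
_p = torch.nn.Parameter(torch.zeros(1))
_opt = torch.optim.SGD([_p], lr=1e-3)
_sched = LRCosineAnnealingScheduler(eta_max=1e-3, eta_min=1e-6, Ti=1, Tmultiplier=2,
                                    num_batches_per_epoch=100)
for _ in range(100):                 # one "epoch" of 100 iterations
    _sched.step()                    # advance the cosine schedule by one iteration
    _sched.update_optimizer(_opt)    # push the new learning rate into the optimizer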
class FixedScheduler():
    def __init__(self, lr):
        self.lr = lr

    def step(self):
        return self.lr

    def update_optimizer(self, optimizer):
        state_dict = optimizer.state_dict()
        for param_group in state_dict['param_groups']:
            param_group['lr'] = self.lr
        optimizer.load_state_dict(state_dict)
class activ(nn.Module):
    def __init__(self, args):
        super(activ, self).__init__()
        self.activation = args.activation
        if args.activation == 'LeakyReLU':
            self.act = torch.nn.LeakyReLU()
        elif args.activation == 'ELU':
            self.act = torch.nn.ELU()
        elif args.activation == 'ReLU':
            self.act = torch.nn.ReLU()
        elif args.activation == 'Tanh':
            self.act = torch.nn.Tanh()
        elif args.activation == 'Sigmoid':
            self.act = torch.nn.Sigmoid()
        elif args.activation == 'Swish':
            # Swish with a learnable beta: x * sigmoid(beta * x).
            self.beta = nn.Parameter(torch.tensor(0.5))
            self.act = torch.nn.Sigmoid()
        else:
            print('WARNING: REQUIRED ACTIVATION IS NOT DEFINED')

    def forward(self, x):
        if self.activation == 'Swish':
            return self.act(self.beta * x) * x
        else:
            return self.act(x)
class SimpleRecurrentSurrogate(nn.Module):
    def __init__(self, num_hidden=100, number_input_feats=3, size_ebedding=100):
        super(SimpleRecurrentSurrogate, self).__init__()
        self.num_hidden = num_hidden
        self.embedding = nn.Sequential(nn.Linear(number_input_feats, size_ebedding), nn.Sigmoid())
        self.lstm = nn.LSTM(size_ebedding, num_hidden)
        self.hid2val = nn.Linear(num_hidden, 1)
        self.nonlinearity = nn.Sigmoid()
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data.uniform_(-0.1, 0.1)
                m.bias.data.fill_(1.8)

    def forward(self, sequence_of_operations):
        # sequence_of_operations: (seq_len, batch, number_input_feats)
        embeds = []
        for s in sequence_of_operations:
            embeds.append(self.embedding(s))
        embeds = torch.stack(embeds, dim=0)
        lstm_out, hidden = self.lstm(embeds)
        # Predict the accuracy from the last LSTM step.
        val_space = self.hid2val(lstm_out[-1])
        val_space = self.nonlinearity(val_space)
        return val_space

    def eval_model(self, sequence_of_operations_np, device):
        npseq = np.expand_dims(sequence_of_operations_np, 1)
        sequence_of_operations = torch.from_numpy(npseq).float().to(device)
        res = self.forward(sequence_of_operations)
        res = res.cpu().data.numpy()
        return res[0, 0]
class SurrogateDataloader():
    def __init__(self):
        self._dict_data = {}

    def add_datum(self, datum_conf, datum_acc):
        # Deduplicate configurations by their raw bytes, keeping the best accuracy seen.
        seq_len = len(datum_conf)
        datum_hash = datum_conf.data.tobytes()
        if seq_len in self._dict_data:
            if datum_hash in self._dict_data[seq_len]:
                self._dict_data[seq_len][datum_hash] = (
                    datum_conf, max(datum_acc, self._dict_data[seq_len][datum_hash][1]))
            else:
                self._dict_data[seq_len][datum_hash] = (datum_conf, datum_acc)
        else:
            self._dict_data[seq_len] = {datum_hash: (datum_conf, datum_acc)}

    def get_data(self, to_torch=False):
        dataset_conf = list()
        dataset_acc = list()
        for len_key, data_dict in self._dict_data.items():
            conf_list = list()
            acc_list = list()
            for datum_hash, datum in data_dict.items():
                conf_list.append(datum[0])
                acc_list.append(datum[1])
            # (N, seq_len, feats) -> (seq_len, N, feats) for the sequence-first LSTM.
            conf_list = np.transpose(np.asarray(conf_list, np.float32), (1, 0, 2))
            dataset_conf.append(np.array(conf_list, np.float32))
            dataset_acc.append(np.expand_dims(np.array(acc_list, np.float32), 1))
        if to_torch:
            for index in range(len(dataset_conf)):
                dataset_conf[index] = torch.from_numpy(dataset_conf[index])
                dataset_acc[index] = torch.from_numpy(dataset_acc[index])
        return dataset_conf, dataset_acc

    def get_k_best(self, k):
        dataset_conf = list()
        dataset_acc = list()
        for len_key, data_dict in self._dict_data.items():
            for datum_hash, datum in data_dict.items():
                dataset_conf.append(datum[0])
                dataset_acc.append(datum[1])
        dataset_acc = np.array(dataset_acc)
        top_k_idx = np.argpartition(dataset_acc, -k)[-k:]
        confs = [dataset_conf[i] for i in top_k_idx]
        accs = [dataset_acc[i] for i in top_k_idx]
        return confs, accs, top_k_idx
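# Usage sketch: record (configuration, accuracy) pairs and pull padded tensors back out.
# The 2-step, 3-feature configuration below is made up for illustration:
conf = np.array([[0, 1, 2], [1, 0, 2]], np.float32)  # one sampled architecture encoding
s_loader = SurrogateDataloader()
s_loader.add_datum(conf, 0.73)
confs, accs = s_loader.get_data(to_torch=True)       # tensors grouped by sequence length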
def train_simple_surrogate(model, criterion, optimizer, data_tensors, num_epochs, device):
    for epoch in range(num_epochs):
        model.train(True)
        for batch in range(len(data_tensors[0])):
            inputs, outputs = data_tensors[0][batch], data_tensors[1][batch]
            inputs = inputs.to(device)
            outputs = outputs.to(device)
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                f_outputs = model(inputs)
                loss = criterion(f_outputs, outputs)
                loss.backward()
                optimizer.step()
    model.train(False)
    # Return the loss of the last batch as the surrogate fit error.
    return loss.item()
def train_avmnist_track_acc(model, criteria, optimizer, scheduler, dataloaders, dataset_sizes,
                            device=None, num_epochs=200, verbose=False, multitask=False):
    best_model_sd = copy.deepcopy(model.state_dict())
    best_acc = 0
    for epoch in range(num_epochs):
        for phase in ['train', 'dev']:
            if phase == 'train':
                if not isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                    scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                rgb, snd, label = data['image'], data['audio'], data['label']
                rgb = rgb.to(device)
                snd = snd.to(device)
                label = label.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output = model((rgb, snd))
                    if not multitask:
                        _, preds = torch.max(output, 1)
                        loss = criteria[0](output, label)
                    else:
                        # Multitask: fused prediction plus per-modality auxiliary losses.
                        _, preds = torch.max(sum(output), 1)
                        loss = (criteria[0](output[0], label) + criteria[1](output[1], label)
                                + criteria[2](output[2], label))
                    if phase == 'train':
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.step()
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * rgb.size(0)
                running_corrects += torch.sum(preds == label.data)
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Acc: {:.4f}'.format(phase, epoch_acc))
            if phase == 'dev' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_sd = copy.deepcopy(model.state_dict())
    model.load_state_dict(best_model_sd)
    model.train(False)
    return best_acc
def test_avmnist_track_acc(model, dataloaders, dataset_sizes, device=None, multitask=False):
    model.train(False)
    phase = 'test'
    running_corrects = 0
    for data in dataloaders[phase]:
        rgb, snd, label = data['image'], data['audio'], data['label']
        rgb = rgb.to(device)
        snd = snd.to(device)
        label = label.to(device)
        output = model((rgb, snd))
        if not multitask:
            _, preds = torch.max(output, 1)
        else:
            _, preds = torch.max(sum(output), 1)
        running_corrects += torch.sum(preds == label.data)
    acc = running_corrects.double() / dataset_sizes[phase]
    return acc
def train_cifar_track_acc(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device,
                          num_epochs=200, verbose=False, use_intermediate=False):
    best_model_sd = copy.deepcopy(model.state_dict())
    best_error = 1e+100
    criterion2 = torch.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        if verbose:
            print()
        for phase in ['train', 'dev']:
            if phase == 'train':
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                rgb, gt_label = data[0], data[1]
                rgb = rgb.to(device)
                gt_label = gt_label.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output, output_i = model(rgb)
                    if not use_intermediate:
                        loss = criterion(output, gt_label)
                    else:
                        # Deep supervision: auxiliary head weighted by 0.4.
                        loss = criterion(output, gt_label) + 0.4 * criterion2(output_i, gt_label)
                    _, preds = torch.max(output, 1)
                    if phase == 'train':
                        scheduler.step()
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * rgb.size(0)
                running_corrects += torch.sum(preds == gt_label.data)
            epoch_error = 1.0 - (running_corrects.double() / dataset_sizes[phase])
            if phase == 'dev':
                if epoch_error < best_error:
                    best_error = epoch_error
                    best_model_sd = copy.deepcopy(model.state_dict())
                if verbose:
                    print('Epoch #{} val error: {}'.format(epoch, epoch_error))
    model.load_state_dict(best_model_sd)
    model.train(False)
    if verbose:
        print('Best val error: {}'.format(best_error))
    return 1.0 - best_error
def test_cifar_track_acc(model, dataloaders, dataset_sizes, device):
    phase = 'test'
    model.train(False)
    running_corrects = 0
    for data in dataloaders[phase]:
        rgb, gt_label = data[0], data[1]
        rgb = rgb.to(device)
        gt_label = gt_label.to(device)
        output, _ = model(rgb)
        _, preds = torch.max(output, 1)
        running_corrects += torch.sum(preds == gt_label.data)
    acc = running_corrects.double() / dataset_sizes[phase]
    return acc
def train_mmimdb_track_f1(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes,
                          device=None, num_epochs=200, verbose=False, init_f1=0.0, th_fscore=0.3):
    best_model_sd = copy.deepcopy(model.state_dict())
    best_f1 = init_f1
    failsafe = True
    cont_overloop = 0
    while failsafe:
        for epoch in range(num_epochs):
            for phase in ['train', 'dev']:
                if phase == 'train':
                    if not isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                        scheduler.step()
                    model.train(True)
                else:
                    model.train(False)
                list_preds = []
                list_label = []
                running_loss = 0.0
                for data in dataloaders[phase]:
                    image, text, label = data['image'], data['text'], data['label']
                    image = image.to(device)
                    text = text.to(device)
                    label = label.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        output = model(text, image)
                        if isinstance(output, tuple):
                            output = output[-1]
                        _, preds = torch.max(output, 1)
                        loss = criterion(output, label)
                        if phase == 'train':
                            if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                                scheduler.step()
                                scheduler.update_optimizer(optimizer)
                            loss.backward()
                            optimizer.step()
                        if phase == 'dev':
                            # Multi-label prediction: threshold the per-class sigmoids.
                            preds_th = torch.nn.functional.sigmoid(output) > th_fscore
                            list_preds.append(preds_th.cpu())
                            list_label.append(label.cpu())
                    running_loss += loss.item() * image.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'dev':
                    y_pred = torch.cat(list_preds, dim=0).numpy()
                    y_true = torch.cat(list_label, dim=0).numpy()
                    curr_f1 = f1_score(y_true, y_pred, average='samples')
                    if verbose:
                        print('epoch #{} {} F1: {:.4f} '.format(epoch, phase, curr_f1))
                # `epoch_loss != epoch_loss` is a NaN check.
                if phase == 'train' and epoch_loss != epoch_loss:
                    print('Nan loss during training, escaping')
                    model.load_state_dict(best_model_sd)
                    model.train(False)
                    return best_f1
                if phase == 'dev':
                    if curr_f1 > best_f1:
                        best_f1 = curr_f1
                        best_model_sd = copy.deepcopy(model.state_dict())
        # `best_f1 != best_f1` is a NaN check; retry once if a single-epoch run recorded NaN.
        if best_f1 != best_f1 and num_epochs == 1 and cont_overloop < 1:
            failsafe = True
            print('Recording a NaN F1, training for one more epoch.')
        else:
            failsafe = False
        cont_overloop += 1
    model.load_state_dict(best_model_sd)
    model.train(False)
    if best_f1 != best_f1:
        best_f1 = 0.0
    return best_f1
def train_ntu_track_acc(model, criteria, optimizer, scheduler, dataloaders, dataset_sizes,
                        device=None, num_epochs=200, verbose=False, multitask=False):
    best_model_sd = copy.deepcopy(model.state_dict())
    best_acc = 0
    for epoch in range(num_epochs):
        for phase in ['train', 'dev']:
            if phase == 'train':
                if not isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                    scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                rgb, ske, label = data['rgb'], data['ske'], data['label']
                rgb = rgb.to(device)
                ske = ske.to(device)
                label = label.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output = model((rgb, ske))
                    if not multitask:
                        _, preds = torch.max(output, 1)
                        if isinstance(criteria, list):
                            loss = criteria[0](output, label)
                        else:
                            loss = criteria(output, label)
                    else:
                        _, preds = torch.max(sum(output), 1)
                        loss = (criteria[0](output[0], label) + criteria[1](output[1], label)
                                + criteria[2](output[2], label))
                    if phase == 'train':
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.step()
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * rgb.size(0)
                running_corrects += torch.sum(preds == label.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            if phase == 'dev' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_sd = copy.deepcopy(model.state_dict())
    model.load_state_dict(best_model_sd)
    model.train(False)
    return best_acc
def test_ntu_track_acc(model, dataloaders, dataset_sizes, device=None, multitask=False):
    model.train(False)
    phase = 'test'
    running_corrects = 0
    for data in dataloaders[phase]:
        rgb, ske, label = data['rgb'], data['ske'], data['label']
        rgb = rgb.to(device)
        ske = ske.to(device)
        label = label.to(device)
        output = model((rgb, ske))
        if not multitask:
            _, preds = torch.max(output, 1)
        else:
            _, preds = torch.max(sum(output), 1)
        running_corrects += torch.sum(preds == label.data)
    acc = running_corrects.double() / dataset_sizes[phase]
    return acc
class ModelSearcher():
    def __init__(self, args):
        self.args = args

    def search(self):
        pass

    def _epnas(self, model_type, surrogate_dict, dataloaders, dataset_searchmethods, device):
        surrogate = surrogate_dict['model']
        s_crite = surrogate_dict['criterion']
        s_data = surr.SurrogateDataloader()
        s_optim = op.Adam(surrogate.parameters(), lr=self.args.lr_surrogate)
        # Dataset-specific search hooks.
        train_sampled_models = dataset_searchmethods['train_sampled_fun']
        get_possible_layer_configurations = dataset_searchmethods['get_layer_confs']
        temperature = self.args.initial_temperature
        sampled_k_confs = []
        shared_weights = dict()
        for si in range(self.args.search_iterations):
            if self.args.verbose:
                print(50 * '=')
                print('Search iteration {}/{} '.format(si, self.args.search_iterations))
            for progression_index in range(self.args.max_progression_levels):
                if self.args.verbose:
                    print(25 * '-')
                    print('Progressive step {}/{} '.format(progression_index,
                                                           self.args.max_progression_levels))
                list_possible_layer_confs = get_possible_layer_configurations(progression_index)
                all_configurations = tools.merge_unfolded_with_sampled(
                    sampled_k_confs, list_possible_layer_confs, progression_index)
                if (si + progression_index) == 0:
                    # Bootstrap: train every unfolded configuration, then fit the surrogate.
                    all_accuracies = train_sampled_models(all_configurations, model_type, dataloaders,
                                                          self.args, device, state_dict=shared_weights)
                    tools.update_surrogate_dataloader(s_data, all_configurations, all_accuracies)
                    tools.train_surrogate(surrogate, s_data, s_optim, s_crite, self.args, device)
                    if self.args.verbose:
                        print('Trained architectures: ')
                        print(list(zip(all_configurations, all_accuracies)))
                else:
                    # Later steps: score configurations with the surrogate instead of training them all.
                    all_accuracies = tools.predict_accuracies_with_surrogate(all_configurations,
                                                                             surrogate, device)
                    if self.args.verbose:
                        print('Predicted accuracies: ')
                        print(list(zip(all_configurations, all_accuracies)))
                if (si + progression_index) == 0:
                    sampled_k_confs = tools.sample_k_configurations(all_configurations, all_accuracies,
                                                                    self.args.num_samples, temperature)
                    if self.args.verbose:
                        estimated_accuracies = tools.predict_accuracies_with_surrogate(
                            all_configurations, surrogate, device)
                        diff = np.abs(np.array(estimated_accuracies) - np.array(all_accuracies))
                        print('Error on accuracies = {}'.format(diff))
                else:
                    sampled_k_confs = tools.sample_k_configurations(all_configurations, all_accuracies,
                                                                    self.args.num_samples, temperature)
                    sampled_k_accs = train_sampled_models(sampled_k_confs, model_type, dataloaders,
                                                          self.args, device, state_dict=shared_weights)
                    tools.update_surrogate_dataloader(s_data, sampled_k_confs, sampled_k_accs)
                    err = tools.train_surrogate(surrogate, s_data, s_optim, s_crite, self.args, device)
                    if self.args.verbose:
                        print('Trained architectures: ')
                        print(list(zip(sampled_k_confs, sampled_k_accs)))
                        print('with surrogate error: {}'.format(err))
                # Anneal the sampling temperature as the search progresses.
                iteration = (si * self.args.search_iterations) + progression_index
                temperature = tools.compute_temperature(iteration, self.args)
                if self.args.verbose:
                    print('Temperature is being set to {}'.format(temperature))
        return s_data

    def _randsearch(self, model_type, dataloaders, dataset_searchmethods, device):
        s_data = surr.SurrogateDataloader()
        train_sampled_models = dataset_searchmethods['train_sampled_fun']
        get_possible_layer_configurations = dataset_searchmethods['get_layer_confs']
        sampled_k_confs = []
        shared_weights = dict()
        for si in range(self.args.search_iterations * self.args.max_progression_levels):
            if self.args.verbose:
                print(50 * '=')
                print('Random Search iteration {}/{} '.format(
                    si, self.args.search_iterations * self.args.max_progression_levels))
            sampled_k_confs = tools.sample_k_configurations_directly(
                self.args.num_samples, self.args.max_progression_levels,
                get_possible_layer_configurations)
            sampled_k_accs = train_sampled_models(sampled_k_confs, model_type, dataloaders,
                                                  self.args, device, state_dict=shared_weights)
            tools.update_surrogate_dataloader(s_data, sampled_k_confs, sampled_k_accs)
            if self.args.verbose:
                print('Trained architectures: ')
                print(list(zip(sampled_k_confs, sampled_k_accs)))
        return s_data
class AVMNISTSearcher(ModelSearcher):
    def __init__(self, args, device):
        super(AVMNISTSearcher, self).__init__(args)
        self.device = device
        transformer = transforms.Compose([avmnist_data.ToTensor(),
                                          avmnist_data.Normalize((0.1307,), (0.3081,))])
        dataset_training = avmnist_data.AVMnist(args.datadir, transform=transformer, stage='train')
        dataset_validate = avmnist_data.AVMnist(args.datadir, transform=transformer, stage='train')
        # Carve a validation split out of the training set by index.
        train_indices = list(range(0, 50000))
        valid_indices = list(range(50000, 55000))
        train_subset = Subset(dataset_training, train_indices)
        valid_subset = Subset(dataset_validate, valid_indices)
        trainloader = torch.utils.data.DataLoader(train_subset, batch_size=args.batchsize,
                                                  shuffle=True, num_workers=args.num_workers)
        devloader = torch.utils.data.DataLoader(valid_subset, batch_size=args.batchsize,
                                                shuffle=False, num_workers=args.num_workers)
        self.dataloaders = {'train': trainloader, 'dev': devloader}

    def search(self):
        avmnist_searchmethods = {'train_sampled_fun': avmnist.train_sampled_models,
                                 'get_layer_confs': avmnist.get_possible_layer_configurations}
        if not self.args.randsearch:
            surrogate = surr.SimpleRecurrentSurrogate(100, 3, 100)
            surrogate.to(self.device)
            surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
            return self._epnas(avmnist.Searchable_Audio_Image_Net, surrogate_dict,
                               self.dataloaders, avmnist_searchmethods, self.device)
        else:
            return self._randsearch(avmnist.Searchable_Audio_Image_Net, self.dataloaders,
                                    avmnist_searchmethods, self.device)
class NTUSearcher(ModelSearcher):
    def __init__(self, args, device):
        super(NTUSearcher, self).__init__(args)
        self.device = device
        transformer_val = transforms.Compose([ntu_data.NormalizeLen(args.vid_len), ntu_data.ToTensor()])
        transformer_tra = transforms.Compose([ntu_data.AugCrop(), ntu_data.NormalizeLen(args.vid_len),
                                              ntu_data.ToTensor()])
        dataset_training = ntu_data.NTU(args.datadir, transform=transformer_tra, stage='trainexp', args=args)
        dataset_dev = ntu_data.NTU(args.datadir, transform=transformer_val, stage='dev', args=args)
        datasets = {'train': dataset_training, 'dev': dataset_dev}
        self.dataloaders = {x: DataLoader(datasets[x], batch_size=args.batchsize, shuffle=True,
                                          num_workers=args.num_workers, drop_last=False)
                            for x in ['train', 'dev']}

    def search(self):
        surrogate = surr.SimpleRecurrentSurrogate(100, 3, 100)
        surrogate.to(self.device)
        surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
        ntu_searchmethods = {'train_sampled_fun': ntu.train_sampled_models,
                             'get_layer_confs': ntu.get_possible_layer_configurations}
        return self._epnas(ntu.Searchable_Skeleton_Image_Net, surrogate_dict, self.dataloaders,
                           ntu_searchmethods, self.device)
class CifarSearcher(ModelSearcher):
    def __init__(self, args, device):
        super(CifarSearcher, self).__init__(args)
        self.device = device
        train_indices = list(range(0, 45000))
        valid_indices = list(range(45000, 50000))
        transformer_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)),
        ])
        transformer_val = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)),
        ])
        transformers = {'train': transformer_train, 'test': transformer_val}
        dataset_training = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True,
                                                        transform=transformers['train'])
        dataset_validate = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True,
                                                        transform=transformers['train'])
        train_subset = Subset(dataset_training, train_indices)
        valid_subset = Subset(dataset_validate, valid_indices)
        trainloader = torch.utils.data.DataLoader(train_subset, batch_size=args.batchsize,
                                                  shuffle=True, num_workers=args.num_workers)
        devloader = torch.utils.data.DataLoader(valid_subset, batch_size=args.batchsize,
                                                shuffle=False, num_workers=args.num_workers)
        self.dataloaders = {'train': trainloader, 'dev': devloader}

    def search(self):
        surrogate = surr.SimpleRecurrentSurrogate(100, 4, 100)
        surrogate.to(self.device)
        surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
        cifar_searchmethods = {'train_sampled_fun': cifar.train_sampled_models,
                               'get_layer_confs': cifar.get_possible_layer_configurations}
        return self._epnas(cifar.Searchable_MicroCNN, surrogate_dict, self.dataloaders,
                           cifar_searchmethods, self.device)
def update_from_loss_module(monitors, output_dict, loss_update): (tmp_monitors, tmp_outputs) = loss_update monitors.update(tmp_monitors) output_dict.update(tmp_outputs)
class Model(LeftModel): def __init__(self, parsed_train_path, parsed_test_path, output_vocab): self.parsed_train_path = parsed_train_path self.parsed_test_path = parsed_test_path logger.critical(('Train parsing: ' + self.parsed_train_path)) logger.critical(('Test parsing: ' + self.parsed_test_path)) domain = make_domain(self.parsed_test_path) super().__init__(domain, output_vocab) from left.generalized_fol_executor import NCGeneralizedFOLExecutor self.executor = NCGeneralizedFOLExecutor(self.domain, self.parser, allow_shift_grounding=True) train_utterance_to_parsed_dict = io.load_pkl(self.parsed_train_path) test_utterance_to_parsed_dict = io.load_pkl(self.parsed_test_path) utterance_to_parsed_dict = train_utterance_to_parsed_dict.copy() utterance_to_parsed_dict.update(test_utterance_to_parsed_dict) self.utterance_to_parsed_dict = utterance_to_parsed_dict self.attribute_concepts.sort() logger.critical(('Num attribute concepts: ' + str(len(self.attribute_concepts)))) k = self.attribute_concepts v = list(range(len(self.attribute_concepts))) self.attribute_class_to_idx = dict(zip(k, v)) def forward(self, feed_dict): feed_dict = GView(feed_dict) (monitors, outputs) = ({}, {}) f_sng = self.forward_sng(feed_dict) (results, executions, parsings, scored) = (list(), list(), list(), list()) for i in range(len(feed_dict.program_tree)): with self.executor.with_grounding(self.grounding_cls(f_sng[i], self, self.training, self.attribute_class_to_idx, None)): this_input_str = feed_dict.question_text[i] parsing_list = tuple([self.utterance_to_parsed_dict[this_input_str]]) parsing = self.parser.parse_expression(parsing_list[0]) execution = self.executor.execute(parsing).tensor program = execution results.append((parsing, program, execution)) executions.append(execution) parsings.append(parsing) scored.append(1) outputs['parsing'] = parsings outputs['results'] = results outputs['executions'] = executions outputs['scored'] = scored update_from_loss_module(monitors, outputs, self.qa_loss(outputs['executions'], feed_dict.answer, feed_dict.question_type)) if self.training: loss = monitors['loss/qa'] return (loss, monitors, outputs) else: outputs['monitors'] = monitors return outputs def extract_concepts(self, domain): from left.domain import read_concepts_v2 (_, arity_2, arity_3) = read_concepts_v2(domain) from concepts.benchmark.vision_language.babel_qa.humanmotion_constants import attribute_concepts_mapping arity_1 = ((attribute_concepts_mapping['Motion'] + attribute_concepts_mapping['Part']) + attribute_concepts_mapping['Direction']) return (arity_1, arity_2, arity_3) def forward_sng(self, feed_dict): (motion_encodings, motion_encodings_rel, motion_encodings_output_vocab) = self.scene_graph(feed_dict.joints) f_sng = [] start_seg = 0 for seq_num_segs in feed_dict.num_segs: f_sng.append({'attribute': motion_encodings[start_seg:(start_seg + seq_num_segs)], 'relation': motion_encodings_rel[start_seg:(start_seg + seq_num_segs)], 'output_vocab': motion_encodings_output_vocab[start_seg:(start_seg + seq_num_segs)]}) start_seg += seq_num_segs assert (start_seg == motion_encodings.size()[0]) return f_sng
def make_model(parsed_train_path, parsed_test_path, output_vocab): return Model(parsed_train_path, parsed_test_path, output_vocab)
def make_dataset(mode, scenes_json, questions_json, image_root, output_vocab_json): return make_custom_transfer_dataset(scenes_json, questions_json, image_root=image_root, output_vocab_json=output_vocab_json, query_list_key=g_query_list_keys[mode], custom_fields=[], incl_scene=False)
def parse_arguments(notebook_options=None): "Parse the arguments for the training (or test) execution of a ReferIt3D net.\n :param notebook_options: (list) e.g., ['--max-distractors', '100'] to give/parse arguments from inside a jupyter notebook.\n :return:\n " parser = argparse.ArgumentParser(description='ReferIt3D Nets + Ablations') parser.add_argument('-scannet-file', type=str, required=True, help='pkl file containing the data of Scannet as generated by running XXX') parser.add_argument('-referit3D-file', type=str, required=True) parser.add_argument('--log-dir', type=str, help='where to save training-progress, model, etc') parser.add_argument('--resume-path', type=str, help='model-path to resume') parser.add_argument('--config-file', type=str, default=None, help='config file') parser.add_argument('--max-distractors', type=int, default=0, help='Maximum number of distracting objects to be drawn from a scan.') parser.add_argument('--max-seq-len', type=int, default=24, help='utterances with more tokens than this will be ignored.') parser.add_argument('--points-per-object', type=int, default=1024, help='points sampled to make a point-cloud per object of a scan.') parser.add_argument('--unit-sphere-norm', type=str2bool, default=False, help='Normalize each point-cloud to be in a unit sphere.') parser.add_argument('--mentions-target-class-only', type=str2bool, default=True, help='If True, drop references that do not explicitly mention the target-class.') parser.add_argument('--min-word-freq', type=int, default=3) parser.add_argument('--max-test-objects', type=int, default=0) parser.add_argument('--mode', type=str, default='train', choices=['train', 'evaluate']) parser.add_argument('--max-train-epochs', type=int, default=100, help='number of training epochs. [default: 100]') parser.add_argument('--n-workers', type=int, default=(- 1), help='number of data loading workers [default: -1 means all available cores minus 1]') parser.add_argument('--random-seed', type=int, default=2020, help='Control pseudo-randomness (net-wise, point-cloud sampling etc.) fostering reproducibility.') parser.add_argument('--init-lr', type=float, default=0.0005, help='learning rate for training.') parser.add_argument('--patience', type=int, default=10, help='if test-acc does not improve for patience consecutive epochs, stop training.') parser.add_argument('--model', type=str, default='referIt3DNet', choices=['referIt3DNet', 'directObj2Lang', 'referIt3DNetAttentive']) parser.add_argument('--object-latent-dim', type=int, default=128) parser.add_argument('--language-latent-dim', type=int, default=128) parser.add_argument('--word-embedding-dim', type=int, default=64) parser.add_argument('--graph-out-dim', type=int, default=128) parser.add_argument('--dgcnn-intermediate-feat-dim', nargs='+', type=int, default=[128, 128, 128, 128]) parser.add_argument('--object-encoder', type=str, default='pnet_pp', choices=['pnet_pp', 'pnet']) parser.add_argument('--language-fusion', type=str, default='both', choices=['before', 'after', 'both']) parser.add_argument('--word-dropout', type=float, default=0.1) parser.add_argument('--knn', type=int, default=7, help='number of neighbors for DGCNN') parser.add_argument('--lang-cls-alpha', type=float, default=0.5, help='if > 0 a loss for guessing the target via language only is added.') parser.add_argument('--obj-cls-alpha', type=float, default=0.5, help='if > 0 a loss for predicting the class of each segmented object is added.') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device. [default: 0]') parser.add_argument('--n-gpus', type=int, default=1, help='number of gpu devices. [default: 1]') parser.add_argument('--batch-size', type=int, default=32, help='batch size per gpu. [default: 32]') parser.add_argument('--save-args', type=str2bool, default=True, help='save arguments in a json.txt') parser.add_argument('--experiment-tag', type=str, default=None, help='will be used to name a subdir for log-dir if given') parser.add_argument('--cluster-pid', type=str, default=None) parser.add_argument('--augment-with-sr3d', type=str, default=None, help='csv with sr3d data to augment the training data of args.referit3D-file') parser.add_argument('--vocab-file', type=str, default=None, help='optional, .pkl file for vocabulary (useful when working with multiple datasets and a single model).') parser.add_argument('--fine-tune', type=str2bool, default=False, help='use if you train with dataset x and then continue training with another dataset') parser.add_argument('--s-vs-n-weight', type=float, default=None, help='importance weight of sr3d vs nr3d examples [use less than 1]') if (notebook_options is not None): args = parser.parse_args(notebook_options) else: args = parser.parse_args() if (args.config_file is not None): with open(args.config_file, 'r') as fin: configs_dict = json.load(fin) apply_configs(args, configs_dict) args_string = pprint.pformat(vars(args)) print(args_string) return args
def read_saved_args(config_file, override_args=None, verbose=True): "\n :param config_file:\n :param override_args: dict e.g., {'gpu': '0'}\n :param verbose:\n :return:\n " parser = ArgumentParser() args = parser.parse_args([]) with open(config_file, 'r') as f_in: args.__dict__ = json.load(f_in) if (override_args is not None): for (key, val) in override_args.items(): args.__setattr__(key, val) if verbose: args_string = pprint.pformat(vars(args)) print(args_string) return args
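# Usage sketch for read_saved_args (the path is illustrative): restore a run's
# saved arguments and override a couple of them for local evaluation.
args = read_saved_args('logs/my_run/config.json.txt', override_args={'gpu': '1', 'mode': 'evaluate'}, verbose=False)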
def apply_configs(args, config_dict): for (k, v) in config_dict.items(): setattr(args, k, v)
def str2bool(v): '\n Boolean values for argparse\n ' if isinstance(v, bool): return v if (v.lower() in ('yes', 'true', 't', 'y', '1')): return True elif (v.lower() in ('no', 'false', 'f', 'n', '0')): return False else: raise argparse.ArgumentTypeError('Boolean value expected.')
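# Why str2bool exists: argparse's type=bool treats any non-empty string as
# True, so the boolean flags above route through this converter instead.
import argparse
_p = argparse.ArgumentParser()
_p.add_argument('--unit-sphere-norm', type=str2bool, default=False)
assert _p.parse_args(['--unit-sphere-norm', 'yes']).unit_sphere_norm is True
assert _p.parse_args(['--unit-sphere-norm', '0']).unit_sphere_norm is False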
def create_dir(dir_path): "\n Creates a directory (or nested directories) if they don't exist.\n " if (not osp.exists(dir_path)): os.makedirs(dir_path) return dir_path
def unpickle_data(file_name, python2_to_3=False): '\n Restore data previously saved with pickle_data().\n :param file_name: file holding the pickled data.\n :param python2_to_3: (boolean), if True, pickle happened under python2x, unpickling under python3x.\n :return: a generator over the un-pickled items.\n Note: on implementing python2_to_3, see\n https://stackoverflow.com/questions/28218466/unpickling-a-python-2-object-with-python-3\n ' in_file = open(file_name, 'rb') if python2_to_3: size = cPickle.load(in_file, encoding='latin1') else: size = cPickle.load(in_file) for _ in range(size): if python2_to_3: (yield cPickle.load(in_file, encoding='latin1')) else: (yield cPickle.load(in_file)) in_file.close()
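# unpickle_data() expects the on-disk layout written by its counterpart
# pickle_data(): an item count first, then each item pickled in sequence. A
# minimal sketch of that writer, assuming exactly this layout:
def pickle_data(file_name, *args):
    with open(file_name, 'wb') as out_file:
        cPickle.dump(len(args), out_file, protocol=2)
        for item in args:
            cPickle.dump(item, out_file, protocol=2)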
def read_lines(file_name): trimmed_lines = [] with open(file_name) as fin: for line in fin: trimmed_lines.append(line.rstrip()) return trimmed_lines
def decode_stimulus_string(s): '\n Split into scene_id, instance_label, # objects, target object id,\n distractors object id.\n :param s: the stimulus string\n ' if (len(s.split('-', maxsplit=4)) == 4): (scene_id, instance_label, n_objects, target_id) = s.split('-', maxsplit=4) distractors_ids = '' else: (scene_id, instance_label, n_objects, target_id, distractors_ids) = s.split('-', maxsplit=4) instance_label = instance_label.replace('_', ' ') n_objects = int(n_objects) target_id = int(target_id) distractors_ids = [int(i) for i in distractors_ids.split('-') if (i != '')] assert (len(distractors_ids) == (n_objects - 1)) return (scene_id, instance_label, n_objects, target_id, distractors_ids)
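# Worked example (the stimulus string is illustrative): scene id, target
# class, object count, target id, then dash-joined distractor ids.
s = 'scene0525_00-chair-3-4-12-15'
scene_id, label, n_objects, target_id, distractors = decode_stimulus_string(s)
# scene_id == 'scene0525_00', label == 'chair', n_objects == 3,
# target_id == 4, distractors == [12, 15]  (len(distractors) == n_objects - 1)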
def objects_counter_percentile(scan_ids, all_scans, prc): all_obs_len = list() for scan_id in all_scans: if (scan_id in scan_ids): all_obs_len.append(len(all_scans[scan_id].three_d_objects)) return np.percentile(all_obs_len, prc)
def mean_color(scan_ids, all_scans): mean_rgb = np.zeros((1, 3), dtype=np.float32) n_points = 0 for scan_id in scan_ids: color = all_scans[scan_id].color mean_rgb += np.sum(color, axis=0) n_points += len(color) mean_rgb /= n_points return mean_rgb
def scannet_official_train_val(pre_fix, valid_views=None, verbose=True): "\n :param valid_views: None or list like ['00', '01']\n :return:\n " train_split = osp.join(pre_fix, 'scannetv2_train.txt') train_split = read_lines(train_split) test_split = osp.join(pre_fix, 'scannetv2_val.txt') test_split = read_lines(test_split) if (valid_views is not None): train_split = [sc for sc in train_split if (sc[(- 2):] in valid_views)] test_split = [sc for sc in test_split if (sc[(- 2):] in valid_views)] if verbose: print('#train/test scans:', len(train_split), '/', len(test_split)) scans_split = dict() scans_split['train'] = set(train_split) scans_split['test'] = set(test_split) return scans_split
def load_scan_related_data(pre_fix, preprocessed_scannet_file, verbose=True, add_pad=True): (_, all_scans) = unpickle_data(preprocessed_scannet_file) if verbose: print('Loaded in RAM {} scans'.format(len(all_scans))) instance_labels = set() for scan in all_scans: idx = np.array([o.object_id for o in scan.three_d_objects]) instance_labels.update([o.instance_label for o in scan.three_d_objects]) assert np.all((idx == np.arange(len(idx)))) all_scans = {scan.scan_id: scan for scan in all_scans} class_to_idx = {} i = 0 for el in sorted(instance_labels): class_to_idx[el] = i i += 1 if verbose: print('{} instance classes exist in these scans'.format(len(class_to_idx))) if add_pad: class_to_idx['pad'] = len(class_to_idx) scans_split = scannet_official_train_val(pre_fix) return (all_scans, scans_split, class_to_idx)
def load_referential_data(args, referit_csv, scans_split): '\n :param args:\n :param referit_csv:\n :param scans_split:\n :return:\n ' referit_data_train = pd.read_csv(referit_csv) referit_data_test = pd.read_csv(referit_csv.replace('train', 'test')) referit_data = pd.concat([referit_data_train, referit_data_test], ignore_index=True, sort=False) print(len(referit_data)) if args.mentions_target_class_only: n_original = len(referit_data) referit_data = referit_data[referit_data['mentions_target_class']] referit_data.reset_index(drop=True, inplace=True) print('Dropping utterances without explicit mention to the target class {}->{}'.format(n_original, len(referit_data))) try: referit_data = referit_data[['tokens', 'instance_type', 'scan_id', 'dataset', 'target_id', 'utterance', 'stimulus_id', 'anchor_ids']] except: referit_data = referit_data[['tokens', 'instance_type', 'scan_id', 'dataset', 'target_id', 'utterance', 'stimulus_id']] referit_data.tokens = referit_data['tokens'].apply(literal_eval) is_train = referit_data.scan_id.apply((lambda x: (x in scans_split['train']))) referit_data['is_train'] = is_train train_token_lens = referit_data.tokens[is_train].apply((lambda x: len(x))) print('{}-th percentile of token length for remaining (training) data is: {:.1f}'.format(95, np.percentile(train_token_lens, 95))) n_original = len(referit_data) referit_data = referit_data[referit_data.tokens.apply((lambda x: (len(x) <= args.max_seq_len)))] referit_data.reset_index(drop=True, inplace=True) print('Dropping utterances with more than {} tokens, {}->{}'.format(args.max_seq_len, n_original, len(referit_data))) if (args.augment_with_sr3d is not None): print('Adding Sr3D as augmentation.') sr3d = pd.read_csv(args.augment_with_sr3d) sr3d.tokens = sr3d['tokens'].apply(literal_eval) is_train = sr3d.scan_id.apply((lambda x: (x in scans_split['train']))) sr3d['is_train'] = is_train sr3d = sr3d[is_train] sr3d = sr3d[referit_data.columns] print('Dataset-size before augmentation:', len(referit_data)) referit_data = pd.concat([referit_data, sr3d], axis=0) referit_data.reset_index(inplace=True, drop=True) print('Dataset-size after augmentation:', len(referit_data)) context_size = referit_data[(~ referit_data.is_train)].stimulus_id.apply((lambda x: decode_stimulus_string(x)[2])) print('(mean) Random guessing among target-class test objects {:.4f}'.format((1 / context_size).mean())) return referit_data
def compute_auxiliary_data(referit_data, all_scans, args): 'Given a train-split compute useful quantities like mean-rgb, a word-vocabulary.\n :param referit_data: pandas Dataframe, as returned from load_referential_data()\n :param all_scans:\n :param args:\n :return:\n ' if args.vocab_file: vocab = Vocabulary.load(args.vocab_file) print('Using external, provided vocabulary with {} words.'.format(len(vocab))) else: train_tokens = referit_data[referit_data.is_train]['tokens'] vocab = build_vocab([x for x in train_tokens], args.min_word_freq) print('Length of vocabulary, with min_word_freq={} is {}'.format(args.min_word_freq, len(vocab))) if (all_scans is None): return vocab training_scan_ids = set(referit_data[referit_data['is_train']]['scan_id']) print('{} training scans will be used.'.format(len(training_scan_ids))) mean_rgb = mean_color(training_scan_ids, all_scans) prc = 90 obj_cnt = objects_counter_percentile(training_scan_ids, all_scans, prc) print('{}-th percentile of number of objects in the (training) scans is: {:.2f}'.format(prc, obj_cnt)) prc = 99 testing_scan_ids = set(referit_data[(~ referit_data['is_train'])]['scan_id']) obj_cnt = objects_counter_percentile(testing_scan_ids, all_scans, prc) print('{}-th percentile of number of objects in the (testing) scans is: {:.2f}'.format(prc, obj_cnt)) return (mean_rgb, vocab)
def trim_scans_per_referit3d_data(referit_data, scans): in_r3d = referit_data.scan_id.unique() to_drop = [] for k in scans: if (k not in in_r3d): to_drop.append(k) for k in to_drop: del scans[k] print('Dropped {} scans to reduce mem-foot-print.'.format(len(to_drop))) return scans
class Vocabulary(object): 'Simple vocabulary wrapper.' def __init__(self, special_symbols=None): self.word2idx = {} self.idx2word = {} self.idx = 0 self.special_symbols = None self.initialize_special_symbols(special_symbols) def initialize_special_symbols(self, special_symbols): if (special_symbols is None): self.special_symbols = ['<pad>', '<sos>', '<eos>', '<unk>'] else: self.special_symbols = special_symbols for s in self.special_symbols: self.add_word(s) for s in self.special_symbols: name = s.replace('<', '') name = name.replace('>', '') setattr(self, name, self(s)) def n_special(self): return len(self.special_symbols) def add_word(self, word): if (word not in self.word2idx): self.word2idx[word] = self.idx self.idx2word[self.idx] = word self.idx += 1 def __call__(self, word): if (word not in self.word2idx): return self.word2idx['<unk>'] return self.word2idx[word] def __len__(self): return len(self.word2idx) def encode(self, text, max_len=None, add_begin_end=True): "\n :param text: (list) of tokens ['a', 'nice', 'sunset']\n :param max_len:\n :param add_begin_end:\n :return: (list) of encoded tokens.\n " encoded = [self(token) for token in text] if (max_len is not None): encoded = encoded[:max_len] if add_begin_end: encoded = (([self('<sos>')] + encoded) + [self('<eos>')]) if (max_len is not None): encoded += ([self('<pad>')] * (max_len - len(text))) return encoded def decode(self, tokens): return [self.idx2word[token] for token in tokens] def decode_print(self, tokens): exclude = set([self.word2idx[s] for s in ['<sos>', '<eos>', '<pad>']]) words = [self.idx2word[token] for token in tokens if (token not in exclude)] return ' '.join(words) def __iter__(self): return iter(self.word2idx) def save(self, file_name): ' Save the current Vocabulary instance as a .pkl.\n :param file_name: where to save\n :return: None\n ' with open(file_name, mode='wb') as f: pickle.dump(self, f, protocol=2) @staticmethod def load(file_name): ' Load a previously saved Vocabulary instance.\n :param file_name: where it was saved\n :return: Vocabulary instance.\n ' with open(file_name, 'rb') as f: vocab = pickle.load(f) return vocab
def build_vocab(token_list, min_word_freq): 'Build a simple vocabulary wrapper.' counter = Counter() for tokens in token_list: counter.update(tokens) words = [word for (word, cnt) in counter.items() if (cnt >= min_word_freq)] vocab = Vocabulary() for (i, word) in enumerate(words): vocab.add_word(word) return vocab
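# End-to-end usage of the vocabulary helpers (the token lists are illustrative).
tokens = [['the', 'red', 'chair'], ['the', 'blue', 'chair'], ['the', 'red', 'table']]
vocab = build_vocab(tokens, min_word_freq=2)  # keeps 'the' (3x), 'red' (2x), 'chair' (2x)
ids = vocab.encode(['the', 'red', 'chair'], max_len=5)
# <sos>/<eos> wrap the sequence and '<pad>' fills it, so len(ids) == 5 + 2
assert vocab.decode_print(ids) == 'the red chair'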
def create_bare_domain() -> FunctionDomain: domain = FunctionDomain('Left') domain.define_type(ObjectType('Object')) domain.define_type(ObjectType('Object_Set')) domain.define_type(ObjectType('Action')) domain.define_function(Function('equal', FunctionTyping[BOOL](INT64, INT64))) domain.define_function(Function('greater_than', FunctionTyping[BOOL](INT64, INT64))) domain.define_function(Function('less_than', FunctionTyping[BOOL](INT64, INT64))) return domain
def create_default_parser(domain: FunctionDomain) -> NCGeneralizedFOLPythonParser: parser = NCGeneralizedFOLPythonParser(domain, inplace_definition=True, inplace_polymorphic_function=True, inplace_definition_type=True) return parser
def create_domain_from_parsing(codes: Dict[(str, List[str])]) -> FunctionDomain: domain = create_bare_domain() parser = create_default_parser(domain) for (prompt, prompt_codes) in jacinle.tqdm_gofor(codes, desc='Creating domain from parsings'): if isinstance(prompt_codes, str): prompt_codes = [prompt_codes] for code in prompt_codes: try: _ = parser.parse_expression(code) except Exception as e: print(e) continue return domain
def read_concepts_v1(domain: FunctionDomain) -> Tuple[(List[str], List[str], List[str])]: ds_functions = list(domain.functions.keys()) (attribute_concepts, relational_concepts, multi_relational_concepts) = ([], [], []) for f in ds_functions: if ('_Object_Object_Object' in f): multi_relational_concepts.append(f) elif ('_Object_Object' in f): relational_concepts.append(f) elif ('_Object' in f): attribute_concepts.append(f) else: pass attribute_concepts.sort() relational_concepts.sort() multi_relational_concepts.sort() return (attribute_concepts, relational_concepts, multi_relational_concepts)
def get_arity(function: Function) -> Optional[int]: ftype = function.ftype if (ftype.return_type != BOOL): return None for arg_type in ftype.argument_types: if (arg_type.typename not in ['Object', 'Object_Set', 'Action']): return None return len(ftype.argument_types)
def read_concepts_v2(domain: FunctionDomain) -> Tuple[(List[str], List[str], List[str])]: functions = {1: list(), 2: list(), 3: list()} for (name, function) in domain.functions.items(): arity = get_arity(function) if ((arity is not None) and (1 <= arity <= 3)): functions[arity].append(name) return (functions[1], functions[2], functions[3])
def read_description_categories(domain: FunctionDomain) -> Tuple[List[str]]: output = list() for (name, t) in domain.types.items(): if (t.typename not in ('Object', 'Object_Set', 'Action')): output.append(name) return output
def make_domain(parsed_test_path: str) -> FunctionDomain: codes = io.load_pkl(parsed_test_path) domain = create_domain_from_parsing(codes) return domain
class ExecutionTraceGetter(object): def __init__(self, trace_obj): self.trace_obj = trace_obj def get(self) -> List[Tuple[(E.Expression, TensorValue)]]: return self.trace_obj
def _get_self_mask(m): self_mask = torch.eye(m.size((- 1)), dtype=m.dtype, device=m.device) return self_mask
def _do_apply_self_mask(m): if (not g_options.use_self_mask): return m self_mask = _get_self_mask(m) return ((m * (1 - self_mask)) + ((- 10) * self_mask))
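# What _do_apply_self_mask computes, numerically: the diagonal of a relation
# logit matrix (an object related to itself) is forced to -10, i.e. "false"
# under the sign-as-truth reading used by the executor below.
import torch
m = torch.zeros(3, 3)
eye = torch.eye(3)
masked = ((m * (1 - eye)) + ((-10) * eye))  # zeros off-diagonal, -10 on it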
class NCGeneralizedFOLExecutor(FunctionDomainExecutor): def __init__(self, domain: FunctionDomain, parser: Optional[ParserBase]=None, allow_shift_grounding=False): super().__init__(domain, parser) self.allow_shift_grounding = allow_shift_grounding self.variable_stack = dict() self.view_stack = list() self._record_execution_trace = False self._execution_trace = list() variable_stack: Dict[(str, Variable)] 'A variable stack, used to store the variables that are used in the current scope.' view_stack: List[TensorValue] 'A view stack, used to store the variables that are used for viewpoint anchoring.' _count_margin = 0.25 _count_tau = 0.25 @property def training(self): return self.grounding.training def _count(self, x: TensorValue) -> TensorValue: if self.training: return torch.sigmoid(x.tensor).sum(dim=(- 1)) else: return (x.tensor > 0).sum(dim=(- 1)).float() def greater_than(self, x: TensorValue, y: TensorValue) -> TensorValue: if self.training: rv = ((((x.tensor - y.tensor) - 1) + (2 * self._count_margin)) / self._count_tau) else: rv = ((- 10) + (20 * (x.tensor > y.tensor).float())) return TensorValue(BOOL, [], rv, quantized=False) def less_than(self, x: TensorValue, y: TensorValue) -> TensorValue: return self.greater_than(y, x) def equal(self, x: TensorValue, y: TensorValue) -> TensorValue: if self.training: rv = ((((2 * self._count_margin) - (x.tensor - y.tensor).abs()) / (2 * self._count_margin)) / self._count_tau) else: rv = ((- 10) + (20 * (x.tensor == y.tensor).float())) return TensorValue(BOOL, [], rv, quantized=False) @contextlib.contextmanager def record_execution_trace(self): self._record_execution_trace = True self._execution_trace = list() (yield ExecutionTraceGetter(self._execution_trace)) self._record_execution_trace = False self._execution_trace = None def _execute(self, expr: E.Expression) -> TensorValue: rv = self._execute_inner(expr) if self._record_execution_trace: self._execution_trace.append((expr, rv)) return rv def _execute_inner(self, expr: E.Expression) -> TensorValue: if isinstance(expr, E.BoolExpression): if (expr.bool_op is E.BoolOpType.AND): if (isinstance(expr.arguments[0], E.GeneralizedQuantificationExpression) and (expr.arguments[0].quantification_op == 'view')): assert (len(expr.arguments) == 2) obj_anchor = self._execute(expr.arguments[0]) self.view_stack.append(obj_anchor) try: return self._execute(expr.arguments[1]) finally: self.view_stack.pop() args = [self._execute(arg) for arg in expr.arguments] expanded_args = expand_argument_values(args) expanded_tensors = [a.tensor for a in expanded_args] result = torch.stack(expanded_tensors, dim=(- 1)).amin(dim=(- 1)) return TensorValue(expanded_args[0].dtype, expanded_args[0].batch_variables, result, quantized=False) elif (expr.bool_op is E.BoolOpType.OR): args = [self._execute(arg) for arg in expr.arguments] expanded_args = expand_argument_values(args) expanded_tensors = [a.tensor for a in expanded_args] result = torch.stack(expanded_tensors, dim=(- 1)).amax(dim=(- 1)) return TensorValue(expanded_args[0].dtype, expanded_args[0].batch_variables, result, quantized=False) elif (expr.bool_op is E.BoolOpType.NOT): args = [self._execute(arg) for arg in expr.arguments] assert (len(args) == 1) result = args[0].tensor result = (torch.zeros_like(result) - result) return TensorValue(args[0].dtype, args[0].batch_variables, result, quantized=False) elif isinstance(expr, E.FunctionApplicationExpression): if (expr.function.name in self.function_implementations): func = self.function_implementations[expr.function.name] args = 
[self._execute(arg) for arg in expr.arguments] return func(*args) else: args = [self._execute(arg) for arg in expr.arguments] if (len(args) == 1): grounding_tensor = self.grounding.compute_similarity('attribute', expr.function.name) elif (len(args) == 2): if (len(self.view_stack) > 0): obj_anchor = self.view_stack[(- 1)] grounding_tensor = self.grounding.compute_similarity('multi_relation', expr.function.name) grounding_tensor = torch.einsum('ijk,i->jk', grounding_tensor, obj_anchor.tensor) else: grounding_tensor = self.grounding.compute_similarity('relation', expr.function.name) grounding_tensor = _do_apply_self_mask(grounding_tensor) else: assert (len(args) == 3) grounding_tensor = self.grounding.compute_similarity('multi_relation', expr.function.name) if (self.allow_shift_grounding and (len(args) == 2) and (len(grounding_tensor.size()) == 1)): shift = True else: shift = False batch_variable_names = list() dims_to_squeeze = list() for (i, arg) in enumerate(args): if isinstance(arg, Variable): batch_variable_names.append(arg.name) else: assert isinstance(arg, TensorValue) if (not shift): grounding_tensor = (grounding_tensor * jactorch.add_dim_as_except(arg.tensor, grounding_tensor, i)).sum(i, keepdim=True) dims_to_squeeze.append(i) for dim in reversed(dims_to_squeeze): grounding_tensor = grounding_tensor.squeeze(dim) return TensorValue(BOOL, batch_variable_names, grounding_tensor, quantized=False) elif isinstance(expr, E.VariableExpression): assert (expr.variable.name in self.variable_stack) return self.variable_stack[expr.variable.name] elif isinstance(expr, E.ConstantExpression): return expr.value elif isinstance(expr, E.QuantificationExpression): assert (expr.variable.name not in self.variable_stack) self.variable_stack[expr.variable.name] = expr.variable try: value = self._execute(expr.expression) variable_index = value.batch_variables.index(expr.variable.name) if (expr.quantification_op is E.QuantificationOpType.FORALL): return TensorValue(value.dtype, (value.batch_variables[:variable_index] + value.batch_variables[(variable_index + 1):]), value.tensor.amin(variable_index), quantized=False) elif (expr.quantification_op is E.QuantificationOpType.EXISTS): return TensorValue(value.dtype, (value.batch_variables[:variable_index] + value.batch_variables[(variable_index + 1):]), value.tensor.amax(variable_index), quantized=False) else: raise ValueError(f'Unknown quantification op {expr.quantification_op}.') finally: del self.variable_stack[expr.variable.name] elif isinstance(expr, E.GeneralizedQuantificationExpression): if (expr.quantification_op == 'iota'): assert (expr.variable.name not in self.variable_stack) self.variable_stack[expr.variable.name] = expr.variable try: value = self._execute(expr.expression) assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.' if (not g_options.use_softmax_iota): return value variable_index = value.batch_variables.index(expr.variable.name) return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False) finally: del self.variable_stack[expr.variable.name] elif (expr.quantification_op == 'point'): assert (expr.variable.name not in self.variable_stack) self.variable_stack[expr.variable.name] = expr.variable try: value = self._execute(expr.expression) assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.' 
variable_index = value.batch_variables.index(expr.variable.name) return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False) finally: del self.variable_stack[expr.variable.name] elif (expr.quantification_op == 'view'): assert (expr.variable.name not in self.variable_stack) self.variable_stack[expr.variable.name] = expr.variable try: value = self._execute(expr.expression) assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.' variable_index = value.batch_variables.index(expr.variable.name) return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False) finally: del self.variable_stack[expr.variable.name] elif (expr.quantification_op == 'describe'): expr: E.GeneralizedQuantificationExpression assert isinstance(expr.expression, E.FunctionApplicationExpression) if (expr.variable.dtype.typename == 'Object'): assert ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action'])) value = self._execute(expr.expression.arguments[1]) assert (len(value.batch_variables) == 1), f'Variable {expr.variable.name} is not the only batch variable in {value.batch_variables}.' answer = self.grounding.compute_description('attribute', 'Shape') answer = (value.tensor @ answer) return TensorValue(expr.return_type, [], answer, quantized=False) elif (expr.variable.dtype.typename == 'Action'): raise NotImplementedError('Describe not implemented for actions.') else: assert ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action'])) value = self._execute(expr.expression.arguments[1]) assert (len(value.batch_variables) == 1), f'Variable {expr.variable.name} is not the only batch variable in {value.batch_variables}.' answer = self.grounding.compute_description('attribute', expr.variable.dtype.typename) answer = (value.tensor @ answer) return TensorValue(expr.return_type, [], answer, quantized=False) elif (expr.quantification_op == 'execute'): assert (isinstance(expr.expression, E.FunctionApplicationExpression) and (len(expr.expression.arguments) == 3)) assert (isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name)) object_1 = self._execute(expr.expression.arguments[1]) object_2 = self._execute(expr.expression.arguments[2]) return self.grounding.compute_action(object_1, object_2, expr.expression.function.name) elif (expr.quantification_op == 'count'): assert (expr.variable.name not in self.variable_stack) self.variable_stack[expr.variable.name] = expr.variable try: value = self._execute(expr.expression) assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.' result = self._count(value) return TensorValue(INT64, value.batch_variables, result, quantized=False) finally: del self.variable_stack[expr.variable.name] else: raise ValueError(f'Unknown expression type {type(expr)}.')
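# The executor's soft-Boolean semantics in isolation: truth values are logits,
# conjunction is an elementwise min, disjunction a max, negation a sign flip,
# and counting during training is a sigmoid-then-sum, as in _count().
import torch
a = torch.tensor([2.0, -1.0, 0.5])   # per-object "is red" logits
b = torch.tensor([1.0, 3.0, -0.5])   # per-object "is large" logits
and_ab = torch.stack([a, b], dim=-1).amin(dim=-1)  # tensor([1.0, -1.0, -0.5])
or_ab = torch.stack([a, b], dim=-1).amax(dim=-1)   # tensor([2.0, 3.0, 0.5])
not_a = torch.zeros_like(a) - a                    # tensor([-2.0, 1.0, -0.5])
soft_count = torch.sigmoid(a).sum(dim=-1)          # differentiable object count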
def expand_argument_values(argument_values: Sequence[TensorValue]) -> List[TensorValue]: 'Expand a list of argument values to the same batch size.\n Args:\n argument_values: a list of argument values.\n Returns:\n the result list of argument values. All return values will have the same batch size.\n ' has_slot_var = False for arg in argument_values: if isinstance(arg, TensorValue): for var in arg.batch_variables: if (var == '??'): has_slot_var = True break if has_slot_var: return list(argument_values) if (len(argument_values) < 2): return list(argument_values) argument_values = list(argument_values) batch_variables = list() batch_sizes = list() for arg in argument_values: if isinstance(arg, TensorValue): for var in arg.batch_variables: if (var not in batch_variables): batch_variables.append(var) batch_sizes.append(arg.get_variable_size(var)) else: assert isinstance(arg, (int, slice)), arg masks = list() for (i, arg) in enumerate(argument_values): if isinstance(arg, TensorValue): argument_values[i] = arg.expand(batch_variables, batch_sizes) if (argument_values[i].tensor_mask is not None): masks.append(argument_values[i].tensor_mask) if (len(masks) > 0): final_mask = torch.stack(masks, dim=(- 1)).amin(dim=(- 1)) for arg in argument_values: if isinstance(arg, TensorValue): arg.tensor_mask = final_mask arg._mask_certified_flag = True return argument_values
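# The effect of expand_argument_values, shown with plain tensors: two values
# batched over different variables ('x' of size 3, 'y' of size 2) are expanded
# to the union of batch variables before any elementwise Boolean op is applied.
import torch
vx = torch.tensor([1.0, 2.0, 3.0])        # batch_variables == ['x']
vy = torch.tensor([10.0, 20.0])           # batch_variables == ['y']
vx_exp = vx.unsqueeze(1).expand(3, 2)     # now indexed by ('x', 'y')
vy_exp = vy.unsqueeze(0).expand(3, 2)
conj = torch.stack([vx_exp, vy_exp], dim=-1).amin(dim=-1)  # AND over ('x', 'y')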
class NCGeneralizedFOLPythonParser(FOLPythonParser): def _is_quantification_expression_name(self, name: str) -> bool: return (name in ['exists', 'forall', 'all', 'iota', 'describe', 'execute', 'point', 'count', 'view']) def _parse_quantification_expression_inner(self, function_name: str, var: Variable, body: ast.Call, counting_quantifier: Optional[int]=None) -> ValueOutputExpression: ctx = get_expression_definition_context() if (function_name in ['exists', 'forall']): assert (var.dtype.typename in ['Object', 'Action']), f'Quantification variable must be of type Object or Action, got {var.dtype}.' rv = super()._parse_quantification_expression_inner(function_name, var, body) if (rv.expression.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {rv.expression.return_type}.') return rv elif (function_name in ['all', 'iota']): if (counting_quantifier is not None): function_name = (function_name, counting_quantifier) assert (var.dtype.typename in ['Object', 'Action']), f'Quantification variable must be of type Object or Action, got {var.dtype}.' if (var.dtype.typename == 'Object'): if ((function_name == 'iota') and (counting_quantifier is None)): return_type = self.domain.types['Object'] else: return_type = self.domain.types['ObjectSet'] elif (var.dtype.typename == 'Action'): if ((function_name == 'iota') and (counting_quantifier is None)): return_type = self.domain.types['Action'] else: raise NotImplementedError('Does not support ActionSet') else: raise TypeError(f'Unknown type name: {var.dtype.typename}.') with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=return_type) elif (function_name == 'describe'): assert (counting_quantifier is None), 'Counting quantifier cannot be specified for describe().' with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype) elif (function_name == 'count'): assert (counting_quantifier is None), 'Counting quantifier cannot be specified for count().' assert (var.dtype.typename == 'Object'), f'Counting variable must be of type Object, got {var.dtype}.' with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=INT64) elif (function_name == 'execute'): assert (counting_quantifier is None), 'Counting quantifier cannot be specified for execute().' assert (var.dtype.typename == 'Action'), f'Execute variable must be of type Action, got {var.dtype}.' with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=BOOL) elif (function_name == 'point'): assert (counting_quantifier is None), 'Counting quantifier cannot be specified for point().' assert (var.dtype.typename == 'Object'), f'Point variable must be of type Object, got {var.dtype}.' 
with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype) elif (function_name == 'view'): assert (counting_quantifier is None), 'Counting quantifier cannot be specified for view().' assert (var.dtype.typename == 'Object'), f'View variable must be of type Object, got {var.dtype}.' with ctx.with_variables(var): body = self._parse_expression_inner(body) if (body.return_type != BOOL): raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.') return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype) else: raise ValueError(f'Unknown quantification expression name: {function_name}.') def _parse_function_application(self, function_name: str, expression: ast.Call): if (function_name == 'query'): assert (len(expression.args) == 1), f'query() takes exactly one argument, got {len(expression.args)}: {ast.dump(expression)}' return self._parse_expression_inner(expression.args[0]) else: return self._parse_function_application_simple(function_name, expression) def _parse_function_application_simple(self, function_name: str, expression: ast.Call) -> ValueOutputExpression: ctx = get_expression_definition_context() parsed_args = [self._parse_expression_inner(arg) for arg in expression.args] function = None if (function_name not in ctx.domain.functions): if (function_name == 'and_'): return AndExpression(*parsed_args) elif (function_name == 'or_'): return OrExpression(*parsed_args) if (self.inplace_definition or self.inplace_polymorphic_function): assert self.inplace_polymorphic_function for arg in parsed_args: if (not isinstance(arg.return_type, ObjectType)): raise ValueError(f'In-place polymorphic function definition requires all arguments to be object-typed, got {arg.return_type}.') if self.inplace_polymorphic_function: function_name = ((function_name + '_') + '_'.join([arg.return_type.typename for arg in parsed_args])) if (function_name in ctx.domain.functions): function = ctx.domain.functions[function_name] elif self.inplace_definition: function = Function(function_name, FunctionType(get_types(parsed_args), BOOL)) ctx.domain.define_function(function) else: raise KeyError(f'Function {function_name} is not defined in the domain.') else: raise KeyError(f'Function {function_name} is not defined in the domain.') else: function = ctx.domain.functions[function_name] return FunctionApplicationExpression(function, parsed_args)
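# A plausible surface-form program for this parser; the predicate names are
# invented and whether this exact lambda syntax matches the project's stored
# parsings is an assumption. With inplace definitions enabled, unknown
# predicates are added to the domain on first use, suffixed by their argument
# types (e.g. red_Object).
domain = create_bare_domain()
parser = create_default_parser(domain)
expr = parser.parse_expression('iota(Object, lambda x: and_(red(x), exists(Object, lambda y: left_of(x, y))))')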
class LeftModel(nn.Module): @staticmethod @def_configs_func def _def_configs(): configs.model.domain = 'referit3d' configs.model.scene_graph = '3d' configs.model.concept_embedding = 'vse' configs.model.sg_dims = [None, 128, 128, 128] configs.model.vse_hidden_dims = [None, 128, 128, (128 * 3)] configs.model.output_dim = 128 configs.model.use_predefined_ccg = False configs.train.refexp_add_supervision = True configs.train.attrcls_add_supervision = False configs.train.concept_add_supervision = False configs.train.weight_decay = 0 return configs def __init__(self, domain, output_vocab: Optional[Vocab]=None): super().__init__() self._def_configs() self.domain = domain (self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts) = self.extract_concepts(self.domain) self.attribute_description_categories = self.extract_description_categories(self.domain) self.output_vocab = output_vocab self.description_vocab_size = (len(self.output_vocab) if (self.output_vocab is not None) else None) self.use_resnet = False if (configs.model.scene_graph == '2d'): import left.nn.scene_graph.scene_graph_2d as sng self.scene_graph = sng.SceneGraph2D(256, configs.model.sg_dims, 16) import jactorch.models.vision.resnet as resnet self.resnet = resnet.resnet34(pretrained=True, incl_gap=False, num_classes=None) self.resnet.layer4 = jacnn.Identity() self.use_resnet = True elif (configs.model.scene_graph == '3d'): import left.nn.scene_graph.scene_graph_3d as sng self.scene_graph = sng.SceneGraph3D(configs.model.output_dim, len(self.attribute_concepts)) elif (configs.model.scene_graph == 'skeleton'): import left.nn.scene_graph.scene_graph_skeleton as sng self.scene_graph = sng.SceneGraphSkeleton(len(self.attribute_concepts), self.description_vocab_size) elif (configs.model.scene_graph is None): self.scene_graph = None else: raise ValueError(f'Unknown scene graph type: {configs.model.scene_graph}.') if (configs.model.concept_embedding == 'vse'): self.attribute_embedding = NCVSEConceptEmbedding() self.relation_embedding = NCVSEConceptEmbedding() self.multi_relation_embedding = NCVSEConceptEmbedding() from left.models.reasoning.reasoning import LeftGrounding self.grounding_cls = LeftGrounding elif (configs.model.concept_embedding == 'linear'): self.attribute_embedding = NCLinearConceptEmbedding() self.relation_embedding = NCLinearConceptEmbedding() self.multi_relation_embedding = NCLinearConceptEmbedding() from left.models.reasoning.reasoning import NCOneTimeComputingGrounding self.grounding_cls = NCOneTimeComputingGrounding elif (configs.model.concept_embedding == 'linear-tied-attr'): self.attribute_embedding = NCLinearConceptEmbedding(tied_attributes=True) self.relation_embedding = NCLinearConceptEmbedding() self.multi_relation_embedding = NCLinearConceptEmbedding() from left.models.reasoning.reasoning import NCOneTimeComputingGrounding self.grounding_cls = NCOneTimeComputingGrounding elif (configs.model.concept_embedding == 'clip'): self.attribute_embedding = NCVSEConceptEmbedding() self.relation_embedding = NCVSEConceptEmbedding() self.multi_relation_embedding = NCVSEConceptEmbedding() from left.models.reasoning.reasoning import NCDenseClipGrounding self.grounding_cls = NCDenseClipGrounding else: raise ValueError(f'Unknown concept embedding type: {configs.model.concept_embedding}.') self.init_concept_embeddings() from left.generalized_fol_executor import NCGeneralizedFOLExecutor self.parser = NCGeneralizedFOLPythonParser(self.domain, inplace_definition=False, inplace_polymorphic_function=True, 
inplace_definition_type=False) self.executor = NCGeneralizedFOLExecutor(self.domain, self.parser) from left.models.losses import RefExpLoss, AttrClsLoss, QALoss, PickPlaceLoss self.refexp_loss = RefExpLoss(add_supervision=configs.train.refexp_add_supervision) self.attrcls_loss = AttrClsLoss(add_supervision=configs.train.attrcls_add_supervision) self.qa_loss = QALoss(output_vocab) self.pickplace_loss = PickPlaceLoss() def extract_concepts(self, domain: FunctionDomain) -> Tuple[(List[str], List[str], List[str])]: return read_concepts_v2(domain) def extract_description_categories(self, domain: FunctionDomain) -> List[str]: return read_description_categories(domain) def init_concept_embeddings(self): if (configs.model.concept_embedding == 'vse'): for (arity, src, tgt) in zip([1, 2, 3], [self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts], [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]): tgt.init_attribute('all', configs.model.sg_dims[arity]) for word in src: tgt.init_concept(word, configs.model.vse_hidden_dims[arity], 'all') elif (configs.model.concept_embedding in ('linear', 'linear-tied-attr')): for (arity, src, tgt) in zip([1, 2, 3], [self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts], [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]): for word in src: tgt.init_concept(word, configs.model.sg_dims[arity]) if (len(self.attribute_concepts) > 0): if (self.description_vocab_size is not None): for word in self.attribute_description_categories: self.attribute_embedding.init_attribute(word, configs.model.sg_dims[1], self.description_vocab_size) for tgt in [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]: tgt.init_linear_layers() elif (configs.model.concept_embedding == 'clip'): pass else: raise ValueError(f'Unknown concept embedding type: {configs.model.concept_embedding}.') def forward_sng(self, feed_dict): raise NotImplementedError() def execute_program_from_parsing_string(self, question: str, raw_parsing: str, grounding, outputs: Dict[(str, Any)]): (parsing, program, execution, trace) = (None, None, None, None) with self.executor.with_grounding(grounding): try: try: parsing = raw_parsing program = self.parser.parse_expression(raw_parsing) except Exception as e: raise ExecutionFailed('Parsing failed for question: {}.'.format(question)) from e try: if (not self.training): with self.executor.record_execution_trace() as trace_getter: execution = self.executor.execute(program) trace = trace_getter.get() else: execution = self.executor.execute(program) except (KeyError, AttributeError) as e: logger.exception('Execution failed for question: {}\nProgram: {}.'.format(question, program)) raise ExecutionFailed('Execution failed for question: {}\nProgram: {}.'.format(question, program)) from e except ExecutionFailed as e: print(e) outputs.setdefault('results', list()).append((parsing, program, execution)) outputs.setdefault('executions', list()).append(execution) outputs.setdefault('parsings', list()).append(parsing) outputs.setdefault('execution_traces', list()).append(trace)
class ExecutionFailed(Exception): pass
class AGCNGraph(): def __init__(self, labeling_mode='spatial'): self.A = self.get_adjacency_matrix(labeling_mode) self.num_node = num_node self.self_link = self_link self.inward = inward self.outward = outward self.neighbor = neighbor def get_adjacency_matrix(self, labeling_mode=None): if (labeling_mode is None): return self.A if (labeling_mode == 'spatial'): A = get_spatial_graph(num_node, self_link, inward, outward) else: raise ValueError() return A
def edge2mat(link, num_node): A = np.zeros((num_node, num_node)) for (i, j) in link: A[(j, i)] = 1 return A
def normalize_digraph(A): Dl = np.sum(A, 0) (h, w) = A.shape Dn = np.zeros((w, w)) for i in range(w): if (Dl[i] > 0): Dn[(i, i)] = (Dl[i] ** (- 1)) AD = np.dot(A, Dn) return AD
def get_spatial_graph(num_node, self_link, inward, outward): I = edge2mat(self_link, num_node) In = normalize_digraph(edge2mat(inward, num_node)) Out = normalize_digraph(edge2mat(outward, num_node)) A = np.stack((I, In, Out)) return A
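# Worked example for the skeleton-graph helpers on a 3-joint chain 0-1-2.
import numpy as np
num_node_demo = 3
self_link_demo = [(i, i) for i in range(num_node_demo)]
inward_demo = [(1, 0), (2, 1)]   # child-to-parent edges
outward_demo = [(j, i) for (i, j) in inward_demo]
A = get_spatial_graph(num_node_demo, self_link_demo, inward_demo, outward_demo)
# A.shape == (3, 3, 3): the identity, column-normalized inward, and
# column-normalized outward partitions stacked for an AGCN-style layer.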
class SigmoidCrossEntropy(nn.Module): def __init__(self, one_hot=False): super().__init__() self.one_hot = one_hot self.bce = nn.BCEWithLogitsLoss(reduction='none') def forward(self, input, target): if (not self.one_hot): target = jactorch.one_hot_nd(target, input.size((- 1))) return self.bce(input, target).sum(dim=(- 1)).mean()
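# SigmoidCrossEntropy usage with integer targets: labels are one-hot encoded,
# BCE-with-logits is summed over classes, then averaged over the batch.
import torch
crit = SigmoidCrossEntropy(one_hot=False)
logits = torch.randn(4, 5)            # (batch, num_classes)
target = torch.tensor([0, 2, 4, 1])   # class indices
loss = crit(logits, target)           # scalar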
class MultilabelSigmoidCrossEntropy(nn.Module): def __init__(self, one_hot=False): super().__init__() self.one_hot = one_hot self.bce = nn.BCEWithLogitsLoss(reduction='none') def forward(self, input, labels): if (type(labels) in (tuple, list)): labels = torch.tensor(labels, dtype=torch.int64, device=input.device) assert (input.dim() == 1) if (not self.one_hot): with torch.no_grad(): mask = torch.zeros_like(input) if (labels.size(0) > 0): ones = torch.ones_like(labels, dtype=torch.float32) mask.scatter_(0, labels, ones) labels = mask return self.bce(input, labels).sum(dim=(- 1)).mean()
class MultilabelSigmoidCrossEntropyAndAccuracy(nn.Module): def __init__(self, one_hot=False, softmax=False, compute_loss=True): super().__init__() self.one_hot = one_hot self.softmax = softmax self.compute_loss = compute_loss if self.softmax: self.bce = nn.BCELoss(reduction='none') else: self.bce = nn.BCEWithLogitsLoss(reduction='none') def forward(self, input, labels): if (type(labels) in (tuple, list)): labels = torch.tensor(labels, dtype=torch.int64, device=input.device) assert (input.dim() == 1) if (not self.one_hot): with torch.no_grad(): mask = torch.zeros_like(input) if (labels.size(0) > 0): ones = torch.ones_like(labels, dtype=torch.float32) mask.scatter_(0, labels, ones) labels = mask loss = 0 if self.compute_loss: loss = self.bce(input, labels).sum(dim=(- 1)).mean() if self.softmax: labels = labels.to(torch.int64) acc_raw = ((input > 0.5) == labels).all(dim=(- 1)).type(torch.float32) acc_instance_raw = ((input > 0.5) == labels).type(torch.float32) else: acc_raw = ((input > 0) == labels).all(dim=(- 1)).type(torch.float32) acc_instance_raw = ((input > 0) == labels).type(torch.float32) return (loss, acc_raw.mean(), acc_instance_raw.mean())
class MultitaskLossBase(nn.Module): def __init__(self): super().__init__() self._sigmoid_xent_loss = SigmoidCrossEntropy() self._multilabel_sigmoid_xent_loss = MultilabelSigmoidCrossEntropy() self._batched_xent_loss = nn.CrossEntropyLoss() def _mse_loss(self, pred, label): return (pred - label).abs() def _bce_loss(self, pred, label): return (- ((jactorch.log_sigmoid(pred) * label) + (jactorch.log_sigmoid((- pred)) * (1 - label))).mean()) def _bce_logprob_loss(self, pred, label): return ((pred * label) + ((1 - label) * jactorch.log1mexp(pred))) def _bce_prob_loss(self, pred, label): return (- ((torch.log(pred) * label) + (torch.log((1 - pred)) * (1 - label))).mean()) def _xent_loss(self, pred, label): logp = F.log_softmax(pred, dim=(- 1)) return (- logp[label].mean())
class _PointnetSAModuleBase(nn.Module): def __init__(self): super().__init__() self.npoint = None self.groupers = None self.mlps = None def forward(self, xyz: torch.Tensor, features: torch.Tensor=None) -> (torch.Tensor, torch.Tensor): "\n Parameters\n ----------\n xyz : torch.Tensor\n (B, N, 3) tensor of the xyz coordinates of the features\n features : torch.Tensor\n (B, N, C) tensor of the descriptors of the features\n\n Returns\n -------\n new_xyz : torch.Tensor\n (B, npoint, 3) tensor of the new features' xyz\n new_features : torch.Tensor\n (B, npoint, \\sum_k(mlps[k][-1])) tensor of the new_features descriptors\n " new_features_list = [] xyz_flipped = xyz.transpose(1, 2).contiguous() new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)).transpose(1, 2).contiguous() if (self.npoint is not None) else None) for i in range(len(self.groupers)): new_features = self.groupers[i](xyz, new_xyz, features) new_features = self.mlps[i](new_features) new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)]) new_features = new_features.squeeze((- 1)) new_features_list.append(new_features) return (new_xyz, torch.cat(new_features_list, dim=1))
class PointnetSAModuleMSG(_PointnetSAModuleBase): 'Pointnet set abstraction layer with multiscale grouping\n\n Parameters\n ----------\n npoint : int\n Number of features\n radii : list of float32\n list of radii to group with\n nsamples : list of int32\n Number of samples in each ball query\n mlps : list of list of int32\n Spec of the pointnet before the global max_pool for each scale\n bn : bool\n Use batchnorm\n ' def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool=True, use_xyz: bool=True, sample_uniformly: bool=False): super().__init__() assert (len(radii) == len(nsamples) == len(mlps)) self.npoint = npoint self.groupers = nn.ModuleList() self.mlps = nn.ModuleList() for i in range(len(radii)): radius = radii[i] nsample = nsamples[i] self.groupers.append((pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly) if (npoint is not None) else pointnet2_utils.GroupAll(use_xyz))) mlp_spec = mlps[i] if use_xyz: mlp_spec[0] += 3 self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
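# Constructing an MSG set-abstraction layer as parameterized above (the numbers
# are illustrative): each radius gets its own ball-query grouper and shared
# MLP, and the per-scale max-pooled features are concatenated channel-wise.
sa = PointnetSAModuleMSG(npoint=512, radii=[0.1, 0.2], nsamples=[16, 32],
                         mlps=[[0, 32, 64], [0, 64, 128]], use_xyz=True)
# With use_xyz=True each mlp's input width is bumped by 3 (grouped coordinates),
# so a raw point cloud can be passed with features=None:
# new_xyz, new_features = sa(xyz, None)   # (B, 512, 3) and (B, 64 + 128, 512)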