code
stringlengths
17
6.64M
def parse_arguments(notebook_options=None):
    """Parse the arguments for the training (or test) execution of a ReferIt3D net.

    :param notebook_options: (list) e.g., ['--max-distractors', '100'] to give/parse
        arguments from inside a jupyter notebook.
    :return: argparse.Namespace with all parsed arguments (possibly overridden by
        the json config file given via --config-file).
    """
    parser = argparse.ArgumentParser(description='ReferIt3D Nets + Ablations')

    # Non-optional arguments (single-dash flags, but required=True).
    parser.add_argument('-scannet-file', type=str, required=True, help='pkl file containing the data of Scannet as generated by running XXX')
    parser.add_argument('-referit3D-file', type=str, required=True)

    # Training and data-related arguments.
    parser.add_argument('--log-dir', type=str, help='where to save training-progress, model, etc')
    parser.add_argument('--resume-path', type=str, help='model-path to resume')
    parser.add_argument('--config-file', type=str, default=None, help='config file')
    parser.add_argument('--max-distractors', type=int, default=0, help='Maximum number of distracting objects to be drawn from a scan.')
    parser.add_argument('--max-seq-len', type=int, default=24, help='utterances with more tokens than this they will be ignored.')
    parser.add_argument('--points-per-object', type=int, default=1024, help='points sampled to make a point-cloud per object of a scan.')
    parser.add_argument('--unit-sphere-norm', type=str2bool, default=False, help='Normalize each point-cloud to be in a unit sphere.')
    parser.add_argument('--mentions-target-class-only', type=str2bool, default=True, help='If True, drop references that do not explicitly mention the target-class.')
    parser.add_argument('--min-word-freq', type=int, default=3)
    parser.add_argument('--max-test-objects', type=int, default=0)
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'evaluate'])
    parser.add_argument('--max-train-epochs', type=int, default=100, help='number of training epochs. \n[default: 100]')
    parser.add_argument('--n-workers', type=int, default=(- 1), help='number of data loading workers [default: -1 is all cores available -1.]')
    parser.add_argument('--random-seed', type=int, default=2020, help='Control pseudo-randomness (net-wise, point-cloud sampling etc.) fostering reproducibility.')
    parser.add_argument('--init-lr', type=float, default=0.0005, help='learning rate for training.')
    parser.add_argument('--patience', type=int, default=10, help='if test-acc does not improve for patience consecutiveepoch, stop training.')

    # Model architecture arguments.
    parser.add_argument('--model', type=str, default='referIt3DNet', choices=['referIt3DNet', 'directObj2Lang', 'referIt3DNetAttentive'])
    parser.add_argument('--object-latent-dim', type=int, default=128)
    parser.add_argument('--language-latent-dim', type=int, default=128)
    parser.add_argument('--word-embedding-dim', type=int, default=64)
    parser.add_argument('--graph-out-dim', type=int, default=128)
    parser.add_argument('--dgcnn-intermediate-feat-dim', nargs='+', type=int, default=[128, 128, 128, 128])
    parser.add_argument('--object-encoder', type=str, default='pnet_pp', choices=['pnet_pp', 'pnet'])
    parser.add_argument('--language-fusion', type=str, default='both', choices=['before', 'after', 'both'])
    parser.add_argument('--word-dropout', type=float, default=0.1)
    parser.add_argument('--knn', type=int, default=7, help='For DGCNN number of neighbors')

    # Auxiliary-loss weights (0 disables the corresponding loss).
    parser.add_argument('--lang-cls-alpha', type=float, default=0.5, help='if > 0 a loss for guessing the target via language only is added.')
    parser.add_argument('--obj-cls-alpha', type=float, default=0.5, help='if > 0 a loss for guessing for each segmented object its class type is added.')

    # Hardware / run-management arguments.
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device. [default: 0]')
    parser.add_argument('--n-gpus', type=int, default=1, help='number gpu devices. [default: 1]')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size per gpu. \n[default: 32]')
    parser.add_argument('--save-args', type=str2bool, default=True, help='save arguments in a json.txt')
    parser.add_argument('--experiment-tag', type=str, default=None, help='will be used to name a subdir for log-dir if given')
    parser.add_argument('--cluster-pid', type=str, default=None)

    # Multi-dataset (Sr3D/Nr3D) training arguments.
    parser.add_argument('--augment-with-sr3d', type=str, default=None, help='csv with sr3d data to augment training dataof args.referit3D-file')
    parser.add_argument('--vocab-file', type=str, default=None, help='optional, .pkl file for vocabulary (useful when working with multiple dataset and single model.')
    parser.add_argument('--fine-tune', type=str2bool, default=False, help='use if you train with dataset x and then you continue training with another dataset')
    parser.add_argument('--s-vs-n-weight', type=float, default=None, help='importance weight of sr3d vs nr3d examples [use less than 1]')

    if (notebook_options is not None):
        # Parse from the given list instead of sys.argv (jupyter-friendly).
        args = parser.parse_args(notebook_options)
    else:
        args = parser.parse_args()

    if (args.config_file is not None):
        with open(args.config_file, 'r') as fin:
            configs_dict = json.load(fin)
        # Values from the config file override command-line/defaults.
        apply_configs(args, configs_dict)

    args_string = pprint.pformat(vars(args))
    print(args_string)
    return args
def read_saved_args(config_file, override_args=None, verbose=True):
    """Recreate an argparse.Namespace from a previously saved json config file.

    :param config_file: path to the json file holding the saved arguments.
    :param override_args: dict e.g., {'gpu': '0'}; entries replace the loaded values.
    :param verbose: if True, pretty-print the resulting arguments.
    :return: argparse.Namespace populated from the file (plus any overrides).
    """
    # An empty parse gives us a bare Namespace to fill in.
    args = ArgumentParser().parse_args([])
    with open(config_file, 'r') as f_in:
        loaded = json.load(f_in)
    args.__dict__ = loaded

    if override_args is not None:
        for key, val in override_args.items():
            setattr(args, key, val)

    if verbose:
        print(pprint.pformat(vars(args)))
    return args
def apply_configs(args, config_dict):
    """Copy every (key, value) pair of config_dict onto args as attributes."""
    for key in config_dict:
        setattr(args, key, config_dict[key])
def str2bool(v):
    """Boolean values for argparse."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def create_dir(dir_path):
    """Create a directory (or nested directories) if it doesn't exist.

    Uses os.makedirs(exist_ok=True), which is atomic with respect to concurrent
    creators; the original osp.exists() + os.makedirs() pair could raise if
    another process created the directory between the check and the call.

    :param dir_path: path of the directory to create.
    :return: dir_path unchanged, for convenient chaining.
    """
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
def unpickle_data(file_name, python2_to_3=False):
    """Restore data previously saved with pickle_data().

    :param file_name: file holding the pickled data.
    :param python2_to_3: (boolean), if True, pickle happened under python2x,
        unpickling under python3x.
    :return: a generator over the un-pickled items.

    Note, about implementing the python2_to_3 see
    https://stackoverflow.com/questions/28218466/unpickling-a-python-2-object-with-python-3
    """
    # 'latin1' is the encoding that lets python3 read python2 pickles.
    kwargs = {'encoding': 'latin1'} if python2_to_3 else {}
    # A with-block (instead of the original explicit open/close) guarantees the
    # file handle is released even when the caller abandons the generator early
    # (the original close() was only reached after full exhaustion).
    with open(file_name, 'rb') as in_file:
        # First pickled object is the number of items that follow.
        size = cPickle.load(in_file, **kwargs)
        for _ in range(size):
            yield cPickle.load(in_file, **kwargs)
def read_lines(file_name):
    """Read file_name and return its lines, trailing whitespace stripped."""
    with open(file_name) as fin:
        return [line.rstrip() for line in fin]
def decode_stimulus_string(s):
    """Split a stimulus string into scene_id, instance_label, # objects,
    target object id and distractor object ids.

    :param s: the stimulus string
    """
    parts = s.split('-', maxsplit=4)
    if len(parts) == 4:
        # No distractors encoded (single-object scene).
        scene_id, instance_label, n_objects, target_id = parts
        distractors_ids = ''
    else:
        scene_id, instance_label, n_objects, target_id, distractors_ids = parts

    instance_label = instance_label.replace('_', ' ')
    n_objects = int(n_objects)
    target_id = int(target_id)
    distractors_ids = [int(i) for i in distractors_ids.split('-') if i != '']
    # Sanity: every non-target object must be a distractor.
    assert len(distractors_ids) == n_objects - 1
    return scene_id, instance_label, n_objects, target_id, distractors_ids
def objects_counter_percentile(scan_ids, all_scans, prc):
    """Return the prc-th percentile of the number of 3d objects, computed over
    the scans of all_scans whose id is also in scan_ids."""
    counts = [len(all_scans[sid].three_d_objects)
              for sid in all_scans if sid in scan_ids]
    return np.percentile(counts, prc)
def mean_color(scan_ids, all_scans):
    """Average RGB color over every point of the scans named in scan_ids.

    :param scan_ids: iterable of scan ids to include.
    :param all_scans: dict scan_id -> scan; each scan exposes a (n_points, 3)
        color array.
    :return: (1, 3) float32 numpy array holding the per-channel mean.
    """
    total_rgb = np.zeros((1, 3), dtype=np.float32)
    total_points = 0
    for sid in scan_ids:
        point_colors = all_scans[sid].color
        total_rgb += point_colors.sum(axis=0)
        total_points += len(point_colors)
    return total_rgb / total_points
def scannet_official_train_val(pre_fix, valid_views=None, verbose=True):
    """Load the official scannet-v2 train/val scan-id splits.

    :param pre_fix: directory with scannetv2_train.txt / scannetv2_val.txt.
    :param valid_views: None or list like ['00', '01']; if given, keep only
        scans whose last two characters are in this list.
    :param verbose: if True, print the split sizes.
    :return: dict with keys 'train' and 'test' mapping to sets of scan ids.
    """
    train_split = read_lines(osp.join(pre_fix, 'scannetv2_train.txt'))
    test_split = read_lines(osp.join(pre_fix, 'scannetv2_val.txt'))

    if valid_views is not None:
        train_split = [sc for sc in train_split if sc[-2:] in valid_views]
        test_split = [sc for sc in test_split if sc[-2:] in valid_views]

    if verbose:
        print('#train/test scans:', len(train_split), '/', len(test_split))

    return {'train': set(train_split), 'test': set(test_split)}
def load_scan_related_data(pre_fix, preprocessed_scannet_file, verbose=True, add_pad=True):
    """Load the preprocessed Scannet scans and derive the class-index mapping.

    :param pre_fix: directory with the official scannet split txt files.
    :param preprocessed_scannet_file: pkl (see unpickle_data) holding the scans.
    :param verbose: if True, print progress information.
    :param add_pad: if True, append a 'pad' class at the end of the mapping.
    :return: (all_scans dict scan_id -> scan, scans_split, class_to_idx).
    """
    _, all_scans = unpickle_data(preprocessed_scannet_file)
    if verbose:
        print('Loaded in RAM {} scans'.format(len(all_scans)))

    instance_labels = set()
    for scan in all_scans:
        idx = np.array([o.object_id for o in scan.three_d_objects])
        instance_labels.update([o.instance_label for o in scan.three_d_objects])
        # Object ids must form a contiguous 0..n-1 enumeration.
        assert np.all(idx == np.arange(len(idx)))

    all_scans = {scan.scan_id: scan for scan in all_scans}

    # Deterministic label -> integer mapping (sorted for reproducibility).
    class_to_idx = {label: i for i, label in enumerate(sorted(instance_labels))}
    if verbose:
        print('{} instance classes exist in these scans'.format(len(class_to_idx)))
    if add_pad:
        class_to_idx['pad'] = len(class_to_idx)

    scans_split = scannet_official_train_val(pre_fix)
    return all_scans, scans_split, class_to_idx
def load_referential_data(args, referit_csv, scans_split):
    """Load the referential (Nr3D/Sr3D) csv data, filter and annotate it.

    :param args: argparse.Namespace; uses mentions_target_class_only,
        max_seq_len and augment_with_sr3d.
    :param referit_csv: csv with the 'train' referential data; a sibling file
        with 'train' replaced by 'test' in its name is loaded as well.
    :param scans_split: dict with 'train'/'test' sets of scan ids.
    :return: pandas DataFrame with the concatenated, filtered data plus a
        boolean 'is_train' column.
    """
    referit_data_train = pd.read_csv(referit_csv)
    referit_data_test = pd.read_csv(referit_csv.replace('train', 'test'))
    referit_data = pd.concat([referit_data_train, referit_data_test], ignore_index=True, sort=False)
    print(len(referit_data))

    if args.mentions_target_class_only:
        n_original = len(referit_data)
        referit_data = referit_data[referit_data['mentions_target_class']]
        referit_data.reset_index(drop=True, inplace=True)
        print('Dropping utterances without explicit mention to the target class {}->{}'.format(n_original, len(referit_data)))

    # Keep only the columns used downstream. 'anchor_ids' exists only for Sr3D,
    # so fall back to the shorter list when it is absent. Narrowed from the
    # original bare `except:` to KeyError so unrelated failures are not masked.
    try:
        referit_data = referit_data[['tokens', 'instance_type', 'scan_id', 'dataset', 'target_id', 'utterance', 'stimulus_id', 'anchor_ids']]
    except KeyError:
        referit_data = referit_data[['tokens', 'instance_type', 'scan_id', 'dataset', 'target_id', 'utterance', 'stimulus_id']]

    # Tokens are stored as stringified lists in the csv.
    referit_data.tokens = referit_data['tokens'].apply(literal_eval)
    is_train = referit_data.scan_id.apply(lambda x: x in scans_split['train'])
    referit_data['is_train'] = is_train

    train_token_lens = referit_data.tokens[is_train].apply(lambda x: len(x))
    print('{}-th percentile of token length for remaining (training) data is: {:.1f}'.format(95, np.percentile(train_token_lens, 95)))

    n_original = len(referit_data)
    referit_data = referit_data[referit_data.tokens.apply(lambda x: len(x) <= args.max_seq_len)]
    referit_data.reset_index(drop=True, inplace=True)
    print('Dropping utterances with more than {} tokens, {}->{}'.format(args.max_seq_len, n_original, len(referit_data)))

    if args.augment_with_sr3d is not None:
        print('Adding Sr3D as augmentation.')
        sr3d = pd.read_csv(args.augment_with_sr3d)
        sr3d.tokens = sr3d['tokens'].apply(literal_eval)
        is_train = sr3d.scan_id.apply(lambda x: x in scans_split['train'])
        sr3d['is_train'] = is_train
        sr3d = sr3d[is_train]  # augmentation only touches the training portion
        sr3d = sr3d[referit_data.columns]
        print('Dataset-size before augmentation:', len(referit_data))
        referit_data = pd.concat([referit_data, sr3d], axis=0)
        referit_data.reset_index(inplace=True, drop=True)
        print('Dataset-size after augmentation:', len(referit_data))

    # Report the chance-level accuracy over the test utterances.
    context_size = referit_data[~referit_data.is_train].stimulus_id.apply(lambda x: decode_stimulus_string(x)[2])
    print('(mean) Random guessing among target-class test objects {:.4f}'.format((1 / context_size).mean()))

    return referit_data
def compute_auxiliary_data(referit_data, all_scans, args):
    """Given a train-split compute useful quantities like mean-rgb, a word-vocabulary.

    :param referit_data: pandas Dataframe, as returned from load_referential_data()
    :param all_scans: dict scan_id -> scan, or None (then only the vocab is built).
    :param args: argparse.Namespace; uses vocab_file and min_word_freq.
    :return: vocab if all_scans is None, else (mean_rgb, vocab).
    """
    if args.vocab_file:
        vocab = Vocabulary.load(args.vocab_file)
        print('Using external, provided vocabulary with {} words.'.format(len(vocab)))
    else:
        train_tokens = referit_data[referit_data.is_train]['tokens']
        vocab = build_vocab([x for x in train_tokens], args.min_word_freq)
        print('Length of vocabulary, with min_word_freq={} is {}'.format(args.min_word_freq, len(vocab)))

    if all_scans is None:
        return vocab

    training_scan_ids = set(referit_data[referit_data['is_train']]['scan_id'])
    print('{} training scans will be used.'.format(len(training_scan_ids)))
    mean_rgb = mean_color(training_scan_ids, all_scans)

    # Report object-count percentiles (informative for --max-distractors etc.).
    prc = 90
    obj_cnt = objects_counter_percentile(training_scan_ids, all_scans, prc)
    print('{}-th percentile of number of objects in the (training) scans is: {:.2f}'.format(prc, obj_cnt))

    prc = 99
    testing_scan_ids = set(referit_data[~referit_data['is_train']]['scan_id'])
    obj_cnt = objects_counter_percentile(testing_scan_ids, all_scans, prc)
    print('{}-th percentile of number of objects in the (testing) scans is: {:.2f}'.format(prc, obj_cnt))

    return mean_rgb, vocab
def trim_scans_per_referit3d_data(referit_data, scans):
    """Delete, in place, every scan that is never referenced by referit_data.

    :param referit_data: pandas DataFrame with a scan_id column.
    :param scans: dict scan_id -> scan; modified in place.
    :return: the (trimmed) scans dict.
    """
    referenced = set(referit_data.scan_id.unique())
    unused = [scan_id for scan_id in scans if scan_id not in referenced]
    for scan_id in unused:
        del scans[scan_id]
    print('Dropped {} scans to reduce mem-foot-print.'.format(len(unused)))
    return scans
class Vocabulary(object):
    """Simple vocabulary wrapper mapping words to integer ids and back."""

    def __init__(self, special_symbols=None):
        self.word2idx = {}  # word -> int id
        self.idx2word = {}  # int id -> word
        self.idx = 0        # next id to assign
        self.special_symbols = None
        self.intialize_special_symbols(special_symbols)

    def intialize_special_symbols(self, special_symbols):
        # (Method name kept verbatim, including its historical typo, so that
        # existing callers keep working.)
        if special_symbols is None:
            self.special_symbols = ['<pad>', '<sos>', '<eos>', '<unk>']
        else:
            self.special_symbols = special_symbols
        for symbol in self.special_symbols:
            self.add_word(symbol)
        # Expose each special symbol's id as an attribute: self.pad, self.sos, ...
        for symbol in self.special_symbols:
            attr_name = symbol.replace('<', '').replace('>', '')
            setattr(self, attr_name, self(symbol))

    def n_special(self):
        return len(self.special_symbols)

    def add_word(self, word):
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1

    def __call__(self, word):
        # Unknown words map to the <unk> id.
        if word in self.word2idx:
            return self.word2idx[word]
        return self.word2idx['<unk>']

    def __len__(self):
        return len(self.word2idx)

    def encode(self, text, max_len=None, add_begin_end=True):
        """Encode a token list into ids, optionally truncating at max_len and
        wrapping with <sos>/<eos>.

        :param text: (list) of tokens ['a', 'nice', 'sunset']
        :param max_len: truncate/pad target length (on the raw tokens).
        :param add_begin_end: if True, surround with <sos>/<eos>.
        :return: (list) of encoded tokens.
        """
        encoded = [self(token) for token in text]
        if max_len is not None:
            encoded = encoded[:max_len]
        if add_begin_end:
            encoded = [self('<sos>')] + encoded + [self('<eos>')]
        if max_len is not None:
            # NOTE(review): padding amount is computed from len(text), not from
            # len(encoded) — preserved exactly as in the original implementation.
            encoded += [self('<pad>')] * (max_len - len(text))
        return encoded

    def decode(self, tokens):
        return [self.idx2word[token] for token in tokens]

    def decode_print(self, tokens):
        # Render to text, dropping structural symbols.
        skip = set(self.word2idx[s] for s in ['<sos>', '<eos>', '<pad>'])
        return ' '.join(self.idx2word[token] for token in tokens if token not in skip)

    def __iter__(self):
        return iter(self.word2idx)

    def save(self, file_name):
        """Save as a .pkl the current Vocabulary instance.
        :param file_name: where to save
        :return: None
        """
        with open(file_name, mode='wb') as f:
            pickle.dump(self, f, protocol=2)

    @staticmethod
    def load(file_name):
        """Load a previously saved Vocabulary instance.
        :param file_name: where it was saved
        :return: Vocabulary instance.
        """
        with open(file_name, 'rb') as f:
            return pickle.load(f)
def build_vocab(token_list, min_word_freq):
    """Build a simple vocabulary wrapper.

    :param token_list: iterable of token lists (one list per utterance).
    :param min_word_freq: words occurring fewer times than this are dropped.
    :return: Vocabulary over the frequent-enough words, in first-seen order.
    """
    counter = Counter()
    for tokens in token_list:
        counter.update(tokens)
    # Counter preserves first-insertion order, so word ids are deterministic.
    # (The original enumerated the words here but never used the index.)
    frequent_words = [word for word, cnt in counter.items() if cnt >= min_word_freq]
    vocab = Vocabulary()
    for word in frequent_words:
        vocab.add_word(word)
    return vocab
def create_bare_domain() -> FunctionDomain:
    """Create the bare 'Left' FunctionDomain: its three base object types plus
    the integer comparison predicates."""
    domain = FunctionDomain('Left')
    for type_name in ('Object', 'Object_Set', 'Action'):
        domain.define_type(ObjectType(type_name))
    # Built-in INT64 x INT64 -> BOOL comparisons.
    for predicate_name in ('equal', 'greater_than', 'less_than'):
        domain.define_function(Function(predicate_name, FunctionTyping[BOOL](INT64, INT64)))
    return domain
def create_default_parser(domain: FunctionDomain) -> NCGeneralizedFOLPythonParser:
    """Build the generalized-FOL python parser, with in-place definition of
    unknown functions/types enabled."""
    return NCGeneralizedFOLPythonParser(
        domain,
        inplace_definition=True,
        inplace_polymorphic_function=True,
        inplace_definition_type=True,
    )
def create_domain_from_parsing(codes: Dict[str, List[str]]) -> FunctionDomain:
    """Create a FunctionDomain by parsing every program in `codes`.

    Parsing with inplace definitions enabled registers any new functions/types
    the programs mention onto the (initially bare) domain as a side effect.

    :param codes: mapping prompt -> program string or list of program strings.
    :return: the populated FunctionDomain.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    # NOTE: the original loop variable shadowed the `codes` parameter; renamed
    # for clarity (behavior unchanged — iteration had already captured the dict).
    for prompt, prompt_codes in jacinle.tqdm_gofor(codes, desc='Creating domain from parsings'):
        if isinstance(prompt_codes, str):
            prompt_codes = [prompt_codes]
        for code in prompt_codes:
            try:
                _ = parser.parse_expression(code)
            except Exception as e:
                # Best-effort: one malformed program must not abort domain creation.
                print(e)
                continue
    return domain
def read_concepts_v1(domain: FunctionDomain) -> Tuple[List[str], List[str], List[str]]:
    """Partition the domain's functions into attribute / relational /
    multi-relational concepts, based on the '_Object...' suffix of their
    auto-generated names.

    :return: three sorted lists of function names (arity 1, 2 and 3).
    """
    attribute_concepts = []
    relational_concepts = []
    multi_relational_concepts = []
    for name in domain.functions.keys():
        if '_Object_Object_Object' in name:
            multi_relational_concepts.append(name)
        elif '_Object_Object' in name:
            relational_concepts.append(name)
        elif '_Object' in name:
            attribute_concepts.append(name)
        # Names without an '_Object' suffix are not concepts; skip them.
    return (sorted(attribute_concepts),
            sorted(relational_concepts),
            sorted(multi_relational_concepts))
def get_arity(function: Function) -> Optional[int]:
    """Return the arity of a boolean predicate whose arguments are all of type
    Object / Object_Set / Action; None if the function is not such a predicate."""
    ftype = function.ftype
    if ftype.return_type != BOOL:
        return None
    allowed = ('Object', 'Object_Set', 'Action')
    if all(arg_type.typename in allowed for arg_type in ftype.argument_types):
        return len(ftype.argument_types)
    return None
def read_concepts_v2(domain: FunctionDomain) -> Tuple[List[str], List[str], List[str]]:
    """Partition the domain's boolean predicates by their true arity (1..3),
    computed from the typed signature via get_arity (cf. read_concepts_v1,
    which relies on name suffixes instead)."""
    by_arity = {1: [], 2: [], 3: []}
    for name, function in domain.functions.items():
        arity = get_arity(function)
        if arity in by_arity:  # None and out-of-range arities are skipped
            by_arity[arity].append(name)
    return by_arity[1], by_arity[2], by_arity[3]
def read_description_categories(domain: FunctionDomain) -> List[str]:
    """Return the names of all domain types that act as description categories,
    i.e. everything except the built-in Object / Object_Set / Action types.

    (Return annotation fixed: a plain list is returned, not Tuple[List[str]].)
    """
    output = []
    for name, t in domain.types.items():
        if t.typename not in ('Object', 'Object_Set', 'Action'):
            output.append(name)
    return output
def make_domain(parsed_test_path: str) -> FunctionDomain:
    """Load pre-parsed programs from a pickle and build a FunctionDomain from them."""
    parsed_codes = io.load_pkl(parsed_test_path)
    return create_domain_from_parsing(parsed_codes)
class ExecutionTraceGetter(object):
    """Small handle giving deferred access to the execution trace recorded by
    NCGeneralizedFOLExecutor.record_execution_trace()."""

    def __init__(self, trace_obj):
        # The list is shared with the executor and filled in as execution runs.
        self.trace_obj = trace_obj

    def get(self) -> List[Tuple[E.Expression, TensorValue]]:
        """Return the recorded (expression, value) pairs."""
        return self.trace_obj
def _get_self_mask(m): self_mask = torch.eye(m.size((- 1)), dtype=m.dtype, device=m.device) return self_mask
def _do_apply_self_mask(m):
    """Suppress the diagonal of a pairwise-relation score matrix by writing a
    large negative value (-10) there; no-op when disabled via g_options."""
    if not g_options.use_self_mask:
        return m
    self_mask = _get_self_mask(m)
    off_diagonal = m * (1 - self_mask)
    return off_diagonal + (-10) * self_mask
class NCGeneralizedFOLExecutor(FunctionDomainExecutor):
    # Executes generalized first-order-logic expressions against a neural
    # grounding module (self.grounding), producing soft TensorValue scores.

    def __init__(self, domain: FunctionDomain, parser: Optional[ParserBase]=None, allow_shift_grounding=False):
        super().__init__(domain, parser)
        self.allow_shift_grounding = allow_shift_grounding
        self.variable_stack = dict()
        self.view_stack = list()
        self._record_execution_trace = False
        self._execution_trace = list()

    variable_stack: Dict[(str, Variable)]
    'A variable stack, used to store the variables that are used in the current scope.'
    view_stack: List[TensorValue]
    'A view stack, used to store the variables that are used for viewpoint anchoring.'

    # Margin/temperature for the soft comparison surrogates below.
    _count_margin = 0.25
    _count_tau = 0.25

    @property
    def training(self):
        # Delegates to the grounding network's train/eval flag.
        return self.grounding.training

    def _count(self, x: TensorValue) -> torch.Tensor:
        # Count over the last axis: differentiable (sigmoid-sum) in training,
        # hard threshold (>0 on the logits) in eval.
        # NOTE(review): returns a raw tensor (both branches), which the 'count'
        # handler below wraps into a TensorValue; annotation adjusted accordingly.
        if self.training:
            return torch.sigmoid(x.tensor).sum(dim=(- 1))
        else:
            return (x.tensor > 0).sum(dim=(- 1)).float()

    def greater_than(self, x: TensorValue, y: TensorValue) -> TensorValue:
        if self.training:
            # Smooth surrogate logit scaled by the temperature _count_tau.
            rv = ((((x.tensor - y.tensor) - 1) + (2 * self._count_margin)) / self._count_tau)
        else:
            # Hard decision mapped to logits in {-10, +10}.
            rv = ((- 10) + (20 * (x.tensor > y.tensor).float()))
        return TensorValue(BOOL, [], rv, quantized=False)

    def less_than(self, x: TensorValue, y: TensorValue) -> TensorValue:
        # x < y  <=>  y > x.
        return self.greater_than(y, x)

    def equal(self, x: TensorValue, y: TensorValue) -> TensorValue:
        if self.training:
            # Positive logit when |x - y| is within the margin.
            rv = ((((2 * self._count_margin) - (x.tensor - y.tensor).abs()) / (2 * self._count_margin)) / self._count_tau)
        else:
            rv = ((- 10) + (20 * (x.tensor == y.tensor).float()))
        return TensorValue(BOOL, [], rv, quantized=False)

    @contextlib.contextmanager
    def record_execution_trace(self):
        # Records every (expression, value) pair executed within the scope;
        # yields an ExecutionTraceGetter sharing the trace list.
        self._record_execution_trace = True
        self._execution_trace = list()
        (yield ExecutionTraceGetter(self._execution_trace))
        self._record_execution_trace = False
        self._execution_trace = None

    def _execute(self, expr: E.Expression) -> TensorValue:
        rv = self._execute_inner(expr)
        if self._record_execution_trace:
            self._execution_trace.append((expr, rv))
        return rv

    def _execute_inner(self, expr: E.Expression) -> TensorValue:
        # Recursive interpreter: dispatch on the expression's syntactic type.
        if isinstance(expr, E.BoolExpression):
            if (expr.bool_op is E.BoolOpType.AND):
                # Special case AND(view(...), body): execute the view expression
                # first and keep its result on view_stack while executing body,
                # so binary relations inside body become viewpoint-anchored.
                if (isinstance(expr.arguments[0], E.GeneralizedQuantificationExpression) and (expr.arguments[0].quantification_op == 'view')):
                    assert (len(expr.arguments) == 2)
                    obj_anchor = self._execute(expr.arguments[0])
                    self.view_stack.append(obj_anchor)
                    try:
                        return self._execute(expr.arguments[1])
                    finally:
                        self.view_stack.pop()
                # Soft AND: element-wise minimum over broadcast-aligned args.
                args = [self._execute(arg) for arg in expr.arguments]
                expanded_args = expand_argument_values(args)
                expanded_tensors = [a.tensor for a in expanded_args]
                result = torch.stack(expanded_tensors, dim=(- 1)).amin(dim=(- 1))
                return TensorValue(expanded_args[0].dtype, expanded_args[0].batch_variables, result, quantized=False)
            elif (expr.bool_op is E.BoolOpType.OR):
                # Soft OR: element-wise maximum.
                args = [self._execute(arg) for arg in expr.arguments]
                expanded_args = expand_argument_values(args)
                expanded_tensors = [a.tensor for a in expanded_args]
                result = torch.stack(expanded_tensors, dim=(- 1)).amax(dim=(- 1))
                return TensorValue(expanded_args[0].dtype, expanded_args[0].batch_variables, result, quantized=False)
            elif (expr.bool_op is E.BoolOpType.NOT):
                args = [self._execute(arg) for arg in expr.arguments]
                assert (len(args) == 1)
                result = args[0].tensor
                # Soft NOT: negate the logit.
                result = (torch.zeros_like(result) - result)
                return TensorValue(args[0].dtype, args[0].batch_variables, result, quantized=False)
            # NOTE(review): other bool ops fall through and return None.
        elif isinstance(expr, E.FunctionApplicationExpression):
            if (expr.function.name in self.function_implementations):
                # Built-in implementation (e.g. equal/greater_than/less_than).
                func = self.function_implementations[expr.function.name]
                args = [self._execute(arg) for arg in expr.arguments]
                return func(*args)
            else:
                # Neural concept: look the scores up in the grounding module.
                args = [self._execute(arg) for arg in expr.arguments]
                if (len(args) == 1):
                    grounding_tensor = self.grounding.compute_similarity('attribute', expr.function.name)
                elif (len(args) == 2):
                    if (len(self.view_stack) > 0):
                        # View-anchored binary relation: contract the ternary
                        # relation tensor with the anchor's distribution.
                        obj_anchor = self.view_stack[(- 1)]
                        grounding_tensor = self.grounding.compute_similarity('multi_relation', expr.function.name)
                        grounding_tensor = torch.einsum('ijk,i->jk', grounding_tensor, obj_anchor.tensor)
                    else:
                        grounding_tensor = self.grounding.compute_similarity('relation', expr.function.name)
                        # Objects never relate to themselves.
                        grounding_tensor = _do_apply_self_mask(grounding_tensor)
                else:
                    assert (len(args) == 3)
                    grounding_tensor = self.grounding.compute_similarity('multi_relation', expr.function.name)
                # 'shift' grounding: a binary relation grounded as a 1-d tensor.
                if (self.allow_shift_grounding and (len(args) == 2) and (len(grounding_tensor.size()) == 1)):
                    shift = True
                else:
                    shift = False
                # Free variables stay as batch axes; concrete (TensorValue)
                # arguments are contracted into the grounding tensor.
                batch_variable_names = list()
                dims_to_squeeze = list()
                for (i, arg) in enumerate(args):
                    if isinstance(arg, Variable):
                        batch_variable_names.append(arg.name)
                    else:
                        assert isinstance(arg, TensorValue)
                        if (not shift):
                            grounding_tensor = (grounding_tensor * jactorch.add_dim_as_except(arg.tensor, grounding_tensor, i)).sum(i, keepdim=True)
                            dims_to_squeeze.append(i)
                # Squeeze from the back so earlier indices stay valid.
                for dim in reversed(dims_to_squeeze):
                    grounding_tensor = grounding_tensor.squeeze(dim)
                return TensorValue(BOOL, batch_variable_names, grounding_tensor, quantized=False)
        elif isinstance(expr, E.VariableExpression):
            # Variables evaluate to their stack entry (the Variable object itself).
            assert (expr.variable.name in self.variable_stack)
            return self.variable_stack[expr.variable.name]
        elif isinstance(expr, E.ConstantExpression):
            return expr.value
        elif isinstance(expr, E.QuantificationExpression):
            # forall/exists: push the variable, execute the body, then reduce
            # (min/max) over the quantified batch axis.
            assert (expr.variable.name not in self.variable_stack)
            self.variable_stack[expr.variable.name] = expr.variable
            try:
                value = self._execute(expr.expression)
                variable_index = value.batch_variables.index(expr.variable.name)
                if (expr.quantification_op is E.QuantificationOpType.FORALL):
                    return TensorValue(value.dtype, (value.batch_variables[:variable_index] + value.batch_variables[(variable_index + 1):]), value.tensor.amin(variable_index), quantized=False)
                elif (expr.quantification_op is E.QuantificationOpType.EXISTS):
                    return TensorValue(value.dtype, (value.batch_variables[:variable_index] + value.batch_variables[(variable_index + 1):]), value.tensor.amax(variable_index), quantized=False)
                else:
                    raise ValueError(f'Unknown quantification op {expr.quantification_op}.')
            finally:
                del self.variable_stack[expr.variable.name]
        elif isinstance(expr, E.GeneralizedQuantificationExpression):
            if (expr.quantification_op == 'iota'):
                # iota: "the unique x such that ..." — softmax over the
                # quantified axis (optional via g_options.use_softmax_iota).
                assert (expr.variable.name not in self.variable_stack)
                self.variable_stack[expr.variable.name] = expr.variable
                try:
                    value = self._execute(expr.expression)
                    assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.'
                    if (not g_options.use_softmax_iota):
                        return value
                    variable_index = value.batch_variables.index(expr.variable.name)
                    return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False)
                finally:
                    del self.variable_stack[expr.variable.name]
            elif (expr.quantification_op == 'point'):
                # point: distribution over objects (always softmaxed).
                assert (expr.variable.name not in self.variable_stack)
                self.variable_stack[expr.variable.name] = expr.variable
                try:
                    value = self._execute(expr.expression)
                    assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.'
                    variable_index = value.batch_variables.index(expr.variable.name)
                    return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False)
                finally:
                    del self.variable_stack[expr.variable.name]
            elif (expr.quantification_op == 'view'):
                # view: distribution over anchor objects; consumed by the
                # AND(view(...), body) special case above.
                assert (expr.variable.name not in self.variable_stack)
                self.variable_stack[expr.variable.name] = expr.variable
                try:
                    value = self._execute(expr.expression)
                    assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.'
                    variable_index = value.batch_variables.index(expr.variable.name)
                    return TensorValue(expr.return_type, value.batch_variables, F.softmax(value.tensor, dim=variable_index), quantized=False)
                finally:
                    del self.variable_stack[expr.variable.name]
            elif (expr.quantification_op == 'describe'):
                expr: E.GeneralizedQuantificationExpression
                assert isinstance(expr.expression, E.FunctionApplicationExpression)
                if (expr.variable.dtype.typename == 'Object'):
                    # describe over an Object variable: the body must be a binary
                    # application of the variable to a concrete Object/Action.
                    assert ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action']))
                    value = self._execute(expr.expression.arguments[1])
                    assert (len(value.batch_variables) == 1), f'Variable {expr.variable.name} is not the only batch variable in {value.batch_variables}.'
                    # NOTE(review): category hard-coded to 'Shape' in this branch.
                    answer = self.grounding.compute_description('attribute', 'Shape')
                    # Expected description = object distribution @ description matrix.
                    answer = (value.tensor @ answer)
                    return TensorValue(expr.return_type, [], answer, quantized=False)
                elif (expr.variable.dtype.typename == 'Action'):
                    raise NotImplementedError('Describe not implemented for actions.')
                else:
                    # describe over a category-typed variable (e.g. Color): the
                    # category is taken from the variable's own dtype.
                    assert ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action']))
                    value = self._execute(expr.expression.arguments[1])
                    assert (len(value.batch_variables) == 1), f'Variable {expr.variable.name} is not the only batch variable in {value.batch_variables}.'
                    answer = self.grounding.compute_description('attribute', expr.variable.dtype.typename)
                    answer = (value.tensor @ answer)
                    return TensorValue(expr.return_type, [], answer, quantized=False)
            elif (expr.quantification_op == 'execute'):
                # execute: apply a (grounded) action to two executed objects.
                assert (isinstance(expr.expression, E.FunctionApplicationExpression) and (len(expr.expression.arguments) == 3))
                assert (isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name))
                object_1 = self._execute(expr.expression.arguments[1])
                object_2 = self._execute(expr.expression.arguments[2])
                return self.grounding.compute_action(object_1, object_2, expr.expression.function.name)
            elif (expr.quantification_op == 'count'):
                # count: number of objects satisfying the body (see _count).
                assert (expr.variable.name not in self.variable_stack)
                self.variable_stack[expr.variable.name] = expr.variable
                try:
                    value = self._execute(expr.expression)
                    assert (expr.variable.name in value.batch_variables), f'Variable {expr.variable.name} is not in {value.batch_variables}.'
                    result = self._count(value)
                    return TensorValue(INT64, value.batch_variables, result, quantized=False)
                finally:
                    del self.variable_stack[expr.variable.name]
            # NOTE(review): unknown quantification ops fall through, returning None.
        else:
            raise ValueError(f'Unknown expression type {type(expr)}.')
def expand_argument_values(argument_values: Sequence[TensorValue]) -> List[TensorValue]:
    """Expand a list of argument values to the same batch size.

    Args:
        argument_values: a list of argument values.

    Returns:
        the result list of argument values. All return values will have the same batch size.
    """
    # If any tensor still carries an unresolved ('??') slot variable, expansion
    # is not meaningful yet — return the inputs unchanged (shallow copy).
    has_slot_var = False
    for arg in argument_values:
        if isinstance(arg, TensorValue):
            for var in arg.batch_variables:
                if (var == '??'):
                    has_slot_var = True
                    break
    if has_slot_var:
        return list(argument_values)
    if (len(argument_values) < 2):
        # Nothing to align with fewer than two arguments.
        return list(argument_values)
    argument_values = list(argument_values)
    # Collect the union of batch variables (and their sizes) in first-seen order.
    batch_variables = list()
    batch_sizes = list()
    for arg in argument_values:
        if isinstance(arg, TensorValue):
            for var in arg.batch_variables:
                if (var not in batch_variables):
                    batch_variables.append(var)
                    batch_sizes.append(arg.get_variable_size(var))
        else:
            # Non-tensor arguments must be plain indices or slices.
            assert isinstance(arg, (int, slice)), arg
    # Expand every tensor to the full variable set, gathering masks along the way.
    masks = list()
    for (i, arg) in enumerate(argument_values):
        if isinstance(arg, TensorValue):
            argument_values[i] = arg.expand(batch_variables, batch_sizes)
            if (argument_values[i].tensor_mask is not None):
                masks.append(argument_values[i].tensor_mask)
    if (len(masks) > 0):
        # Intersect (min) all masks and write the combined mask back onto every
        # tensor argument in place.
        final_mask = torch.stack(masks, dim=(- 1)).amin(dim=(- 1))
        for arg in argument_values:
            if isinstance(arg, TensorValue):
                arg.tensor_mask = final_mask
                arg._mask_certified_flag = True
    return argument_values
class NCGeneralizedFOLPythonParser(FOLPythonParser):
    # Parser for the generalized FOL surface language: extends the base parser
    # with extra quantifier-like constructs (all/iota/describe/execute/point/
    # count/view) and with in-place polymorphic function definition.

    def _is_quantification_expression_name(self, name: str) -> bool:
        # Names parsed as quantification expressions rather than plain calls.
        return (name in ['exists', 'forall', 'all', 'iota', 'describe', 'execute', 'point', 'count', 'view'])

    def _parse_quantification_expression_inner(self, function_name: str, var: Variable, body: ast.Call, counting_quantifier: Optional[int]=None) -> ValueOutputExpression:
        """Parse one quantifier application over variable `var` with AST `body`.

        Args:
            function_name: one of the names accepted by
                _is_quantification_expression_name.
            var: the quantifier's bound variable.
            body: the un-parsed AST of the quantifier body (must parse to BOOL).
            counting_quantifier: optional integer N for counting forms; only
                legal for 'all'/'iota'.

        Returns:
            the parsed expression; return_type depends on the quantifier kind.

        Raises:
            ValueError: if the body is not boolean or the name is unknown.
        """
        ctx = get_expression_definition_context()
        if (function_name in ['exists', 'forall']):
            assert (var.dtype.typename in ['Object', 'Action']), f'Quantification variable must be of type Object or Action, got {var.dtype}.'
            # Standard quantifiers are delegated to the base class; only the
            # boolean-body constraint is enforced here.
            rv = super()._parse_quantification_expression_inner(function_name, var, body)
            if (rv.expression.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {rv.expression.return_type}.')
            return rv
        elif (function_name in ['all', 'iota']):
            if (counting_quantifier is not None):
                # Counting forms are encoded as a (name, N) tuple; note this
                # also makes the `function_name == 'iota'` checks below False.
                function_name = (function_name, counting_quantifier)
            assert (var.dtype.typename in ['Object', 'Action']), f'Quantification variable must be of type Object or Action, got {var.dtype}.'
            if (var.dtype.typename == 'Object'):
                # Plain iota denotes a single object; other forms denote a set.
                if ((function_name == 'iota') and (counting_quantifier is None)):
                    return_type = self.domain.types['Object']
                else:
                    return_type = self.domain.types['ObjectSet']
            elif (var.dtype.typename == 'Action'):
                if ((function_name == 'iota') and (counting_quantifier is None)):
                    return_type = self.domain.types['Action']
                else:
                    raise NotImplementedError('Does not support ActionSet')
            else:
                raise TypeError(f'Unknown type name: {var.dtype.typename}.')
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=return_type)
        elif (function_name == 'describe'):
            # describe(x: body) -> the described entity itself (type of var).
            assert (counting_quantifier is None), 'Counting quantifier cannot be specified for describe().'
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype)
        elif (function_name == 'count'):
            # count(x: body) -> integer count over Object bindings.
            assert (counting_quantifier is None), 'Counting quantifier cannot be specified for count().'
            assert (var.dtype.typename == 'Object'), f'Counting variable must be of type Object, got {var.dtype}.'
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=INT64)
        elif (function_name == 'execute'):
            # execute(a: body) -> boolean result of executing the action binding.
            assert (counting_quantifier is None), 'Counting quantifier cannot be specified for execute().'
            assert (var.dtype.typename == 'Action'), f'Execute variable must be of type Action, got {var.dtype}.'
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=BOOL)
        elif (function_name == 'point'):
            # point(x: body) -> the pointed-at object (type of var).
            assert (counting_quantifier is None), 'Counting quantifier cannot be specified for point().'
            assert (var.dtype.typename == 'Object'), f'Point variable must be of type Object, got {var.dtype}.'
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype)
        elif (function_name == 'view'):
            # view(x: body) -> the viewed object (type of var).
            assert (counting_quantifier is None), 'Counting quantifier cannot be specified for view().'
            assert (var.dtype.typename == 'Object'), f'View variable must be of type Object, got {var.dtype}.'
            with ctx.with_variables(var):
                body = self._parse_expression_inner(body)
            if (body.return_type != BOOL):
                raise ValueError(f'Quantification expression must return a boolean, got {body.return_type}.')
            return GeneralizedQuantificationExpression(function_name, var, body, return_type=var.dtype)
        else:
            raise ValueError(f'Unknown quantification expression name: {function_name}.')

    def _parse_function_application(self, function_name: str, expression: ast.Call):
        # query(expr) is a transparent wrapper: parse and return its argument.
        if (function_name == 'query'):
            assert (len(expression.args) == 1), f'query() takes exactly one argument, got {len(expression.args)}: {ast.dump(expression)}'
            return self._parse_expression_inner(expression.args[0])
        else:
            return self._parse_function_application_simple(function_name, expression)

    def _parse_function_application_simple(self, function_name: str, expression: ast.Call) -> ValueOutputExpression:
        """Parse a plain function application, optionally defining unknown
        functions in-place (polymorphic on the argument object types).

        Raises:
            KeyError: if the function is unknown and in-place definition is off.
        """
        ctx = get_expression_definition_context()
        parsed_args = [self._parse_expression_inner(arg) for arg in expression.args]
        function = None
        if (function_name not in ctx.domain.functions):
            # Built-in boolean connectives are handled without a domain lookup.
            if (function_name == 'and_'):
                return AndExpression(*parsed_args)
            elif (function_name == 'or_'):
                return OrExpression(*parsed_args)
            if (self.inplace_definition or self.inplace_polymorphic_function):
                assert self.inplace_polymorphic_function
                for arg in parsed_args:
                    if (not isinstance(arg.return_type, ObjectType)):
                        raise ValueError(f'In-place polymorphic function definition requires all arguments to be object-typed, got {arg.return_type}.')
                if self.inplace_polymorphic_function:
                    # Mangle the name with argument typenames, e.g. f_Object_Object.
                    function_name = ((function_name + '_') + '_'.join([arg.return_type.typename for arg in parsed_args]))
                    if (function_name in ctx.domain.functions):
                        function = ctx.domain.functions[function_name]
                    elif self.inplace_definition:
                        # Define a fresh boolean-valued function on the fly.
                        function = Function(function_name, FunctionType(get_types(parsed_args), BOOL))
                        ctx.domain.define_function(function)
                    else:
                        raise KeyError(f'Function {function_name} is not defined in the domain.')
            else:
                raise KeyError(f'Function {function_name} is not defined in the domain.')
        else:
            function = ctx.domain.functions[function_name]
        return FunctionApplicationExpression(function, parsed_args)
class LeftModel(nn.Module):
    # End-to-end LEFT model: a scene-graph feature backbone, per-arity concept
    # embeddings, an FOL parser/executor for reasoning, and task losses.

    @staticmethod
    @def_configs_func
    def _def_configs():
        # Default hyper-parameters. sg_dims / vse_hidden_dims are indexed by
        # concept arity (index 0 is unused).
        configs.model.domain = 'referit3d'
        configs.model.scene_graph = '3d'
        configs.model.concept_embedding = 'vse'
        configs.model.sg_dims = [None, 128, 128, 128]
        configs.model.vse_hidden_dims = [None, 128, 128, (128 * 3)]
        configs.model.output_dim = 128
        configs.model.use_predefined_ccg = False
        configs.train.refexp_add_supervision = True
        configs.train.attrcls_add_supervision = False
        configs.train.concept_add_supervision = False
        configs.train.weight_decay = 0
        return configs

    def __init__(self, domain, output_vocab: Optional[Vocab]=None):
        """Build the model for `domain`; `output_vocab`, when given, is the
        answer vocabulary used by the QA loss and description embeddings."""
        super().__init__()
        self._def_configs()
        self.domain = domain
        # Concept inventories read from the domain, grouped by arity
        # (attributes, binary relations, ternary relations).
        (self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts) = self.extract_concepts(self.domain)
        self.attribute_description_categories = self.extract_description_categories(self.domain)
        self.output_vocab = output_vocab
        self.description_vocab_size = (len(self.output_vocab) if (self.output_vocab is not None) else None)
        self.use_resnet = False
        # Scene-graph backbone: 2d images, 3d point clouds, or skeletons.
        if (configs.model.scene_graph == '2d'):
            import left.nn.scene_graph.scene_graph_2d as sng
            self.scene_graph = sng.SceneGraph2D(256, configs.model.sg_dims, 16)
            import jactorch.models.vision.resnet as resnet
            self.resnet = resnet.resnet34(pretrained=True, incl_gap=False, num_classes=None)
            # Drop the last ResNet stage; 2d features come from the earlier layers.
            self.resnet.layer4 = jacnn.Identity()
            self.use_resnet = True
        elif (configs.model.scene_graph == '3d'):
            import left.nn.scene_graph.scene_graph_3d as sng
            self.scene_graph = sng.SceneGraph3D(configs.model.output_dim, len(self.attribute_concepts))
        elif (configs.model.scene_graph == 'skeleton'):
            import left.nn.scene_graph.scene_graph_skeleton as sng
            self.scene_graph = sng.SceneGraphSkeleton(len(self.attribute_concepts), self.description_vocab_size)
        elif (configs.model.scene_graph is None):
            self.scene_graph = None
        else:
            raise ValueError(f'Unknown scene graph type: {configs.model.scene_graph}.')
        # Concept-embedding backend + matching grounding implementation.
        if (configs.model.concept_embedding == 'vse'):
            self.attribute_embedding = NCVSEConceptEmbedding()
            self.relation_embedding = NCVSEConceptEmbedding()
            self.multi_relation_embedding = NCVSEConceptEmbedding()
            from left.models.reasoning.reasoning import LeftGrounding
            self.grounding_cls = LeftGrounding
        elif (configs.model.concept_embedding == 'linear'):
            self.attribute_embedding = NCLinearConceptEmbedding()
            self.relation_embedding = NCLinearConceptEmbedding()
            self.multi_relation_embedding = NCLinearConceptEmbedding()
            from left.models.reasoning.reasoning import NCOneTimeComputingGrounding
            self.grounding_cls = NCOneTimeComputingGrounding
        elif (configs.model.concept_embedding == 'linear-tied-attr'):
            # Same as 'linear' but attribute classifiers share tied weights.
            self.attribute_embedding = NCLinearConceptEmbedding(tied_attributes=True)
            self.relation_embedding = NCLinearConceptEmbedding()
            self.multi_relation_embedding = NCLinearConceptEmbedding()
            from left.models.reasoning.reasoning import NCOneTimeComputingGrounding
            self.grounding_cls = NCOneTimeComputingGrounding
        elif (configs.model.concept_embedding == 'clip'):
            self.attribute_embedding = NCVSEConceptEmbedding()
            self.relation_embedding = NCVSEConceptEmbedding()
            self.multi_relation_embedding = NCVSEConceptEmbedding()
            from left.models.reasoning.reasoning import NCDenseClipGrounding
            self.grounding_cls = NCDenseClipGrounding
        else:
            raise ValueError(f'Unknown concept embedding type: {configs.model.concept_embedding}.')
        self.init_concept_embeddings()
        from left.generalized_fol_executor import NCGeneralizedFOLExecutor
        # Program parser + executor over the domain.
        self.parser = NCGeneralizedFOLPythonParser(self.domain, inplace_definition=False, inplace_polymorphic_function=True, inplace_definition_type=False)
        self.executor = NCGeneralizedFOLExecutor(self.domain, self.parser)
        from left.models.losses import RefExpLoss, AttrClsLoss, QALoss, PickPlaceLoss
        # Task heads/losses; supervision toggles come from configs.train.
        self.refexp_loss = RefExpLoss(add_supervision=configs.train.refexp_add_supervision)
        self.attrcls_loss = AttrClsLoss(add_supervision=configs.train.attrcls_add_supervision)
        self.qa_loss = QALoss(output_vocab)
        self.pickplace_loss = PickPlaceLoss()

    def extract_concepts(self, domain: FunctionDomain) -> Tuple[(List[str], List[str], List[str])]:
        # Read (attribute, relational, multi-relational) concept names.
        return read_concepts_v2(domain)

    def extract_description_categories(self, domain: FunctionDomain) -> List[str]:
        return read_description_categories(domain)

    def init_concept_embeddings(self):
        # Register one embedding per concept, per arity (1=attribute,
        # 2=relation, 3=multi-relation), matching the configured backend.
        if (configs.model.concept_embedding == 'vse'):
            for (arity, src, tgt) in zip([1, 2, 3], [self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts], [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]):
                tgt.init_attribute('all', configs.model.sg_dims[arity])
                for word in src:
                    tgt.init_concept(word, configs.model.vse_hidden_dims[arity], 'all')
        elif (configs.model.concept_embedding in ('linear', 'linear-tied-attr')):
            for (arity, src, tgt) in zip([1, 2, 3], [self.attribute_concepts, self.relational_concepts, self.multi_relational_concepts], [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]):
                for word in src:
                    tgt.init_concept(word, configs.model.sg_dims[arity])
            if (len(self.attribute_concepts) > 0):
                if (self.description_vocab_size is not None):
                    # Description heads map attribute features to output-vocab logits.
                    for word in self.attribute_description_categories:
                        self.attribute_embedding.init_attribute(word, configs.model.sg_dims[1], self.description_vocab_size)
            for tgt in [self.attribute_embedding, self.relation_embedding, self.multi_relation_embedding]:
                tgt.init_linear_layers()
        elif (configs.model.concept_embedding == 'clip'):
            # CLIP embeddings are precomputed; nothing to initialize here.
            pass
        else:
            raise ValueError(f'Unknown concept embedding type: {configs.model.concept_embedding}.')

    def forward_sng(self, feed_dict):
        # Subclasses implement the scene-graph forward pass.
        raise NotImplementedError()

    def execute_program_from_parsing_string(self, question: str, raw_parsing: str, grounding, outputs: Dict[(str, Any)]):
        """Parse `raw_parsing` into a program, execute it under `grounding`,
        and append (parsing, program, execution, trace) entries to `outputs`.

        Failures are reported as ExecutionFailed, printed, and recorded as
        None entries — they never propagate to the caller.
        """
        (parsing, program, execution, trace) = (None, None, None, None)
        with self.executor.with_grounding(grounding):
            try:
                try:
                    parsing = raw_parsing
                    program = self.parser.parse_expression(raw_parsing)
                except Exception as e:
                    raise ExecutionFailed('Parsing failed for question: {}.'.format(question)) from e
                try:
                    if (not self.training):
                        # Record a full execution trace for inspection at eval time.
                        with self.executor.record_execution_trace() as trace_getter:
                            execution = self.executor.execute(program)
                            trace = trace_getter.get()
                    else:
                        execution = self.executor.execute(program)
                except (KeyError, AttributeError) as e:
                    logger.exception('Execution failed for question: {}\nProgram: {}.'.format(question, program))
                    raise ExecutionFailed('Execution failed for question: {}\nProgram: {}.'.format(question, program)) from e
            except ExecutionFailed as e:
                print(e)
        outputs.setdefault('results', list()).append((parsing, program, execution))
        outputs.setdefault('executions', list()).append(execution)
        outputs.setdefault('parsings', list()).append(parsing)
        outputs.setdefault('execution_traces', list()).append(trace)
class ExecutionFailed(Exception):
    """Raised when parsing or executing a reasoning program fails."""
class AGCNGraph():
    """Skeleton-graph container for AGCN: builds the (3, N, N) adjacency tensor.

    Relies on module-level topology globals (num_node, self_link, inward,
    outward, neighbor) defined alongside this class.
    """

    def __init__(self, labeling_mode='spatial'):
        # Record the topology first, then build the adjacency tensor; the
        # original code computed self.A before any attribute existed, so
        # get_adjacency_matrix(None) crashed with AttributeError when reached
        # from here.
        self.num_node = num_node
        self.self_link = self_link
        self.inward = inward
        self.outward = outward
        self.neighbor = neighbor
        self.A = self.get_adjacency_matrix(labeling_mode)

    def get_adjacency_matrix(self, labeling_mode=None):
        """Return the adjacency tensor for `labeling_mode`.

        None returns the cached tensor (or None if not yet built); 'spatial'
        rebuilds the identity/inward/outward stack.

        Raises:
            ValueError: for any other labeling mode.
        """
        if (labeling_mode is None):
            return getattr(self, 'A', None)
        if (labeling_mode == 'spatial'):
            A = get_spatial_graph(num_node, self_link, inward, outward)
        else:
            raise ValueError()
        return A
def edge2mat(link, num_node):
    """Build a directed adjacency matrix from an edge list.

    For every edge (i, j) in `link`, sets A[j, i] = 1 — columns index the
    source joint and rows the target joint.
    """
    adjacency = np.zeros((num_node, num_node))
    for source, target in link:
        adjacency[target, source] = 1
    return adjacency
def normalize_digraph(A):
    """Column-normalize a directed adjacency matrix: returns A @ D^-1.

    D is the diagonal of column sums; zero-sum columns are left at zero to
    avoid division by zero.
    """
    column_sums = np.sum(A, 0)
    _, width = A.shape
    inv_degree = np.zeros((width, width))
    for col in range(width):
        if column_sums[col] > 0:
            inv_degree[col, col] = column_sums[col] ** (-1)
    return np.dot(A, inv_degree)
def get_spatial_graph(num_node, self_link, inward, outward):
    """Stack identity/inward/outward adjacency matrices into a (3, N, N) tensor.

    The inward and outward subgraphs are column-normalized; the self-link
    subgraph is used as-is.
    """
    identity_part = edge2mat(self_link, num_node)
    inward_part = normalize_digraph(edge2mat(inward, num_node))
    outward_part = normalize_digraph(edge2mat(outward, num_node))
    return np.stack((identity_part, inward_part, outward_part))
class SigmoidCrossEntropy(nn.Module):
    """Sigmoid cross-entropy summed over the class dimension, averaged over batch.

    With one_hot=False, integer class targets are converted to one-hot on the fly.
    """

    def __init__(self, one_hot=False):
        super().__init__()
        self.one_hot = one_hot
        # Per-element loss; reduction is done manually in forward.
        self.bce = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, input, target):
        if not self.one_hot:
            target = jactorch.one_hot_nd(target, input.size(-1))
        per_class = self.bce(input, target)
        return per_class.sum(dim=-1).mean()
class MultilabelSigmoidCrossEntropy(nn.Module):
    """Multi-label BCE over a single 1-D logit vector.

    Labels may be given as a list/tuple of positive indices (scattered into a
    dense 0/1 target) or, with one_hot=True, as an already-dense target.
    """

    def __init__(self, one_hot=False):
        super().__init__()
        self.one_hot = one_hot
        self.bce = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, input, labels):
        # Accept raw Python sequences of label indices.
        if type(labels) in (tuple, list):
            labels = torch.tensor(labels, dtype=torch.int64, device=input.device)
        assert input.dim() == 1
        if not self.one_hot:
            # Scatter index labels into a dense 0/1 target vector.
            with torch.no_grad():
                dense = torch.zeros_like(input)
                if labels.size(0) > 0:
                    dense.scatter_(0, labels, torch.ones_like(labels, dtype=torch.float32))
                labels = dense
        return self.bce(input, labels).sum(dim=-1).mean()
class MultilabelSigmoidCrossEntropyAndAccuracy(nn.Module):
    """Multi-label BCE plus exact-match and per-label accuracies.

    With softmax=True the inputs are treated as probabilities (plain BCE,
    threshold 0.5); otherwise as logits (BCE-with-logits, threshold 0).
    Returns (loss, exact_match_accuracy, per_label_accuracy).
    """

    def __init__(self, one_hot=False, softmax=False, compute_loss=True):
        super().__init__()
        self.one_hot = one_hot
        self.softmax = softmax
        self.compute_loss = compute_loss
        self.bce = nn.BCELoss(reduction='none') if self.softmax else nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, input, labels):
        if type(labels) in (tuple, list):
            labels = torch.tensor(labels, dtype=torch.int64, device=input.device)
        assert input.dim() == 1
        if not self.one_hot:
            # Build a dense 0/1 target from the positive indices.
            with torch.no_grad():
                dense = torch.zeros_like(input)
                if labels.size(0) > 0:
                    dense.scatter_(0, labels, torch.ones_like(labels, dtype=torch.float32))
                labels = dense
        loss = 0
        if self.compute_loss:
            loss = self.bce(input, labels).sum(dim=-1).mean()
        if self.softmax:
            labels = labels.to(torch.int64)
            predictions = input > 0.5
        else:
            predictions = input > 0
        matches = (predictions == labels)
        exact_match = matches.all(dim=-1).type(torch.float32)
        per_label = matches.type(torch.float32)
        return (loss, exact_match.mean(), per_label.mean())
class MultitaskLossBase(nn.Module):
    """Shared loss helpers for multi-task training heads."""

    def __init__(self):
        super().__init__()
        # Reusable criteria; the multilabel variant accepts index lists.
        self._sigmoid_xent_loss = SigmoidCrossEntropy()
        self._multilabel_sigmoid_xent_loss = MultilabelSigmoidCrossEntropy()
        self._batched_xent_loss = nn.CrossEntropyLoss()

    def _mse_loss(self, pred, label):
        # NOTE(review): despite the name this is an elementwise absolute (L1)
        # error, not a squared error, and it is unreduced — confirm callers
        # expect |pred - label|.
        return (pred - label).abs()

    def _bce_loss(self, pred, label):
        # Numerically stable BCE on raw logits via log-sigmoid.
        return (- ((jactorch.log_sigmoid(pred) * label) + (jactorch.log_sigmoid((- pred)) * (1 - label))).mean())

    def _bce_logprob_loss(self, pred, label):
        # BCE when `pred` is already a log-probability; log1mexp(p) = log(1 - exp(p)).
        # NOTE(review): unlike the other variants this is neither negated nor
        # reduced with .mean() — confirm callers handle both.
        return ((pred * label) + ((1 - label) * jactorch.log1mexp(pred)))

    def _bce_prob_loss(self, pred, label):
        # BCE when `pred` is a probability in (0, 1).
        return (- ((torch.log(pred) * label) + (torch.log((1 - pred)) * (1 - label))).mean())

    def _xent_loss(self, pred, label):
        # Cross entropy over the last dim; `label` indexes logp directly.
        logp = F.log_softmax(pred, dim=(- 1))
        return (- logp[label].mean())
class _PointnetSAModuleBase(nn.Module):
    # Base class for PointNet++ set-abstraction modules; subclasses populate
    # npoint (number of sampled centroids), groupers, and per-scale MLPs.

    def __init__(self):
        super().__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None

    def forward(self, xyz: torch.Tensor, features: torch.Tensor=None) -> (torch.Tensor, torch.Tensor):
        "\n        Parameters\n        ----------\n        xyz : torch.Tensor\n            (B, N, 3) tensor of the xyz coordinates of the features\n        features : torch.Tensor\n            (B, N, C) tensor of the descriptors of the the features\n\n        Returns\n        -------\n        new_xyz : torch.Tensor\n            (B, npoint, 3) tensor of the new features' xyz\n        new_features : torch.Tensor\n            (B, npoint, \\sum_k(mlps[k][-1])) tensor of the new_features descriptors\n        "
        new_features_list = []
        # (B, N, 3) -> (B, 3, N), the layout gather_operation expects.
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        # Farthest-point-sample npoint centroids; npoint=None means group-all.
        new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)
            new_features = self.mlps[i](new_features)
            # Max-pool over each local neighborhood (kernel spans the nsample dim).
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            new_features = new_features.squeeze((- 1))
            new_features_list.append(new_features)
        # Concatenate the per-scale descriptors along the channel dim.
        return (new_xyz, torch.cat(new_features_list, dim=1))
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping.

    Parameters
    ----------
    npoint : int
        Number of features
    radii : list of float32
        list of radii to group with
    nsamples : list of int32
        Number of samples in each ball query
    mlps : list of list of int32
        Spec of the pointnet before the global max_pool for each scale
    bn : bool
        Use batchnorm
    """

    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool=True, use_xyz: bool=True, sample_uniformly: bool=False):
        super().__init__()
        assert (len(radii) == len(nsamples) == len(mlps))
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            # npoint=None falls back to grouping every point around one centroid.
            self.groupers.append((pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly) if (npoint is not None) else pointnet2_utils.GroupAll(use_xyz)))
            # Copy the spec before adjusting it: the original code mutated the
            # caller's list in place (mlps[i][0] += 3), silently corrupting the
            # spec if it was reused to build a second module.
            mlp_spec = list(mlps[i])
            if use_xyz:
                # The grouper appends the 3 relative-xyz channels to the input.
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale Pointnet set abstraction layer.

    Parameters
    ----------
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    mlp : list
        Spec of the pointnet before the global max_pool
    bn : bool
        Use batchnorm
    """

    def __init__(self, *, mlp: List[int], npoint: int=None, radius: float=None, nsample: int=None, bn: bool=True, use_xyz: bool=True):
        # Delegate to the multi-scale module with exactly one scale.
        super().__init__(
            mlps=[mlp],
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            bn=bn,
            use_xyz=use_xyz,
        )
class PointnetSAModuleVotes(nn.Module):
    ' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG\n    with extra support for returning point indices for getting their GT votes '

    def __init__(self, *, mlp: List[int], npoint: int=None, radius: float=None, nsample: int=None, bn: bool=True, use_xyz: bool=True, pooling: str='max', sigma: float=None, normalize_xyz: bool=False, sample_uniformly: bool=False, ret_unique_cnt: bool=False):
        super().__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        # Pooling over each neighborhood: 'max', 'avg', or 'rbf'.
        self.pooling = pooling
        self.mlp_module = None
        self.use_xyz = use_xyz
        self.sigma = sigma
        if (self.sigma is None):
            # Default RBF bandwidth: half the grouping radius.
            self.sigma = (self.radius / 2)
        self.normalize_xyz = normalize_xyz
        self.ret_unique_cnt = ret_unique_cnt
        if (npoint is not None):
            self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz, sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
        else:
            # npoint=None: a single group containing every point.
            self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
        mlp_spec = mlp
        if (use_xyz and (len(mlp_spec) > 0)):
            # The grouper appends 3 relative-xyz channels to the input.
            mlp_spec[0] += 3
        self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)

    def forward(self, xyz: torch.Tensor, features: torch.Tensor=None, inds: torch.Tensor=None) -> (torch.Tensor, torch.Tensor):
        "\n        Parameters\n        ----------\n        xyz : torch.Tensor\n            (B, N, 3) tensor of the xyz coordinates of the features\n        features : torch.Tensor\n            (B, C, N) tensor of the descriptors of the the features\n        inds : torch.Tensor\n            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)\n\n        Returns\n        -------\n        new_xyz : torch.Tensor\n            (B, npoint, 3) tensor of the new features' xyz\n        new_features : torch.Tensor\n            (B, \\sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors\n        inds: torch.Tensor\n            (B, npoint) tensor of the inds\n        "
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if (inds is None):
            # Sample centroids unless the caller pins them via `inds`.
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        else:
            assert (inds.shape[1] == self.npoint)
        new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, inds).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
        if (not self.ret_unique_cnt):
            (grouped_features, grouped_xyz) = self.grouper(xyz, new_xyz, features)
        else:
            # Also receive the count of unique points per ball query.
            (grouped_features, grouped_xyz, unique_cnt) = self.grouper(xyz, new_xyz, features)
        new_features = self.mlp_module(grouped_features)
        if (self.pooling == 'max'):
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif (self.pooling == 'avg'):
            new_features = F.avg_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif (self.pooling == 'rbf'):
            # RBF pooling: weight each neighbor by exp(-||dxyz||^2 / (2 sigma^2))
            # and average over the nsample neighbors.
            rbf = torch.exp(((((- 1) * grouped_xyz.pow(2).sum(1, keepdim=False)) / (self.sigma ** 2)) / 2))
            new_features = (torch.sum((new_features * rbf.unsqueeze(1)), (- 1), keepdim=True) / float(self.nsample))
        new_features = new_features.squeeze((- 1))
        if (not self.ret_unique_cnt):
            return (new_xyz, new_features, inds)
        else:
            return (new_xyz, new_features, inds, unique_cnt)
class PointnetSAModuleMSGVotes(nn.Module):
    """Multiscale-grouping SA module that also returns the sampled point
    indices (for matching their GT votes). Modified from _PointnetSAModuleBase
    and PointnetSAModuleMSG."""

    def __init__(self, *, mlps: List[List[int]], npoint: int, radii: List[float], nsamples: List[int], bn: bool=True, use_xyz: bool=True, sample_uniformly: bool=False):
        super().__init__()
        assert (len(mlps) == len(nsamples) == len(radii))
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            # npoint=None falls back to grouping every point around one centroid.
            self.groupers.append((pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly) if (npoint is not None) else pointnet2_utils.GroupAll(use_xyz)))
            # Copy the spec before adjusting it: the original code mutated the
            # caller's list in place (mlps[i][0] += 3), silently corrupting the
            # spec if it was reused to build a second module.
            mlp_spec = list(mlps[i])
            if use_xyz:
                # The grouper appends the 3 relative-xyz channels to the input.
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))

    def forward(self, xyz: torch.Tensor, features: torch.Tensor=None, inds: torch.Tensor=None) -> (torch.Tensor, torch.Tensor):
        "\n        Parameters\n        ----------\n        xyz : torch.Tensor\n            (B, N, 3) tensor of the xyz coordinates of the features\n        features : torch.Tensor\n            (B, C, C) tensor of the descriptors of the the features\n        inds : torch.Tensor\n            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)\n\n        Returns\n        -------\n        new_xyz : torch.Tensor\n            (B, npoint, 3) tensor of the new features' xyz\n        new_features : torch.Tensor\n            (B, \\sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors\n        inds: torch.Tensor\n            (B, npoint) tensor of the inds\n        "
        new_features_list = []
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if (inds is None):
            # Sample centroids unless the caller pins them via `inds`.
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, inds).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)
            new_features = self.mlps[i](new_features)
            # Max-pool over each local neighborhood.
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            new_features = new_features.squeeze((- 1))
            new_features_list.append(new_features)
        return (new_xyz, torch.cat(new_features_list, dim=1), inds)
class PointnetFPModule(nn.Module):
    """Propagates the features of one set to another.

    Parameters
    ----------
    mlp : list
        Pointnet module parameters
    bn : bool
        Use batchnorm
    """

    def __init__(self, *, mlp: List[int], bn: bool=True):
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)

    def forward(self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor) -> torch.Tensor:
        '\n        Parameters\n        ----------\n        unknown : torch.Tensor\n            (B, n, 3) tensor of the xyz positions of the unknown features\n        known : torch.Tensor\n            (B, m, 3) tensor of the xyz positions of the known features\n        unknow_feats : torch.Tensor\n            (B, C1, n) tensor of the features to be propagated to\n        known_feats : torch.Tensor\n            (B, C2, m) tensor of features to be propagated\n\n        Returns\n        -------\n        new_features : torch.Tensor\n            (B, mlp[-1], n) tensor of the features of the unknown features\n        '
        if (known is not None):
            # Inverse-distance-weighted interpolation from the 3 nearest knowns.
            (dist, idx) = pointnet2_utils.three_nn(unknown, known)
            dist_recip = (1.0 / (dist + 1e-08))
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = (dist_recip / norm)
            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # No known positions: broadcast the global feature to every point.
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
        if (unknow_feats is not None):
            # Concatenate skip-link features from the target set.
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)
        else:
            new_features = interpolated_feats
        # SharedMLP expects a 4-D (B, C, n, 1) tensor.
        new_features = new_features.unsqueeze((- 1))
        new_features = self.mlp(new_features)
        return new_features.squeeze((- 1))
class PointnetLFPModuleMSG(nn.Module):
    """Learnable feature-propagation layer with multiscale grouping.
    Modified from _PointnetSAModuleBase and PointnetSAModuleMSG."""

    def __init__(self, *, mlps: List[List[int]], radii: List[float], nsamples: List[int], post_mlp: List[int], bn: bool=True, use_xyz: bool=True, sample_uniformly: bool=False):
        super().__init__()
        assert (len(mlps) == len(nsamples) == len(radii))
        self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly))
            # Copy the spec before adjusting it: the original code mutated the
            # caller's list in place (mlps[i][0] += 3), silently corrupting the
            # spec if it was reused to build a second module.
            mlp_spec = list(mlps[i])
            if use_xyz:
                # The grouper appends the 3 relative-xyz channels to the input.
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))

    def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor, features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
        ' Propagate features from xyz1 to xyz2.\n        Parameters\n        ----------\n        xyz2 : torch.Tensor\n            (B, N2, 3) tensor of the xyz coordinates of the features\n        xyz1 : torch.Tensor\n            (B, N1, 3) tensor of the xyz coordinates of the features\n        features2 : torch.Tensor\n            (B, C2, N2) tensor of the descriptors of the the features\n        features1 : torch.Tensor\n            (B, C1, N1) tensor of the descriptors of the the features\n\n        Returns\n        -------\n        new_features1 : torch.Tensor\n            (B, \\sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors\n        '
        new_features_list = []
        for i in range(len(self.groupers)):
            # Group xyz1 features around xyz2 centroids at this scale.
            new_features = self.groupers[i](xyz1, xyz2, features1)
            new_features = self.mlps[i](new_features)
            # Max-pool over each local neighborhood.
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            new_features = new_features.squeeze((- 1))
            if (features2 is not None):
                # Concatenate the destination set's own features as a skip link.
                new_features = torch.cat([new_features, features2], dim=1)
            new_features = new_features.unsqueeze((- 1))
            new_features = self.post_mlp(new_features)
            new_features_list.append(new_features)
        return torch.cat(new_features_list, dim=1).squeeze((- 1))
def test_interpolation_grad():
    """Gradcheck for pointnet2 three_interpolate on a tiny CUDA tensor."""
    batch_size, feat_dim, m = 1, 2, 4
    feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()

    def interpolate_func(inputs):
        # Fixed neighbor indices/weights; only `inputs` carries gradients.
        idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda()
        weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2, 2]]])).float().cuda()
        return pointnet2_utils.three_interpolate(inputs, idx, weight)

    assert gradcheck(interpolate_func, feats, atol=0.1, rtol=0.1)
class SharedMLP(nn.Sequential):
    """Stack of 1x1 Conv2d layers applied pointwise (weights shared across points).

    `args` lists the channel widths; with first=True and preact=True, the first
    layer skips its batch norm and activation.
    """

    def __init__(self, args: List[int], *, bn: bool=False, activation=nn.ReLU(inplace=True), preact: bool=False, first: bool=False, name: str=''):
        super().__init__()
        n_layers = len(args) - 1
        for i in range(n_layers):
            # With preact on the first layer of the first block, skip BN/activation.
            skip_pre = first and preact and i == 0
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    bn=bn and not skip_pre,
                    activation=None if skip_pre else activation,
                    preact=preact,
                ),
            )
class _BNBase(nn.Sequential):
    """Wraps a batch-norm layer initialized to the identity (weight=1, bias=0)."""

    def __init__(self, in_size, batch_norm=None, name=''):
        super().__init__()
        layer = batch_norm(in_size)
        self.add_module(name + 'bn', layer)
        # Start as an identity transform: unit scale, zero shift.
        nn.init.constant_(layer.weight, 1.0)
        nn.init.constant_(layer.bias, 0)
class BatchNorm1d(_BNBase):
    """1-D batch norm with identity initialization (see _BNBase)."""

    def __init__(self, in_size: int, *, name: str=''):
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
    """2-D batch norm with identity initialization (see _BNBase)."""

    def __init__(self, in_size: int, name: str=''):
        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
    """3-D batch norm with identity initialization (see _BNBase)."""

    def __init__(self, in_size: int, name: str=''):
        super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
    """Conv + optional batch norm + optional activation, in pre- or
    post-activation order (preact puts BN/activation before the conv)."""

    def __init__(self, in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=None, batch_norm=None, bias=True, preact=False, name=''):
        super().__init__()
        # BN supplies its own affine shift, so drop the conv bias under BN.
        bias = bias and not bn
        conv_unit = conv(in_size, out_size, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)
        if bn:
            # Pre-activation normalizes the input channels; post-activation
            # normalizes the conv output.
            bn_unit = batch_norm(in_size if preact else out_size)
        if preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
        self.add_module(name + 'conv', conv_unit)
        if not preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
    """1-D conv block (conv + optional BN + activation); see _ConvBase."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: int=1, stride: int=1, padding: int=0, activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str=''):
        super().__init__(
            in_size, out_size, kernel_size, stride, padding,
            activation, bn, init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
        )
class Conv2d(_ConvBase):
    """2-D conv block (conv + optional BN + activation); see _ConvBase."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: Tuple[(int, int)]=(1, 1), stride: Tuple[(int, int)]=(1, 1), padding: Tuple[(int, int)]=(0, 0), activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str=''):
        super().__init__(
            in_size, out_size, kernel_size, stride, padding,
            activation, bn, init,
            conv=nn.Conv2d,
            batch_norm=BatchNorm2d,
            bias=bias,
            preact=preact,
            name=name,
        )
class Conv3d(_ConvBase):
    """3-D conv block (conv + optional BN + activation); see _ConvBase."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: Tuple[(int, int, int)]=(1, 1, 1), stride: Tuple[(int, int, int)]=(1, 1, 1), padding: Tuple[(int, int, int)]=(0, 0, 0), activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str=''):
        super().__init__(
            in_size, out_size, kernel_size, stride, padding,
            activation, bn, init,
            conv=nn.Conv3d,
            batch_norm=BatchNorm3d,
            bias=bias,
            preact=preact,
            name=name,
        )
class FC(nn.Sequential):
    """Fully-connected block: ``nn.Linear`` + optional batch-norm + activation,
    with pre-/post-activation ordering controlled by ``preact``
    (mirrors ``_ConvBase``).
    """

    def __init__(self, in_size: int, out_size: int, *, activation=nn.ReLU(inplace=True), bn: bool=False, init=None, preact: bool=False, name: str=''):
        super().__init__()
        # The linear bias is dropped when BN follows (BN provides the shift).
        fc = nn.Linear(in_size, out_size, bias=(not bn))
        if (init is not None):
            init(fc.weight)
        if (not bn):
            nn.init.constant_(fc.bias, 0)
        if preact:
            # Pre-activation: BN over the *input* features, then activation, then FC.
            if bn:
                self.add_module((name + 'bn'), BatchNorm1d(in_size))
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
        self.add_module((name + 'fc'), fc)
        if (not preact):
            if bn:
                self.add_module((name + 'bn'), BatchNorm1d(out_size))
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
def set_bn_momentum_default(bn_momentum):
    """Build a callback for ``nn.Module.apply`` that sets ``momentum`` on
    every 1d/2d/3d batch-norm layer it visits; other modules are untouched."""
    def _apply(module):
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            module.momentum = bn_momentum
    return _apply
class BNMomentumScheduler(object):
    """Epoch-driven scheduler for batch-norm momentum (analogous to an LR
    scheduler).

    ``bn_lambda(epoch)`` returns the momentum to use at that epoch; ``setter``
    turns that value into an ``nn.Module.apply`` callback (defaults to
    ``set_bn_momentum_default``).
    """

    def __init__(self, model, bn_lambda, last_epoch=(- 1), setter=set_bn_momentum_default):
        if (not isinstance(model, nn.Module)):
            raise RuntimeError("Class '{}' is not a PyTorch nn Module".format(type(model).__name__))
        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda
        # Apply the initial momentum right away (epoch last_epoch + 1), then
        # rewind last_epoch so the caller's first explicit step() advances it.
        self.step((last_epoch + 1))
        self.last_epoch = last_epoch

    def step(self, epoch=None):
        """Advance to ``epoch`` (default: next epoch) and update BN momenta."""
        if (epoch is None):
            epoch = (self.last_epoch + 1)
        self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))
def conv_branch_init(conv, branches):
    """Initialize a conv layer used as one of ``branches`` parallel branches.

    Weights are drawn from N(0, sqrt(2 / (fan * branches))) where
    fan = weight.size(0) * weight.size(1) * weight.size(2); the bias is zeroed.
    """
    w = conv.weight
    fan = w.size(0) * w.size(1) * w.size(2) * branches
    nn.init.normal_(w, 0, math.sqrt(2.0 / fan))
    nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """He-normal (fan_out) weights and zero bias for a conv layer."""
    nn.init.constant_(conv.bias, 0)
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
def bn_init(bn, scale):
    """Set a batch-norm layer's affine parameters: gamma to ``scale``, beta to 0."""
    nn.init.constant_(bn.bias, 0)
    nn.init.constant_(bn.weight, scale)
class unit_tcn(nn.Module):
    """Temporal convolution unit: a (kernel_size x 1) conv over the time axis
    followed by batch-norm.

    Input/output layout is (N, C, T, V); the conv slides over T only, with
    'same' padding along time so T shrinks only by the stride.
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        pad = int(((kernel_size - 1) / 2))  # 'same' padding for odd kernels
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        # NOTE(review): self.relu is created but never used in forward();
        # the activation appears to be applied by the caller (see TCN_GCN_unit).
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        # No activation here: returns the raw bn(conv(x)) output.
        x = self.bn(self.conv(x))
        return x
class unit_gcn(nn.Module):
    """Adaptive graph convolution unit (2s-AGCN style).

    For each of ``num_subset`` adjacency subsets it combines a fixed graph
    ``A``, a learned global offset ``PA``, and a data-dependent attention
    graph computed from embeddings of x (conv_a / conv_b), then aggregates
    features with conv_d. A residual ('down') branch plus BN and ReLU finish
    the block. Input layout: (N, C, T, V).
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gcn, self).__init__()
        inter_channels = (out_channels // coff_embedding)  # attention embedding width
        self.inter_c = inter_channels
        # Learned adjacency offset; initialized near zero so training starts
        # close to the fixed graph A.
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant_(self.PA, 1e-06)
        # NOTE(review): torch.autograd.Variable is legacy API; a registered
        # buffer would be the modern equivalent.
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset
        self.conv_a = nn.ModuleList()
        self.conv_b = nn.ModuleList()
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
        # Residual projection when channel counts differ; identity otherwise.
        if (in_channels != out_channels):
            self.down = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1), nn.BatchNorm2d(out_channels))
        else:
            self.down = (lambda x: x)
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax((- 2))  # normalize attention over source nodes
        self.relu = nn.ReLU()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero gamma on the output BN makes the block start as (almost)
        # just the residual branch.
        bn_init(self.bn, 1e-06)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        (N, C, T, V) = x.size()
        A = self.A
        # Move the fixed graph onto the input's GPU (-1 device index means CPU).
        if ((- 1) != x.get_device()):
            A = A.cuda(x.get_device())
        A = (A + self.PA)
        y = None
        for i in range(self.num_subset):
            # Attention graph: (N, V, inter_c*T) @ (N, inter_c*T, V) -> (N, V, V),
            # scaled by the inner dimension and softmax-normalized.
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, (self.inter_c * T))
            A2 = self.conv_b[i](x).view(N, (self.inter_c * T), V)
            A1 = self.soft((torch.matmul(A1, A2) / A1.size((- 1))))
            A1 = (A1 + A[i])  # combine attention with the (fixed + learned) graph
            A2 = x.view(N, (C * T), V)
            # Aggregate features over the combined graph, then 1x1 conv.
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = ((z + y) if (y is not None) else z)
        y = self.bn(y)
        y += self.down(x)
        return self.relu(y)
class TCN_GCN_unit(nn.Module):
    """One spatial-temporal block: adaptive GCN followed by a temporal conv,
    with a residual connection and a final ReLU.
    """

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if (not residual):
            self.residual = (lambda x: 0)  # residual disabled
        elif ((in_channels == out_channels) and (stride == 1)):
            self.residual = (lambda x: x)  # identity shortcut
        else:
            # 1x1 temporal conv matches channels/stride for the shortcut.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        x = (self.tcn1(self.gcn1(x)) + self.residual(x))
        return self.relu(x)
class SceneGraphSkeleton(nn.Module):
    """AGCN-style skeleton encoder with three linear classification heads.

    Backbone: ten stacked TCN_GCN_unit blocks (channels 64 -> 128 -> 256,
    temporal stride 2 at l5 and l8). After global average pooling over time,
    joints, and persons, three heads predict attribute-concept, relation
    (``num_class``), and output-vocabulary logits.

    Input layout: (N, C, T, V, M) = (batch, channels, time, joints, persons).
    """

    def __init__(self, num_attribute_concepts, num_output_vocab, include_fully_connected=True, num_class=224, num_point=22, num_person=1, graph_args=dict(), in_channels=3):
        # NOTE(review): graph_args=dict() is a mutable default; harmless here
        # since it is only unpacked, but a None sentinel would be safer.
        super(SceneGraphSkeleton, self).__init__()
        self.graph = AGCNGraph(**graph_args)
        A = self.graph.A
        # 1d BN over the flattened (person * channel * joint) axis per time step.
        self.data_bn = nn.BatchNorm1d(((num_person * in_channels) * num_point))
        self.l1 = TCN_GCN_unit(3, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)
        self.fc = nn.Linear(256, num_attribute_concepts)
        self.fc_rel = nn.Linear(256, num_class)
        self.fc_output_vocab = nn.Linear(256, num_output_vocab)
        # NOTE(review): fc init scales by num_class although fc outputs
        # num_attribute_concepts — possibly carried over from the original
        # AGCN code; confirm intended.
        nn.init.normal_(self.fc.weight, 0, math.sqrt((2.0 / num_class)))
        bn_init(self.data_bn, 1)
        self.include_fully_connected = include_fully_connected

    def forward(self, x):
        """Return (attribute_logits, relation_logits, vocab_logits); when
        include_fully_connected is False, the pooled 256-d feature is returned
        three times instead."""
        (N, C, T, V, M) = x.size()
        # (N, C, T, V, M) -> (N, M*V*C, T) so BatchNorm1d sees one channel per
        # (person, joint, coordinate) triple.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, ((M * V) * C), T)
        x = self.data_bn(x)
        # Back to (N*M, C, T, V): each person becomes a separate sample.
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view((N * M), C, T, V)
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)
        c_new = x.size(1)
        # Pool over (T*V), then over persons M.
        x = x.view(N, M, c_new, (- 1))
        x = x.mean(3).mean(1)
        if self.include_fully_connected:
            return (self.fc(x), self.fc_rel(x), self.fc_output_vocab(x))
        else:
            return (x, x, x)
def run_gpt(questions, prompts, temperature: float=1.0, use_user_message: bool=False):
    """Send a batch of questions to gpt-3.5-turbo and return the raw response.

    Each question is wrapped in <text>...</text> tags and appended to the user
    prompt. Retries up to 10 times on rate limits and transient API errors,
    sleeping 30s between attempts; asserts if every attempt failed.

    use_user_message: when True, the 'system' prompt is sent with role 'user'
        instead of 'system'.
    """
    query_str = '\n'.join(['<text>{}</text>'.format(q) for q in questions])
    response = None
    for i in range(10):  # bounded retry loop
        try:
            response = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=[{'role': ('user' if use_user_message else 'system'), 'content': prompts['system']}, {'role': 'user', 'content': (prompts['user'] + query_str)}], max_tokens=1024, temperature=temperature)
            break
        except openai.error.RateLimitError:
            print('Rate limit exceeded, retrying...')
            time.sleep(30)
        except (openai.error.InvalidRequestError, openai.error.APIError, openai.error.APIConnectionError):
            print('API error, retrying...')
            time.sleep(30)
    assert (response is not None)
    return {'questions': questions, 'response': response['choices'][0]['message']['content'], 'raw_response': response}
def fix_parentheses(string):
    """Return ``string`` with parentheses balanced: unmatched ')' are dropped
    and any still-open '(' are closed at the end."""
    open_count = 0
    pieces = []
    for ch in string:
        if ch == '(':
            open_count += 1
            pieces.append(ch)
        elif ch == ')':
            # Keep only closers that match a previously seen opener.
            if open_count > 0:
                open_count -= 1
                pieces.append(ch)
        else:
            pieces.append(ch)
    pieces.append(')' * open_count)
    return ''.join(pieces)
def extract_from_gpt(results_str, expected_batch_size: int):
    """Extract program strings from <code>...</code> spans in a GPT reply.

    Programs of the form ``describe(X, iota(...))`` are rewritten to
    ``describe(X, lambda k: X(k, iota(...)))`` and their parentheses are
    re-balanced with fix_parentheses.

    Raises:
        ValueError: if the number of extracted programs differs from
            expected_batch_size (the caller treats this as a failed batch).
    """
    results = []
    for result_str in results_str.split('<code>')[1:]:
        result_str = result_str.split('</code>')[0]
        result_str = result_str.strip()
        if result_str.startswith('describe('):
            # describe(X, iota(...)) -> describe(X, lambda k: X(k, iota(...)))
            result_str = re.sub('describe\\(([a-zA-Z]*?),\\s*iota\\((.*)\\)\\)', 'describe(\\1, lambda k: \\1(k, iota(\\2)))', result_str)
            result_str = fix_parentheses(result_str)
        results.append(result_str)
    if (len(results) != expected_batch_size):
        raise ValueError(f'Expected {expected_batch_size} results, but got {len(results)}.')
    return results
def main():
    """Batch-translate questions to programs with GPT-3.5 and cache results.

    Loads questions (CLEVR json or ReferIt csv), optionally samples and/or
    resumes from previous outputs (--append / --based-on), queries GPT in
    random batches, and dumps the raw responses (*.gpt.pkl) plus the
    question -> [program] mapping (*.export.pkl).
    """
    parser = jacinle.JacArgumentParser()
    parser.add_argument('--dataset', type=str, default='clevr', choices=['clevr', 'referit'])
    parser.add_argument('--questions', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--prompt', type=str, required=True)
    parser.add_argument('--sample', type=int, default=0)
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--temperature', type=float, default=1.0)
    parser.add_argument('--use-user-message', action='store_true')
    parser.add_argument('--append', action='store_true')
    parser.add_argument('--based-on', type=str, default=None)
    args = parser.parse_args()
    assert args.output.endswith('.pkl')
    # Derived output paths: raw GPT responses and the exported mapping.
    args.output_gpt = args.output.replace('.pkl', '.gpt.pkl')
    args.output_export = args.output.replace('.pkl', '.export.pkl')
    if (args.based_on is not None):
        assert osp.exists(args.based_on)
        based_on = io.load(args.based_on)
    else:
        based_on = dict()
    if args.append:
        # Nothing to append to: fall back to a fresh run.
        if (not osp.exists(args.output_export)):
            args.append = False
    else:
        # Fresh run: warn before overwriting existing outputs.
        ask = False
        if osp.exists(args.output_gpt):
            ask = True
            print(f'Output file {args.output} already exists.')
        if osp.exists(args.output_export):
            ask = True
            print(f'Output file {args.output} already exists.')
        if ask:
            if (not jacinle.yes_or_no('Continue running will overwrite the existing files. \nContinue?', default='no')):
                return
    with open(args.prompt) as f:
        prompts_str = f.read()
    # Prompt file holds the system and user prompts separated by '----'.
    (system_prmopt, user_prompt) = prompts_str.split('----')
    prompts = {'system': system_prmopt.strip(), 'user': user_prompt.strip()}
    rows = []
    rows.append(('System Prompt', prompts['system']))
    rows.append(('User Prompt', prompts['user']))
    print(jacinle.tabulate(rows, headers=['name', 'prompt']))
    if (args.dataset == 'clevr'):
        questions = io.load(args.questions)['questions']
        questions = sorted({q['question'] for q in questions})  # deduplicate
    elif (args.dataset == 'referit'):
        import pandas as pd
        df = pd.read_csv(args.questions)
        questions = df['utterance'].tolist()
    else:
        raise ValueError(f'Unknown dataset: {args.dataset}')
    if (args.sample > 0):
        sampled_questions = random.sample(questions, args.sample)
        sampled_questions = list(set(sampled_questions))
    else:
        sampled_questions = list(set(questions))
    # Skip questions already covered by the --based-on mapping.
    if (based_on is not None):
        sampled_questions = [q for q in sampled_questions if (q not in based_on)]
    if (not args.append):
        gpt_results = list()
        mappings = dict()
    else:
        gpt_results = io.load(args.output_gpt)
        mappings = io.load(args.output_export)
        old_length = len(sampled_questions)
        sampled_questions = [q for q in sampled_questions if (q not in mappings)]
        print(f'Removed {(old_length - len(sampled_questions))} questions that have already been processed.')
    total_gpt_queries = 0
    meters = jacinle.GroupMeters()
    with jacinle.tqdm_pbar(total=len(sampled_questions), desc='Running GPT-3.5') as pbar:
        # Keep drawing random batches until every question has a parsed result;
        # a failed parse leaves its batch in the pool to be retried.
        while (len(sampled_questions) > 0):
            questions_batch = list(random.sample(sampled_questions, min(args.batch_size, len(sampled_questions))))
            gpt_response = run_gpt(questions_batch, prompts, args.temperature, args.use_user_message)
            total_gpt_queries += 1
            results_str = gpt_response['response']
            result_batch = None
            try:
                result_batch = extract_from_gpt(results_str, args.batch_size)
            except ValueError:
                pass  # wrong result count: retry this batch later
            if (result_batch is not None):
                gpt_results.append(gpt_response)
                for (q, r) in zip(questions_batch, result_batch):
                    mappings[q] = [r]
                sampled_questions = [q for q in sampled_questions if (q not in questions_batch)]
                meters.update('batch-succ', 1)
                pbar.update(len(questions_batch))
            else:
                meters.update('batch-succ', 0)
            status_values = {k: v.avg for (k, v) in meters.items()}
            status_values['total-gpt-queries'] = total_gpt_queries
            pbar.set_description(meters.format_simple('Runing GPT-3.5:', status_values, compressed=True))
    io.dump(args.output_gpt, gpt_results)
    io.dump(args.output_export, mappings)
def run_gpt(questions, prompts):
    """Query GPT-4 with a batch of <text>-wrapped questions, retrying forever.

    Each question is wrapped in <text>...</text> tags and appended to the user
    prompt. On any API failure the call sleeps 60s and retries until it
    succeeds.

    Returns:
        dict with 'questions', the reply text under 'response', and the full
        API object under 'raw_response'.
    """
    import time  # local import preserved: module-level `time` may not be in scope here
    query_str = '\n'.join(['<text>{}</text>'.format(q) for q in questions])
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', temperature=0.7, messages=[{'role': 'system', 'content': prompts['system']}, {'role': 'user', 'content': (prompts['user'] + query_str)}], max_tokens=1024)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate instead of being swallowed by the retry loop.
            print('Sleeping', flush=True)
            time.sleep(60)
        else:
            print('Success', flush=True)
            break
    return {'questions': questions, 'response': response['choices'][0]['message']['content'], 'raw_response': response}
def fix_parentheses(string):
    """Balance the parentheses in ``string``: extra ')' characters are removed
    and missing ')' characters are appended at the end."""
    depth = 0
    out = ''
    for ch in string:
        if ch == '(':
            depth += 1
        elif ch == ')':
            if depth == 0:
                continue  # unmatched closer: drop it
            depth -= 1
        out += ch
    return out + ')' * depth
def main():
    """Run GPT-4 over a question dataset (one question per request) and cache
    all raw results to ``args.output``; if the output file already exists, it
    is loaded from disk instead of re-querying."""
    parser = jacinle.JacArgumentParser()
    # NOTE(review): the default 'clevr' is not among the choices; argparse does
    # not validate defaults, and 'clevr'.split('-')[1] below raises IndexError,
    # so running without --dataset likely crashes. Confirm the intended
    # default (e.g. 'clevr-rpms').
    parser.add_argument('--dataset', type=str, default='clevr', choices=['clevr-rpms', 'clevr-puzzles', 'clevr-refexps', 'referit'])
    parser.add_argument('--questions', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--prompt', type=str, required=True)
    parser.add_argument('--sample-size', type=int, default=100)
    args = parser.parse_args()
    assert args.output.endswith('.pkl')
    with open(args.prompt) as f:
        prompts_str = f.read()
    # Prompt file holds the system and user prompts separated by '----'.
    (system_prmopt, user_prompt) = prompts_str.split('----')
    prompts = {'system': system_prmopt.strip(), 'user': user_prompt.strip()}
    print('System prompt:')
    print(prompts['system'])
    print(('-' * 80))
    print('User prompt:')
    print(prompts['user'])
    if args.dataset.startswith('clevr'):
        with open(args.questions) as f:
            d = json.load(f)
        key_name = args.dataset.split('-')[1]  # 'rpms' / 'puzzles' / 'refexps'
        questions = [this_d['question'] for this_d in d[key_name]]
        print(questions)
    elif (args.dataset == 'referit'):
        import pandas as pd
        df = pd.read_csv(args.questions)
        questions = df['utterance'].tolist()
    else:
        raise ValueError(f'Unknown dataset: {args.dataset}')
    if (not osp.exists(args.output)):
        # NOTE(review): despite --sample-size, every question is processed here.
        sampled_questions = questions
        batch_size = 1
        results = list()
        start_time = time.time()
        for i in range(0, len(sampled_questions), batch_size):
            # Progress line with elapsed time, throughput, and ETA
            # (the ETA term guards against division by zero at i == 0).
            print('\rProcessing {}:{} / {}, time elapsed: {:.2f}s speed: {:.2f}q/s, eta={:.2f}s'.format(i, (i + batch_size), len(sampled_questions), (time.time() - start_time), (i / (time.time() - start_time)), (((len(sampled_questions) - i) / (i / (time.time() - start_time))) if (i > 0) else 0.0)), end='')
            questions_batch = sampled_questions[i:(i + batch_size)]
            results.append(run_gpt(questions_batch, prompts))
        print('')
        io.set_fs_verbose()
        io.dump(args.output, {'questions': sampled_questions, 'results': results})
    else:
        print('Output file already exists: directly loading from disk.')
        output_json = io.load(args.output)
        questions = output_json['questions']
        results = output_json['results']
def main(args):
    """Convert every question's CLEVR program to an FOL string via transform()
    and dump a {question_text: fol_program_string} mapping to args.output."""
    questions = jacinle.load(args.input)['questions']
    output = dict()
    for q in questions:
        question_str = q['question']
        program = q['program']
        fol_program_str = transform(program)
        output[question_str] = fol_program_str
        print(question_str)
        print(fol_program_str)
    jacinle.dump(args.output, output)
@dataclass
class QueryXProgram(object):
    """Intermediate result for a CLEVR ``query_*`` op inside transform().

    Keeping the object expression alongside the full describe(...) program
    lets ``equal_*`` ops compare the underlying objects instead of the
    described attributes.
    """
    # Full 'describe(Attr, lambda k: attr(k, <object>))' expression.
    full_program: str
    # The inner object-selection (iota) expression being described.
    object_program: str
def get_op_type(op):
    """Return a CLEVR program op's type, stored under 'type' when present and
    under 'function' otherwise."""
    return op['type'] if ('type' in op) else op['function']
def transform(program):
    """Translate a CLEVR functional program (list of ops with 'inputs' /
    'value_inputs') into a first-order-logic style program string.

    The per-op intermediate results stored in index_to_result are:
      * (expression, variable) tuples for set-valued ops,
      * plain strings for scalar-valued ops (count/exist/equal/compare),
      * QueryXProgram for query_* ops, so equal_* can compare the underlying
        object expressions instead of the described attributes.
    The last op's result is the program's output.
    """
    index_to_result = dict()
    variable_counter = 0
    for (i, op) in enumerate(program):
        op_type = get_op_type(op)
        if (op_type == 'scene'):
            # Fresh unconstrained variable; empty constraint string.
            variable_counter += 1
            index_to_result[i] = ('', f'x{variable_counter}')
        elif (op_type in ('filter_size', 'filter_color', 'filter_material', 'filter_shape')):
            # Conjoin the concept predicate onto the accumulated constraints.
            (program_str, variable) = index_to_result[op['inputs'][0]]
            this_program_str = f"{op['value_inputs'][0]}({variable})"
            program_str = (((this_program_str + ' and ') + program_str) if program_str else this_program_str)
            index_to_result[i] = (program_str, variable)
        elif (op_type == 'unique'):
            # Close the variable with a definite-description (iota) operator.
            (inner, variable) = index_to_result[op['inputs'][0]]
            program_str = f'iota(Object, lambda {variable}: {inner})'
            index_to_result[i] = (program_str, None)
        elif (op_type == 'relate'):
            # New variable related to the (unique) input object.
            variable_counter += 1
            variable = f'x{variable_counter}'
            (inner, _) = index_to_result[op['inputs'][0]]
            program_str = f"{op['value_inputs'][0]}({variable}, {inner})"
            index_to_result[i] = (program_str, variable)
        elif (op_type in ('same_size', 'same_color', 'same_material', 'same_shape')):
            # New variable sharing an attribute with the input object.
            variable_counter += 1
            variable = f'x{variable_counter}'
            (inner, _) = index_to_result[op['inputs'][0]]
            program_str = f'{op_type}({variable}, {inner})'
            index_to_result[i] = (program_str, variable)
        elif ((op_type == 'intersect') or (op_type == 'union')):
            (e1, v1) = index_to_result[op['inputs'][1]]
            (e2, v2) = index_to_result[op['inputs'][0]]
            # An empty expression is unconstrained: the other side wins outright.
            if (e1 == ''):
                index_to_result[i] = (e2, v2)
            elif (e2 == ''):
                index_to_result[i] = (e1, v1)
            else:
                assert ((v1 in e1) and (v2 in e2))
                # Rename both sides onto one fresh variable, then and/or them.
                variable_counter += 1
                variable = f'x{variable_counter}'
                if (op_type == 'intersect'):
                    program_str = f'{e1.replace(v1, variable)} and {e2.replace(v2, variable)}'
                else:
                    program_str = f'({e1.replace(v1, variable)} or {e2.replace(v2, variable)})'
                index_to_result[i] = (program_str, variable)
        elif (op_type in ('count', 'exist')):
            (inner, variable) = index_to_result[op['inputs'][0]]
            if (inner == ''):
                inner = f'thing({variable})'  # unconstrained set: quantify over everything
            if (op_type == 'exist'):
                op_type = 'exists'  # rename to the FOL quantifier spelling
            program_str = f'{op_type}(Object, lambda {variable}: {inner})'
            index_to_result[i] = program_str
        elif (op_type in ('query_shape', 'query_color', 'query_material', 'query_size')):
            metaconcept = op_type.split('_')[1]
            (object_str, _) = index_to_result[op['inputs'][0]]
            program_str = f'describe({metaconcept.capitalize()}, lambda k: {metaconcept}(k, {object_str}))'
            # Keep the bare object expression around for equal_* comparisons.
            index_to_result[i] = QueryXProgram(full_program=program_str, object_program=object_str)
        elif (op_type == 'equal_integer'):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'equal({e1}, {e2})'
            index_to_result[i] = program_str
        elif (op_type in ('greater_than', 'less_than')):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'{op_type}({e1}, {e2})'
            index_to_result[i] = program_str
        elif (op_type in ('equal_color', 'equal_material', 'equal_shape', 'equal_size')):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            op_type = op_type.replace('equal_', 'same_')
            # Compare the objects themselves, not their describe(...) wrappers.
            program_str = f'{op_type}({e1.object_program}, {e2.object_program})'
            index_to_result[i] = program_str
        else:
            raise ValueError(f'Unknown op type: {op_type}, {op}')
    ret = index_to_result[(len(program) - 1)]
    if isinstance(ret, QueryXProgram):
        ret = ret.full_program
    assert isinstance(ret, str)
    return ret
def filter(scene, name, input_):
    """Keep only objects in ``input_`` whose attribute value equals ``name``.

    'object' acts as a wildcard matching everything. The attribute to compare
    (e.g. size/color/material/shape) comes from the module-level
    g_concept2attribute mapping.

    NOTE: shadows the built-in ``filter`` within this module.
    """
    if (name == 'object'):
        return input_
    attribute = g_concept2attribute[name]
    return {i for i in input_ if (scene['objects'][i][attribute] == name)}
def multi_filter(scene, names, input_=None):
    """Apply filter() once per whitespace-separated concept in ``names``.

    input_: candidate object indices; defaults to all objects in the scene.
    Returns the surviving indices (``input_`` unchanged if ``names`` is empty).
    """
    if (input_ is None):
        input_ = range(len(scene['objects']))
    for name in names.split():
        input_ = filter(scene, name, input_)
    return input_
def relate(scene, name, input_):
    """Return the set of object indices that stand in relation ``name`` to the
    single object in ``input_``.

    Raises:
        ValueError: if ``input_`` does not contain exactly one object index.
    """
    if len(input_) != 1:
        raise ValueError()
    (anchor,) = input_
    return set(scene['relationships'][name][anchor])
def execute(scene, slot_dict):
    """Yield all assignments of scene objects to slots OBJ1..OBJ4 satisfying
    every relation constraint R0..R4 present in ``slot_dict``.

    Each OBJi value is a filter string (see multi_filter); each Ri value is a
    tuple (x, y, relation) with 1-based slot indices meaning "object in slot x
    is <relation> of object in slot y". Yields {1: idx, 2: idx, 3: idx, 4: idx}
    dicts. Note: itertools.product does not enforce distinct objects across
    slots.
    """
    objs_for_i = dict()
    for i in range(1, (4 + 1)):
        objs_for_i[i] = multi_filter(scene, slot_dict[f'OBJ{i}'])
    # Brute-force search over the Cartesian product of per-slot candidates.
    for objs in itertools.product(objs_for_i[1], objs_for_i[2], objs_for_i[3], objs_for_i[4]):
        succ = True
        for rel_i in range(5):
            if (f'R{rel_i}' not in slot_dict):
                continue
            (x, y, relation) = slot_dict[f'R{rel_i}']
            x = objs[(x - 1)]  # 1-based slot index -> chosen object index
            y = objs[(y - 1)]
            # scene['relationships'][rel][y] lists the objects that are <rel> of y.
            if (x not in scene['relationships'][relation][y]):
                succ = False
                break
        if succ:
            (yield {1: objs[0], 2: objs[1], 3: objs[2], 4: objs[3]})
def gen_all_filter_ops():
    """Yield every size/color/material/shape concept combination as a
    space-separated filter string.

    Each attribute may be omitted (''), except shape, which falls back to
    'object' so the resulting string is never empty. Concepts come from the
    module-level g_attribute_concepts dict.
    """
    for x in itertools.product((g_attribute_concepts['size'] + ['']), (g_attribute_concepts['color'] + ['']), (g_attribute_concepts['material'] + ['']), (g_attribute_concepts['shape'] + ['object'])):
        # Join the non-empty parts (the inner genexp deliberately shadows x).
        (yield ' '.join((x for x in x if x)))
def gen_all_relate_ops():
    """Return the four CLEVR spatial relation names as a list."""
    return 'left right front behind'.split()
def check_filter_unique(scene, x):
    """Return True iff filter string ``x`` matches exactly one object in ``scene``."""
    input_ = range(len(scene['objects']))
    return (len(multi_filter(scene, x, input_)) == 1)
def gen_filter_string(f, vname):
    """Render the space-separated concept string ``f`` as a conjunction of
    unary predicates over variable ``vname``, e.g. 'red cube' ->
    'red(x1) and cube(x1)'.

    The wildcard concept 'object' matches everything and is omitted; an
    all-wildcard input yields the empty string.
    """
    terms = []
    for concept in f.split():
        if concept == 'object':
            continue
        terms.append(f'{concept}({vname})')
    return ' and '.join(terms)
def get_possible_relations(scene, x, y):
    """Return every relation name r (from the module-level g_all_relate_ops)
    such that object ``x`` is r of object ``y`` in the scene."""
    return [r for r in g_all_relate_ops if (x in scene['relationships'][r][y])]
def gen(scene, nr_objects, nr_relations, make_wrong=False):
    """Generate a 'find N objects' puzzle for ``scene``.

    Samples nr_objects distinct objects, describes each with a *non-unique*
    filter (so no single clue gives the answer away), adds up to nr_relations
    pairwise relation constraints, and keeps the puzzle only if it has exactly
    one solution. With make_wrong=True, one relation is then perturbed until
    the puzzle becomes unsatisfiable.

    Returns (slot_dict, solution, object_indices) — the latter two are None
    for a 'wrong' puzzle — or None on failure. Scenes with fewer than 8
    objects are rejected.
    """
    if (len(scene['objects']) < 8):
        return None
    # For each object, collect the filter strings that match it plus at least
    # one other object (i.e. non-unique descriptions).
    object_to_nonunique = defaultdict(list)
    for f in g_all_filter_ops:
        objects = multi_filter(scene, f)
        if (len(objects) > 1):
            for obj in objects:
                object_to_nonunique[obj].append(f)
    solution = None
    for trial in range(1000):
        object_indices = random.sample(range(len(scene['objects'])), nr_objects)
        slot_dict = dict()
        for i in range(1, (nr_objects + 1)):
            slot_dict[f'OBJ{i}'] = random.choice(object_to_nonunique[object_indices[(i - 1)]])
        relation_indices = random.sample(list(itertools.combinations(range(nr_objects), 2)), nr_relations)
        for (i, (x, y)) in enumerate(relation_indices):
            possible_relations = get_possible_relations(scene, object_indices[x], object_indices[y])
            if (not possible_relations):
                # Stop adding relations for this trial; execute() still runs
                # with the constraints gathered so far.
                break
            slot_dict[f'R{i}'] = ((x + 1), (y + 1), random.choice(possible_relations))
        solutions = list(execute(scene, slot_dict))
        if (len(solutions) == 1):
            solution = (slot_dict, solutions[0], object_indices)
            break
    if (solution is None):
        return None
    if make_wrong:
        (slot_dict, _, _) = solution
        for trial in range(1000):
            # Replace one relation with a random one; accept once unsatisfiable.
            rel_index = random.choice(range(nr_relations))
            new_slot_dict = slot_dict.copy()
            new_slot_dict[f'R{rel_index}'] = (slot_dict[f'R{rel_index}'][:2] + (random.choice(g_all_relate_ops),))
            solutions = list(execute(scene, new_slot_dict))
            if (len(solutions) == 0):
                solution = (new_slot_dict, None, None)
                break
        # NOTE(review): if all 1000 perturbation trials fail, ``solution``
        # still holds the original solvable puzzle, so this None-check never
        # fires and a satisfiable puzzle is returned despite make_wrong=True.
        # Likely the intent was to reset solution to None before this loop —
        # confirm.
        if (solution is None):
            return None
    return solution
def gen_sentence_and_program(slot_dict):
    """Render a puzzle slot_dict as (English question, FOL program string).

    The sentence lists one clause per object description ('object i is a/an
    X') and one per relation constraint; the program is a four-level
    exists(...) wrapping the conjunction of all object and relation
    predicates.
    """
    fmt = 'Can you find four objects from the image such that: '
    constraints = list()
    program_parts = list()
    for i in range(1, (4 + 1)):
        d = slot_dict[f'OBJ{i}']
        # Choose the indefinite article by the description's first letter.
        if (d[0] in 'aeoiu'):
            constraints.append(f'object {i} is an {d}')
        else:
            constraints.append(f'object {i} is a {d}')
        program_d = gen_filter_string(d, f'x{i}')
        # An all-wildcard description produces no predicate at all.
        if (program_d != ''):
            program_parts.append(program_d)
    for i in range(5):
        if (f'R{i}' in slot_dict):
            (x, y, relation) = slot_dict[f'R{i}']
            # 'left/right of' vs. 'in front of / behind' phrasing.
            if (relation in ['left', 'right']):
                constraints.append(f'object {x} is {relation} of object {y}')
            else:
                constraints.append(f'object {x} is {relation} object {y}')
            program_parts.append(f'{relation}(x{x}, x{y})')
    return (((fmt + '; '.join(constraints)) + '.'), f"exists(Object, lambda x1: exists(Object, lambda x2: exists(Object, lambda x3: exists(Object, lambda x4: {' and '.join(program_parts)} ))))")
def main():
    """Build up to 100 four-object relational puzzles from CLEVR scenes.

    For each scene (until 100 puzzles are collected), flips a coin to decide
    whether the puzzle should be unsatisfiable, generates it with
    gen(scene, 4, 3), renders the question/program, and dumps the puzzle list
    as JSON.

    NOTE: reads ``args`` from module scope (argument parsing happens elsewhere).
    """
    scenes = jacinle.load_json(args.scenes_json)['scenes']
    puzzles = list()
    for (scene_index, scene) in enumerate(jacinle.tqdm(scenes)):
        if (len(puzzles) == 100):
            break
        wrong = bool(random.choice(range(2)))  # 50/50 unsatisfiable puzzle
        desired_answer = (not wrong)
        sol = gen(scene, 4, 3, make_wrong=wrong)
        if (sol is not None):
            (slot_dict, solution, solution_gt) = sol
            (sentence, program) = gen_sentence_and_program(slot_dict)
            puzzles.append({'image_index': scene_index, 'image_filename': scene['image_filename'], 'slot_dict': slot_dict, 'solution': solution, 'question': sentence, 'program': program, 'answer': desired_answer})
    jacinle.dump_json(args.output, {'puzzles': puzzles[:100]})
    print('Saved: "{}".'.format(args.output))
def filter(scene, name, input_):
    """Keep only objects in ``input_`` whose attribute value equals ``name``.

    'object' acts as a wildcard matching everything. The attribute to compare
    is looked up in the module-level g_concept2attribute mapping.

    NOTE: shadows the built-in ``filter`` within this module.
    """
    if (name == 'object'):
        return input_
    attribute = g_concept2attribute[name]
    return {i for i in input_ if (scene['objects'][i][attribute] == name)}
def multi_filter(scene, names, input_=None):
    """Apply filter() once per whitespace-separated concept in ``names``.

    input_: candidate object indices. Now defaults to all objects in the
        scene, making this definition consistent with the other
        ``multi_filter`` in this file; passing it explicitly behaves exactly
        as before.
    Returns the surviving indices (``input_`` unchanged if ``names`` is empty).
    """
    if input_ is None:
        input_ = range(len(scene['objects']))
    for name in names.split():
        input_ = filter(scene, name, input_)
    return input_
def relate(scene, name, input_):
    """Follow relation ``name`` from the unique object in ``input_`` and
    return the related object indices as a set.

    Raises:
        ValueError: unless ``input_`` contains exactly one object index.
    """
    if len(input_) != 1:
        raise ValueError()
    src = next(iter(input_))
    return set(scene['relationships'][name][src])
def execute(scene, program, template_slots):
    """Run a postfix (stack-based) reference program against a scene and
    return the single referred object index.

    Tokens: 'S' pushes the set of all object indices; 'AND' intersects the
    top two sets; 'OBJx' filters the top set by the concept string in
    template_slots (or pushes a singleton when the slot holds an int object
    index); 'Rx' follows the relation named in template_slots from the top
    set's unique object.

    Raises:
        ValueError: on an unknown token, a malformed program (final stack size
            != 1), or a non-unique referent.
    """
    stack = list()
    for token in program.split():
        if (token == 'S'):
            stack.append(set(range(len(scene['objects']))))
        elif (token == 'AND'):
            stack.append((stack.pop() & stack.pop()))
        elif token.startswith('OBJ'):
            concept_name = template_slots[token]
            if isinstance(concept_name, int):
                # Grounded slot: a concrete object index.
                stack.append({concept_name})
            else:
                stack.append(multi_filter(scene, concept_name, stack.pop()))
        elif token.startswith('R'):
            concept_name = template_slots[token]
            stack.append(relate(scene, concept_name, stack.pop()))
        else:
            raise ValueError('Unknown token: {}.'.format(token))
    if (len(stack) != 1):
        raise ValueError('Invalid program.')
    if (len(stack[0]) != 1):
        raise ValueError('Invalid program.')
    return list(stack[0])[0]
def gen_all_filter_ops():
    """Yield every size/color/material/shape concept combination as a
    space-separated filter string.

    Each attribute may be omitted (''), except shape, which falls back to
    'object' so the resulting string is never empty. Concepts come from the
    module-level g_attribute_concepts dict.
    """
    for x in itertools.product((g_attribute_concepts['size'] + ['']), (g_attribute_concepts['color'] + ['']), (g_attribute_concepts['material'] + ['']), (g_attribute_concepts['shape'] + ['object'])):
        # Join the non-empty parts (the inner genexp deliberately shadows x).
        (yield ' '.join((x for x in x if x)))
def gen_all_relate_ops():
    """List the spatial relation names available in CLEVR scenes."""
    horizontal = ['left', 'right']
    depth = ['front', 'behind']
    return horizontal + depth
def check_filter_unique(scene, x):
    """Return True iff filter string ``x`` matches exactly one object in ``scene``."""
    input_ = range(len(scene['objects']))
    return (len(multi_filter(scene, x, input_)) == 1)
def gen_filter_string(f, vname):
    """Render every concept in the space-separated string ``f`` as a unary
    predicate over ``vname``, joined with ' and '.

    Note: the placeholder concept 'object' is NOT filtered out here, so
    'object' renders as 'object(<vname>)'.
    """
    return ' and '.join(f'{concept}({vname})' for concept in f.split())
def ground_program1(scene, unique_filters):
    """Yield single-object referring expressions for ``scene``.

    For each filter string in unique_filters, runs the one-slot program
    'S OBJ1'; when it refers to exactly one object, renders a sentence from a
    random template in the module-level g_templates_1 and keeps, per referred
    object, the shortest sentence found. Yields
    (sentence, program, slot_program, slot_dict, obj) where program is the
    FOL form 'point(Object, lambda x: ...)' and slot_program is 'S OBJ1'.
    """
    program = 'S OBJ1'
    sentence_for_x = {}
    for f in unique_filters:
        slot_dict = {'OBJ1': f}
        try:
            obj = execute(scene, program, slot_dict)
        except ValueError:
            continue  # filter is not unique (or invalid) in this scene
        template = random.choice(g_templates_1)
        sentence = template.format(**slot_dict)
        sentence_len = len(sentence.split())
        # Keep the shortest sentence per object (word count is the last slot).
        if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
            sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        program = f'point(Object, lambda x: {obj1_string})'
        (yield (sentence, program, slot_program, slot_dict, obj))