code
stringlengths
17
6.64M
class PointnetSAModule(PointnetSAModuleMSG):
    """Pointnet set abstraction layer (single-scale wrapper over the MSG base).

    Parameters
    ----------
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    mlp : list
        Spec of the pointnet before the global max_pool
    bn : bool
        Use batchnorm
    """

    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None,
                 nsample: int = None, bn: bool = True, use_xyz: bool = True):
        # Delegate to the multi-scale base class with single-element lists.
        super().__init__(mlps=[mlp], npoint=npoint, radii=[radius],
                         nsamples=[nsample], bn=bn, use_xyz=use_xyz)
class PointnetSAModuleVotes(nn.Module):
    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
    with extra support for returning point indices for getting their GT votes
    """

    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None,
                 nsample: int = None, bn: bool = True, use_xyz: bool = True,
                 pooling: str = 'max', sigma: float = None, normalize_xyz: bool = False,
                 sample_uniformly: bool = False, ret_unique_cnt: bool = False):
        super().__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.pooling = pooling
        self.mlp_module = None
        self.use_xyz = use_xyz
        self.sigma = sigma
        if self.sigma is None:
            # Default RBF bandwidth to half the ball radius.
            self.sigma = self.radius / 2
        self.normalize_xyz = normalize_xyz
        self.ret_unique_cnt = ret_unique_cnt

        if npoint is not None:
            self.grouper = pointnet2_utils.QueryAndGroup(
                radius, nsample, use_xyz=use_xyz, ret_grouped_xyz=True,
                normalize_xyz=normalize_xyz, sample_uniformly=sample_uniformly,
                ret_unique_cnt=ret_unique_cnt)
        else:
            self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)

        # FIX: copy the spec before widening the input channel count. The
        # original did `mlp_spec[0] += 3` on the caller's list, silently
        # corrupting a spec that is reused to build several modules.
        mlp_spec = list(mlp)
        if use_xyz and len(mlp_spec) > 0:
            mlp_spec[0] += 3
        self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)

    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        """
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, mlp[-1], npoint) tensor of the new_features descriptors
        inds : torch.Tensor
            (B, npoint) tensor of the inds
        """
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        else:
            assert inds.shape[1] == self.npoint
        new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, inds)
                   .transpose(1, 2).contiguous()) if self.npoint is not None else None

        if not self.ret_unique_cnt:
            grouped_features, grouped_xyz = self.grouper(xyz, new_xyz, features)
        else:
            grouped_features, grouped_xyz, unique_cnt = self.grouper(xyz, new_xyz, features)

        new_features = self.mlp_module(grouped_features)
        if self.pooling == 'max':
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif self.pooling == 'avg':
            new_features = F.avg_pool2d(new_features, kernel_size=[1, new_features.size(3)])
        elif self.pooling == 'rbf':
            # RBF-weighted average over neighbours: exp(-d^2 / (2 sigma^2)).
            rbf = torch.exp(((-1) * grouped_xyz.pow(2).sum(1, keepdim=False) / (self.sigma ** 2)) / 2)
            new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample)
        new_features = new_features.squeeze(-1)

        if not self.ret_unique_cnt:
            return new_xyz, new_features, inds
        return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
    with extra support for returning point indices for getting their GT votes
    """

    def __init__(self, *, mlps: List[List[int]], npoint: int, radii: List[float],
                 nsamples: List[int], bn: bool = True, use_xyz: bool = True,
                 sample_uniformly: bool = False):
        super().__init__()
        assert len(mlps) == len(nsamples) == len(radii)
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
                                              sample_uniformly=sample_uniformly)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
            # FIX: copy the per-scale spec before widening the channel count;
            # the original did `mlps[i][0] += 3` in place, mutating the
            # caller's nested lists.
            mlp_spec = list(mlps[i])
            if use_xyz:
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))

    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        """
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        inds : torch.Tensor
            (B, npoint) tensor of the inds
        """
        new_features_list = []
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, inds)
                   .transpose(1, 2).contiguous()) if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)
            new_features = self.mlps[i](new_features)
            # Max-pool over the neighbourhood dimension.
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            new_features = new_features.squeeze(-1)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
    """Propagates the features of one set to another.

    Parameters
    ----------
    mlp : list
        Pointnet module parameters
    bn : bool
        Use batchnorm
    """

    def __init__(self, *, mlp: List[int], bn: bool = True):
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)

    def forward(self, unknown: torch.Tensor, known: torch.Tensor,
                unknow_feats: torch.Tensor, known_feats: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the xyz positions of the unknown features
        known : torch.Tensor
            (B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated

        Returns
        -------
        new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # Inverse-distance weighting over the three nearest known points.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-08)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # No known positions: broadcast the single feature set.
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))

        if unknow_feats is not None:
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)
        else:
            new_features = interpolated_feats

        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)
        return new_features.squeeze(-1)
class PointnetLFPModuleMSG(nn.Module):
    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
    learnable feature propagation layer.
    """

    def __init__(self, *, mlps: List[List[int]], radii: List[float], nsamples: List[int],
                 post_mlp: List[int], bn: bool = True, use_xyz: bool = True,
                 sample_uniformly: bool = False):
        super().__init__()
        assert len(mlps) == len(nsamples) == len(radii)
        self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
                                              sample_uniformly=sample_uniformly))
            # FIX: copy the per-scale spec before widening the channel count;
            # the original did `mlps[i][0] += 3` in place, mutating the
            # caller's nested lists.
            mlp_spec = list(mlps[i])
            if use_xyz:
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))

    def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
                features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
        """Propagate features from xyz1 to xyz2.

        Parameters
        ----------
        xyz2 : torch.Tensor
            (B, N2, 3) tensor of the xyz coordinates of the features
        xyz1 : torch.Tensor
            (B, N1, 3) tensor of the xyz coordinates of the features
        features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features

        Returns
        -------
        new_features1 : torch.Tensor
            (B, sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
        """
        new_features_list = []
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz1, xyz2, features1)
            new_features = self.mlps[i](new_features)
            # Max-pool over the neighbourhood dimension.
            new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            new_features = new_features.squeeze(-1)

            if features2 is not None:
                new_features = torch.cat([new_features, features2], dim=1)

            new_features = new_features.unsqueeze(-1)
            new_features = self.post_mlp(new_features)
            new_features_list.append(new_features)

        return torch.cat(new_features_list, dim=1).squeeze(-1)
def test_interpolation_grad():
    """Gradcheck three_interpolate against fixed indices/weights (requires CUDA)."""
    batch_size = 1
    feat_dim = 2
    m = 4
    feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()

    def interpolate_func(inputs):
        idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda()
        weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2, 2]]])).float().cuda()
        return pointnet2_utils.three_interpolate(inputs, idx, weight)

    # Loose tolerances: the op runs in float32.
    assert gradcheck(interpolate_func, feats, atol=0.1, rtol=0.1)
class SharedMLP(nn.Sequential):
    """Stack of 1x1 Conv2d layers (a point-wise MLP shared across positions)."""

    def __init__(self, args: List[int], *, bn: bool = False,
                 activation=nn.ReLU(inplace=True), preact: bool = False,
                 first: bool = False, name: str = ''):
        super().__init__()
        for i in range(len(args) - 1):
            # The very first layer of a pre-activated network gets neither
            # batch norm nor an activation in front of it.
            suppress = first and preact and i == 0
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(args[i], args[i + 1],
                       bn=bn and not suppress,
                       activation=None if suppress else activation,
                       preact=preact))
class _BNBase(nn.Sequential): def __init__(self, in_size, batch_norm=None, name=''): super().__init__() self.add_module((name + 'bn'), batch_norm(in_size)) nn.init.constant_(self[0].weight, 1.0) nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
    """1-D batch norm with constant (weight=1, bias=0) affine initialisation."""

    def __init__(self, in_size: int, *, name: str = ''):
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
    """2-D batch norm with constant (weight=1, bias=0) affine initialisation."""

    def __init__(self, in_size: int, name: str = ''):
        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
    """3-D batch norm with constant (weight=1, bias=0) affine initialisation."""

    def __init__(self, in_size: int, name: str = ''):
        super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential): def __init__(self, in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=None, batch_norm=None, bias=True, preact=False, name=''): super().__init__() bias = (bias and (not bn)) conv_unit = conv(in_size, out_size, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) init(conv_unit.weight) if bias: nn.init.constant_(conv_unit.bias, 0) if bn: if (not preact): bn_unit = batch_norm(out_size) else: bn_unit = batch_norm(in_size) if preact: if bn: self.add_module((name + 'bn'), bn_unit) if (activation is not None): self.add_module((name + 'activation'), activation) self.add_module((name + 'conv'), conv_unit) if (not preact): if bn: self.add_module((name + 'bn'), bn_unit) if (activation is not None): self.add_module((name + 'activation'), activation)
class Conv1d(_ConvBase):
    """Conv1d with optional BN/activation; kaiming-init weights, zero bias."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: int = 1,
                 stride: int = 1, padding: int = 0, activation=nn.ReLU(inplace=True),
                 bn: bool = False, init=nn.init.kaiming_normal_, bias: bool = True,
                 preact: bool = False, name: str = ''):
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation,
                         bn, init, conv=nn.Conv1d, batch_norm=BatchNorm1d,
                         bias=bias, preact=preact, name=name)
class Conv2d(_ConvBase):
    """Conv2d with optional BN/activation; kaiming-init weights, zero bias."""

    def __init__(self, in_size: int, out_size: int, *,
                 kernel_size: Tuple[int, int] = (1, 1),
                 stride: Tuple[int, int] = (1, 1),
                 padding: Tuple[int, int] = (0, 0),
                 activation=nn.ReLU(inplace=True), bn: bool = False,
                 init=nn.init.kaiming_normal_, bias: bool = True,
                 preact: bool = False, name: str = ''):
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation,
                         bn, init, conv=nn.Conv2d, batch_norm=BatchNorm2d,
                         bias=bias, preact=preact, name=name)
class Conv3d(_ConvBase):
    """Conv3d with optional BN/activation; kaiming-init weights, zero bias."""

    def __init__(self, in_size: int, out_size: int, *,
                 kernel_size: Tuple[int, int, int] = (1, 1, 1),
                 stride: Tuple[int, int, int] = (1, 1, 1),
                 padding: Tuple[int, int, int] = (0, 0, 0),
                 activation=nn.ReLU(inplace=True), bn: bool = False,
                 init=nn.init.kaiming_normal_, bias: bool = True,
                 preact: bool = False, name: str = ''):
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation,
                         bn, init, conv=nn.Conv3d, batch_norm=BatchNorm3d,
                         bias=bias, preact=preact, name=name)
class FC(nn.Sequential):
    """Linear layer with optional batch norm and activation (pre- or post-order)."""

    def __init__(self, in_size: int, out_size: int, *, activation=nn.ReLU(inplace=True),
                 bn: bool = False, init=None, preact: bool = False, name: str = ''):
        super().__init__()
        fc = nn.Linear(in_size, out_size, bias=not bn)
        if init is not None:
            init(fc.weight)
        if not bn:
            nn.init.constant_(fc.bias, 0)

        if preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(in_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)

        self.add_module(name + 'fc', fc)

        if not preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(out_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
    """Return a callable for nn.Module.apply that sets batch-norm momentum."""

    def fn(module):
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            module.momentum = bn_momentum

    return fn
class BNMomentumScheduler(object):
    """Epoch-wise scheduler for batch-norm momentum, mirroring lr schedulers."""

    def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default):
        if not isinstance(model, nn.Module):
            raise RuntimeError("Class '{}' is not a PyTorch nn Module".format(type(model).__name__))
        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda
        # Apply the initial momentum, then pin last_epoch back to its start value.
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch

    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))
def conv_branch_init(conv, branches):
    """Normal-init a branch conv with variance scaled by fan size and branch count; zero bias."""
    weight = conv.weight
    out_channels = weight.size(0)
    k1 = weight.size(1)
    k2 = weight.size(2)
    std = math.sqrt(2.0 / (out_channels * k1 * k2 * branches))
    nn.init.normal_(weight, 0, std)
    nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming (fan-out) init for a conv layer's weight; zero its bias when present.

    Fix: guard the bias init — conv layers constructed with ``bias=False`` have
    ``conv.bias is None``, and the original unconditionally called
    ``nn.init.constant_`` on it, which raises.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    if conv.bias is not None:
        nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
    """Set a batch-norm layer's weight to `scale` and its bias to zero."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)
class unit_tcn(nn.Module):
    """Temporal conv unit: Conv2d along the time axis followed by batch norm."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        pad = (kernel_size - 1) // 2  # 'same' padding along time
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(pad, 0),
                              stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()  # kept for parity with the original; forward does not apply it
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        return self.bn(self.conv(x))
class unit_gcn(nn.Module):
    """Adaptive graph-conv unit: fixed adjacency + learned offset + data-dependent attention."""

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gcn, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        # Learned additive adjacency, initialised near zero.
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant_(self.PA, 1e-06)
        # Fixed (non-trainable) adjacency.
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset

        self.conv_a = nn.ModuleList()
        self.conv_b = nn.ModuleList()
        self.conv_d = nn.ModuleList()
        for _ in range(self.num_subset):
            self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))

        if in_channels != out_channels:
            # Residual projection when channel counts differ.
            self.down = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1),
                                      nn.BatchNorm2d(out_channels))
        else:
            self.down = lambda x: x

        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        bn_init(self.bn, 1e-06)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        N, C, T, V = x.size()
        A = self.A
        if x.get_device() != -1:
            A = A.cuda(x.get_device())
        A = A + self.PA

        y = None
        for i in range(self.num_subset):
            # Data-dependent attention adjacency: softmax of embedded similarity.
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
            A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
            A1 = self.soft(torch.matmul(A1, A2) / A1.size(-1))
            A1 = A1 + A[i]
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z

        y = self.bn(y)
        y += self.down(x)
        return self.relu(y)
class TCN_GCN_unit(nn.Module):
    """Graph conv followed by temporal conv, with a residual connection."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            # Channel/stride mismatch: project with a kernel-1 temporal conv.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        out = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(out)
class SceneGraphSkeleton(nn.Module):
    """AGCN-style skeleton backbone with three linear heads (attributes, relations, vocab)."""

    def __init__(self, num_attribute_concepts, num_output_vocab, include_fully_connected=True,
                 num_class=224, num_point=22, num_person=1, graph_args=dict(), in_channels=3):
        super(SceneGraphSkeleton, self).__init__()
        self.graph = AGCNGraph(**graph_args)
        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
        self.l1 = TCN_GCN_unit(3, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)
        self.fc = nn.Linear(256, num_attribute_concepts)
        self.fc_rel = nn.Linear(256, num_class)
        self.fc_output_vocab = nn.Linear(256, num_output_vocab)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2.0 / num_class))
        bn_init(self.data_bn, 1)
        self.include_fully_connected = include_fully_connected

    def forward(self, x):
        N, C, T, V, M = x.size()
        # (N, C, T, V, M) -> (N, M*V*C, T) for per-joint batch normalisation.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        # Back to (N*M, C, T, V) for the graph-conv stack.
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
        for layer in (self.l1, self.l2, self.l3, self.l4, self.l5,
                      self.l6, self.l7, self.l8, self.l9, self.l10):
            x = layer(x)
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)  # pool over time*joints, then over persons
        if self.include_fully_connected:
            return self.fc(x), self.fc_rel(x), self.fc_output_vocab(x)
        return x, x, x
def run_gpt(questions, prompts, temperature: float = 1.0, use_user_message: bool = False):
    """Query gpt-3.5-turbo once, retrying up to 10 times on rate-limit/API errors."""
    query_str = '\n'.join(['<text>{}</text>'.format(q) for q in questions])
    response = None
    for _ in range(10):
        try:
            response = openai.ChatCompletion.create(
                model='gpt-3.5-turbo',
                messages=[
                    {'role': 'user' if use_user_message else 'system', 'content': prompts['system']},
                    {'role': 'user', 'content': prompts['user'] + query_str},
                ],
                max_tokens=1024,
                temperature=temperature)
            break
        except openai.error.RateLimitError:
            print('Rate limit exceeded, retrying...')
            time.sleep(30)
        except (openai.error.InvalidRequestError, openai.error.APIError, openai.error.APIConnectionError):
            print('API error, retrying...')
            time.sleep(30)
    assert response is not None
    return {'questions': questions,
            'response': response['choices'][0]['message']['content'],
            'raw_response': response}
def fix_parentheses(string):
    """Balance parentheses: drop unmatched ')' and append ')' for each unmatched '('."""
    open_depth = 0
    pieces = []
    for ch in string:
        if ch == '(':
            open_depth += 1
            pieces.append(ch)
        elif ch == ')':
            if open_depth > 0:
                open_depth -= 1
                pieces.append(ch)
            # an unmatched closer is silently dropped
        else:
            pieces.append(ch)
    pieces.append(')' * open_depth)
    return ''.join(pieces)
def extract_from_gpt(results_str, expected_batch_size: int):
    """Parse the <code>...</code> snippets out of a GPT response.

    Raises ValueError when the number of snippets differs from the batch size.
    """
    results = []
    for segment in results_str.split('<code>')[1:]:
        snippet = segment.split('</code>')[0].strip()
        if snippet.startswith('describe('):
            # Rewrite describe(X, iota(...)) into the lambda form.
            snippet = re.sub('describe\\(([a-zA-Z]*?),\\s*iota\\((.*)\\)\\)',
                             'describe(\\1, lambda k: \\1(k, iota(\\2)))',
                             snippet)
        results.append(fix_parentheses(snippet))
    if len(results) != expected_batch_size:
        raise ValueError(f'Expected {expected_batch_size} results, but got {len(results)}.')
    return results
def main():
    """CLI driver: sample questions, batch-query GPT-3.5, and export the mappings."""
    parser = jacinle.JacArgumentParser()
    parser.add_argument('--dataset', type=str, default='clevr', choices=['clevr', 'referit'])
    parser.add_argument('--questions', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--prompt', type=str, required=True)
    parser.add_argument('--sample', type=int, default=0)
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--temperature', type=float, default=1.0)
    parser.add_argument('--use-user-message', action='store_true')
    parser.add_argument('--append', action='store_true')
    parser.add_argument('--based-on', type=str, default=None)
    args = parser.parse_args()

    assert args.output.endswith('.pkl')
    args.output_gpt = args.output.replace('.pkl', '.gpt.pkl')
    args.output_export = args.output.replace('.pkl', '.export.pkl')

    if args.based_on is not None:
        assert osp.exists(args.based_on)
        based_on = io.load(args.based_on)
    else:
        based_on = dict()

    if args.append:
        if not osp.exists(args.output_export):
            args.append = False
    else:
        ask = False
        if osp.exists(args.output_gpt):
            ask = True
            print(f'Output file {args.output} already exists.')
        if osp.exists(args.output_export):
            ask = True
            print(f'Output file {args.output} already exists.')
        if ask:
            if not jacinle.yes_or_no('Continue running will overwrite the existing files. Continue?', default='no'):
                return

    with open(args.prompt) as f:
        prompts_str = f.read()
    system_prmopt, user_prompt = prompts_str.split('----')
    prompts = {'system': system_prmopt.strip(), 'user': user_prompt.strip()}

    rows = []
    rows.append(('System Prompt', prompts['system']))
    rows.append(('User Prompt', prompts['user']))
    print(jacinle.tabulate(rows, headers=['name', 'prompt']))

    if args.dataset == 'clevr':
        questions = io.load(args.questions)['questions']
        questions = sorted({q['question'] for q in questions})
    elif args.dataset == 'referit':
        import pandas as pd
        df = pd.read_csv(args.questions)
        questions = df['utterance'].tolist()
    else:
        raise ValueError(f'Unknown dataset: {args.dataset}')

    if args.sample > 0:
        sampled_questions = random.sample(questions, args.sample)
        sampled_questions = list(set(sampled_questions))
    else:
        sampled_questions = list(set(questions))
    if based_on is not None:
        sampled_questions = [q for q in sampled_questions if q not in based_on]

    if not args.append:
        gpt_results = list()
        mappings = dict()
    else:
        gpt_results = io.load(args.output_gpt)
        mappings = io.load(args.output_export)
        old_length = len(sampled_questions)
        sampled_questions = [q for q in sampled_questions if q not in mappings]
        print(f'Removed {old_length - len(sampled_questions)} questions that have already been processed.')

    total_gpt_queries = 0
    meters = jacinle.GroupMeters()
    with jacinle.tqdm_pbar(total=len(sampled_questions), desc='Running GPT-3.5') as pbar:
        while len(sampled_questions) > 0:
            questions_batch = list(random.sample(
                sampled_questions, min(args.batch_size, len(sampled_questions))))
            gpt_response = run_gpt(questions_batch, prompts, args.temperature, args.use_user_message)
            total_gpt_queries += 1
            results_str = gpt_response['response']
            result_batch = None
            try:
                result_batch = extract_from_gpt(results_str, args.batch_size)
            except ValueError:
                # Malformed batch: count a failure and retry with a new sample.
                pass
            if result_batch is not None:
                gpt_results.append(gpt_response)
                for q, r in zip(questions_batch, result_batch):
                    mappings[q] = [r]
                sampled_questions = [q for q in sampled_questions if q not in questions_batch]
                meters.update('batch-succ', 1)
                pbar.update(len(questions_batch))
            else:
                meters.update('batch-succ', 0)
            status_values = {k: v.avg for k, v in meters.items()}
            status_values['total-gpt-queries'] = total_gpt_queries
            pbar.set_description(meters.format_simple('Runing GPT-3.5:', status_values, compressed=True))
    io.dump(args.output_gpt, gpt_results)
    io.dump(args.output_export, mappings)
def run_gpt(questions, prompts):
    """Query GPT-4 once, retrying forever on API errors.

    Fixes: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit,
    making the retry loop impossible to interrupt — narrowed to ``Exception``.
    The per-iteration ``import time`` was hoisted out of the loop.
    """
    import time

    query_str = '\n'.join(['<text>{}</text>'.format(q) for q in questions])
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4',
                temperature=0.7,
                messages=[{'role': 'system', 'content': prompts['system']},
                          {'role': 'user', 'content': (prompts['user'] + query_str)}],
                max_tokens=1024)
        except Exception:
            print('Sleeping', flush=True)
            time.sleep(60)
        else:
            print('Success', flush=True)
            break
    return {'questions': questions,
            'response': response['choices'][0]['message']['content'],
            'raw_response': response}
def fix_parentheses(string):
    """Return `string` with unmatched ')' removed and missing ')' appended at the end."""
    depth = 0
    out = []
    for ch in string:
        if ch == '(':
            depth += 1
        elif ch == ')':
            if depth == 0:
                continue  # drop an unmatched closer
            depth -= 1
        out.append(ch)
    return ''.join(out) + ')' * depth
def main():
    """CLI driver: load questions, run GPT-4 one question at a time, dump results."""
    parser = jacinle.JacArgumentParser()
    parser.add_argument('--dataset', type=str, default='clevr',
                        choices=['clevr-rpms', 'clevr-puzzles', 'clevr-refexps', 'referit'])
    parser.add_argument('--questions', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--prompt', type=str, required=True)
    parser.add_argument('--sample-size', type=int, default=100)
    args = parser.parse_args()

    assert args.output.endswith('.pkl')

    with open(args.prompt) as f:
        prompts_str = f.read()
    system_prmopt, user_prompt = prompts_str.split('----')
    prompts = {'system': system_prmopt.strip(), 'user': user_prompt.strip()}

    print('System prompt:')
    print(prompts['system'])
    print('-' * 80)
    print('User prompt:')
    print(prompts['user'])

    if args.dataset.startswith('clevr'):
        with open(args.questions) as f:
            d = json.load(f)
        key_name = args.dataset.split('-')[1]
        questions = [this_d['question'] for this_d in d[key_name]]
        print(questions)
    elif args.dataset == 'referit':
        import pandas as pd
        df = pd.read_csv(args.questions)
        questions = df['utterance'].tolist()
    else:
        raise ValueError(f'Unknown dataset: {args.dataset}')

    if not osp.exists(args.output):
        sampled_questions = questions
        batch_size = 1
        results = list()
        start_time = time.time()
        for i in range(0, len(sampled_questions), batch_size):
            elapsed = time.time() - start_time
            print('\rProcessing {}:{} / {}, time elapsed: {:.2f}s speed: {:.2f}q/s, eta={:.2f}s'.format(
                i, i + batch_size, len(sampled_questions), elapsed, i / elapsed,
                ((len(sampled_questions) - i) / (i / elapsed)) if i > 0 else 0.0), end='')
            questions_batch = sampled_questions[i:i + batch_size]
            results.append(run_gpt(questions_batch, prompts))
        print('')
        io.set_fs_verbose()
        io.dump(args.output, {'questions': sampled_questions, 'results': results})
    else:
        print('Output file already exists: directly loading from disk.')
        output_json = io.load(args.output)
        questions = output_json['questions']
        results = output_json['results']
def main(args):
    """Convert CLEVR programs to FOL strings and dump a question -> program map."""
    questions = jacinle.load(args.input)['questions']
    output = dict()
    for q in questions:
        question_str = q['question']
        fol_program_str = transform(q['program'])
        output[question_str] = fol_program_str
        print(question_str)
        print(fol_program_str)
    jacinle.dump(args.output, output)
@dataclass
class QueryXProgram(object):
    """Result of a query_* op: the full describe(...) wrapper plus the inner object term."""
    full_program: str
    object_program: str
def get_op_type(op):
    """Return the operation name, tolerating both the 'type' and 'function' key names."""
    return op['type'] if 'type' in op else op['function']
def transform(program):
    """Translate a CLEVR functional program (list of ops) into a first-order-logic string.

    Each op's result is memoised in `index_to_result`; filter chains carry a
    (partial_expression, variable) pair, query_* ops carry a QueryXProgram,
    and terminal ops carry a plain string.
    """
    index_to_result = dict()
    variable_counter = 0
    for i, op in enumerate(program):
        op_type = get_op_type(op)
        if op_type == 'scene':
            # Fresh variable with an empty constraint.
            variable_counter += 1
            index_to_result[i] = ('', f'x{variable_counter}')
        elif op_type in ('filter_size', 'filter_color', 'filter_material', 'filter_shape'):
            program_str, variable = index_to_result[op['inputs'][0]]
            this_program_str = f"{op['value_inputs'][0]}({variable})"
            program_str = (this_program_str + ' and ' + program_str) if program_str else this_program_str
            index_to_result[i] = (program_str, variable)
        elif op_type == 'unique':
            inner, variable = index_to_result[op['inputs'][0]]
            program_str = f'iota(Object, lambda {variable}: {inner})'
            index_to_result[i] = (program_str, None)
        elif op_type == 'relate':
            variable_counter += 1
            variable = f'x{variable_counter}'
            inner, _ = index_to_result[op['inputs'][0]]
            program_str = f"{op['value_inputs'][0]}({variable}, {inner})"
            index_to_result[i] = (program_str, variable)
        elif op_type in ('same_size', 'same_color', 'same_material', 'same_shape'):
            variable_counter += 1
            variable = f'x{variable_counter}'
            inner, _ = index_to_result[op['inputs'][0]]
            program_str = f'{op_type}({variable}, {inner})'
            index_to_result[i] = (program_str, variable)
        elif op_type == 'intersect' or op_type == 'union':
            e1, v1 = index_to_result[op['inputs'][1]]
            e2, v2 = index_to_result[op['inputs'][0]]
            if e1 == '':
                index_to_result[i] = (e2, v2)
            elif e2 == '':
                index_to_result[i] = (e1, v1)
            else:
                assert v1 in e1 and v2 in e2
                # Rename both sides to a shared fresh variable before combining.
                variable_counter += 1
                variable = f'x{variable_counter}'
                if op_type == 'intersect':
                    program_str = f'{e1.replace(v1, variable)} and {e2.replace(v2, variable)}'
                else:
                    program_str = f'({e1.replace(v1, variable)} or {e2.replace(v2, variable)})'
                index_to_result[i] = (program_str, variable)
        elif op_type in ('count', 'exist'):
            inner, variable = index_to_result[op['inputs'][0]]
            if inner == '':
                inner = f'thing({variable})'
            if op_type == 'exist':
                op_type = 'exists'
            program_str = f'{op_type}(Object, lambda {variable}: {inner})'
            index_to_result[i] = program_str
        elif op_type in ('query_shape', 'query_color', 'query_material', 'query_size'):
            metaconcept = op_type.split('_')[1]
            object_str, _ = index_to_result[op['inputs'][0]]
            program_str = f'describe({metaconcept.capitalize()}, lambda k: {metaconcept}(k, {object_str}))'
            index_to_result[i] = QueryXProgram(full_program=program_str, object_program=object_str)
        elif op_type == 'equal_integer':
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'equal({e1}, {e2})'
            index_to_result[i] = program_str
        elif op_type in ('greater_than', 'less_than'):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'{op_type}({e1}, {e2})'
            index_to_result[i] = program_str
        elif op_type in ('equal_color', 'equal_material', 'equal_shape', 'equal_size'):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            op_type = op_type.replace('equal_', 'same_')
            program_str = f'{op_type}({e1.object_program}, {e2.object_program})'
            index_to_result[i] = program_str
        else:
            raise ValueError(f'Unknown op type: {op_type}, {op}')

    ret = index_to_result[len(program) - 1]
    if isinstance(ret, QueryXProgram):
        ret = ret.full_program
    assert isinstance(ret, str)
    return ret
def filter(scene, name, input_):
    """Keep object indices whose attribute value equals `name` ('object' matches all).

    NOTE(review): shadows the built-in `filter`; name kept for existing callers.
    """
    if name == 'object':
        return input_
    attribute = g_concept2attribute[name]
    return {idx for idx in input_ if scene['objects'][idx][attribute] == name}
def multi_filter(scene, names, input_=None):
    """Apply filter() successively for each whitespace-separated concept in `names`."""
    if input_ is None:
        input_ = range(len(scene['objects']))
    for concept in names.split():
        input_ = filter(scene, concept, input_)
    return input_
def relate(scene, name, input_):
    """Return the set of objects related (by `name`) to the single object in `input_`.

    Raises ValueError unless `input_` contains exactly one index.
    """
    if len(input_) != 1:
        raise ValueError()
    (anchor,) = input_
    return set(scene['relationships'][name][anchor])
def execute(scene, slot_dict):
    """Yield every assignment of scene objects to the four OBJ slots satisfying all R constraints.

    ``slot_dict`` holds filter strings under 'OBJ1'..'OBJ4' and optional
    (x, y, relation) triples under 'R0'..'R4' (x, y are 1-based slot indices).
    Yields dicts mapping slot number (1-4) -> object index.
    """
    objs_for_i = dict()
    for i in range(1, (4 + 1)):
        # Candidate object indices for each slot, from its filter string.
        objs_for_i[i] = multi_filter(scene, slot_dict[f'OBJ{i}'])
    # Brute force over the full cross product of candidates.
    for objs in itertools.product(objs_for_i[1], objs_for_i[2], objs_for_i[3], objs_for_i[4]):
        succ = True
        for rel_i in range(5):
            if (f'R{rel_i}' not in slot_dict):
                continue
            (x, y, relation) = slot_dict[f'R{rel_i}']
            # Resolve 1-based slot indices to concrete object indices.
            x = objs[(x - 1)]
            y = objs[(y - 1)]
            if (x not in scene['relationships'][relation][y]):
                succ = False
                break
        if succ:
            # NOTE(review): assignments may reuse the same object in several
            # slots — presumably acceptable downstream; confirm with callers.
            (yield {1: objs[0], 2: objs[1], 3: objs[2], 4: objs[3]})
def gen_all_filter_ops():
    """Yield every concept combination usable as a filter string.

    Each value is a space-joined pick of one (possibly empty) size, color and
    material choice plus a shape (or the wildcard 'object').
    """
    sizes = g_attribute_concepts['size'] + ['']
    colors = g_attribute_concepts['color'] + ['']
    materials = g_attribute_concepts['material'] + ['']
    shapes = g_attribute_concepts['shape'] + ['object']
    for combo in itertools.product(sizes, colors, materials, shapes):
        yield ' '.join(part for part in combo if part)
def gen_all_relate_ops():
    """Return the list of supported spatial relations."""
    relations = ['left', 'right', 'front', 'behind']
    return relations
def check_filter_unique(scene, x):
    """Return True iff filter string ``x`` selects exactly one object in ``scene``."""
    all_indices = range(len(scene['objects']))
    matched = multi_filter(scene, x, all_indices)
    return len(matched) == 1
def gen_filter_string(f, vname):
    """Render filter string ``f`` as a conjunction of concept calls on ``vname``.

    The wildcard concept 'object' is dropped; an all-wildcard filter yields ''.
    """
    calls = []
    for concept in f.split():
        if concept != 'object':
            calls.append(f'{concept}({vname})')
    return ' and '.join(calls)
def get_possible_relations(scene, x, y):
    """List every relation r such that object ``x`` stands in r to object ``y`` in ``scene``."""
    possible = []
    for relation in g_all_relate_ops:
        if x in scene['relationships'][relation][y]:
            possible.append(relation)
    return possible
def gen(scene, nr_objects, nr_relations, make_wrong=False):
    """Sample a multi-object puzzle over ``scene`` with a unique solution (or none, if ``make_wrong``).

    Returns (slot_dict, solution, object_indices) on success, or
    (slot_dict, None, None) when ``make_wrong`` corrupted a relation so that
    no solution exists. Returns None when sampling fails.
    """
    if len(scene['objects']) < 8:
        # Small scenes rarely admit non-unique filters; skip them.
        return None
    # For each object: filter strings that match it but are NOT unique, so a
    # single filter alone cannot solve the puzzle.
    object_to_nonunique = defaultdict(list)
    for f in g_all_filter_ops:
        objects = multi_filter(scene, f)
        if len(objects) > 1:
            for obj in objects:
                object_to_nonunique[obj].append(f)
    solution = None
    for trial in range(1000):
        # Pick target objects and an ambiguous filter for each slot.
        object_indices = random.sample(range(len(scene['objects'])), nr_objects)
        slot_dict = dict()
        for i in range(1, nr_objects + 1):
            slot_dict[f'OBJ{i}'] = random.choice(object_to_nonunique[object_indices[i - 1]])
        # Add pairwise relation constraints that hold for the targets.
        relation_indices = random.sample(list(itertools.combinations(range(nr_objects), 2)), nr_relations)
        for i, (x, y) in enumerate(relation_indices):
            possible_relations = get_possible_relations(scene, object_indices[x], object_indices[y])
            if not possible_relations:
                break
            slot_dict[f'R{i}'] = (x + 1, y + 1, random.choice(possible_relations))
        # Accept only puzzles whose constraints pin down exactly one assignment.
        solutions = list(execute(scene, slot_dict))
        if len(solutions) == 1:
            solution = (slot_dict, solutions[0], object_indices)
            break
    if solution is None:
        return None
    if make_wrong:
        # Corrupt one relation so the puzzle becomes unsatisfiable.
        slot_dict, _, _ = solution
        # BUG FIX: previously `solution` was never reset here, so the
        # `solution is None` check below was dead and a failed corruption
        # silently returned the still-solvable puzzle (mislabeled by callers).
        solution = None
        for trial in range(1000):
            rel_index = random.choice(range(nr_relations))
            new_slot_dict = slot_dict.copy()
            new_slot_dict[f'R{rel_index}'] = slot_dict[f'R{rel_index}'][:2] + (random.choice(g_all_relate_ops),)
            solutions = list(execute(scene, new_slot_dict))
            if len(solutions) == 0:
                solution = (new_slot_dict, None, None)
                break
        if solution is None:
            return None
    return solution
def gen_sentence_and_program(slot_dict):
    """Render a slot dict as (English question, FOL program string).

    Returns the natural-language puzzle description and the corresponding
    exists-nested logical form over variables x1..x4.
    """
    fmt = 'Can you find four objects from the image such that: '
    constraints = list()
    program_parts = list()
    for i in range(1, (4 + 1)):
        d = slot_dict[f'OBJ{i}']
        # Choose the article by the description's leading vowel.
        if (d[0] in 'aeoiu'):
            constraints.append(f'object {i} is an {d}')
        else:
            constraints.append(f'object {i} is a {d}')
        program_d = gen_filter_string(d, f'x{i}')
        if (program_d != ''):
            # Empty string means the slot is the unconstrained 'object' filter.
            program_parts.append(program_d)
    for i in range(5):
        if (f'R{i}' in slot_dict):
            (x, y, relation) = slot_dict[f'R{i}']
            if (relation in ['left', 'right']):
                # 'left'/'right' read as "left of"/"right of" in English.
                constraints.append(f'object {x} is {relation} of object {y}')
            else:
                constraints.append(f'object {x} is {relation} object {y}')
            program_parts.append(f'{relation}(x{x}, x{y})')
    return (((fmt + '; '.join(constraints)) + '.'), f"exists(Object, lambda x1: exists(Object, lambda x2: exists(Object, lambda x3: exists(Object, lambda x4: {' and '.join(program_parts)} ))))")
def main():
    """Generate up to 100 four-object relational puzzles from the scene file and dump them as JSON."""
    scenes = jacinle.load_json(args.scenes_json)['scenes']
    puzzles = list()
    for (scene_index, scene) in enumerate(jacinle.tqdm(scenes)):
        if (len(puzzles) == 100):
            break
        # Fair coin: generate a solvable puzzle or a corrupted (unsolvable) one.
        wrong = bool(random.choice(range(2)))
        desired_answer = (not wrong)
        sol = gen(scene, 4, 3, make_wrong=wrong)
        if (sol is not None):
            (slot_dict, solution, solution_gt) = sol
            (sentence, program) = gen_sentence_and_program(slot_dict)
            puzzles.append({'image_index': scene_index, 'image_filename': scene['image_filename'], 'slot_dict': slot_dict, 'solution': solution, 'question': sentence, 'program': program, 'answer': desired_answer})
    jacinle.dump_json(args.output, {'puzzles': puzzles[:100]})
    print('Saved: "{}".'.format(args.output))
def filter(scene, name, input_):
    """Select indices from ``input_`` whose ``scene`` object carries concept ``name``.

    NOTE: intentionally shadows the builtin ``filter``.
    """
    # The wildcard 'object' matches everything.
    if name == 'object':
        return input_
    attr = g_concept2attribute[name]
    objects = scene['objects']
    return {idx for idx in input_ if objects[idx][attr] == name}
def multi_filter(scene, names, input_):
    """Successively narrow ``input_`` by each whitespace-separated concept in ``names``."""
    result = input_
    for concept in names.split():
        result = filter(scene, concept, result)
    return result
def relate(scene, name, input_):
    """Map the unique object in ``input_`` to the set of indices it is ``name``-related to.

    Raises ValueError unless ``input_`` has exactly one element.
    """
    if len(input_) != 1:
        raise ValueError()
    anchor = next(iter(input_))
    return set(scene['relationships'][name][anchor])
def execute(scene, program, template_slots):
    """Evaluate a space-separated postfix slot program on ``scene``; return the unique result index.

    Tokens: 'S' pushes the set of all object indices; 'AND' intersects the top
    two sets; 'OBJ*' filters the top set by its slot's filter string (or
    pushes a singleton when the slot holds a concrete int index); 'R*'
    follows the slot's relation from the unique object in the top set.
    Raises ValueError unless evaluation ends with a single singleton set.
    """
    stack = list()
    for token in program.split():
        if (token == 'S'):
            stack.append(set(range(len(scene['objects']))))
        elif (token == 'AND'):
            stack.append((stack.pop() & stack.pop()))
        elif token.startswith('OBJ'):
            concept_name = template_slots[token]
            if isinstance(concept_name, int):
                # Slot already grounded to a concrete object index — note
                # this pushes without popping, unlike the filter case.
                stack.append({concept_name})
            else:
                stack.append(multi_filter(scene, concept_name, stack.pop()))
        elif token.startswith('R'):
            concept_name = template_slots[token]
            # relate() raises ValueError when the popped set is not a singleton.
            stack.append(relate(scene, concept_name, stack.pop()))
        else:
            raise ValueError('Unknown token: {}.'.format(token))
    if (len(stack) != 1):
        raise ValueError('Invalid program.')
    if (len(stack[0]) != 1):
        raise ValueError('Invalid program.')
    return list(stack[0])[0]
def gen_all_filter_ops():
    """Yield all space-joined size/color/material/shape filter combinations."""
    axes = (
        g_attribute_concepts['size'] + [''],
        g_attribute_concepts['color'] + [''],
        g_attribute_concepts['material'] + [''],
        g_attribute_concepts['shape'] + ['object'],
    )
    for combo in itertools.product(*axes):
        yield ' '.join(c for c in combo if c)
def gen_all_relate_ops():
    """Return the supported spatial relations, in canonical order."""
    return ['left', 'right'] + ['front', 'behind']
def check_filter_unique(scene, x):
    """True iff filter string ``x`` matches exactly one object in the scene."""
    universe = range(len(scene['objects']))
    return len(multi_filter(scene, x, universe)) == 1
def gen_filter_string(f, vname):
    """Render every concept in ``f`` (space-separated) as ``concept(vname)`` joined by ' and '.

    Unlike some sibling variants, the wildcard 'object' is NOT filtered out here.
    """
    rendered = []
    for concept in f.split():
        rendered.append(f'{concept}({vname})')
    return ' and '.join(rendered)
def ground_program1(scene, unique_filters):
    """Yield depth-1 referring expressions: objects uniquely named by a single filter.

    For each uniquely-referring filter, keeps only the shortest sentence per
    target object, then yields tuples
    (sentence, FOL program, slot program, slot dict, object index).
    """
    program = 'S OBJ1'
    sentence_for_x = {}
    for f in unique_filters:
        slot_dict = {'OBJ1': f}
        try:
            obj = execute(scene, program, slot_dict)
        except ValueError:
            # Filter does not pick out exactly one object; skip it.
            continue
        template = random.choice(g_templates_1)
        sentence = template.format(**slot_dict)
        sentence_len = len(sentence.split())
        # Prefer the shortest description for each object (length is the
        # last element of the stored tuple).
        if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
            sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        program = f'point(Object, lambda x: {obj1_string})'
        (yield (sentence, program, slot_program, slot_dict, obj))
def ground_program2(scene, unique_filters):
    """Yield depth-2 referring expressions: "the <f> that is <r> of <unique anchor>".

    Anchors come from ground_program1; keeps the shortest sentence per target
    object and yields (sentence, FOL program, slot program, slot dict, object).
    """
    program = 'S OBJ2 R1 OBJ1'
    program1 = ground_program1(scene, unique_filters)
    sentence_for_x = {}
    for (_, _, _, slot_dict1, obj2) in program1:
        for f in g_all_filter_ops:
            for r in g_all_relate_ops:
                # Execute with OBJ2 grounded to the anchor's object index
                # (note the 'S'-less program string used here).
                slot_dict = {'OBJ1': f, 'OBJ2': obj2, 'R1': r}
                try:
                    obj = execute(scene, 'OBJ2 R1 OBJ1', slot_dict)
                except ValueError:
                    continue
                template = random.choice(g_templates_2)
                # Re-bind OBJ2 to the anchor's textual filter for the sentence.
                slot_dict = {'OBJ1': f, 'OBJ2': slot_dict1['OBJ1'], 'R1': r}
                sentence = template.format(**slot_dict)
                sentence_len = len(sentence.split())
                if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                    sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        program = f'point(Object, lambda x: {obj1_string} and {r}(x, iota(Object, lambda y: {obj2_string})))'
        (yield (sentence, program, slot_program, slot_dict, obj))
def ground_program3(scene, unique_filters):
    """Yield depth-3 referring expressions anchored on TWO distinct unique objects.

    Target satisfies a filter plus one relation to each anchor; keeps the
    shortest sentence per target object and yields
    (sentence, FOL program, slot program, slot dict, object).
    """
    program = 'S OBJ3 R2 S OBJ2 R1 AND OBJ1'
    program1 = ground_program1(scene, unique_filters)
    sentence_for_x = {}
    for (_, _, _, slot_dict1, obj2) in program1:
        for (_, _, _, slot_dict2, obj3) in program1:
            if (obj2 == obj3):
                continue
            for f in g_all_filter_ops:
                for r1 in g_all_relate_ops:
                    for r2 in g_all_relate_ops:
                        # Execute with both anchors grounded to object indices.
                        slot_dict = {'OBJ1': f, 'OBJ2': obj2, 'R1': r1, 'OBJ3': obj3, 'R2': r2}
                        try:
                            obj = execute(scene, 'OBJ3 R2 OBJ2 R1 AND OBJ1', slot_dict)
                        except ValueError:
                            continue
                        template = random.choice(g_templates_3)
                        # Swap the anchor indices for their textual filters.
                        slot_dict = {'OBJ1': f, 'OBJ2': slot_dict1['OBJ1'], 'R1': r1, 'OBJ3': slot_dict2['OBJ1'], 'R2': r2}
                        sentence = template.format(**slot_dict)
                        sentence_len = len(sentence.split())
                        if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                            sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r1 = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        r2 = slot_dict['R2']
        obj3_string = gen_filter_string(slot_dict['OBJ3'], 'z')
        program = f'point(Object, lambda x: {obj1_string} and {r1}(x, iota(Object, lambda y: {obj2_string})) and {r2}(x, iota(Object, lambda z: {obj3_string})))'
        (yield (sentence, program, slot_program, slot_dict, obj))
    # NOTE(review): dead code — in a generator this return value only lands in
    # StopIteration.value, which no caller appears to read.
    return [x[:(- 1)] for x in sentence_for_x.values()]
def ground_program4(scene, unique_filters):
    """Yield depth-4 referring expressions: a chain "<f> R1 (<f2> R2 <unique anchor>)".

    Builds on ground_program2 results; keeps the shortest sentence per target
    object and yields (sentence, FOL program, slot program, slot dict, object).
    """
    program = 'S OBJ3 R2 S OBJ2 R1 OBJ1'
    program2 = ground_program2(scene, unique_filters)
    sentence_for_x = {}
    for (_, _, _, slot_dict2, obj2) in program2:
        for f in g_all_filter_ops:
            for r1 in g_all_relate_ops:
                # Execute with OBJ2 grounded to the depth-2 result's index.
                slot_dict = {'OBJ1': f, 'R1': r1, 'OBJ2': obj2}
                try:
                    obj = execute(scene, 'OBJ2 R1 OBJ1', slot_dict)
                except ValueError:
                    continue
                template = random.choice(g_templates_4)
                # Re-expand OBJ2 into the depth-2 chain's textual slots.
                slot_dict = {'OBJ1': f, 'R1': r1, 'OBJ2': slot_dict2['OBJ1'], 'OBJ3': slot_dict2['OBJ2'], 'R2': slot_dict2['R1']}
                sentence = template.format(**slot_dict)
                sentence_len = len(sentence.split())
                if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                    sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r1 = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        r2 = slot_dict['R2']
        obj3_string = gen_filter_string(slot_dict['OBJ3'], 'z')
        program = f'point(Object, lambda x: {obj1_string} and {r1}(x, iota(Object, lambda y: {obj2_string} and {r2}(y, iota(Object, lambda z: {obj3_string})))))'
        (yield (sentence, program, slot_program, slot_dict, obj))
def random_sample_and_post(scene):
    """Sample one referring expression of a random depth (1-4) for ``scene``.

    Retries up to 4 times, then post-processes the sentence surface form.
    Returns (sentence, program, slot_program, slot_dict, obj), or implicitly
    None when every attempt yields no candidate (after printing a warning).
    """
    unique_filters = [f for f in g_all_filter_ops if check_filter_unique(scene, f)]
    cat = (random.choice(range(4)) + 1)
    # Dispatch to ground_program1..ground_program4 by name.
    func = globals()[f'ground_program{cat}']
    for i in range(4):
        sols = list(func(scene, unique_filters))
        if (len(sols) == 0):
            continue
        (sentence, program, slot_program, slot_dict, obj) = random.choice(sols)
        # Surface-form cleanup for fluent English.
        sentence = sentence.replace('left', 'left of')
        sentence = sentence.replace('right', 'right of')
        sentence = sentence.replace('-', '')
        return (sentence, program, slot_program, slot_dict, obj)
    print('Really bad...', scene['image_filename'])
def main():
    """Generate up to 100 referring expressions from the first 150 scenes and dump them as JSON."""
    scenes = jacinle.load_json(args.scenes_json)['scenes']
    refexps = list()
    for scene_index, scene in enumerate(jacinle.tqdm(scenes[:150])):
        rv = random_sample_and_post(scene)
        if rv is None:
            continue
        sentence, program, slot_program, slot_dict, obj = rv
        refexps.append({
            'scene_index': scene_index,
            'image_filename': scene['image_filename'],
            'question': sentence,
            'program': program,
            # BUG FIX: previously stored `program` under 'slot_program' as
            # well; the unpacked `slot_program` value was silently unused.
            'slot_program': slot_program,
            'slot_dict': slot_dict,
            'answer': obj,
        })
    jacinle.dump_json(args.output, {'refexps': refexps[:100]})
    print('Saved: "{}".'.format(args.output))
def filter(scene, name, input_):
    """Restrict ``input_`` to the indices whose object has attribute value ``name``.

    NOTE: intentionally shadows the builtin ``filter``.
    """
    if name == 'object':
        # Wildcard concept: no restriction.
        return input_
    attribute = g_concept2attribute[name]
    kept = {i for i in input_ if scene['objects'][i][attribute] == name}
    return kept
def multi_filter(scene, names, input_):
    """Fold ``filter`` over each whitespace-separated concept token in ``names``."""
    current = input_
    for token in names.split():
        current = filter(scene, token, current)
    return current
def gen_description(rule1_cat, d1, rule2_cat, d2):
    """Compose an English noun phrase ("a/an <attr1> <attr2> [object]") from two attribute values.

    Attributes are re-ordered canonically (size < color < material < shape);
    a trailing 'object' is appended unless the second attribute is a shape.
    """
    cat_order = ['size', 'color', 'material', 'shape']
    if cat_order.index(rule1_cat) > cat_order.index(rule2_cat):
        rule1_cat, rule2_cat = rule2_cat, rule1_cat
        d1, d2 = d2, d1
    d = d1 + ' ' + d2
    if rule2_cat != 'shape':
        d += ' object'
    # BUG FIX: the original tested d.startswith('aeiou'), which checks for the
    # literal prefix "aeiou" rather than a leading vowel, so the article was
    # always "a".
    if d[0] in 'aeiou':
        d = 'an ' + d
    else:
        d = 'a ' + d
    return d
def main():
    """Build 100 Raven-style 3x3 attribute puzzles, pairing each with a matching scene, and dump JSON."""
    scenes = jacinle.load_json(args.scenes_json)['scenes']

    def find_scene_matching(name, answer):
        # Rejection-sample a scene that does (answer=True) or does not
        # (answer=False) contain an object matching filter string ``name``.
        # NOTE(review): returns None after 1000 failed tries, which would make
        # the tuple-unpack at the call site raise — confirm acceptable.
        for i in range(1000):
            scene_index = random.randint(0, (len(scenes) - 1))
            scene = scenes[scene_index]
            res = multi_filter(scene, name, range(len(scene['objects'])))
            if ((answer is True) and (len(res) > 0)):
                return (scene_index, scene)
            if ((answer is False) and (len(res) == 0)):
                return (scene_index, scene)
    rpms = list()
    for i in range(100):
        # Pick two distinct attribute categories and one rule (value triple) each.
        (rule1_cat, rule2_cat) = random.sample(list(g_all_rules.keys()), 2)
        rule1 = random.choice(g_all_rules[rule1_cat])
        rule2 = random.choice(g_all_rules[rule2_cat])
        print(rule1, rule2)
        desired_answer = random.choice([True, False])
        # The missing cell (row 3, col 3) combines rule1[2] with rule2[2].
        (scene_index, scene) = find_scene_matching(f'{rule1[2]} {rule2[2]}', desired_answer)
        question = 'There are 9 objects, ordered in a 3x3 grid: '
        # NOTE(review): the inner loop variable shadows the outer `i`; harmless
        # here since the outer `i` is only a counter.
        for i in range(3):
            for j in range(3):
                if ((i == 2) and (j == 2)):
                    continue
                question += f'row {(i + 1)} col {(j + 1)} is {gen_description(rule1_cat, rule1[i], rule2_cat, rule2[j])}; '
        question += 'I am missing one object at row 3 col 3. Can you find an object in the scene that can fit there?'
        rpm = {'rule1_cat': rule1_cat, 'rule1': rule1, 'rule2_cat': rule2_cat, 'rule2': rule2, 'answer': desired_answer, 'scene_index': scene_index, 'image_filename': scene['image_filename'], 'target_object': f'{rule1[2]} {rule2[2]}', 'question': question, 'program': f'exists(Object, lambda x: {rule1[2]}(x) and {rule2[2]}(x))'}
        rpms.append(rpm)
    jacinle.dump_json(args.output, {'rpms': rpms})
    print(f'Saved: "{args.output}".')
def main():
    """Load the dataset selected by ``args.dataset``, print basic stats and one example, then open IPython."""
    # g_dataset_loaders maps dataset name -> loader function name in this module.
    dataset = globals()[g_dataset_loaders[args.dataset]](args.data_dir)
    print('Dataset statistics:')
    print(' Length:', len(dataset))
    print('Dataset examples:')
    jacinle.stprint(dataset[0], 'dataset[0]', max_depth=1)
    # Drop into an interactive shell for ad-hoc inspection.
    from IPython import embed
    embed()
def load_CLEVR(data_dir: str):
    """Build the CLEVR dataset rooted at ``data_dir``.

    BUG FIX: the function previously ignored its ``data_dir`` parameter and
    read the global ``args.data_dir`` instead, which broke any caller passing
    a different directory.
    """
    from concepts.benchmark.clevr.dataset import make_dataset
    return make_dataset(
        scenes_json=osp.join(data_dir, 'scenes.json'),
        questions_json=osp.join(data_dir, 'questions.json'),
        image_root=osp.join(data_dir, 'images'),
        vocab_json=osp.join(data_dir, 'vocab.json'),
    )
@dataclass
class FunctionGroupSummary(object):
    """Aggregate statistics for all parsed functions sharing one type signature."""
    # Canonical "(arg types) -> return type" string identifying the group.
    signature: str
    # Total number of function-application occurrences observed for this group.
    count: int = 0
    # Map from function name to a small list of example records
    # ({'prompt', 'raw_code', 'parsed_expression'}).
    examples: dict[str, list[dict]] = field(default_factory=dict)
def main():
    """Parse all stored codes, infer a domain, and emit CSV/HTML reports.

    Outputs: summary.csv, parsing.html/.csv (per-code parse results),
    function_groups.html (examples per signature) and types.html.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    all_codes = io.load_pkl(args.parsed_filename)
    all_rows = list()
    all_function_groups: dict[(str, FunctionGroupSummary)] = dict()
    all_types: dict[(str, list)] = dict()
    for (prompt, codes) in jacinle.tqdm_gofor(all_codes, desc='Creating domain from parsings'):
        if isinstance(codes, str):
            # Normalize single-code entries to lists (in place).
            codes = [codes]
        all_codes[prompt] = codes
        for code in codes:
            exception = ''
            parsed_expression = None
            parsed_expression_str = ''
            try:
                # Parsing also registers inferred types/functions on `domain`.
                parsed_expression = parser.parse_expression(code)
                parsed_expression_str = str(parsed_expression)
            except:
                exception = traceback.format_exc()
            all_rows.append({'prompt': prompt, 'raw_code': code, 'parse_success': (parsed_expression is not None), 'parsed_expression': (parsed_expression_str if (parsed_expression is not None) else exception)})
            if (parsed_expression is not None):
                for expr in iter_exprs(parsed_expression):
                    if isinstance(expr, FunctionApplicationExpression):
                        # Group applications by function type signature; keep
                        # up to 3 highlighted examples per function name.
                        function = expr.function
                        signature = get_function_signature(function)
                        if (signature not in all_function_groups):
                            all_function_groups[signature] = FunctionGroupSummary(signature)
                        if (function.name not in all_function_groups[signature].examples):
                            all_function_groups[signature].examples[function.name] = list()
                        all_function_groups[signature].count += 1
                        if (len(all_function_groups[signature].examples[function.name]) < 3):
                            all_function_groups[signature].examples[function.name].append({'prompt': prompt, 'raw_code': code, 'parsed_expression': (('<pre>' + parsed_expression_str.replace((function.name + '('), f'<span style="color:red">{function.name}</span>(')) + '</pre>')})
                    elif isinstance(expr, VariableExpression):
                        # Collect up to 5 highlighted examples per type name.
                        typename = expr.return_type.typename
                        if (typename not in all_types):
                            all_types[typename] = list()
                        if (len(all_types[typename]) < 5):
                            all_types[typename].append({'prompt': prompt, 'raw_code': code, 'parsed_expression': (('<pre>' + parsed_expression_str.replace(typename, f'<span style="color:red">{typename}</span>')) + '</pre>')})
    io.mkdir(args.output_dir)
    # --- summary.csv: headline counts plus per-group counts. ---
    with open(f'{args.output_dir}/summary.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['# of prompts:', len(all_codes)])
        writer.writerow(['# of codes:', sum((len(x) for x in all_codes.values()))])
        writer.writerow(['# of parsed codes:', sum((1 for x in all_rows if x['parse_success']))])
        writer.writerow(['# of parsed types:', len(domain.types)])
        writer.writerow(['# of parsed functions:', len(domain.functions)])
        writer.writerow(['# of parsed function groups:', len(all_function_groups)])
        writer.writerow([])
        for function_group in sorted(all_function_groups.values(), key=(lambda x: x.count), reverse=True):
            writer.writerow([f'{function_group.signature}:', function_group.count])
    print('Summary written to', f'{args.output_dir}/summary.csv')
    # --- parsing.html: same counts plus one row per code. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/parsing.html', 'Parsing Results')
    with visualizer.html():
        with visualizer.table('Result Summary', [HTMLTableColumnDesc('summary', 'Summary', 'code')]):
            string = ''
            string += f'''# of prompts: {len(all_codes)}
'''
            string += f'''# of codes: {sum((len(x) for x in all_codes.values()))}
'''
            string += f'''# of parsed codes: {sum((1 for x in all_rows if x['parse_success']))}
'''
            string += f'''# of parsed types: {len(domain.types)}
'''
            string += f'''# of parsed functions: {len(domain.functions)}
'''
            string += f'''# of parsed function groups: {len(all_function_groups)}
'''
            visualizer.row(summary=string)
        with visualizer.table('Parsing Results', [HTMLTableColumnDesc('index', 'Index', 'text'), HTMLTableColumnDesc('prompt', 'Prompt', 'text', {}, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', {}, {'width': '20%'}), HTMLTableColumnDesc('parse_success', 'Parse Success', 'code', {'width': '50px'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'code', {}, {'width': '50%'})]):
            for (i, row) in enumerate(all_rows):
                visualizer.row(**row, index=i)
    print('Parsing results written to', f'{args.output_dir}/parsing.html')
    # --- parsing.csv: machine-readable copy of the same rows. ---
    with open(f'{args.output_dir}/parsing.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['prompt', 'raw_code', 'parse_success', 'parsed_expression'])
        for row in all_rows:
            writer.writerow([row['prompt'], row['raw_code'], row['parse_success'], row['parsed_expression']])
    print('Parsing results written to', f'{args.output_dir}/parsing.csv')
    # --- function_groups.html: one table per signature, largest first. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/function_groups.html', 'Function Groups')
    with visualizer.html():
        for function_group in sorted(all_function_groups.values(), key=(lambda x: x.count), reverse=True):
            with visualizer.table(f'{function_group.signature} (count = {function_group.count})', [HTMLTableColumnDesc('prompt', 'Prompt', 'text', None, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', None, {'width': '30%'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'raw', None, {'width': '50%'})]):
                for example_list in function_group.examples.values():
                    for example in example_list:
                        visualizer.row(**example)
    print('Function groups written to', f'{args.output_dir}/function_groups.html')
    # --- types.html: one table per inferred type. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/types.html', 'Types')
    with visualizer.html():
        for (typename, examples) in all_types.items():
            with visualizer.table(f'{typename}', [HTMLTableColumnDesc('prompt', 'Prompt', 'text', None, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', None, {'width': '30%'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'raw', None, {'width': '50%'})]):
                for example in examples:
                    visualizer.row(**example)
    print('Types written to', f'{args.output_dir}/types.html')
def get_function_signature(function):
    """Render a function's type as a "('ArgType', ...) -> ReturnType" key string."""
    arg_typenames = tuple(t.typename for t in function.ftype.argument_types)
    ret_typename = function.ftype.return_type.typename
    return f'{arg_typenames} -> {ret_typename}'
def main():
    """Summarize a parsed domain: counts of types, functions, and signature-grouped functions."""
    domain = make_domain(args.parsed_filename)
    domain.print_summary()
    print('Summary:')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    # Group functions by their "(arg types) -> return type" signature.
    function_groups = dict()
    for function in domain.functions.values():
        argument_types = tuple((x.typename for x in function.ftype.argument_types))
        return_type = function.ftype.return_type.typename
        key = f'{argument_types} -> {return_type}'
        function_groups.setdefault(key, []).append(function)
    print(' - # of function groups: {}'.format(len(function_groups)))
    # Print groups largest-first.
    for (key, functions) in sorted(function_groups.items(), key=(lambda x: len(x[1])), reverse=True):
        print(' - {}: {}'.format(key, len(functions)))
@dataclass
class FunctionGroupSummary(object):
    """Aggregated examples and counts for functions sharing one type signature."""
    # "(arg types) -> return type" string identifying the group.
    signature: str
    # Number of observed function applications with this signature.
    count: int = 0
    # Function name -> example records ({'prompt', 'raw_code', ...}).
    examples: dict[(str, list[dict])] = field(default_factory=dict)
def main():
    """Report domain size before and after pruning, inferring the domain by parsing all codes.

    Parsing each code has the side effect of registering inferred types and
    functions on ``domain``; parse failures are deliberately ignored.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    all_codes = io.load_pkl(args.parsed_filename)
    for (prompts, codes) in jacinle.tqdm_gofor(all_codes):
        for code in codes:
            try:
                # Result unused: only the registration side effect matters.
                _ = parser.parse_expression(code)
            except Exception:
                # Best-effort: unparseable codes contribute nothing.
                pass
    print('Summary (before pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    domain = prune_domain(domain)
    print('Summary (after pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(('-' * 80))
    domain.print_summary()
def main2():
    """End-to-end parse -> validity-check -> re-parse pipeline; optionally dump surviving codes.

    The first parse infers a domain; expressions are then filtered through
    ``check_expr_validity``; a fresh domain is rebuilt by re-parsing only the
    surviving codes, so its types/functions reflect valid expressions only.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    if args.parsed_filename.endswith('.json'):
        all_codes = io.load_json(args.parsed_filename)
    else:
        all_codes = io.load_pkl(args.parsed_filename)
    expressions = list()
    for (prompt, codes) in jacinle.tqdm_gofor(all_codes, leave=False, desc='Parsing'):
        if isinstance(codes, str):
            codes = [codes]
        for code in codes:
            code = code.strip()
            try:
                expr = parser.parse_expression(code)
                expressions.append((prompt, code, expr))
            except Exception:
                # Unparseable codes are silently dropped.
                pass
    print('Summary (before pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(' - # of input sentences: {}'.format(len(all_codes)))
    print(' - # of input expressions: {}'.format(sum((len(c) for c in all_codes.values()))))
    print(' - # of parsed sentences: {}'.format(len({x[0] for x in expressions})))
    print(' - # of parsed expressions: {}'.format(len(expressions)))
    # Filter expressions through the FOL-fragment validity checker.
    checked_expressions = list()
    for (prompt, code, expr) in jacinle.tqdm(expressions, leave=False, desc='Checking expressions'):
        if args.debug_checker:
            print(expr)
        try:
            check_expr_validity(expr)
            if args.debug_checker:
                print(' - OK')
            checked_expressions.append((prompt, code, expr))
        except Exception:
            if args.debug_checker:
                # Interactive pause so failures can be inspected one by one.
                print(' - failed')
                traceback.print_exc()
                input('Press any key to continue...')
    # Rebuild the domain from scratch using only validity-checked codes.
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    for (_, code, _) in jacinle.tqdm(checked_expressions, leave=False, desc='Re-parsing'):
        try:
            parser.parse_expression(code)
        except Exception:
            pass
    print('Summary (after pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(' - # of parsed sentences: {}'.format(len({s[0] for s in checked_expressions})))
    print(' - # of parsed expressions: {}'.format(len(checked_expressions)))
    print(('-' * 80))
    domain.print_summary()
    if (args.output is not None):
        # Regroup surviving codes by prompt and dump.
        # NOTE(review): 'prmopt' is a long-standing local-name typo for 'prompt'.
        expressions = dict()
        for (prmopt, code, expr) in checked_expressions:
            if (prmopt not in expressions):
                expressions[prmopt] = list()
            expressions[prmopt].append(code)
        io.dump(args.output, expressions)
        print(f'Output to {args.output}.')
def prune_domain(old_domain: FunctionDomain) -> FunctionDomain:
    """Copy into a fresh bare domain only the functions that look like object predicates/relations.

    A function survives when it returns bool and all of its arguments (or all
    but the first) have type Object; argument types of survivors are
    registered in the new domain as well.
    """
    new_domain = create_bare_domain()
    for (name, function) in old_domain.functions.items():
        if (name in new_domain.functions):
            # Built-ins of the bare domain are already present; keep as-is.
            continue
        print('Checking function: {} {}'.format(name, function))
        ftype = function.ftype
        argument_types = [x.typename for x in ftype.argument_types]
        return_type = ftype.return_type.typename
        pass_test = False
        # All-Object multi-argument relation returning bool.
        if ((len(argument_types) > 1) and all(((x == 'Object') for x in argument_types)) and (return_type == 'bool')):
            pass_test = True
        # First argument may be non-Object; the rest must be Objects.
        # NOTE(review): both tests require len > 1, so unary Object -> bool
        # predicates never pass — confirm this is intended.
        if ((len(argument_types) > 1) and all(((x == 'Object') for x in argument_types[1:])) and (return_type == 'bool')):
            pass_test = True
        if pass_test:
            print(f' Pass test: {name} {argument_types} -> {return_type}')
            new_domain.functions[name] = function
            for t in ftype.argument_types:
                if (t.typename not in new_domain.types):
                    new_domain.types[t.typename] = t
        else:
            print(f' Prune {name}')
    return new_domain
def check_expr_validity(expression: E.Expression):
    """Raise ValueError (or AssertionError) when ``expression`` leaves the allowed FOL fragment.

    Top level: a generalized quantifier must be 'describe' or 'count'.
    Recursively: 'point'/'describe' are permitted only where queries are
    allowed (i.e. not nested inside another query); 'view' is always
    rejected; boolean function applications must be the builtin comparisons
    or 1/2-ary all-Object predicates.
    """
    if isinstance(expression, E.GeneralizedQuantificationExpression):
        # NOTE(review): this also rejects a top-level 'point'/'iota' even
        # though dfs below would permit 'point' at the root — confirm intended.
        if (expression.quantification_op in ('describe', 'count')):
            pass
        else:
            raise ValueError('Invalid quantification op: {}'.format(expression.quantification_op))

    def dfs(expr: E.Expression, allow_queries: bool=False):
        # Recursive validity walk; allow_queries is cleared once we descend
        # into a query ('point'/'describe') so queries cannot nest.
        if isinstance(expr, E.GeneralizedQuantificationExpression):
            if (expr.quantification_op == 'iota'):
                dfs(expr.expression, allow_queries=allow_queries)
            elif (expr.quantification_op == 'point'):
                assert allow_queries
                dfs(expr.expression, allow_queries=False)
            elif (expr.quantification_op == 'view'):
                raise ValueError(f'Invalid view: {repr(expr)}.')
            elif (expr.quantification_op == 'describe'):
                assert allow_queries
                if isinstance(expr.expression, E.FunctionApplicationExpression):
                    if (expr.variable.dtype.typename == 'Object'):
                        pass
                    elif (expr.variable.dtype.typename == 'Action'):
                        pass
                    elif ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action'])):
                        # describe(K, lambda k: concept(k, <obj>)) — recurse
                        # directly into the object/action argument.
                        return dfs(expr.expression.arguments[1], allow_queries=allow_queries)
                    else:
                        raise ValueError(f'Invalid describe: {repr(expr)}.')
                else:
                    raise ValueError(f'Invalid describe: {repr(expr)}.')
                dfs(expr.expression, allow_queries=False)
            elif (expr.quantification_op == 'count'):
                dfs(expr.expression, allow_queries=allow_queries)
        elif isinstance(expr, FunctionApplicationExpression):
            if (expr.return_type in (BOOL,)):
                function = expr.function
                if (function.name in ('equal', 'less_than', 'greater_than')):
                    # Builtin comparisons are always allowed.
                    pass
                elif ((len(function.ftype.argument_types) > 0) and all(((x.typename == 'Object') for x in function.ftype.argument_types))):
                    # Object predicates/relations: unary or binary only.
                    if (len(function.ftype.arguments) in (1, 2)):
                        pass
                    else:
                        raise ValueError(f'Invalid function: {repr(expr)}.')
                else:
                    raise ValueError('Invalid function: {}'.format(function))
            else:
                raise ValueError('Invalid return type: {}'.format(expr.return_type))
            for arg in expr.arguments:
                dfs(arg, allow_queries=allow_queries)
        elif isinstance(expr, E.VariableExpression):
            pass
        elif isinstance(expr, E.ConstantExpression):
            pass
        elif isinstance(expr, E.BoolExpression):
            # All operands of and/or/not must themselves be boolean.
            for arg in expr.arguments:
                if (arg.return_type != BOOL):
                    raise ValueError('Invalid bool expression: {}'.format(arg))
            for arg in expr.arguments:
                dfs(arg, allow_queries=allow_queries)
        elif isinstance(expr, E.QuantificationExpression):
            dfs(expr.expression, allow_queries=allow_queries)
        else:
            raise ValueError('Invalid expression: {}'.format(repr(expr)))
    dfs(expression, allow_queries=True)
def main():
    """Entry point for CLEVR training/evaluation.

    Reads all configuration from the module-level `args` / `configs` objects:
    sets up dump directories and logging, loads pre-computed parses and the
    dataset, builds the model/optimizer/trainer, then either evaluates or runs
    the training loop. In non-debug mode all artifacts go under
    dumps/<series>/<desc>/<expr>/<run>.
    """
    # --- Dump directories and output files (skipped entirely in debug mode). ---
    if (not args.debug):
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None

    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

    # Tensorboard is disabled for debug and evaluation runs.
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False

    # --- Default data paths, derived from args.data_dir when unset. ---
    if (args.data_questions_json is None):
        args.data_questions_json = osp.join(args.data_dir, 'questions.json')
    if (args.data_scenes_json is None):
        args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
    if (args.data_image_root is None):
        args.data_image_root = osp.join(args.data_dir, 'images')
    if (args.data_vocab_json is None):
        args.data_vocab_json = osp.join(args.data_dir, 'vocab.json')
    if (args.data_output_vocab_json is None):
        args.data_output_vocab_json = osp.join(args.data_dir, 'output-vocab.json')
    if (args.validation_data_dir is not None):
        args.validation_data_questions_json = osp.join(args.validation_data_dir, 'questions.json')
        args.validation_data_scenes_json = osp.join(args.validation_data_dir, 'scenes.json')
        args.validation_data_image_root = osp.join(args.validation_data_dir, 'images')

    # --- Load pre-computed natural-language -> program parses. ---
    all_parses = dict()
    if (args.data_parses is not None):
        for filename in args.data_parses:
            # Fix: the original f-string had no placeholder, so the assertion
            # message never named the missing file.
            assert osp.isfile(filename), f'File {filename} does not exist.'
            logger.info('Loading parses from {}.'.format(filename))
            if filename.endswith('.p'):
                content = io.load_pkl(filename)
            else:
                content = io.load(filename)
            all_parses.update(content)

    from left.domain import create_domain_from_parsing
    domain = create_domain_from_parsing(all_parses)

    # Optional word -> concept alignment table (CSV with align/word/mapped columns).
    if (args.data_concept_match is not None):
        import pandas as pd
        df = pd.read_csv(args.data_concept_match)
        concept_mapping = dict()
        for (i, row) in df.iterrows():
            if row['align']:
                concept_mapping[row['word']] = row['mapped']
        logger.critical(f'Loaded {len(concept_mapping)} concept matches from {args.data_concept_match}.')
    else:
        concept_mapping = None
    from jacinle.config.g import g
    g.concept_mapping = concept_mapping  # published globally for downstream modules

    logger.critical('Total parsed sentences: {}.'.format(len(all_parses)))
    logger.critical('Domain: {}'.format(domain))
    logger.info('Number of types: {}'.format(len(domain.types)))
    logger.info('Number of functions: {}'.format(len(domain.functions)))

    logger.critical('Loading the dataset.')
    if (args.evaluate_custom is None):
        from concepts.benchmark.clevr.dataset import make_dataset
        if (args.validation_data_dir is None):
            # Single dataset, split into train/validation by ratio.
            dataset = make_dataset(args.data_scenes_json, args.data_questions_json, args.data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
            (train_dataset, validation_dataset) = dataset.split_trainval(args.data_tvsplit)
        else:
            train_dataset = make_dataset(args.data_scenes_json, args.data_questions_json, args.data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
            validation_dataset = make_dataset(args.validation_data_scenes_json, args.validation_data_questions_json, args.validation_data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
    else:
        # Custom-transfer evaluation: the same dataset serves as both splits.
        from left.clevr_custom_transfer import make_dataset
        dataset = make_dataset(args.evaluate_custom, args.data_scenes_json, args.data_questions_json, args.data_image_root, args.data_output_vocab_json)
        train_dataset = validation_dataset = dataset

    logger.critical('Building the model.')
    model = desc.make_model(args, domain, all_parses, (train_dataset.output_vocab if hasattr(train_dataset, 'output_vocab') else train_dataset.unwrapped.output_vocab), custom_transfer=args.evaluate_custom)

    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        cudnn.benchmark = True

    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)

    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))

    trainer = TrainerEnv(model, optimizer)

    # Resume from a checkpoint, or warm-start weights from a pretrained run.
    parent_meta_file = None
    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        raw = trainer.load_weights(args.load)
        if (raw is not None):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))
            parent_meta_file = raw['extra']['meta_file']

    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()

    if (not args.debug):
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)
        if (parent_meta_file is not None):
            # Best-effort: link this run to its pretrained parent in MLDash.
            try:
                parent_run = io.load(parent_meta_file)['args']['run_name']
                logger.critical('Setting parent run: {}.'.format(parent_run))
                if args.evaluate:
                    mldash.update_parent(parent_run, is_master=False)
                else:
                    mldash.update_parent(parent_run, is_master=True)
            except:
                logger.exception('Exception occurred during loading metainfo.')

    if args.embed:
        from IPython import embed
        embed()

    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)

    logger.critical('Building the data loader.')
    train_dataloader = train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)
    validation_dataloader = validation_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)

    # Ask for confirmation when some configs were referenced but never defined.
    undefined_configs = configs.find_undefined_values('configs')
    if (len(undefined_configs) > 0):
        logger.warning('Undefined configs: {}'.format(undefined_configs))
        if (not yes_or_no('Continue the script?', default='no')):
            return

    # --- Custom-transfer evaluation only. ---
    if (args.evaluate_custom is not None):
        epoch = 0
        model.eval()
        validate_epoch_custom(epoch, trainer, validation_dataloader, meters)
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return

    # --- Plain evaluation only. ---
    if args.evaluate:
        epoch = 0
        model.eval()
        if args.evaluate_on_train:
            validate_epoch(epoch, trainer, train_dataloader, meters)
        else:
            validate_epoch(epoch, trainer, validation_dataloader, meters)
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return

    # --- Training loop. ---
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        if (args.curriculum != 'none'):
            (this_train_dataset, this_validation_dataset) = get_curriculum_dataset(epoch, train_dataset, validation_dataset)
            train_dataloader = this_train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)
            # NOTE(review): this_validation_dataset is computed but the
            # validation loader is not rebuilt — confirm this is intended.

        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters)

        # Validate on the first epoch and then at the configured interval.
        if (((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)) or (epoch == 1)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters)

        # Persist the model's current parses so runs can be inspected/resumed.
        latest_parses = model.parses
        if (not args.debug):
            fname = osp.join(args.dump_dir, 'latest_parses.pkl')
            io.dump(fname, latest_parses)
            logger.critical(f'Latest parses saved to "{fname}".')

        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))

        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
def get_curriculum_dataset(epoch, train_dataset, validation_dataset):
    """Filter the train/validation datasets according to the curriculum schedule.

    Finds the bucket of the module-level `g_curriculum_strategy` whose
    (lower, upper] epoch range contains `epoch`, then caps scene size and/or
    program size on both datasets depending on `args.curriculum`
    ('scene', 'program', or 'all'). Returns the (possibly filtered) pair.
    """
    # Fix: iterate only up to the second-to-last entry. The original indexed
    # g_curriculum_strategy[si + 1] for every si and therefore raised
    # IndexError once `epoch` exceeded the final threshold; with this guard
    # such epochs simply apply no filtering.
    for si in range((len(g_curriculum_strategy) - 1)):
        s = g_curriculum_strategy[si]
        if (g_curriculum_strategy[si][0] < epoch <= g_curriculum_strategy[(si + 1)][0]):
            # Each entry is (epoch_threshold, max_scene_size, max_program_size).
            (max_scene_size, max_program_size) = s[1:]
            if (args.curriculum in ('scene', 'all')):
                train_dataset = train_dataset.filter_scene_size(max_scene_size)
                validation_dataset = validation_dataset.filter_scene_size(max_scene_size)
            if (args.curriculum in ('program', 'all')):
                train_dataset = train_dataset.filter_program_size_raw(max_program_size)
                validation_dataset = validation_dataset.filter_program_size_raw(max_program_size)
            logger.critical('Building the data loader. Curriculum = {}/{}, length = {}.'.format(*s[1:], len(train_dataset)))
            break
    return (train_dataset, validation_dataset)
def train_epoch(epoch, trainer, train_dataloader, meters):
    """Run one training epoch of `trainer` over `train_dataloader`.

    The iteration count is `args.iters_per_epoch`, falling back to the full
    loader length when that is 0. Loss and monitor values are accumulated into
    `meters`; data-loading and optimization wall times are tracked under
    'time/data' and 'time/step'.
    """
    total_iters = args.iters_per_epoch
    if total_iters == 0:
        total_iters = len(train_dataloader)

    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    batch_iter = iter(train_dataloader)

    tic = time.time()
    with tqdm_pbar(total=total_iters) as pbar:
        for _ in range(total_iters):
            feed_dict = next(batch_iter)
            # Copy the batch onto GPU 0 unless the data-parallel wrapper
            # scatters it by itself.
            if args.use_gpu and (not args.gpu_parallel):
                feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - tic
            tic = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(feed_dict)

            step_time = time.time() - tic
            tic = time.time()

            meters.update(loss=loss)
            update_meters(meters, monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            # Show only training-side metrics (at most one '/' in the key).
            shown = {
                k: v for k, v in meters.val.items()
                if (not k.startswith('validation')) and k.count('/') <= 1
            }
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), shown, compressed=True), refresh=False)
            pbar.update()
            tic = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
@jactorch.no_grad_func
def validate_epoch(epoch, trainer, val_dataloader, meters):
    """Run one validation pass over `val_dataloader` and accumulate metrics.

    Monitors from `trainer.evaluate` are folded into `meters` with the
    'validation/' prefix. When `args.evaluate` is set (and not debugging), an
    HTML visualization of up to 30 questions (images with bounding boxes,
    programs, and per-step execution traces) is written under
    `args.vis_dir`/evaluation and linked into MLDash.
    """
    end = time.time()

    # Visualizations are produced only for full (non-debug) evaluation runs.
    run_visualizer = False
    if (args.evaluate and (not args.debug)):
        run_visualizer = True

    # Heavy visualization dependencies are imported lazily inside the function.
    import matplotlib.pyplot as plt
    from PIL import Image
    from jaclearn.visualize.html_table import HTMLTableColumnDesc, HTMLTableVisualizer
    from jaclearn.visualize.box import vis_bboxes
    from concepts.dsl.tensor_value import TensorValue

    if run_visualizer:
        visualizer = HTMLTableVisualizer(osp.join(args.vis_dir, 'evaluation'), 'Evaluation')
        visualizer.begin_html()
        visualizer_index = 0
        visualizer_total = 30  # cap on the number of visualized questions

    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            # Copy the batch to GPU 0 unless the data-parallel wrapper scatters it.
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = (time.time() - end)
            end = time.time()

            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])

            step_time = (time.time() - end)
            end = time.time()

            update_meters(meters, monitors, prefix='validation/')
            meters.update({'time/data': data_time, 'time/step': step_time})

            if (run_visualizer and (visualizer_index < visualizer_total)):
                for i in range(len(feed_dict['question_index'])):
                    # Resolve the image path against the split actually evaluated.
                    if (args.validation_data_dir is None):
                        image_filename = osp.join(args.data_image_root, feed_dict['image_filename'][i])
                    else:
                        image_filename = osp.join(args.validation_data_image_root, feed_dict['image_filename'][i])
                    image = Image.open(image_filename)

                    # Summary table: image + question + answer/prediction + program.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]}", [
                        HTMLTableColumnDesc('image', 'Image', 'figure', {'width': '600px'}),
                        HTMLTableColumnDesc('question', 'Question', 'text', {'width': '200px'}),
                        HTMLTableColumnDesc('answer', 'Answer', 'text'),
                        HTMLTableColumnDesc('prediction', 'Prediction', 'text'),
                        HTMLTableColumnDesc('program', 'Program', 'code', {'width': '600px'})
                    ]):
                        (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 'Object', add_text=False)
                        visualizer.row(**{'image': fig, 'question': feed_dict['question_raw'][i], 'answer': feed_dict['answer'][i], 'prediction': output_dict['pred_answers'][i], 'program': str(output_dict['parsings'][i])})
                        plt.close()

                    # Per-step execution trace: visualize only per-object
                    # bool/Object tensors with one batch variable.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]} (Program)", [
                        HTMLTableColumnDesc('id', 'ID', 'text', {'width': '50px'}),
                        HTMLTableColumnDesc('visualization', 'Visualization', 'figure', {'width': '600px'}),
                        HTMLTableColumnDesc('program_and_output', 'Program and Output', 'code', {'width': '600px'})
                    ]):
                        for (j, (program, output)) in enumerate(output_dict['execution_traces'][i]):
                            if (isinstance(output, TensorValue) and (output.dtype.typename in ('bool', 'Object')) and (len(output.batch_variables) == 1) and (output.tensor.shape[0] == len(feed_dict['objects_raw'][i]))):
                                (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 's:', add_text=True, legends=[str(round(x, 3)) for x in output.tensor.detach().cpu().tolist()])
                                visualizer.row(**{'id': j, 'visualization': fig, 'program_and_output': ((str(program) + '\n\n') + str(output))})
                                plt.close()

                    print('Visualized', visualizer_index)
                    visualizer_index += 1
                    if (visualizer_index >= visualizer_total):
                        break
            elif args.evaluate_visualization_only:
                # Visualization quota exhausted (or disabled): stop early when
                # only visualizations were requested.
                break

            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()

    if run_visualizer:
        visualizer.end_html()
        # 'viewer://' links are resolved by the MLDash front-end.
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(visualizer.visdir, visualizer.visdir)
        mldash.update(run_description=link)
def update_meters(meters, monitors, prefix: str = None):
    """Fold `monitors` into `meters`, honoring per-key sample counts.

    A key ``k`` accompanied by ``k + '/n'`` is treated as a weighted metric:
    it is pushed with ``n=monitors[k + '/n']`` and both entries are removed
    from `monitors` (the caller's dict is mutated). All remaining entries are
    then pushed unweighted in a single call.
    """
    for key in tuple(monitors):
        count_key = key + '/n'
        if count_key not in monitors:
            continue
        meters.update({key: monitors[key]}, n=monitors[count_key], prefix=prefix)
        monitors.pop(key)
        monitors.pop(count_key)
    meters.update(monitors, prefix=prefix)
@jactorch.no_grad_func
def validate_epoch_custom(epoch, trainer, val_dataloader, meters):
    """Validation pass for custom-transfer evaluation.

    Near-duplicate of `validate_epoch`, with one extra switch: visualization
    can be turned off via `args.validation_visualize` even in evaluation mode.
    Monitors are folded into `meters` under the 'validation/' prefix; the HTML
    visualizer writes up to 30 questions under `args.vis_dir`/evaluation.
    """
    end = time.time()

    run_visualizer = False
    if (args.evaluate and (not args.debug)):
        run_visualizer = True
    # Extra opt-out compared to validate_epoch.
    if (args.validation_visualize is False):
        run_visualizer = False

    # Heavy visualization dependencies are imported lazily inside the function.
    import matplotlib.pyplot as plt
    from PIL import Image
    from jaclearn.visualize.html_table import HTMLTableColumnDesc, HTMLTableVisualizer
    from jaclearn.visualize.box import vis_bboxes
    from concepts.dsl.tensor_value import TensorValue

    if run_visualizer:
        visualizer = HTMLTableVisualizer(osp.join(args.vis_dir, 'evaluation'), 'Evaluation')
        visualizer.begin_html()
        visualizer_index = 0
        visualizer_total = 30  # cap on the number of visualized questions

    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            # Copy the batch to GPU 0 unless the data-parallel wrapper scatters it.
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = (time.time() - end)
            end = time.time()

            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])

            step_time = (time.time() - end)
            end = time.time()

            update_meters(meters, monitors, prefix='validation/')
            meters.update({'time/data': data_time, 'time/step': step_time})

            if (run_visualizer and (visualizer_index < visualizer_total)):
                for i in range(len(feed_dict['question_index'])):
                    # Resolve the image path against the split actually evaluated.
                    if (args.validation_data_dir is None):
                        image_filename = osp.join(args.data_image_root, feed_dict['image_filename'][i])
                    else:
                        image_filename = osp.join(args.validation_data_image_root, feed_dict['image_filename'][i])
                    image = Image.open(image_filename)

                    # Summary table: image + question + answer/prediction + program.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]}", [
                        HTMLTableColumnDesc('image', 'Image', 'figure', {'width': '600px'}),
                        HTMLTableColumnDesc('question', 'Question', 'text', {'width': '200px'}),
                        HTMLTableColumnDesc('answer', 'Answer', 'text'),
                        HTMLTableColumnDesc('prediction', 'Prediction', 'text'),
                        HTMLTableColumnDesc('program', 'Program', 'code', {'width': '600px'})
                    ]):
                        (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 'Object', add_text=False)
                        visualizer.row(**{'image': fig, 'question': feed_dict['question_raw'][i], 'answer': feed_dict['answer'][i], 'prediction': output_dict['pred_answers'][i], 'program': str(output_dict['parsings'][i])})
                        plt.close()

                    # Per-step execution trace: visualize only per-object
                    # bool/Object tensors with one batch variable.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]} (Program)", [
                        HTMLTableColumnDesc('id', 'ID', 'text', {'width': '50px'}),
                        HTMLTableColumnDesc('visualization', 'Visualization', 'figure', {'width': '600px'}),
                        HTMLTableColumnDesc('program_and_output', 'Program and Output', 'code', {'width': '600px'})
                    ]):
                        for (j, (program, output)) in enumerate(output_dict['execution_traces'][i]):
                            if (isinstance(output, TensorValue) and (output.dtype.typename in ('bool', 'Object')) and (len(output.batch_variables) == 1) and (output.tensor.shape[0] == len(feed_dict['objects_raw'][i]))):
                                (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 's:', add_text=True, legends=[str(round(x, 3)) for x in output.tensor.detach().cpu().tolist()])
                                visualizer.row(**{'id': j, 'visualization': fig, 'program_and_output': ((str(program) + '\n\n') + str(output))})
                                plt.close()

                    print('Visualized', visualizer_index)
                    visualizer_index += 1
                    if (visualizer_index >= visualizer_total):
                        break
            elif args.evaluate_visualization_only:
                # Visualization quota exhausted (or disabled): stop early when
                # only visualizations were requested.
                break

            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()

    if run_visualizer:
        visualizer.end_html()
        # 'viewer://' links are resolved by the MLDash front-end.
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(visualizer.visdir, visualizer.visdir)
        mldash.update(run_description=link)
def main():
    """Entry point for BabelQA (human-motion) training/evaluation.

    Mirrors the CLEVR `main` above but builds BabelQA datasets instead.
    NOTE(review): this chunk contains multiple `main`/`train_epoch`/
    `validate_epoch` definitions — presumably several scripts concatenated;
    confirm they live in separate modules, since later definitions would
    shadow earlier ones within one module.
    """
    # --- Dump directories and output files (skipped entirely in debug mode). ---
    if (not args.debug):
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None

    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        import jacinle
        jacinle.set_logger_output_file(args.log_file)
        jacinle.git_guard()
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

    # Tensorboard is disabled for debug and evaluation runs.
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False

    # Output vocabulary maps predicted indices back to answer words.
    from concepts.benchmark.common.vocab import Vocab
    output_vocab = Vocab.from_json(args.vocab)

    logger.critical('Building the model.')
    model = desc.make_model(args.parsed_train_path, args.parsed_test_path, output_vocab)

    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        # Unlike the CLEVR main above, cudnn autotuning is disabled here.
        cudnn.benchmark = False

    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)
    print(('LR ' + str(args.lr)))

    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))

    trainer = TrainerEnv(model, optimizer)

    # Resume from a checkpoint, or warm-start weights from a pretrained run.
    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))

    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()

    if (not args.debug):
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)

    if args.embed:
        from IPython import embed
        embed()

    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)

    logger.critical('Building the data loader.')

    def build_human_motion_dataset(data_dir, data_split_file, split, data_source, no_gt_segments=False, filter_supervision=False):
        # Thin factory around BabelQADataset (lazy import keeps the heavy
        # dependency out of module import time).
        from concepts.benchmark.vision_language.babel_qa.dataset import BabelQADataset
        dataset = BabelQADataset(data_dir, data_split_file, split, data_source, no_gt_segments, filter_supervision)
        return dataset

    train_dataset = build_human_motion_dataset(args.datadir, args.data_split_file, 'train', 'humanml3d', no_gt_segments=args.no_gt_segments, filter_supervision=args.filter_supervision)
    val_dataset = build_human_motion_dataset(args.datadir, args.data_split_file, 'val', 'humanml3d', no_gt_segments=args.no_gt_segments, filter_supervision=args.filter_supervision)
    # NOTE(review): worker count is hard-coded to 2 here (vs. args.data_workers
    # in the CLEVR main) — confirm intended.
    train_dataloader = train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=2)
    validation_dataloader = val_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=2)
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)

    # --- Evaluation only. ---
    if args.evaluate:
        epoch = 0
        model.eval()
        validate_epoch(epoch, trainer, validation_dataloader, meters, output_vocab)
        if (not args.debug):
            meters.dump(args.meter_file)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return

    # --- Training loop. ---
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters, output_vocab)

        if ((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters, output_vocab)

        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc') or key.startswith('train/acc') or key.startswith('validation/percent') or key.startswith('train/percent')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))

        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
def train_epoch(epoch, trainer, train_dataloader, meters, output_vocab):
    """Run one BabelQA training epoch, tracking scored-answer accuracy.

    In addition to loss/monitors, decodes the model's per-question execution
    results: for each question marked `scored`, the argmax over the execution
    output is mapped through `output_vocab` and compared against the
    ground-truth answer, yielding 'train/acc_scored' and
    'train/percent_scored'.
    """
    nr_iters = args.iters_per_epoch
    if (nr_iters == 0):
        nr_iters = len(train_dataloader)
    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        # NOTE(review): the inner `for i in range(len(executions))` loops below
        # shadow this loop index `i`. Harmless in Python (the outer loop
        # reassigns on each iteration) but worth renaming.
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            # Copy the batch to GPU 0 unless the data-parallel wrapper scatters it.
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (loss, monitors, output_dict, extra_info) = trainer.step(feed_dict)
            step_time = (time.time() - end)
            end = time.time()
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            # Decode per-question predictions from the execution outputs.
            # NOTE(review): torch.stack raises on an empty list — assumes every
            # batch yields at least one execution; confirm.
            executions = output_dict['executions']
            predictions = []
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)

            # Accuracy is computed only over questions flagged as scored.
            scored_accs = []
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    pred_answer = output_vocab.idx2word[int(predictions[i].cpu())]
                    scored_accs.append((pred_answer == feed_dict['answer'][i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)))
                meters.update({'train/acc_scored': scored_avg_acc}, n=len(scored_accs))
            meters.update({'train/percent_scored': (len(scored_accs) / len(executions))})

            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), {k: v for (k, v) in meters.val.items() if ((not k.startswith('validation')) and (k.count('/') <= 1))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    trainer.trigger_event('epoch:after', trainer, epoch)
def validate_epoch(epoch, trainer, val_dataloader, meters, output_vocab):
    """Run one BabelQA validation pass, with optional HTML logging.

    Renames every monitor key with a 'validation/' prefix, computes
    scored-answer accuracy via `output_vocab` (as in `train_epoch`), and — in
    non-debug mode — writes up to `max_log_count` per-question rows
    (utterance, correctness, attribute-classification stats, parsing tree)
    into an HTML table under `args.vis_dir`, linked into MLDash when
    evaluating.
    """
    if (not args.debug):
        from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
        vis = HTMLTableVisualizer(osp.join(args.vis_dir, f'episode_{epoch}'), f'Left @ Epoch {epoch}')
        # 'viewer://' links are resolved by the MLDash front-end.
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(vis.visdir, vis.visdir)
        columns = [
            HTMLTableColumnDesc('id', 'Index', 'text', {'width': '40px'}),
            HTMLTableColumnDesc('utterance', 'Utterance', 'code', {'width': '1000px'}),
            HTMLTableColumnDesc('correctness', 'Accurate', 'text', {'width': '40px'}),
            HTMLTableColumnDesc('attr_cls_acc', 'Attr Cls Accuracy', 'text', {'width': '150px'}),
            HTMLTableColumnDesc('attr_cls_pred', 'Attr Cls Preds', 'text', {'width': '200px'}),
            HTMLTableColumnDesc('tree', 'Parsing Tree', 'code', {'width': '500px'})
        ]

    # NOTE(review): max_viz_count and the `accuracy` list below are never used.
    (this_count, max_viz_count, max_log_count) = (0, 5, 20)
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        if (not args.debug):
            vis.begin_html()
            vis.begin_table('Left', columns)
        accuracy = []
        for feed_dict in val_dataloader:
            # Copy the batch to GPU 0 unless the data-parallel wrapper scatters it.
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)

            # Re-key every monitor under the 'validation/' prefix.
            val_monitors = {}
            for k in output_dict['monitors'].keys():
                val_monitors[('validation/' + k)] = output_dict['monitors'][k]
            output_dict['monitors'] = val_monitors
            monitors = as_float(output_dict['monitors'])

            step_time = (time.time() - end)
            end = time.time()
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            # Decode per-question predictions; same pattern as train_epoch.
            executions = output_dict['executions']
            predictions = []
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)

            # Accuracy over scored questions only.
            scored_accs = []
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    pred_answer = output_vocab.idx2word[int(predictions[i].cpu())]
                    scored_accs.append((pred_answer == feed_dict['answer'][i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)))
                meters.update({'validation/acc_scored': scored_avg_acc}, n=len(scored_accs))
            meters.update({'validation/percent_scored': (len(scored_accs) / len(executions))})

            # Log the first question of the batch into the HTML table, up to
            # max_log_count rows in total.
            if ((not args.debug) and (this_count < max_log_count)):
                idx = 0
                utterance = feed_dict['question_text'][idx]
                parsing = output_dict['parsing'][idx]
                tree = ''
                if parsing:
                    tree = str(parsing)
                correctness = (predictions[idx].cpu() == feed_dict['answer'][idx])
                if ('concepts_to_accs' in output_dict):
                    concepts_to_accs = str(output_dict['concepts_to_accs'][idx])
                    concepts_to_pred_concepts = str(output_dict['concepts_to_pred_concepts'][idx])
                else:
                    (concepts_to_accs, concepts_to_pred_concepts) = ('', '')
                vis.row(id=this_count, utterance=utterance, correctness=correctness, attr_cls_acc=concepts_to_accs, attr_cls_pred=concepts_to_pred_concepts, tree=tree)
                this_count += 1

            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()

    # NOTE(review): this re-applies the LAST batch's monitors a second time
    # (and raises NameError on an empty loader) — confirm intended.
    meters.update(monitors)
    if (not args.debug):
        vis.end_table()
        vis.end_html()
    if (not args.debug):
        if args.evaluate:
            mldash.update(run_description=link)
            with mldash.update_extra_info():
                mldash.extra_info_dict.setdefault('visualizations', []).append(f'Epoch {epoch:3} Visualizations: {link}')
            logger.critical(f'Visualizations: {link}')
def main():
    """Entry point: set up dirs/logging, build model+optimizer, load ReferIt3D
    data, then either evaluate once (``args.evaluate``) or run the train loop.

    Relies on module globals: args, parser, configs, desc, osp, ensure_path,
    dump_metainfo, logger, mldash, cudnn, torch, TrainerEnv, train_epoch,
    validate_epoch.
    """
    if (not args.debug):
        # Create the per-run dump directory tree and derived file paths.
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None
    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        import jacinle
        jacinle.set_logger_output_file(args.log_file)
        # git_guard records/validates the repository state for reproducibility.
        jacinle.git_guard()
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        # NOTE(review): metainfo is written again further below — redundant?
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False
    logger.critical('Building the model.')
    model = desc.make_model(args.parsed_train_path, args.parsed_test_path, args.idx_to_class)
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # user_scattered: feed dicts are pre-scattered by the data loader.
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        cudnn.benchmark = False
    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        # Only optimize parameters that require gradients (frozen parts skip).
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)
    print(('LR ' + str(args.lr)))
    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))
    trainer = TrainerEnv(model, optimizer)
    if args.resume:
        # Full resume: restores weights, optimizer state and epoch counter.
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        # Weights-only initialization from a pretrained model.
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))
    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()
    if (not args.debug):
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)
    if args.embed:
        # Drop into an interactive IPython shell for manual inspection.
        from IPython import embed
        embed()
    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)
    logger.critical('Building the data loader.')
    'From ReferIt3D'
    # The block below is adapted from the ReferIt3D codebase: it parses a
    # fixed set of ReferIt3D CLI options, loads scans and referential data,
    # and builds train/test data loaders.
    from left.data.referit3d.arguments import parse_arguments
    from left.data.referit3d.listening_dataset import make_data_loaders
    from left.data.referit3d.referit3d_reader import load_scan_related_data, load_referential_data, trim_scans_per_referit3d_data, compute_auxiliary_data
    referit3d_args = parse_arguments(['-scannet-file', args.scannet_file, '-referit3D-file', args.referit3d_file, '--max-distractors', '9', '--max-test-objects', '88', '--batch-size', '16', '--n-workers', '2'])
    (all_scans_in_dict, scans_split, class_to_idx) = load_scan_related_data(args.scannet_split_pre_fix, referit3d_args.scannet_file)
    referit_data = load_referential_data(referit3d_args, referit3d_args.referit3D_file, scans_split)
    # Drop scans that no referential utterance mentions.
    all_scans_in_dict = trim_scans_per_referit3d_data(referit_data, all_scans_in_dict)
    (mean_rgb, vocab) = compute_auxiliary_data(referit_data, all_scans_in_dict, referit3d_args)
    data_loaders = make_data_loaders(referit3d_args, referit_data, vocab, class_to_idx, all_scans_in_dict, mean_rgb)
    train_dataloader = data_loaders['train']
    validation_dataloader = data_loaders['test']
    'End from ReferIt3D'
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)
    if args.evaluate:
        # Evaluation-only mode: one validation pass at "epoch 0", then exit.
        epoch = 0
        model.eval()
        validate_epoch(epoch, trainer, validation_dataloader, meters, all_scans_in_dict)
        if (not args.debug):
            meters.dump(args.meter_file)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return
    # Main training loop; epochs are 1-indexed relative to start_epoch.
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters, all_scans_in_dict)
        if ((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters, all_scans_in_dict)
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            # Losses tracked as running minima, accuracies as running maxima.
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc') or key.startswith('train/acc') or key.startswith('validation/percent') or key.startswith('train/percent')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
def train_epoch(epoch, trainer, train_dataloader, meters, all_scans_in_dict):
    """Run one training epoch over the ReferIt3D listening data.

    Parameters
    ----------
    epoch : int
        Current epoch index (used for logging and trainer events).
    trainer :
        Object providing ``step(feed_dict) -> (loss, monitors, output_dict,
        extra_info)`` and ``trigger_event``.
    train_dataloader : iterable
        Yields ReferIt3D batches; consumed via a fresh iterator each epoch.
    meters :
        Metric accumulator, updated in place (side effect).
    all_scans_in_dict :
        Unused in this function body; kept for signature parity with
        ``validate_epoch``.

    Relies on module globals: args, time, tqdm_pbar, word_tokenize,
    async_copy_to, torch.
    """
    nr_iters = args.iters_per_epoch
    if (nr_iters == 0):
        # 0 means "one full pass over the loader".
        nr_iters = len(train_dataloader)
    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            # Re-map ReferIt3D batch keys onto the names the model expects.
            feed_dict['input_str'] = feed_dict['utterance']
            tokenized = []
            for u in feed_dict['utterance']:
                tokenized.append(word_tokenize(u))
            feed_dict['input_str_tokenized'] = tokenized
            feed_dict['input_objects'] = feed_dict['objects']
            feed_dict['input_objects_class'] = feed_dict['class_labels']
            feed_dict['input_objects_length'] = feed_dict['context_size']
            feed_dict['output_target'] = feed_dict['target_pos']
            feed_dict['scene'] = None
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (loss, monitors, output_dict, extra_info) = trainer.step(feed_dict)
            step_time = (time.time() - end)
            end = time.time()
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            target = feed_dict['output_target']
            executions = output_dict['executions']
            predictions = []
            # NOTE: this inner `i` shadows the outer loop index; harmless in
            # Python (the outer `for` re-assigns from its iterator) but fragile.
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            guessed_correctly = torch.mean((predictions == target).double()).item()
            meters.update({'train/acc': guessed_correctly})
            # Accuracy restricted to examples flagged as "scored".
            scored_accs = []
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    scored_accs.append((predictions[i] == target[i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)).cpu().numpy())
                meters.update({'train/acc_scored': scored_avg_acc}, n=len(scored_accs))
            meters.update({'train/percent_scored': (len(scored_accs) / len(executions))})
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), {k: v for (k, v) in meters.val.items() if ((not k.startswith('validation')) and (k.count('/') <= 1))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    trainer.trigger_event('epoch:after', trainer, epoch)
def decode_stimulus_string(s): '\n Split into scene_id, instance_label, # objects, target object id,\n distractors object id.\n :param s: the stimulus string\n ' if (len(s.split('-', maxsplit=4)) == 4): (scene_id, instance_label, n_objects, target_id) = s.split('-', maxsplit=4) distractors_ids = '' else: (scene_id, instance_label, n_objects, target_id, distractors_ids) = s.split('-', maxsplit=4) instance_label = instance_label.replace('_', ' ') n_objects = int(n_objects) target_id = int(target_id) distractors_ids = [int(i) for i in distractors_ids.split('-') if (i != '')] assert (len(distractors_ids) == (n_objects - 1)) return (scene_id, instance_label, n_objects, target_id, distractors_ids)
def validate_epoch(epoch, trainer, val_dataloader, meters, all_scans_in_dict):
    """Run one validation pass over ReferIt3D data with accuracy breakdowns.

    Computes overall accuracy plus easy/hard (by distractor count from the
    stimulus string) and view-dependent/independent (by keyword presence in
    the utterance) splits. This definition shadows the earlier
    ``validate_epoch`` in this file.

    Parameters
    ----------
    epoch : int
        Epoch index, used for logging/report labels.
    trainer :
        Object providing ``evaluate(feed_dict) -> (output_dict, extra_info)``.
    val_dataloader : iterable
        Yields ReferIt3D batches; ``len()`` sizes the progress bar.
    meters :
        Metric accumulator, updated in place (side effect).
    all_scans_in_dict :
        Unused in this body; kept for signature parity with ``train_epoch``.

    Relies on module globals: args, osp, time, tqdm_pbar, word_tokenize,
    async_copy_to, as_float, torch, decode_stimulus_string, mldash, logger.
    """
    if (not args.debug):
        # HTML report only outside debug mode; `vis`/`link`/`columns` are used
        # later under the same (not args.debug) guard.
        from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
        vis = HTMLTableVisualizer(osp.join(args.vis_dir, f'episode_{epoch}'), f'Left @ Epoch {epoch}')
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(vis.visdir, vis.visdir)
        columns = [HTMLTableColumnDesc('id', 'Index', 'text', {'width': '40px'}), HTMLTableColumnDesc('utterance', 'Utterance', 'code', {'width': '1000px'}), HTMLTableColumnDesc('correctness', 'Accurate', 'text', {'width': '40px'}), HTMLTableColumnDesc('attr_cls_acc', 'Attr Cls Accuracy', 'text', {'width': '150px'}), HTMLTableColumnDesc('attr_cls_pred', 'Attr Cls Preds', 'text', {'width': '200px'}), HTMLTableColumnDesc('tree', 'Parsing Tree', 'code', {'width': '500px'})]
    # max_viz_count is never read below; max_log_count caps HTML rows logged.
    (this_count, max_viz_count, max_log_count) = (0, 5, 20)
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        if (not args.debug):
            vis.begin_html()
            vis.begin_table('Left', columns)
        accuracy = []
        # Per-example correctness buckets for the breakdown metrics.
        (easy_acc, hard_acc, view_dep_acc, view_indep_acc) = ([], [], [], [])
        # Utterances containing any of these words are "view-dependent".
        view_dependent_words = {'facing', 'looking', 'front', 'behind', 'back', 'right', 'left', 'leftmost', 'rightmost', 'across'}
        for feed_dict in val_dataloader:
            # Re-map ReferIt3D batch keys onto the names the model expects.
            feed_dict['input_str'] = feed_dict['utterance']
            tokenized = []
            for u in feed_dict['utterance']:
                tokenized.append(word_tokenize(u))
            feed_dict['input_str_tokenized'] = tokenized
            feed_dict['input_objects'] = feed_dict['objects']
            feed_dict['input_objects_class'] = feed_dict['class_labels']
            feed_dict['input_objects_length'] = feed_dict['context_size']
            feed_dict['output_target'] = feed_dict['target_pos']
            feed_dict['scene'] = None
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])
            step_time = (time.time() - end)
            end = time.time()
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            target = feed_dict['output_target']
            executions = output_dict['executions']
            predictions = []
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            guessed_correctly = torch.mean((predictions == target).double()).item()
            meters.update({'validation/acc': guessed_correctly})
            accuracy.append(guessed_correctly)
            for i in range(len(executions)):
                this_tokens = feed_dict['utterance'][i].split(' ')
                this_stimulus_id = feed_dict['stimulus_id'][i]
                # Field [2] of the stimulus string is the object count;
                # <= 2 objects (target + at most one distractor) is "easy".
                hardness = decode_stimulus_string(this_stimulus_id)[2]
                this_easy = (hardness <= 2)
                this_view_dependent = (len(set(this_tokens).intersection(view_dependent_words)) > 0)
                this_pred_acc = (predictions[i] == target[i])
                if this_view_dependent:
                    view_dep_acc.append(this_pred_acc)
                else:
                    view_indep_acc.append(this_pred_acc)
                if this_easy:
                    easy_acc.append(this_pred_acc)
                else:
                    hard_acc.append(this_pred_acc)
            # Accuracy restricted to examples flagged as "scored".
            scored_accs = []
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    scored_accs.append((predictions[i] == target[i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)).cpu().numpy())
                meters.update({'validation/acc_scored': scored_avg_acc}, n=len(scored_accs))
            meters.update({'validation/percent_scored': (len(scored_accs) / len(executions))})
            if ((not args.debug) and (this_count < max_log_count)):
                # Log only the first example of the batch to the HTML table.
                idx = 0
                utterance = feed_dict['input_str'][idx]
                parsing = output_dict['parsing'][idx]
                tree = ''
                if parsing:
                    tree = str(parsing)
                correctness = (predictions[idx].cpu() == target[idx].cpu())
                if ('concepts_to_accs' in output_dict):
                    concepts_to_accs = str(output_dict['concepts_to_accs'][idx])
                    concepts_to_pred_concepts = str(output_dict['concepts_to_pred_concepts'][idx])
                else:
                    (concepts_to_accs, concepts_to_pred_concepts) = ('', '')
                vis.row(id=this_count, utterance=utterance, correctness=correctness.cpu().numpy(), attr_cls_acc=concepts_to_accs, attr_cls_pred=concepts_to_pred_concepts, tree=tree)
                this_count += 1
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
        # Fold the breakdown accuracies into the last batch's monitor dict.
        # NOTE(review): raises ZeroDivisionError if any bucket is empty —
        # confirm every split is non-empty on the validation set.
        monitors['validation/acc/view_dep_acc'] = float((sum(view_dep_acc) / float(len(view_dep_acc))))
        monitors['validation/acc/view_indep_acc'] = float((sum(view_indep_acc) / float(len(view_indep_acc))))
        monitors['validation/acc/easy_acc'] = float((sum(easy_acc) / float(len(easy_acc))))
        monitors['validation/acc/hard_acc'] = float((sum(hard_acc) / float(len(hard_acc))))
        meters.update(monitors)
        if (not args.debug):
            vis.end_table()
            vis.end_html()
    if (not args.debug):
        if args.evaluate:
            mldash.update(run_description=link)
        with mldash.update_extra_info():
            mldash.extra_info_dict.setdefault('visualizations', []).append(f'Epoch {epoch:3} Visualizations: {link}')
        logger.critical(f'Visualizations: {link}')
def test(): print('Loading toy dataset from JSON...') loader = DatasetLoader() gtDataset = loader.read_json('data/toydata/gt.json') print('>> {}'.format(gtDataset.phrases)) gtBoxList = gtDataset.boxes print('Loading toy predictions from JSON...') predDataset = loader.read_json('data/toydata/pred.json') predictedBoxList = predDataset.boxes iouThreshold = 0.5 assert (predDataset.size == gtDataset.size) print('Evaluating toy dataset...') evaluator = Evaluator() (accuracy, iouList) = evaluator.evaluate(predictedBoxList, gtBoxList, iouThreshold) print('>> Accuracy: {}'.format(accuracy)) for (pred, gt, iou) in zip(predictedBoxList, gtBoxList, iouList): print('>>>> GT: {}, PRED: {}, IoU: {}'.format(gt, pred, iou))
class Dataset(object): ' A class for representing a Dataset\n\t' def __init__(self): self._instances = [] def add_instance(self, propertyDict): ' Append an instance to the dataset.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tpropertyDict : dict\n\t\t\ta dictionary containing the following key/values (minimum)\n\t\t\t\t"phrase" : str\n\t\t\t\t"image" : str\n\t\t\t\t"box" : [x,y,w,h] where (x,y) are coordinates of the top-left of the bounding box and (w, h) are the width and height of the bounding box\n\t\t' self._instances.append(propertyDict) def get_phraselist(self): return [instance['phrase'] for instance in self._instances] def get_imagelist(self): return [instance['image'] for instance in self._instances] def get_boxlist(self): return [instance['box'] for instance in self._instances] def get_instances(self): return self._instances def get_count(self): return len(self._instances) count = property(get_count) size = property(get_count) instances = property(get_instances) phrases = property(get_phraselist) images = property(get_imagelist) boxes = property(get_boxlist)
class DatasetLoader(): ' Utility/factory class to load a Dataset object from a preformatted text or json file\n\t' def __init__(self): pass def read_text(self, filePath): ' Loads a Dataset object from a text file.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tfilePath : str\n\t\t\tPath to text file containing the dataset.\n\t\t\tFormat of text file: image_id \t phrase \t x \t y \t w \t h \n\t\n\t\tReturns\n\t\t-------\n\t\tDataset\n\t\t\tA Dataset instance, loaded with rows from filePath\n\t\t' dataset = Dataset() for line in open(filePath): data = line.strip().split('\t') dataset.add_instance({'image': data[0], 'phrase': data[1], 'box': list(map(float, data[2:6]))}) return dataset def read_json(self, jsonFilePath): ' Loads a Dataset object from a json file.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tfilePath : str\n\t\t\tPath to json file containing the dataset.\n\t\t\tMinimum format of json file: [{"image": "456329#0", "phrase": "My phrase", "box": [1, 4, 200, 300]}, ...];\n\t\t\t\t\tthere can be other keys in each entry, it will simply be ignored in this code\n\t\t\n\t\tReturns\n\t\t-------\n\t\tDataset\n\t\t\tA Dataset instance, loaded with rows from filePath\n\t\t' dataset = Dataset() obj = json.load(open(jsonFilePath, encoding='utf-8')) for entry in obj: dataset.add_instance(entry) return dataset
class Evaluator(object):
    """Utility class for evaluating phrase localization.

    Boxes are ``[x, y, w, h]`` with ``(x, y)`` the top-left corner and
    ``(w, h)`` the width/height. IoU treats boxes as inclusive pixel grids,
    hence the +/- 1 arithmetic in ``_iou``.
    """

    def __init__(self):
        pass

    def compute_iou(self, predictedBoxList, gtBoxList):
        """Computes the IoU of each predicted box with its ground-truth box.

        Parameters
        ----------
        predictedBoxList : list
            [[x, y, w, h], ...] — one predicted box per query instance.
        gtBoxList : list
            Ground-truth boxes; must be the same length as predictedBoxList.

        Returns
        -------
        list(float)
            The IoU for each prediction in predictedBoxList.
        """
        assert (len(predictedBoxList) == len(gtBoxList)), 'The list of predicted bounding boxes ({}) should be the same size as the list of ground truth bounding boxes ({}).'.format(len(predictedBoxList), len(gtBoxList))
        # Comprehension instead of an append loop; same pairwise order.
        return [self._iou(gt, pred) for (gt, pred) in zip(gtBoxList, predictedBoxList)]

    def accuracy(self, iouList, iouThreshold=0.5):
        """Fraction of entries in iouList with IoU >= iouThreshold.

        Parameters
        ----------
        iouList : list(float)
            List of IoU values.
        iouThreshold : float
            An item counts as a match when its IoU >= iouThreshold.

        Returns
        -------
        float
            Overall accuracy (recall). Raises ZeroDivisionError on an empty
            iouList, matching the previous behavior.
        """
        # Count with a generator instead of materializing a throwaway list.
        matches = sum(1 for iou in iouList if iou >= iouThreshold)
        return (matches * 1.0) / len(iouList)

    def evaluate(self, predictedBoxList, gtBoxList, iouThreshold=0.5):
        """Computes overall accuracy and per-instance IoU.

        Parameters
        ----------
        predictedBoxList, gtBoxList : list
            Parallel lists of [x, y, w, h] boxes.
        iouThreshold : float
            Boxes overlap when IoU >= iouThreshold.

        Returns
        -------
        (accuracy, iouList) : (float, list(float))
        """
        iouList = self.compute_iou(predictedBoxList, gtBoxList)
        return (self.accuracy(iouList, iouThreshold), iouList)

    def _subset_by_category(self, predictedBoxList, gtBoxList, boxCategoriesList, category):
        """Return (predSubset, gtSubset) for instances tagged with `category`."""
        subPredictedBoxList = []
        subGtBoxList = []
        for (pred, gt, categoryList) in zip(predictedBoxList, gtBoxList, boxCategoriesList):
            if category in categoryList:
                subPredictedBoxList.append(pred)
                subGtBoxList.append(gt)
        return (subPredictedBoxList, subGtBoxList)

    def evaluate_perclass(self, predictedBoxList, gtBoxList, boxCategoriesList, iouThreshold=0.5):
        """Computes overall accuracy, per-category accuracies, and IoUs.

        Parameters
        ----------
        predictedBoxList, gtBoxList : list
            Parallel lists of [x, y, w, h] boxes.
        boxCategoriesList : list of list
            Categories per instance (an instance may have several); must be
            the same length as gtBoxList.
        iouThreshold : float

        Returns
        -------
        (accuracy, perclassAccuracies, iouList)
            perclassAccuracies maps category label -> accuracy (float).
        """
        categorySet = set()
        for categoryList in boxCategoriesList:
            categorySet.update(categoryList)
        iouList = self.compute_iou(predictedBoxList, gtBoxList)
        accuracy = self.accuracy(iouList, iouThreshold)
        perClassAccDict = {}
        for category in categorySet:
            (subPredictedBoxList, subGtBoxList) = self._subset_by_category(predictedBoxList, gtBoxList, boxCategoriesList, category)
            perClassAccDict[category] = self.accuracy(self.compute_iou(subPredictedBoxList, subGtBoxList), iouThreshold)
        return (accuracy, perClassAccDict, iouList)

    def _best_iou_per_instance(self, candidateBoxLists, gtBoxList):
        """For each GT box, the max IoU over its candidates.

        Returns (iouList, argmaxList): the best IoU and the index of the
        candidate achieving it, per instance.
        """
        iouList = []
        argmaxList = []
        for (candidates, gtBox) in zip(candidateBoxLists, gtBoxList):
            # [gtBox] * n shares one list object, which is safe: _iou only
            # reads its arguments.
            instanceIouList = self.compute_iou(candidates, [gtBox] * len(candidates))
            maxIou = max(instanceIouList)
            iouList.append(maxIou)
            argmaxList.append(instanceIouList.index(maxIou))
        return (iouList, argmaxList)

    def evaluate_upperbound_perclass(self, predictedBoxList, gtBoxList, boxCategoriesList, iouThreshold=0.5):
        """Upper-bound evaluation with multiple candidate boxes per instance.

        Keeps the max IoU across all candidates of each instance.

        Parameters
        ----------
        predictedBoxList : list of list
            [[[x, y, w, h], ...], ...] — candidate boxes per query instance.
        gtBoxList : list
            Ground-truth boxes; same length as predictedBoxList.
        boxCategoriesList : list of list
            Categories per instance; same length as gtBoxList.
        iouThreshold : float

        Returns
        -------
        (accuracy, perclassAccuracies, iouList, argmaxList)
            iouList holds the best IoU per instance; argmaxList the index of
            the candidate that achieved it.
        """
        categorySet = set()
        for categoryList in boxCategoriesList:
            categorySet.update(categoryList)
        (iouList, argmaxList) = self._best_iou_per_instance(predictedBoxList, gtBoxList)
        accuracy = self.accuracy(iouList, iouThreshold)
        perClassAccDict = {}
        for category in categorySet:
            (subPredictedBoxList, subGtBoxList) = self._subset_by_category(predictedBoxList, gtBoxList, boxCategoriesList, category)
            (subIouList, _) = self._best_iou_per_instance(subPredictedBoxList, subGtBoxList)
            perClassAccDict[category] = self.accuracy(subIouList, iouThreshold)
        return (accuracy, perClassAccDict, iouList, argmaxList)

    def _iou(self, box1, box2):
        """Computes intersection over union (IoU) for two boxes.

        Each box is [x, y, w, h]; edges are treated as inclusive pixel
        coordinates (right = x + w - 1).

        Returns
        -------
        float
            Intersection over union for box1 and box2.
        """
        (box1_left_x, box1_top_y, box1_w, box1_h) = box1
        box1_right_x = ((box1_left_x + box1_w) - 1)
        box1_bottom_y = ((box1_top_y + box1_h) - 1)
        (box2_left_x, box2_top_y, box2_w, box2_h) = box2
        box2_right_x = ((box2_left_x + box2_w) - 1)
        box2_bottom_y = ((box2_top_y + box2_h) - 1)
        intersect_left_x = max(box1_left_x, box2_left_x)
        intersect_top_y = max(box1_top_y, box2_top_y)
        intersect_right_x = min(box1_right_x, box2_right_x)
        intersect_bottom_y = min(box1_bottom_y, box2_bottom_y)
        # Clamp at 0 so disjoint boxes contribute no intersection area.
        overlap_x = max(0, ((intersect_right_x - intersect_left_x) + 1))
        overlap_y = max(0, ((intersect_bottom_y - intersect_top_y) + 1))
        intersect = (overlap_x * overlap_y)
        union = (((box1_w * box1_h) + (box2_w * box2_h)) - intersect)
        return ((intersect * 1.0) / union)
def test(): ' Toy example for testing the evaluation script\n\t' queryList = ['my first phrase', 'my second phrase'] imageList = ['0001.jpg', '0002.jpg'] gtBoxList = [[1, 1, 30, 30], [50, 50, 100, 200]] iouThreshold = 0.5 predictedBoxList = [[31, 31, 30, 30], [50, 50, 100, 200]] evaluator = Evaluator() (accuracy, iouList) = evaluator.evaluate(predictedBoxList, gtBoxList, iouThreshold) print(accuracy) print(iouList)
def get_dataset(name: str) -> pd.DataFrame: 'Load a processed dataset based on a name' return pd.read_csv(f'data/processed/{name}/data.csv').dropna()
def preprocess_enron() -> None: 'Clean and rename the dataset and save it in data/processed' Path('data/raw/enron').mkdir(parents=True, exist_ok=True) Path('data/processed/enron').mkdir(parents=True, exist_ok=True) url = 'https://github.com/MWiechmann/enron_spam_data/raw/master/enron_spam_data.zip' with urlopen(url) as zurl: with ZipFile(BytesIO(zurl.read())) as zfile: zfile.extractall('data/raw/enron') df = pd.read_csv('data/raw/enron/enron_spam_data.csv', encoding='ISO-8859-1') df = df.fillna('') df['text'] = (df['Subject'] + df['Message']) df['label'] = df['Spam/Ham'].map({'ham': 0, 'spam': 1}) df = df[['text', 'label']] df = df.dropna() df = df.drop_duplicates() df.to_csv('data/processed/enron/data.csv', index=False)
def preprocess_ling() -> None: 'Clean and rename the dataset and save it in data/processed' Path('data/raw/ling').mkdir(parents=True, exist_ok=True) Path('data/processed/ling').mkdir(parents=True, exist_ok=True) url = 'https://github.com/oreilly-japan/ml-security-jp/raw/master/ch02/lingspam_public.tar.gz' r = urlopen(url) t = tarfile.open(name=None, fileobj=BytesIO(r.read())) t.extractall('data/raw/ling') t.close() path = 'data/raw/ling/lingspam_public/bare/*/*' data = [] for fn in glob.glob(path): label = (1 if ('spmsg' in fn) else 0) with open(fn, 'r', encoding='ISO-8859-1') as file: text = file.read() data.append((text, label)) df = pd.DataFrame(data, columns=['text', 'label']) df = df.dropna() df = df.drop_duplicates() df.to_csv('data/processed/ling/data.csv', index=False)
def preprocess_sms() -> None: 'Clean and rename the dataset and save it in data/processed' Path('data/raw/sms').mkdir(parents=True, exist_ok=True) Path('data/processed/sms').mkdir(parents=True, exist_ok=True) url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip' with urlopen(url) as zurl: with ZipFile(BytesIO(zurl.read())) as zfile: zfile.extractall('data/raw/sms') df = pd.read_csv('data/raw/sms/SMSSpamCollection', sep='\t', header=None) df = df.drop_duplicates(keep='first') df = df.rename(columns={0: 'label', 1: 'text'}) df['label'] = df['label'].map({'ham': 0, 'spam': 1}) df = df.dropna() df = df.drop_duplicates() df.to_csv('data/processed/sms/data.csv', index=False)
def preprocess_spamassassin() -> None: 'Clean and rename the dataset and save it in data/processed' Path('data/raw/spamassassin').mkdir(parents=True, exist_ok=True) Path('data/processed/spamassassin').mkdir(parents=True, exist_ok=True) urls = ['https://spamassassin.apache.org/old/publiccorpus/20030228_easy_ham.tar.bz2', 'https://spamassassin.apache.org/old/publiccorpus/20030228_easy_ham_2.tar.bz2', 'https://spamassassin.apache.org/old/publiccorpus/20030228_hard_ham.tar.bz2', 'https://spamassassin.apache.org/old/publiccorpus/20030228_spam.tar.bz2', 'https://spamassassin.apache.org/old/publiccorpus/20050311_spam_2.tar.bz2'] for url in urls: r = urlopen(url) t = tarfile.open(name=None, fileobj=BytesIO(r.read())) t.extractall('data/raw/spamassassin') t.close() path = 'data/raw/spamassassin/*/*' data = [] for fn in glob.glob(path): label = (0 if ('ham' in fn) else 1) with open(fn, 'r', encoding='ISO-8859-1') as file: text = file.read() data.append((text, label)) df = pd.DataFrame(data, columns=['text', 'label']) df = df.dropna() df = df.drop_duplicates() df.to_csv('data/processed/spamassassin/data.csv', index=False)