def best_cluster_fit(y_true, y_pred):
y_true = y_true.astype(np.int64)
D = (max(y_pred.max(), y_true.max()) + 1)
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[(y_pred[i], y_true[i])] += 1
ind = la.linear_assignment((w.max() - w))
best_fit = []
for i in range(y_pred.size):
for j in range(len(ind)):
if (ind[j][0] == y_pred[i]):
best_fit.append(ind[j][1])
return (best_fit, ind, w)
|
def cluster_acc(y_true, y_pred):
(_, ind, w) = best_cluster_fit(y_true, y_pred)
return ((sum([w[(i, j)] for (i, j) in ind]) * 1.0) / y_pred.size)
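# Hedged usage sketch: `la` above refers to the deprecated
# sklearn.utils.linear_assignment_ module; scipy.optimize.linear_sum_assignment
# solves the same assignment problem and returns two index arrays instead of
# an (n, 2) array of pairs, so cluster accuracy can equivalently be computed as:
import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_acc_scipy(y_true, y_pred):
    # contingency matrix between predicted and true labels, as in best_cluster_fit
    y_true = np.asarray(y_true).astype(np.int64)
    y_pred = np.asarray(y_pred)
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)  # maximize matched counts
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size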
|
def plot(x, y, plot_id, names=None, n_clusters=10):
viz_df = pd.DataFrame(data=x[:5000])
viz_df['Label'] = y[:5000]
if (names is not None):
viz_df['Label'] = viz_df['Label'].map(names)
plt.subplots(figsize=(8, 5))
sns.scatterplot(x=0, y=1, hue='Label', legend='full', hue_order=sorted(viz_df['Label'].unique()), palette=sns.color_palette('hls', n_colors=n_clusters), alpha=0.5, data=viz_df)
l = plt.legend(bbox_to_anchor=((- 0.1), 1.0, 1.1, 0.5), loc='lower left', markerfirst=True, mode='expand', borderaxespad=0, ncol=(n_clusters + 1), handletextpad=0.01)
l.texts[0].set_text('')
plt.ylabel('')
plt.xlabel('')
plt.tight_layout()
plt.title(plot_id, pad=40)
|
class n2d():
    """
    n2d: deep embedding plus manifold clustering.

    Parameters
    ----------
    autoencoder : object
        An initialized autoencoder, such as the standard AutoEncoder class.
        It must have a fit method and expose the embedding as `.encoder`
        (see the example on Read the Docs for the expected structure).

    manifold_learner : object
        An initialized manifold learner and clustering algorithm, such as
        UmapGMM. It must have at least fit and predict methods.
    """
def __init__(self, autoencoder, manifold_learner):
self.autoencoder = autoencoder
self.manifold_learner = manifold_learner
self.encoder = self.autoencoder.encoder
self.clusterer = self.manifold_learner.cluster_manifold
self.manifolder = self.manifold_learner.manifold_in_embedding
self.preds = None
self.probs = None
self.hle = None
def fit(self, x, batch_size=256, epochs=1000, loss='mse', optimizer='adam', weights=None, verbose=1, weight_id=None, patience=None):
        """fit: train the autoencoder.

        Parameters
        ----------
        x : array-like
            the input data
        batch_size : int
            the batch size
        epochs : int
            number of epochs you wish to run
        loss : string or function
            loss function, defaults to mse
        optimizer : string or function
            optimizer, defaults to adam
        weights : string
            if used, the path to the pretrained nn weights
        verbose : int
            how verbose you wish the autoencoder to be while training
        weight_id : string
            where you wish to save the weights
        patience : int or None
            if None, do nothing special; otherwise the early stopping patience
        """
self.autoencoder.fit(x=x, batch_size=batch_size, epochs=epochs, loss=loss, optimizer=optimizer, weights=weights, verbose=verbose, weight_id=weight_id, patience=patience)
hl = self.encoder.predict(x)
self.manifold_learner.fit(hl)
def predict(self, x):
hl = self.encoder.predict(x)
self.preds = self.manifold_learner.predict(hl)
self.hle = self.manifold_learner.hle
return self.preds
def predict_proba(self, x):
hl = self.encoder.predict(x)
self.probs = self.manifold_learner.predict_proba(hl)
self.hle = self.manifold_learner.hle
return self.probs
def fit_predict(self, x, batch_size=256, epochs=1000, loss='mse', optimizer='adam', weights=None, verbose=1, weight_id=None, patience=None):
self.autoencoder.fit(x=x, batch_size=batch_size, epochs=epochs, loss=loss, optimizer=optimizer, weights=weights, verbose=verbose, weight_id=weight_id, patience=patience)
hl = self.encoder.predict(x)
self.preds = self.manifold_learner.fit_predict(hl)
self.hle = self.manifold_learner.hle
return self.preds
def assess(self, y):
y = np.asarray(y)
acc = np.round(cluster_acc(y, self.preds), 5)
nmi = np.round(metrics.normalized_mutual_info_score(y, self.preds), 5)
ari = np.round(metrics.adjusted_rand_score(y, self.preds), 5)
return (acc, nmi, ari)
def visualize(self, y, names, n_clusters=10):
        """
        visualize: visualize the embedding and clusters.

        Parameters
        ----------
        y : array-like
            true clusters/labels, if they exist (numeric)
        names : dict or None
            the names of the clusters, if they exist
        n_clusters : int
            number of clusters
        """
y = np.asarray(y)
y_pred = np.asarray(self.preds)
hle = self.hle
plot(hle, y, 'n2d', names, n_clusters=n_clusters)
(y_pred_viz, _, _) = best_cluster_fit(y, y_pred)
plot(hle, y_pred_viz, 'n2d-predicted', names, n_clusters=n_clusters)
|
def save_n2d(obj, encoder_id, manifold_id):
    """
    save_n2d: save n2d objects
    --------------------------

    description: Saves the encoder to an h5 file and the manifold
    learner/clusterer to a pickle.

    parameters:

    - obj: the fitted n2d object
    - encoder_id: what to save the encoder as
    - manifold_id: what to save the manifold learner as
    """
obj.encoder.save(encoder_id)
pickle.dump(obj.manifold_learner, open(manifold_id, 'wb'))
|
def load_n2d(encoder_id, manifold_id):
    """
    load_n2d: load n2d objects
    --------------------------

    description: loads fitted n2d objects from files. Note you CANNOT train
    these objects further; the only method which will perform correctly is
    `.predict`.

    parameters:

    - encoder_id: where the encoder is stored
    - manifold_id: where the manifold learner/clusterer is stored
    """
    man = pickle.load(open(manifold_id, 'rb'))
    # n2d.__init__ expects a fitted autoencoder object and would fail on a
    # placeholder; bypass it and restore the attributes directly from disk.
    out = n2d.__new__(n2d)
    out.manifold_learner = man
    out.preds = None
    out.probs = None
    out.hle = None
    out.encoder = load_model(encoder_id, compile=False)
    return out
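# A minimal end-to-end sketch of the class above (hedged: AutoEncoder and
# UmapGMM are assumed to be the package's own classes, and the constructor
# arguments shown are illustrative, not definitive signatures).
import numpy as np
x = np.random.rand(1000, 784).astype('float32')  # stand-in for e.g. flattened MNIST
ae = AutoEncoder(input_dim=784, architecture=[500, 500, 2000], ae_dim=10)
manifold = UmapGMM(n_clusters=10)
model = n2d(ae, manifold)
preds = model.fit_predict(x, epochs=10)
save_n2d(model, 'encoder.h5', 'manifold.pkl')
model2 = load_n2d('encoder.h5', 'manifold.pkl')  # only .predict is safe on a loaded object
preds2 = model2.predict(x)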
|
class manifold_cluster_generator(N2D.UmapGMM):
def __init__(self, manifold_class, manifold_args, cluster_class, cluster_args):
self.manifold_in_embedding = manifold_class(**manifold_args)
self.cluster_manifold = cluster_class(**cluster_args)
proba = getattr(self.cluster_manifold, 'predict_proba', None)
self.proba = callable(proba)
self.hle = None
def fit(self, hl):
super().fit(hl)
    def predict(self, hl):
        if self.proba:
            return super().predict(hl)
        manifold = self.manifold_in_embedding.transform(hl)
        y_pred = self.cluster_manifold.predict(manifold)
        return np.asarray(y_pred)
    def fit_predict(self, hl):
        if self.proba:
            return super().fit_predict(hl)
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        y_pred = self.cluster_manifold.fit_predict(self.hle)
        return np.asarray(y_pred)
    def predict_proba(self, hl):
        if self.proba:
            return super().predict_proba(hl)
        print('Your clusterer cannot predict probabilities')
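# Hedged example of the generator above: pairing UMAP with KMeans, which has
# no predict_proba, exercises the non-probabilistic branches. umap-learn and
# scikit-learn are assumed installed; the parameters are illustrative.
import umap
from sklearn.cluster import KMeans

gen = manifold_cluster_generator(
    manifold_class=umap.UMAP, manifold_args={'n_components': 2},
    cluster_class=KMeans, cluster_args={'n_clusters': 10})
# labels = gen.fit_predict(hl)  # hl: (n_samples, ae_dim) encoder output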
|
class autoencoder_generator(N2D.AutoEncoder):
def __init__(self, model_levels=(), x_lambda=(lambda x: x)):
self.Model = Model(model_levels[0], model_levels[2])
self.encoder = Model(model_levels[0], model_levels[1])
self.x_lambda = x_lambda
def fit(self, x, batch_size, epochs, loss, optimizer, weights, verbose, weight_id, patience):
super().fit(x, batch_size, epochs, loss, optimizer, weights, verbose, weight_id, patience)
|
def load_clip_cpu(backbone_name):
model_path = 'path_to_CLIP_ViT-B-16_pre-trained_parameters'
try:
model = torch.jit.load(model_path, map_location='cpu').eval()
state_dict = None
except RuntimeError:
state_dict = torch.load(model_path, map_location='cpu')
model = clip.build_model((state_dict or model.state_dict()))
return model
|
def transform_center():
interp_mode = Image.BICUBIC
tfm_test = []
tfm_test += [Resize(224, interpolation=interp_mode)]
tfm_test += [CenterCrop((224, 224))]
tfm_test += [ToTensor()]
normalize = Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
tfm_test += [normalize]
tfm_test = Compose(tfm_test)
return tfm_test
|
def get_videos(vidname, read_path):
allframes = []
videoins = (read_path + vidname)
vvv = cv2.VideoCapture(videoins)
if (not vvv.isOpened()):
print('Video is not opened! {}'.format(videoins))
else:
fps = vvv.get(cv2.CAP_PROP_FPS)
totalFrameNumber = vvv.get(cv2.CAP_PROP_FRAME_COUNT)
size = (int(vvv.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vvv.get(cv2.CAP_PROP_FRAME_HEIGHT)))
second = (totalFrameNumber // fps)
if (totalFrameNumber != 0):
for _ in range(int(totalFrameNumber)):
(rval, frame) = vvv.read()
if (frame is not None):
img = Image.fromarray(frame.astype('uint8')).convert('RGB')
imgtrans = centrans(img).numpy()
allframes.append(imgtrans)
return np.array(allframes)
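# `centrans` above is assumed to be a module-level instance of the Compose
# pipeline returned by transform_center(); note also that cv2 decodes frames
# in BGR order, so a cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) before
# Image.fromarray would be needed for true RGB input.
centrans = transform_center()
# frames = get_videos('clip.avi', '/path/to/videos/')  # -> (T, 3, 224, 224) array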
|
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
"\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n "
bs = ((list(range(ord('!'), (ord('~') + 1))) + list(range(ord('¡'), (ord('¬') + 1)))) + list(range(ord('®'), (ord('ÿ') + 1))))
cs = bs[:]
n = 0
for b in range((2 ** 8)):
if (b not in bs):
bs.append(b)
cs.append(((2 ** 8) + n))
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
|
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
|
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
|
def whitespace_clean(text):
text = re.sub('\\s+', ' ', text)
text = text.strip()
return text
|
class SimpleTokenizer(object):
def __init__(self, bpe_path: str=default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
merges = merges[1:(((49152 - 256) - 2) + 1)]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = (vocab + [(v + '</w>') for v in vocab])
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
pairs = get_pairs(word)
if (not pairs):
return (token + '</w>')
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
return text
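# Round-trip sketch for the tokenizer above. The BPE vocab from default_bpe()
# must exist on disk, and `re` must be the third-party `regex` module (the
# \p{L}/\p{N} classes in self.pat are not supported by the stdlib re).
tokenizer = SimpleTokenizer()
ids = tokenizer.encode('a photo of a dog')
print(ids)                    # list of BPE token ids
print(tokenizer.decode(ids))  # 'a photo of a dog '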
|
def setup_path(args):
prefix = args.prefix
postfix = args.postfix
openset = args.openset
temporal = args.temporal
tfmlayers = args.tfm_layers
batchsize = args.batchsize
numFrames = args.numFrames
iters = args.num_iterations
verbose = (args.verbose if args.verbose else 'none')
dataset = args.dataset.split('-')[0]
global dt_string, logPath, modelPath, resultsPath
now = datetime.now()
dt_string = now.strftime('%Y_%m_%d_%H_%M')
if (args.test or args.resume):
if args.test:
basename = args.test.split('/')[(- 2)]
elif args.resume:
basename = args.resume.split('/')[(- 2)]
logPath = os.path.join('../logs/', basename)
modelPath = os.path.join('../models/', basename)
try:
with open('{}/running_command.txt'.format(modelPath), 'a') as f:
json.dump({'command_time_stamp': dt_string, **args.__dict__}, f, indent=2)
        except (OSError, TypeError):
print({'command_time_stamp': dt_string, **args.__dict__})
    else:
        suffix = ('_' + args.dir_postfix) if args.dir_postfix != '' else ''
        run_name = f'{dt_string}-dataset_{dataset}-openset_{openset}-iter_{iters:.0e}-bs_{batchsize}-numFrames_{numFrames}-temporal_{temporal}-tfmL_{tfmlayers}-prompt_{prefix}+X+{postfix}-{verbose}'
        os.makedirs(f'../logs{suffix}/', exist_ok=True)
        os.makedirs(f'../models{suffix}/', exist_ok=True)
        logPath = os.path.join(f'../logs{suffix}/', run_name)
        modelPath = os.path.join(f'../models{suffix}/', run_name)
os.makedirs(logPath, exist_ok=True)
os.makedirs(modelPath, exist_ok=True)
with open('{}/running_command.txt'.format(modelPath), 'w') as f:
json.dump({'command_time_stamp': dt_string, **args.__dict__}, f, indent=2)
return [logPath, modelPath]
|
def setup_dataloader(args):
if (args.dataset == 'HMDB51-feature-30fps-center'):
feature_root = '../feat/HMDB'
else:
raise ValueError('Unknown dataset.')
if args.dataset.startswith('HMDB'):
(trainactions, valactions) = ([], [])
trn_dataset = readFeatureHMDB51(root=feature_root, frames=args.numFrames, fpsR=[1, (1 / 2), (1 / 3), (1 / 3), (1 / 3), (1 / 4)], ensemble=1, mode='train')
val_dataset = readFeatureHMDB51(root=feature_root, frames=args.numFrames, fpsR=[1, (1 / 2), (1 / 3), (1 / 3), (1 / 3), (1 / 4)], ensemble=args.valEnsemble, mode='val')
return [trn_dataset, val_dataset, trainactions, valactions]
|
def main(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = ('cuda' if torch.cuda.is_available() else 'cpu')
[logPath, modelPath] = cg.setup_path(args)
args.model_path = modelPath
logger = SummaryWriter(logdir=logPath)
args.return_intermediate_text_feature = 0
    [trn_dataset, val_dataset, trainactions, valactions] = cg.setup_dataloader(args)
    trnloader = FastDataLoader(trn_dataset, batch_size=args.batchsize, num_workers=args.workers, shuffle=True, pin_memory=False, drop_last=True)
    valloader = FastDataLoader(val_dataset, batch_size=args.batchsize, num_workers=args.workers, shuffle=False, pin_memory=False, drop_last=False)
print('==> reading meta data for {}'.format(args.dataset))
(actionlist, actiondict, actiontoken) = text_prompt(dataset=args.dataset, clipbackbone=args.backbone, device=device)
print('==> initialising action recognition model')
model = CLIPrompt(args, actionlist, actiondict, actiontoken, device)
model.float()
model.to(device)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=0.01)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=int(args.decay_steps), eta_min=(args.lr * 0.01), last_epoch=(- 1))
args.start_iter = 0
if args.test:
print('loading checkpoint {}'.format(args.test))
if (args.test == 'random/random'):
iteration = 0
print('loading random weights')
else:
checkpoint = torch.load(args.test, map_location=torch.device('cpu'))
iteration = checkpoint['iteration']
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
print('loading successful')
val.val_CLIPrompt(args, valloader, [actionlist, actiondict, actiontoken, trainactions, valactions], model, None, device, iteration)
print('test finish, exiting')
sys.exit()
if args.resume:
print('loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
iteration = checkpoint['iteration']
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
args.start_iter = iteration
print('loading successful')
print('======> start training {}, {}, use {}.'.format(args.dataset, args.verbose, device))
train.train_CLIPrompt(args, [trnloader, valloader], [actionlist, actiondict, actiontoken, trainactions, valactions], model, optimizer, lr_scheduler, logger, device)
|
def convert_to_token(xh):
xh_id = clip.tokenize(xh).cpu().data.numpy()
return xh_id
|
def text_prompt(dataset='HMDB51', clipbackbone='ViT-B/16', device='cpu'):
(actionlist, actionprompt, actiontoken) = ([], {}, [])
numC = {'HMDB51-feature-30fps-center': 51}
(clipmodel, _) = clip.load(clipbackbone, device=device, jit=False)
for paramclip in clipmodel.parameters():
paramclip.requires_grad = False
if (dataset == 'HMDB51-feature-30fps-center'):
meta = open('../data/HMDB51/HMDB51_action.list', 'rb')
actionlist = meta.readlines()
meta.close()
actionlist = np.array([a.decode('utf-8').split('\n')[0] for a in actionlist])
actiontoken = np.array([convert_to_token(a) for a in actionlist])
with torch.no_grad():
actionembed = clipmodel.encode_text_light(torch.tensor(actiontoken).to(device))
actiondict = OrderedDict(((actionlist[i], actionembed[i].cpu().data.numpy()) for i in range(numC[dataset])))
actiontoken = OrderedDict(((actionlist[i], actiontoken[i]) for i in range(numC[dataset])))
return (actionlist, actiondict, actiontoken)
|
def set_learning_rate(optimizer, lr):
for g in optimizer.param_groups:
g['lr'] = lr
|
def readtxt(metapath, datapath):
(vidDir, vidLabel) = ([], [])
f = open(metapath, 'rb')
path = f.readlines()
f.close()
for p in path:
psplit = p.decode('utf-8').strip('\n').split(',')
vidDir += [os.path.join(datapath, psplit[0])]
vidLabel += [[int(psplit[1]), psplit[2], int(psplit[3])]]
return (vidDir, vidLabel)
|
def save_checkpoint(state, is_best=0, gap=1, filename='checkpoint.pth.tar', keep_all=False):
torch.save(state, filename)
last_epoch_path = os.path.join(os.path.dirname(filename), ('checkpoint_iter%s.pth.tar' % str((state['iteration'] - gap))))
if (not keep_all):
try:
os.remove(last_epoch_path)
        except OSError:
pass
if is_best:
past_best = glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))
past_best = sorted(past_best, key=(lambda x: int(''.join(filter(str.isdigit, x)))))
if (len(past_best) >= 5):
try:
os.remove(past_best[0])
            except OSError:
pass
torch.save(state, os.path.join(os.path.dirname(filename), ('model_best_iter%s.pth.tar' % str(state['iteration']))))
|
class _RepeatSampler(object):
    """Sampler that repeats forever.

    Args:
        sampler (Sampler)
    """
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
(yield from iter(self.sampler))
|
class FastDataLoader(torch.utils.data.dataloader.DataLoader):
'for reusing cpu workers, to save time'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
(yield next(self.iterator))
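# Toy usage of the loader above: because the batch sampler repeats forever,
# __iter__ caps each pass at len(batch_sampler.sampler) batches while the
# worker processes survive across epochs.
import torch
ds = torch.utils.data.TensorDataset(torch.arange(100).float())
loader = FastDataLoader(ds, batch_size=10, num_workers=2, shuffle=True)
for epoch in range(2):
    for (batch,) in loader:
        pass  # 10 batches per pass; no worker respawn between epochs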
|
def main():
mode = argv[1]
e = Evaluator()
if (mode == 'wikt'):
e.read_all_wiktionary()
e.compare_with_triangles_stdin()
elif (mode == 'feat'):
e.write_labels(argv[2])
e.featurize_and_uniq_triangles_stdin()
|
def scan_stdin(args):
stats = {'punct': 0, 'punct ok': 0, 'sum': 0, 'invalid': 0}
for l in stdin:
stats['sum'] += 1
try:
(wc1, w1, wc2, w2) = l.decode('utf8').strip().split('\t')[0:4]
if args['punct']:
if (abs((len(punct_re.findall(w1)) - len(punct_re.findall(w2)))) >= int(args['--num'])):
print(l.strip())
stats['punct'] += 1
else:
stats['punct ok'] += 1
if args['unigram']:
if ((not (wc1 in sum_)) or (not (wc2 in sum_))):
stderr.write('INVALID, unknown language: {0}'.format(l))
continue
if ((wc1 in args['--whitelist']) or (wc2 in args['--whitelist'])):
continue
prob1 = 0.0
prob2 = 0.0
for c in w1:
prob1 += math.log((float(unigrams[wc1][c]) / sum_[wc1]))
prob1 /= len(w1)
for c in w2:
prob2 += math.log((float(unigrams[wc2][c]) / sum_[wc2]))
prob2 /= len(w2)
if ((prob1 < int(args['<prob_threshold>'])) or (prob2 < int(args['<prob_threshold>']))):
print(l.strip())
except ValueError:
stats['invalid'] += 1
stderr.write('INVALID: {0}'.format(l))
|
def read_unigrams(fn):
with open(fn) as f:
for l in f:
(wc, c, cnt) = l.decode('utf8').split('\t')
unigrams[wc][c] = int(cnt)
sum_[wc] += int(cnt)
|
def main():
args = docopt(__doc__, version='Wikt2Dict - Find anomalies 1.0')
if args['unigram']:
read_unigrams(args['<unigram_file>'])
scan_stdin(args)
|
def read_pairs(wc_filter=None, input_files=None, use_stdin=False):
tri = defaultdict(set)
if use_stdin:
for l in stdin:
add_pair(l, tri, wc_filter)
elif input_files:
for fn in input_files:
with open(fn) as f:
for l in f:
add_pair(l, tri, wc_filter)
return tri
|
def add_pair(l, tri, wc_filter):
try:
(wc1, w1, wc2, w2) = l.decode('utf8').strip().split('\t')[0:4]
if (wc_filter and ((not (wc1 in wc_filter)) or (not (wc2 in wc_filter)))):
return
tri[(wc1, w1)].add((wc2, w2))
tri[(wc2, w2)].add((wc1, w1))
except ValueError:
stderr.write('Invalid line: {0}'.format(l))
|
def find_k_long_polygons(pairs, k):
if (k == 1):
for word in pairs.keys():
(yield [word])
else:
for polygon in find_k_long_polygons(pairs, (k - 1)):
for word in pairs[polygon[(- 1)]]:
if (not (word in polygon[1:])):
(yield (polygon + [word]))
|
def find_and_print_polygons(pairs, found=None, k=4, mode='polygons'):
for polygon in find_k_long_polygons(pairs, (k + 1)):
if (polygon[0] == polygon[(- 1)]):
output(pairs, found=polygon, mode=mode)
|
def find_k_clicks(pairs, k):
if (k == 1):
for word in pairs.keys():
(yield [word])
else:
for click in find_k_clicks(pairs, (k - 1)):
if (len(click) > (k - 1)):
continue
for word in pairs[click[(- 1)]]:
if (word in click):
continue
bad = False
for c in click[:(- 2)]:
if (not (word in pairs[c])):
bad = True
if (not bad):
click.append(word)
(yield click)
|
def find_and_print_clicks(pairs, k=4):
for click in find_k_clicks(pairs, k):
output(pairs, found=sorted(click), mode='clicks')
|
def output(pairs, found, mode):
(edge_density, new_pairs) = edge_density_and_new_pairs(pairs, found)
if ((mode == 'clicks') and (edge_density == 1.0)):
if arguments['--illustrate']:
print(' --> '.join((', '.join([i, j]) for (i, j) in found)).encode('utf8'))
else:
print('\t'.join(('\t'.join([i, j]) for (i, j) in found)).encode('utf8'))
elif (mode == 'polygons'):
for pair in new_pairs:
if arguments['--illustrate']:
print((((((('\t'.join(pair[0]).encode('utf8') + '\t') + '\t'.join(pair[1]).encode('utf8')) + '\t') + str(edge_density)) + '\t') + ' --> '.join((', '.join(i) for i in found)).encode('utf8')))
else:
print((((((('\t'.join(pair[0]).encode('utf8') + '\t') + '\t'.join(pair[1]).encode('utf8')) + '\t') + str(edge_density)) + '\t') + '\t'.join(('\t'.join(i) for i in found)).encode('utf8')))
|
def edge_density_and_new_pairs(pairs, cycle):
new_pairs = list()
all_pairs = list()
for (i, e1) in enumerate(cycle):
for e2 in cycle[(i + 1):(- 1)]:
all_pairs.append(sorted([e1, e2]))
if ((not (e2 in pairs[e1])) and (not (e1 in pairs[e2]))):
new_pairs.append(sorted([e1, e2]))
n = len(all_pairs)
return ((1 - (float(len(new_pairs)) / ((n * (n - 1)) / 2))), new_pairs)
|
def main():
if arguments['--wc-filter']:
with open(arguments['--wc-filter']) as f:
wc_filter = set([wc.strip() for wc in f])
else:
wc_filter = None
k = int(arguments['--k'])
if arguments['<input>']:
pairs = read_pairs(wc_filter, input_files=arguments['<input>'])
else:
pairs = read_pairs(wc_filter, use_stdin=True)
stderr.write('Pairs read\n')
stderr.write('Number of pairs {0}\n'.format((sum((len(v) for v in pairs.values())) / 2)))
if arguments['polygons']:
find_and_print_polygons(pairs, k=k, mode='polygons')
else:
find_and_print_clicks(pairs, k=k)
|
def read_table(fn):
mapping = defaultdict(set)
with open(fn) as f:
for l in f:
fd = l.decode('utf8').strip().split('\t')
id_ = int(fd[0])
for (i, lang) in enumerate(['en', 'hu', 'la', 'pl']):
if (fd[(i + 1)] == '#'):
continue
for word in fd[(i + 1)].split('/'):
mapping[(lang, word.replace('_', ' '))].add(id_)
return mapping
|
def read_words(fn):
words = set()
with open(fn) as f:
for l in f:
fd = l.decode('utf8').strip().split('\t')
if (len(fd) >= 2):
words.add((fd[0], fd[1]))
if (len(fd) >= 4):
words.add((fd[2], fd[3]))
return words
|
def find_translations(words):
iter_no = 0
for l in stdin:
iter_no += 1
if ((iter_no % 1000000) == 0):
stderr.write('{}\n'.format(iter_no))
try:
fd = l.decode('utf8').strip().split('\t')
pair1 = (fd[0], fd[1])
pair2 = (fd[2], fd[3])
if (pair1 in words):
print('\t'.join((fd[0:4] + list(pair1))).encode('utf8'))
if (pair2 in words):
print('\t'.join((fd[0:4] + list(pair2))).encode('utf8'))
except ValueError:
stderr.write('Error in line {}'.format(l))
|
def add_orig_bindings(mapping, translations):
for ((wc, word), ids) in mapping.iteritems():
for id_ in ids:
translations[id_][wc].add(word)
|
def find_translations_to_table(mapping):
iter_no = 0
translations = defaultdict((lambda : defaultdict(set)))
add_orig_bindings(mapping, translations)
for l in stdin:
iter_no += 1
if ((iter_no % 1000000) == 0):
stderr.write('{}\n'.format(iter_no))
try:
fd = l.decode('utf8').strip().split('\t')
(wc1, w1, wc2, w2) = fd[0:4]
if ((wc1 == 'roa_rup') or (wc2 == 'roa_rup')):
continue
wc1 = ('zh' if (wc1 == 'cmn') else wc1)
wc2 = ('zh' if (wc2 == 'cmn') else wc2)
pair1 = (wc1, w1)
pair2 = (wc2, w2)
for id_ in mapping[pair1]:
translations[id_][wc2].add(w2)
for id_ in mapping[pair2]:
translations[id_][wc1].add(w1)
except ValueError:
stderr.write('Error in line {}'.format(l))
for (id_, trans) in translations.iteritems():
trans_to_dump = dict()
for (wc, words) in trans.iteritems():
trans_to_dump[wc] = sorted(words)
print('{0}\t{1}'.format(id_, json.dumps(trans_to_dump)))
|
def main():
mode = (argv[2] if (len(argv) > 2) else 'direct')
if (mode == 'direct'):
words = read_words(argv[1])
find_translations(words)
elif (mode == 'collect'):
table = read_table(argv[1])
find_translations_to_table(table)
|
def main():
if ((len(argv) > 2) and (not (argv[2] == 'all'))):
filter_wc = set([wc.strip() for wc in argv[2:]])
else:
filter_wc = None
cfg_fn = argv[1]
logger = logging.getLogger('wikt2dict')
cfg = ConfigHandler('general', cfg_fn)
logger = LogHandler(cfg)
with open(cfg['wikicodes']) as wc_f:
wikicodes = set([w.strip() for w in wc_f])
n = len(wikicodes)
if filter_wc:
m = (n - len(filter_wc))
else:
m = 0
num_of_tr = ((((n * (n - 1)) * (n - 2)) / 6) - (((m * (m - 1)) * (m - 2)) / 6))
i = 1
for triangle_wc in combinations(wikicodes, 3):
if (filter_wc and (len((set(triangle_wc) & filter_wc)) == 0)):
continue
stderr.write(((((str(i) + '/') + str(num_of_tr)) + repr(triangle_wc)) + '\n'))
i += 1
logger.info((' '.join(triangle_wc) + ' triangle'))
triangulator = Triangulator(triangle_wc, cfg_fn)
triangulator.collect_triangles()
triangulator.write_triangles()
|
def main():
unigrams = defaultdict((lambda : defaultdict(int)))
for l in stdin:
try:
(wc1, w1, wc2, w2) = l.decode('utf8').strip().split('\t')[0:4]
for c in w1:
unigrams[wc1][c] += 1
for c in w2:
unigrams[wc2][c] += 1
except ValueError:
stderr.write('Invalid line: {0}'.format(l))
for (wc, chars) in unigrams.iteritems():
for (c, cnt) in sorted(((k, v) for (k, v) in chars.iteritems()), key=(lambda x: (- x[1]))):
print(u'{0}\t{1}\t{2}'.format(wc, c, cnt).encode('utf8'))
|
class SectionAndArticleParser(ArticleParser):
    """
    Class for parsing Wiktionaries that have translation tables
    in foreign articles too and section-level parsing is required.
    e.g. dewiktionary has a translation section in the article
    about the English word dog. Therefore, we need to recognize
    the language of the title word (dog) and then parse the
    translation table.
    """
def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
ArticleParser.__init__(self, wikt_cfg, parser_cfg, filter_langs)
self.read_section_langmap()
def read_section_langmap(self):
        """
        The language of a section is determined based on its header.
        The header may or may not use language names.
        If a language name map is specified, then each section header
        will be looked up in that map.
        Otherwise wikicodes are used.
        """
self.section_langmap = dict()
if self.cfg.section_langmap:
f = open(self.cfg.section_langmap)
for l in f:
fields = l.strip().decode('utf8').split('\t')
for langname in fields[1:]:
self.section_langmap[langname] = fields[0]
self.section_langmap[langname.title()] = fields[0]
f.close()
else:
self.section_langmap = dict([(wc, wc) for wc in self.wikt_cfg.wikicodes])
def extract_translations(self, title, text):
translations = list()
for (section_lang, section) in self.get_sections(text):
for parser in self.wikt_cfg.section_parsers:
pairs = parser.extract_translations(title, section)
                for p in pairs:
                    if self.wikt_cfg.allow_synonyms is False and p[0] == section_lang:
                        continue
                    translations.append((section_lang, title, p[0], p[1]))
return set(translations)
def get_sections(self, text):
section_titles_i = list()
lines = text.split('\n')
for (i, line) in enumerate(lines):
m = self.cfg.section_re.search(line)
if m:
lang = m.group(self.cfg.section_langfield)
section_titles_i.append((i, lang))
if (not section_titles_i):
return
for (i, (ind, lang)) in enumerate(section_titles_i[:(- 1)]):
if (lang in self.section_langmap):
(yield (self.section_langmap[lang], '\n'.join(lines[ind:section_titles_i[(i + 1)][0]])))
last_lang = section_titles_i[(- 1)][1]
if (last_lang in self.section_langmap):
(yield (self.section_langmap[last_lang], '\n'.join(lines[section_titles_i[(- 1)][0]:])))
|
class LangnamesArticleParser(ArticleParser):
    """
    Class for parsing Wiktionaries that use simple lists for translations
    instead of templates.
    """
def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
ArticleParser.__init__(self, wikt_cfg, parser_cfg, filter_langs)
self.read_langname_mapping()
def read_langname_mapping(self):
self.mapping = dict()
if self.cfg.langnames:
f = open(self.cfg.langnames)
for l in f:
fields = l.strip().decode('utf8').split('\t')
for langname in fields[1:]:
self.mapping[langname] = fields[0]
self.mapping[langname.title()] = fields[0]
self.mapping[langname.lower()] = fields[0]
f.close()
else:
self.mapping = dict([(wc, wc) for wc in self.wikt_cfg.wikicodes])
def extract_translations(self, title, text):
translations = list()
for tr in self.cfg.translation_line_re.finditer(text):
if self.skip_translation_line(tr.group(0)):
continue
langname = tr.group(self.cfg.language_name_field).lower()
if (not (langname in self.mapping)):
continue
wc = self.mapping[langname]
entities = self.get_entities(tr.group(self.cfg.translation_field))
for entity in entities:
entity_clear = self.trim_translation(entity)
if entity_clear:
translations.append((wc, entity_clear))
return set(translations)
def trim_translation(self, word):
return word.replace('\n', ' ').strip()
def get_entities(self, trans_field):
trimmed = self.cfg.bracket_re.sub('', trans_field)
entities = list()
for e in self.cfg.delimiter_re.split(trimmed):
for m in self.cfg.translation_re.finditer(e):
word = m.group(1)
if self.skip_entity(word):
continue
entities.append(word)
return set(entities)
def skip_entity(self, entity):
if self.cfg.skip_translation_re.search(entity):
return True
if (self.cfg.junk_re and self.cfg.junk_re.search(entity)):
return True
return False
|
class DefaultArticleParser(ArticleParser):
def extract_translations(self, title, text):
translations = list()
for tr in self.cfg.trad_re.finditer(text):
wc = tr.group(self.cfg.wc_field)
if ((not wc) or (not wc.strip()) or (not (wc in self.wikt_cfg.wikicodes))):
continue
word = tr.group(self.cfg.word_field)
if ((not word) or (not word.strip())):
continue
word = word.strip()
if self.skip_word(word):
continue
translations.append((wc, word))
return set(translations)
def skip_word(self, word):
if (self.cfg.skip_translation_re and self.cfg.skip_translation_re.search(word)):
return True
if ('\n' in word):
return True
return False
|
def err(msg):
' Prints a message to stderr, terminating it with a newline '
sys.stderr.write((msg + '\n'))
|
class Article():
' Stores the contents of a Wikipedia article '
def __init__(self, title, markup, is_redirect):
self.title = title
self.markup = markup
self.is_redirect = is_redirect
|
class WikiParser():
    """Parses the Wikipedia XML and extracts the relevant data,
    such as sentences and vocabulary"""
def __init__(self, callback, ignore_redirects=True):
self.callback = callback
self.ignore_redirects = ignore_redirects
self.buffer_size = ((10 * 1024) * 1024)
self.ignoredArticleTypes = ['wikipedia', 'category', 'template']
self.xml_parser = sax.ParserCreate()
self.xml_parser.StartElementHandler = (lambda name, attrs: self.xml_start_element(name, attrs))
self.xml_parser.EndElementHandler = (lambda name: self.xml_end_element(name))
self.xml_parser.CharacterDataHandler = (lambda data: self.xml_char_data(data))
        self.article = None
        self.section = None
        self.word = None
        self.enclosing_tags = []
        self.text = []
def process(self):
while True:
buf = sys.stdin.read(self.buffer_size)
if (buf == ''):
break
self.xml_parser.Parse(buf)
def xml_char_data(self, data):
        self.text.append(data)
def xml_start_element(self, name, attrs):
name = name.lower()
self.enclosing_tags = ([name] + self.enclosing_tags)
self.text = []
if (name == 'page'):
self.article = Article(None, None, False)
def xml_end_element(self, name):
name = name.lower()
contents = ''.join(self.text)
if (name == 'title'):
self.article.title = contents
elif (name == 'redirect'):
self.article.is_redirect = True
elif (name == 'text'):
self.article.markup = contents
elif (name == 'page'):
if (self.ignore_redirects and self.article.is_redirect):
pass
else:
self.new_article(self.article)
self.article = None
if ((len(self.enclosing_tags) > 0) and (name == self.enclosing_tags[0])):
self.enclosing_tags = self.enclosing_tags[1:]
else:
err(('Mismatched closing tag: ' + name))
self.text = []
def new_article(self, article):
if (':' in article.title):
articleType = article.title.split(':')[0].lower()
if (articleType in self.ignoredArticleTypes):
return
self.callback(article)
def get_enclosing_tag(self):
return (None if (len(self.enclosing_tags) == 0) else self.enclosing_tags[0])
def close(self):
'Releases all resources associated with this class'
pass
|
class Triangulator(object):
def __init__(self, triangle_wc):
self.wikicodes = set(triangle_wc)
self.cfg = config.WiktionaryConfig()
self.pairs = defaultdict((lambda : defaultdict((lambda : defaultdict((lambda : defaultdict(list)))))))
self.triangles = defaultdict(list)
self.read_pairs_in_three_langs()
def read_pairs_in_three_langs(self):
for wc in (self.wikicodes | set(['de', 'lt'])):
try:
cfg = config.get_config_by_wc(wc)
self.read_pairs_in_lang(wc, cfg.output_path)
except IndexError:
continue
def read_pairs_in_lang(self, wc, fn):
if (not path.exists(fn)):
return
with open(fn) as f:
for l in f:
fd = l.decode('utf8').strip().split('\t')
if (len(fd) < 6):
continue
(wc1, w1, wc2, w2, src_wc, src_art) = fd[0:6]
if (wc1 == 'cmn'):
wc1 = 'zh'
if (wc2 == 'cmn'):
wc2 = 'zh'
if ((not (wc1 in self.wikicodes)) and (not (wc2 in self.wikicodes))):
continue
self.pairs[wc1][w1][wc2][w2].append((src_wc, src_art))
self.pairs[wc2][w2][wc1][w1].append((src_wc, src_art))
def collect_triangles(self):
for wc2 in self.wikicodes:
(wc1, wc3) = sorted([w for w in self.wikicodes if (not (w == wc2))])
for (w2, tr) in self.pairs[wc2].iteritems():
for (w1, src1_l) in tr[wc1].iteritems():
for (w3, src3_l) in tr[wc3].iteritems():
for pair in product(src1_l, src3_l):
if (wc1 < wc3):
self.triangles[(wc1, w1, wc3, w3)].append((pair[0][0], pair[0][1], wc2, w2, pair[1][0], pair[1][1]))
else:
self.triangles[(wc3, w3, wc1, w1)].append((pair[0][0], pair[0][1], wc2, w2, pair[1][0], pair[1][1]))
def write_triangles(self):
dir_ = self.get_dir()
if (not path.exists(dir_)):
makedirs(dir_)
for wc2 in self.wikicodes:
out_str = ''
(wc1, wc3) = sorted([w for w in self.wikicodes if (not (w == wc2))])
min_cnt = int(self.cfg.triangle_threshold)
for (tri, sources) in self.triangles.iteritems():
if ((not (tri[0] == wc1)) or (not (tri[2] == wc3))):
continue
if (len(sources) >= min_cnt):
for s in set(sources):
out_str += ((('\t'.join(tri).encode('utf8') + '\t') + '\t'.join(s).encode('utf8')) + '\n')
if out_str:
with open(((dir_ + '/') + '_'.join([wc1, wc2, wc3])), 'w') as f:
f.write(out_str)
def get_dir(self):
i = 0
file_cnt = 1000
while (file_cnt >= 998):
dir_ = ((self.cfg['triangle_dir'] + '/') + str(i))
i += 1
if (not path.exists(dir_)):
break
file_cnt = len([name for name in listdir(dir_)])
return dir_
|
class Wiktionary(object):
def __init__(self, cfg):
self.cfg = cfg
self.init_parsers()
self.pairs = list()
def init_parsers(self):
self.parsers = list()
for (parser_cl, parser_cfg) in self.cfg.parsers:
self.parsers.append(parser_cl(self.cfg, parser_cfg))
def parse_articles(self, write_immediately=False):
with open(self.cfg.output_path, 'w') as self.outf:
for (title, text) in self.read_dump():
pairs = self.extract_translations(title, text)
if pairs:
if write_immediately:
self.write_one_article_translations(pairs)
else:
self.store_translations(pairs)
if (write_immediately is False):
self.write_all_pairs()
def extract_translations(self, title, text):
if self.skip_article(title, text):
return
pairs = list()
for parser in self.parsers:
for p in parser.extract_translations(title, text):
if (len(p) == 2):
pair = ((self.cfg.wc, title, p[0], p[1]), tuple(parser.cfg.features))
elif (len(p) == 4):
pair = (p, tuple(parser.cfg.features))
else:
raise Exception('Invalid pair {0}'.format(p))
pairs.append(pair)
return set(pairs)
def skip_article(self, title, text):
if ((not title.strip()) or (not text.strip())):
return True
if (':' in title):
return True
return False
    def write_one_article_translations(self, pairs):
        # each element of pairs is ((wc1, w1, wc2, w2), features)
        for (pair, feat) in pairs:
            if self.cfg.verbose_output is True:
                self.outf.write('\t'.join(pair).encode('utf8') + '\n')
            else:
                self.outf.write('\t'.join(pair[0:4]).encode('utf8') + '\n')
def store_translations(self, pairs):
for (pair, feat) in pairs:
(wc1, w1, wc2, w2) = pair[0:4]
if (wc1 < wc2):
self.pairs.append(([wc1, w1, wc2, w2, self.cfg.wc, w1] + list(feat)))
else:
self.pairs.append(([wc2, w2, wc1, w1, wc1, w1] + list(feat)))
def write_all_pairs(self):
for pair in sorted(self.pairs):
if (self.cfg.verbose_output is True):
self.outf.write(('\t'.join(pair).encode('utf8') + '\n'))
else:
self.outf.write(('\t'.join(pair[0:4]).encode('utf8') + '\n'))
def read_dump(self):
with open(self.cfg.dump_path) as f:
title = u''
article = u''
page_sep = '%%#PAGE'
for l_ in f:
l = l_.decode('utf8')
if l.startswith(page_sep):
if (title and article):
(yield (title, article))
title = l.split(page_sep)[(- 1)].strip()
article = u''
else:
article += l
(yield (title, article))
|
def EmbedWord2Vec(walks, dimension):
time_start = time.time()
print('Creating embeddings.')
model = Word2Vec(walks, size=dimension, window=5, min_count=0, sg=1, workers=32, iter=1)
node_ids = model.wv.index2word
node_embeddings = model.wv.vectors
print('Embedding generation runtime: ', (time.time() - time_start))
return (node_ids, node_embeddings)
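# Note: the Word2Vec call above uses the gensim 3.x parameter names. A hedged
# equivalent under gensim >= 4.0, where size/iter/index2word were renamed:
def EmbedWord2Vec_gensim4(walks, dimension):
    from gensim.models import Word2Vec
    model = Word2Vec(walks, vector_size=dimension, window=5, min_count=0, sg=1, workers=32, epochs=1)
    return (model.wv.index_to_key, model.wv.vectors)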
|
def EmbedPoincare(relations, epochs, dimension):
    model = PoincareModel(relations, size=dimension, workers=32)
    model.train(epochs=epochs)
    # the trained vectors live on the model's PoincareKeyedVectors (model.kv)
    node_ids = model.kv.index2entity
    node_embeddings = model.kv.vectors
    return (node_ids, node_embeddings)
|
def TraverseAndSelect(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
walksTAS = []
for hyperedge_index in hyperedges:
hyperedge = hyperedges[hyperedge_index]
walk_hyperedge = []
for _ in range(num_walks):
curr_vertex = random.choice(hyperedge['members'])
initial = True
curr_hyperedge_num = hyperedge_index
curr_hyperedge = hyperedge
for i in range(length):
proba = ((float(alpha) / len(vertexMemberships[curr_vertex])) + beta)
if (random.random() < proba):
adjacent_vertices = curr_hyperedge['members']
curr_vertex = random.choice(adjacent_vertices)
walk_hyperedge.append(str(curr_hyperedge_num))
adjacent_hyperedges = vertexMemberships[curr_vertex]
curr_hyperedge_num = random.choice(adjacent_hyperedges)
curr_hyperedge = hyperedges[curr_hyperedge_num]
walksTAS.append(walk_hyperedge)
return walksTAS
|
def SubsampleAndTraverse(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
walksSAT = []
for hyperedge_index in hyperedges:
hyperedge = hyperedges[hyperedge_index]
walk_vertex = []
curr_vertex = random.choice(hyperedge['members'])
for _ in range(num_walks):
initial = True
hyperedge_num = hyperedge_index
curr_hyperedge = hyperedge
for i in range(length):
proba = ((float(alpha) / len(curr_hyperedge['members'])) + beta)
if (random.random() < proba):
adjacent_hyperedges = vertexMemberships[curr_vertex]
hyperedge_num = random.choice(adjacent_hyperedges)
curr_hyperedge = hyperedges[hyperedge_num]
walk_vertex.append(str(curr_vertex))
curr_vertex = random.choice(curr_hyperedge['members'])
walksSAT.append(walk_vertex)
return walksSAT
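# Minimal sketch of the two walk schemes above on a toy hypergraph; the dict
# shapes mirror exactly what the functions index into. TAS walks emit
# hyperedge ids and SAT walks emit vertex ids, either of which can be fed to
# EmbedWord2Vec as sentences.
import random
hyperedges = {0: {'members': ['a', 'b']}, 1: {'members': ['b', 'c']}}
vertexMemberships = {'a': [0], 'b': [0, 1], 'c': [1]}
tas_walks = TraverseAndSelect(length=10, num_walks=5, hyperedges=hyperedges, vertexMemberships=vertexMemberships)
sat_walks = SubsampleAndTraverse(length=10, num_walks=5, hyperedges=hyperedges, vertexMemberships=vertexMemberships)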
|
def getFeaturesTrainingData():
i = 0
lists = []
labels = []
for vertex in G.nodes:
vertex_embedding_list = []
lists.append({'f': vertex_features[vertex].tolist()})
labels.append(vertex_labels[vertex])
X_unshuffled = []
for hlist in lists:
x = np.zeros((feature_dimension,))
x[:feature_dimension] = hlist['f']
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X_Features = np.asarray(X_arr)
Y_Features = np.asarray(Y_arr)
return (X_Features, Y_Features)
|
def getTrainingData():
i = 0
lists = []
labels = []
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
for vertex in hyperedge['members']:
i += 1
if ((i % 100000) == 0):
print(i)
try:
vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
print('Missed one: ', vertex)
lists.append({'v': vertex_embedding_list, 'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
np_vertex_embeddings = np.asarray(hlist['v'])
x = np.zeros((((hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize)) + feature_dimension),))
i = 0
x[:hyperedge_embedding_dimension] = hlist['h']
x[(hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize)):] = hlist['f']
for embedding in np_vertex_embeddings:
x[(hyperedge_embedding_dimension + (i * embedding.shape[0])):(hyperedge_embedding_dimension + ((i + 1) * embedding.shape[0]))] = embedding
i += 1
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X = np.asarray(X_arr)
Y = np.asarray(Y_arr)
return (X, Y)
|
def getMLPTrainingData():
i = 0
lists = []
labels = []
maxi = 0
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
lists.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
x = np.zeros(((hyperedge_embedding_dimension + feature_dimension),))
x[:hyperedge_embedding_dimension] = hlist['h']
x[hyperedge_embedding_dimension:] = hlist['f']
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X_MLP = np.asarray(X_arr)
Y_MLP = np.asarray(Y_arr)
return (X_MLP, Y_MLP)
|
def getDSTrainingData():
i = 0
lists = []
labels = []
maxi = 0
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
for vertex in hyperedge['members']:
i += 1
if ((i % 100000) == 0):
print(i)
try:
vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
print('Missed one: ', vertex)
lists.append({'v': vertex_embedding_list, 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
np_vertex_embeddings = np.asarray(hlist['v'])
x = np.zeros((((vertex_embedding_dimension * max_groupsize) + feature_dimension),))
x[(vertex_embedding_dimension * max_groupsize):] = hlist['f']
i = 0
for embedding in np_vertex_embeddings:
x[(i * embedding.shape[0]):((i + 1) * embedding.shape[0])] = embedding
i += 1
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X = np.asarray(X_arr)
Y = np.asarray(Y_arr)
return (X, Y)
|
def hyperedgesTrain(X_train, Y_train, num_epochs):
deephyperedges_transductive_model.load_weights((('models/' + dataset_name) + '/deephyperedges_transductive_model.h5'))
history = deephyperedges_transductive_model.fit(X_train, Y_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train, num_epochs):
MLP_transductive_model.load_weights((('models/' + dataset_name) + '/MLP_transductive_model.h5'))
history = MLP_transductive_model.fit(X_MLP_transductive_train, Y_MLP_transductive_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train, num_epochs):
deepsets_transductive_model.load_weights((('models/' + dataset_name) + '/deepsets_transductive_model.h5'))
history = deepsets_transductive_model.fit(X_deepset_transductive_train, Y_deepset_transductive_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def testModel(model, X_tst, Y_tst):
from sklearn.metrics import classification_report, accuracy_score
target_names = ['Neural Networks', 'Case Based', 'Reinforcement Learning', 'Probabilistic Methods', 'Genetic Algorithms', 'Rule Learning', 'Theory']
y_pred = model.predict(X_tst, batch_size=16, verbose=0)
finals_pred = []
finals_test = []
for p in y_pred:
m = 0
ind = 0
final = 0
for i in p:
if (i > m):
m = i
final = ind
ind += 1
finals_pred.append(final)
for i in Y_tst:
ind = 0
for j in i:
if (j == 1):
finals_test.append(ind)
ind += 1
c = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
reports.append(c)
print(c)
|
def RunAllTests(percentTraining, num_times, num_epochs):
for i in range(num_times):
print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep hyperedges')
(X, Y) = getTrainingData()
(X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, train_size=percentTraining, test_size=(1 - percentTraining))
hyperedgesTrain(X_train, Y_train, num_epochs)
testModel(deephyperedges_transductive_model, X_test, Y_test)
|
def getFeaturesTrainingData():
i = 0
lists = []
labels = []
for vertex in G.nodes:
vertex_embedding_list = []
lists.append({'f': vertex_features[vertex].tolist()})
labels.append(vertex_labels[vertex])
X_unshuffled = []
for hlist in lists:
x = np.zeros((feature_dimension,))
x[:feature_dimension] = hlist['f']
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X_Features = np.asarray(X_arr)
Y_Features = np.asarray(Y_arr)
return (X_Features, Y_Features)
|
def getTrainingData():
i = 0
lists = []
labels = []
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
for vertex in hyperedge['members']:
i += 1
if ((i % 100000) == 0):
print(i)
try:
vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
print('Missed one: ', vertex)
lists.append({'v': vertex_embedding_list, 'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
np_vertex_embeddings = np.asarray(hlist['v'])
x = np.zeros((((hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize)) + feature_dimension),))
i = 0
x[:hyperedge_embedding_dimension] = hlist['h']
x[(hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize)):] = hlist['f']
for embedding in np_vertex_embeddings:
x[(hyperedge_embedding_dimension + (i * embedding.shape[0])):(hyperedge_embedding_dimension + ((i + 1) * embedding.shape[0]))] = embedding
i += 1
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X = np.asarray(X_arr)
Y = np.asarray(Y_arr)
return (X, Y)
|
def getMLPTrainingData():
i = 0
lists = []
labels = []
maxi = 0
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
lists.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
x = np.zeros(((hyperedge_embedding_dimension + feature_dimension),))
x[:hyperedge_embedding_dimension] = hlist['h']
x[hyperedge_embedding_dimension:] = hlist['f']
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X_MLP = np.asarray(X_arr)
Y_MLP = np.asarray(Y_arr)
return (X_MLP, Y_MLP)
|
def getDSTrainingData():
i = 0
lists = []
labels = []
maxi = 0
for h in hyperedges:
vertex_embedding_list = []
hyperedge = hyperedges[h]
for vertex in hyperedge['members']:
i += 1
if ((i % 100000) == 0):
print(i)
try:
vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
print('Missed one: ', vertex)
lists.append({'v': vertex_embedding_list, 'f': vertex_features[h].tolist()})
label = np.zeros((num_categories,))
label[(int(hyperedge['category']) - 1)] = 1
labels.append(label)
X_unshuffled = []
for hlist in lists:
np_vertex_embeddings = np.asarray(hlist['v'])
x = np.zeros((((vertex_embedding_dimension * max_groupsize) + feature_dimension),))
x[(vertex_embedding_dimension * max_groupsize):] = hlist['f']
i = 0
for embedding in np_vertex_embeddings:
x[(i * embedding.shape[0]):((i + 1) * embedding.shape[0])] = embedding
i += 1
X_unshuffled.append(x)
labels = np.asarray(labels)
(X_arr, Y_arr) = shuffle(X_unshuffled, labels)
X = np.asarray(X_arr)
Y = np.asarray(Y_arr)
return (X, Y)
|
def hyperedgesTrain(X_train, Y_train):
deephyperedges_transductive_model.load_weights((('models/' + dataset_name) + '/deephyperedges_transductive_model.h5'))
history = deephyperedges_transductive_model.fit(X_train, Y_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train):
MLP_transductive_model.load_weights((('models/' + dataset_name) + '/MLP_transductive_model.h5'))
history = MLP_transductive_model.fit(X_MLP_transductive_train, Y_MLP_transductive_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train):
deepsets_transductive_model.load_weights((('models/' + dataset_name) + '/deepsets_transductive_model.h5'))
history = deepsets_transductive_model.fit(X_deepset_transductive_train, Y_deepset_transductive_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
|
def testModel(model, X_tst, Y_tst):
from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Type-1 Diabetes', 'Type-2 Diabetes', 'Type-3 Diabetes']
y_pred = model.predict(X_tst, batch_size=16, verbose=0)
finals_pred = []
finals_test = []
for p in y_pred:
m = 0
ind = 0
final = 0
for i in p:
if (i > m):
m = i
final = ind
ind += 1
finals_pred.append(final)
for i in Y_tst:
ind = 0
for j in i:
if (j == 1):
finals_test.append(ind)
ind += 1
c = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
print(c)
reports.append(c)
print(accuracy_score(finals_test, finals_pred))
|
def RunAllTests(percentTraining, num_times=10):
for i in range(num_times):
print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep hyperedges')
(X, Y) = getTrainingData()
(X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, train_size=percentTraining, test_size=(1 - percentTraining))
hyperedgesTrain(X_train, Y_train)
testModel(deephyperedges_transductive_model, X_test, Y_test)
print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: MLP')
(X_MLP, Y_MLP) = getMLPTrainingData()
(X_MLP_transductive_train, X_MLP_transductive_test, Y_MLP_transductive_train, Y_MLP_transductive_test) = train_test_split(X_MLP, Y_MLP, train_size=percentTraining, test_size=(1 - percentTraining))
MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train)
testModel(MLP_transductive_model, X_MLP_transductive_test, Y_MLP_transductive_test)
print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep sets')
(X_deepset, Y_deepset) = getDSTrainingData()
(X_deepset_transductive_train, X_deepset_transductive_test, Y_deepset_transductive_train, Y_deepset_transductive_test) = train_test_split(X_deepset, Y_deepset, train_size=percentTraining, test_size=(1 - percentTraining))
DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train)
testModel(deepsets_transductive_model, X_deepset_transductive_test, Y_deepset_transductive_test)
|
def smooth(scalars, weight):
last = scalars[0]
smoothed = list()
for point in scalars:
smoothed_val = ((last * weight) + ((1 - weight) * point))
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
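# The function above is TensorBoard-style exponential moving average
# smoothing: smoothed[t] = weight * smoothed[t-1] + (1 - weight) * scalars[t],
# seeded with the first raw value. Tiny check:
print(smooth([0.0, 1.0, 1.0], 0.5))  # [0.0, 0.5, 0.75]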
|
def plot(deephyperedges_directory, MLP_directory, deepsets_directory, metric, dataset):
dhe_metrics = pd.read_csv(deephyperedges_directory)
x = []
y = []
for (index, row) in dhe_metrics.iterrows():
x.append(float(row['Step']))
y.append(float(row['Value']))
mlp_metrics = pd.read_csv(MLP_directory)
x_mlp = []
y_mlp = []
for (index, row) in mlp_metrics.iterrows():
x_mlp.append(float(row['Step']))
y_mlp.append(float(row['Value']))
ds_metrics = pd.read_csv(deepsets_directory)
x_ds = []
y_ds = []
for (index, row) in ds_metrics.iterrows():
x_ds.append(float(row['Step']))
y_ds.append(float(row['Value']))
sns.set()
    ds_normal = (0.0, 0.0, 0.7, 0.2)
    ds_smoothed = (0.0, 0.0, 0.7, 1)
    dh_normal = (0.0, 0.7, 0.0, 0.2)
    dh_smoothed = (0.0, 0.7, 0.0, 1)
    mlp_normal = (0.7, 0.2, 0.1, 0.2)
    mlp_smoothed = (0.7, 0.2, 0.1, 1)
plt.gca().set_prop_cycle(color=[mlp_normal, ds_normal, dh_normal, mlp_smoothed, ds_smoothed, dh_smoothed])
plt.plot(x_mlp, y_mlp)
plt.plot(x_ds, y_ds)
plt.plot(x, y)
plt.plot(x_mlp, smooth(y_mlp, 0.8))
plt.plot(x_ds, smooth(y_ds, 0.8))
plt.plot(x, smooth(y, 0.8))
    plt.legend(['_nolegend_', '_nolegend_', '_nolegend_', 'MLP + TAS Walks', 'Deep Sets + SAT Walks', 'Deep Hyperedges'], loc='lower right')
plt.savefig((((('images/paper/' + dataset) + '/') + metric) + '.png'), dpi=300)
plt.show()
|
def plotAll(dataset):
    # (TensorBoard CSV export, output plot name) pairs; the original repeated
    # the same three-line block verbatim for each metric
    metrics = [('run-.-tag-categorical_accuracy.csv', 'train_accuracy'),
               ('run-.-tag-loss.csv', 'train_loss'),
               ('run-.-tag-val_categorical_accuracy.csv', 'validation_accuracy'),
               ('run-.-tag-val_loss.csv', 'validation_loss')]
    base = 'images/paper/' + dataset + '/'
    for (csv_name, plot_name) in metrics:
        plot(base + 'deephyperedges/' + csv_name, base + 'MLP/' + csv_name, base + 'deepsets/' + csv_name, plot_name, dataset)
|
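# Hypothetical invocation (the dataset name here is an assumption): generates
# all four comparison plots from the CSVs under images/paper/<dataset>/.
plotAll('cora')
|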
def iter_graph(root, callback):
    # Walk the autograd graph starting at root, invoking callback on every
    # grad_fn node exactly once.
    queue = [root]
    seen = set()
    while queue:
        fn = queue.pop()
        if fn in seen:
            continue
        seen.add(fn)
        for (next_fn, _) in fn.next_functions:
            if next_fn is not None:
                queue.append(next_fn)
        callback(fn)
|
def register_hooks(var):
    # Attach a gradient hook to every node in var's autograd graph so the
    # gradients can be inspected after backward(); returns make_dot, which
    # renders the graph with bad-gradient nodes highlighted in red.
    fn_dict = {}
    def hook_cb(fn):
        def register_grad(grad_input, grad_output):
            fn_dict[fn] = grad_input
        fn.register_hook(register_grad)
    iter_graph(var.grad_fn, hook_cb)
    def is_bad_grad(grad_output):
        grad_output = grad_output.data
        # NaNs fail self-equality; also flag exploding gradients (> 1e6)
        return grad_output.ne(grad_output).any() or grad_output.gt(1000000.0).any()
    def make_dot():
        node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
        dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
        def size_to_str(size):
            return '(' + ', '.join(map(str, size)) + ')'
        def build_graph(fn):
            if hasattr(fn, 'variable'):
                # leaf tensors (AccumulateGrad nodes) carry their variable
                u = fn.variable
                node_name = 'Variable\n ' + size_to_str(u.size())
                dot.node(str(id(u)), node_name, fillcolor='lightblue')
            else:
                assert fn in fn_dict, fn
                fillcolor = 'white'
                if any(is_bad_grad(gi) for gi in fn_dict[fn]):
                    fillcolor = 'red'
                dot.node(str(id(fn)), str(type(fn).__name__), fillcolor=fillcolor)
            for (next_fn, _) in fn.next_functions:
                if next_fn is not None:
                    next_id = id(getattr(next_fn, 'variable', next_fn))
                    dot.edge(str(next_id), str(id(fn)))
        iter_graph(var.grad_fn, build_graph)
        return dot
    return make_dot
|
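# A minimal sketch of the assumed usage: hook the loss before backward(),
# then build and save the Digraph (requires the graphviz package).
import torch

x = torch.randn(4, 3, requires_grad=True)
z = (x * x).sum()
get_dot = register_hooks(z)  # must run before backward()
z.backward()
dot = get_dot()
dot.save('debug_grad_graph.dot')  # nodes with NaN/huge gradients are red
|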
class Checkpoints():
    def __init__(self, args):
        self.dir_save = args.save
        self.dir_load = args.resume
        if not os.path.isdir(self.dir_save):
            os.makedirs(self.dir_save)
    def latest(self, name):
        # returns the resume path if one was given, else None
        if name == 'resume':
            return self.dir_load
    def save(self, epoch, model, best):
        if best:
            torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (self.dir_save, epoch))
    def load(self, filename):
        if os.path.isfile(filename):
            print("=> loading checkpoint '{}'".format(filename))
            return torch.load(filename)
        # the original fell through to `return model` here, raising
        # UnboundLocalError whenever the file was missing
        print("=> no checkpoint found at '{}'".format(filename))
        return None
|
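# A minimal usage sketch (assumed): Checkpoints only reads .save and .resume
# from its args object, so a bare Namespace is enough to drive it.
from argparse import Namespace

ckpt = Checkpoints(Namespace(save='results/checkpoints', resume=None))
# ckpt.save(epoch, model, best=True) writes results/checkpoints/model_epoch_<epoch>.pth
# ckpt.latest('resume') is None here, so training starts from scratch
|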
class Dataloader():
    def __init__(self, args):
        self.args = args
        self.loader_input = args.loader_input
        self.loader_label = args.loader_label
        self.split_test = args.split_test
        self.split_train = args.split_train
        self.dataset_test_name = args.dataset_test
        self.dataset_train_name = args.dataset_train
        self.resolution = (args.resolution_wide, args.resolution_high)
        self.input_filename_test = args.input_filename_test
        self.label_filename_test = args.label_filename_test
        self.input_filename_train = args.input_filename_train
        self.label_filename_train = args.label_filename_train
        if self.dataset_train_name == 'LSUN':
            self.dataset_train = getattr(datasets, self.dataset_train_name)(db_path=args.dataroot, classes=['bedroom_train'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_train_name in ('CIFAR10', 'CIFAR100'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(self.resolution, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif self.dataset_train_name in ('CocoCaption', 'CocoDetection'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_train_name in ('STL10', 'SVHN'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, split='train', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_train_name == 'MNIST':
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif self.dataset_train_name == 'ImageNet':
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_train = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_train), transform=transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        elif self.dataset_train_name in ('FRGC', 'Folder'):
            # the FRGC and Folder branches were identical in the original
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_train_name == 'FileList':
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif self.dataset_train_name == 'FolderList':
            # the original instantiated datasets.FileList here as well;
            # FolderList matches the branch name and shares the signature
            self.dataset_train = datasets.FolderList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
        if self.dataset_test_name == 'LSUN':
            self.dataset_test = getattr(datasets, self.dataset_test_name)(db_path=args.dataroot, classes=['bedroom_val'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_test_name in ('CIFAR10', 'CIFAR100'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif self.dataset_test_name in ('CocoCaption', 'CocoDetection'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_test_name in ('STL10', 'SVHN'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, split='test', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_test_name == 'MNIST':
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif self.dataset_test_name == 'ImageNet':
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_test = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
        elif self.dataset_test_name in ('FRGC', 'Folder'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif self.dataset_test_name == 'FileList':
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif self.dataset_test_name == 'FolderList':
            # FolderList, as in the train branch above
            self.dataset_test = datasets.FolderList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
    def create(self, flag=None):
        if flag == 'Train':
            return torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
        if flag == 'Test':
            return torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
        if flag is None:
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return (dataloader_train, dataloader_test)
|
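# A minimal usage sketch (argument names assumed to match the attributes read
# in __init__): MNIST needs no file lists or custom loaders, so a bare
# Namespace suffices; the dataset is downloaded into dataroot on first use.
from argparse import Namespace

args = Namespace(dataset_train='MNIST', dataset_test='MNIST', dataroot='./data',
                 batch_size=64, nthreads=4, resolution_wide=28, resolution_high=28,
                 loader_input=None, loader_label=None, split_train=1.0, split_test=0.0,
                 input_filename_train=None, label_filename_train=None,
                 input_filename_test=None, label_filename_test=None)
(loader_train, loader_test) = Dataloader(args).create()
|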
class FileList(data.Dataset):
    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        self.ifile = ifile
        self.lfile = lfile
        self.train = train
        self.split_test = split_test
        self.split_train = split_train
        self.transform_test = transform_test
        self.transform_train = transform_train
        self.loader_input = loader_input
        self.loader_label = loader_label
        # string shorthands map to the concrete loader functions
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        if loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        if loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        if loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        if loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        if ifile is not None:
            imagelist = utils.readtextfile(ifile)
            imagelist = [x.rstrip('\n') for x in imagelist]
        else:
            imagelist = []
        if lfile is not None:
            labellist = utils.readtextfile(lfile)
            labellist = [x.rstrip('\n') for x in labellist]
        else:
            labellist = []
        # shuffle images and labels in unison so pairs stay aligned;
        # random.shuffle cannot shuffle two lists jointly as the original tried
        if (len(imagelist) > 0) and (len(imagelist) == len(labellist)):
            combined = list(zip(imagelist, labellist))
            shuffle(combined)
            (imagelist, labellist) = map(list, zip(*combined))
        elif (len(imagelist) > 0) and (len(labellist) == 0):
            shuffle(imagelist)
        elif (len(labellist) > 0) and (len(imagelist) == 0):
            shuffle(labellist)
        # initialize all splits so __len__/__getitem__ never hit missing attributes
        self.images_train, self.images_test = [], []
        self.labels_train, self.labels_test = [], []
        if 0.0 < self.split_train < 1.0:
            if len(imagelist) > 0:
                num = math.floor(self.split_train * len(imagelist))  # was self.split (undefined)
                self.images_train = imagelist[0:num]
                self.images_test = imagelist[num:]  # was images[num+1:], which dropped a sample
            if len(labellist) > 0:
                num = math.floor(self.split_train * len(labellist))
                self.labels_train = labellist[0:num]
                self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            if len(imagelist) > 0:
                self.images_train = imagelist
            if len(labellist) > 0:
                self.labels_train = labellist
        elif self.split_test == 1.0:
            if len(imagelist) > 0:
                self.images_test = imagelist
            if len(labellist) > 0:
                self.labels_test = labellist
    def __len__(self):
        if self.train:
            return len(self.images_train)
        return len(self.images_test)
    def __getitem__(self, index):
        input = {}
        if self.train:
            if len(self.images_train) > 0:
                path = self.images_train[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_train) > 0:
                path = self.labels_train[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_train is not None:
                input = self.transform_train(input)
        else:
            if len(self.images_test) > 0:
                path = self.images_test[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_test) > 0:
                path = self.labels_test[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_test is not None:
                input = self.transform_test(input)
        image = input['inp']
        label = input['tgt']
        return (image, label)
|
def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
|
def make_dataset(classlist, labellist=None):
    # classlist/labellist are text files listing one class (or label) folder
    # per line; the original referenced an undefined `ifile` throughout
    images = []
    labels = []
    classes = utils.readtextfile(classlist)
    classes = [x.rstrip('\n') for x in classes]
    classes.sort()
    for i in range(len(classes)):  # was `for i in len(classes)`, a TypeError
        for fname in os.listdir(classes[i]):
            if is_image_file(fname):
                label = {}
                label['class'] = os.path.basename(classes[i])
                images.append(os.path.join(classes[i], fname))  # full path for the loaders
                labels.append(label)
    if labellist is not None:
        # per-sample label files replace the class-name labels gathered above
        labels = []
        labeldirs = utils.readtextfile(labellist)
        labeldirs = [x.rstrip('\n') for x in labeldirs]
        labeldirs.sort()
        for i in range(len(labeldirs)):
            for fname in os.listdir(labeldirs[i]):
                if is_image_file(fname):
                    labels.append(os.path.join(labeldirs[i], fname))
    return (images, labels)
|
class FolderList(data.Dataset):
    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        (imagelist, labellist) = make_dataset(ifile, lfile)
        if len(imagelist) == 0:
            raise RuntimeError('No images found')
        if len(labellist) == 0:
            raise RuntimeError('No labels found')
        self.train = train  # the original never stored this, breaking __len__/__getitem__
        self.split_test = split_test
        self.split_train = split_train
        self.loader_input = loader_input
        self.loader_label = loader_label
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        if loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        if loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        if loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        if loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        self.imagelist = imagelist
        self.labellist = labellist
        self.transform_test = transform_test
        self.transform_train = transform_train
        # shuffle images and labels in unison so pairs stay aligned
        if (len(imagelist) > 0) and (len(imagelist) == len(labellist)):
            combined = list(zip(imagelist, labellist))
            shuffle(combined)
            (imagelist, labellist) = map(list, zip(*combined))
        elif (len(imagelist) > 0) and (len(labellist) == 0):
            shuffle(imagelist)
        elif (len(labellist) > 0) and (len(imagelist) == 0):
            shuffle(labellist)
        self.images_train, self.images_test = [], []
        self.labels_train, self.labels_test = [], []
        # the splits below referenced an undefined `args`; use the parameters
        if 0.0 < self.split_train < 1.0:
            if len(imagelist) > 0:
                num = math.floor(self.split_train * len(imagelist))
                self.images_train = imagelist[0:num]
                self.images_test = imagelist[num:]
            if len(labellist) > 0:
                num = math.floor(self.split_train * len(labellist))
                self.labels_train = labellist[0:num]
                self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            if len(imagelist) > 0:
                self.images_train = imagelist
            if len(labellist) > 0:
                self.labels_train = labellist
        elif self.split_test == 1.0:
            if len(imagelist) > 0:
                self.images_test = imagelist
            if len(labellist) > 0:
                self.labels_test = labellist
    def __len__(self):
        if self.train:
            return len(self.images_train)
        return len(self.images_test)
    def __getitem__(self, index):
        input = {}  # the original used `input` without initializing it
        if self.train:
            if len(self.images_train) > 0:
                path = self.images_train[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_train) > 0:
                path = self.labels_train[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_train is not None:
                input = self.transform_train(input)
        else:
            if len(self.images_test) > 0:
                path = self.images_test[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_test) > 0:
                path = self.labels_test[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_test is not None:
                input = self.transform_test(input)
        image = input['inp']
        label = input['tgt']
        return (image, label)
|
def loader_image(path):
    return Image.open(path).convert('RGB')
|
def loader_torch(path):
    return torch.load(path)
|
def loader_numpy(path):
    return np.load(path)
|
class Classification():
    # accuracy metric; distinct from the cross-entropy wrapper of the same
    # name below, which Model.setup instantiates as losses.Classification
    def __init__(self, topk=(1,)):
        self.topk = topk
    def forward(self, output, target):
        'Computes the precision@k for the specified values of k'
        maxk = max(self.topk)
        batch_size = target.size(0)
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in self.topk:
            # reshape, not view: `correct` is non-contiguous after the transpose
            correct_k = correct[:k].reshape(-1).float().sum(0)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
|
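# A minimal usage sketch (assumed): top-1/top-5 accuracy on random logits.
import torch

metric = Classification(topk=(1, 5))
logits = torch.randn(8, 10)             # batch of 8, 10 classes
targets = torch.randint(0, 10, (8,))
(top1, top5) = metric.forward(logits, targets)  # percentages in [0, 100]
|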
class Classification(nn.Module):
    # cross-entropy loss wrapper (the losses-module counterpart of the
    # accuracy metric above)
    def __init__(self):
        super(Classification, self).__init__()
        self.loss = nn.CrossEntropyLoss()
    def forward(self, input, target):
        return self.loss(input, target)
|
class Regression(nn.Module):
    def __init__(self):
        super(Regression, self).__init__()
        self.loss = nn.MSELoss()
    def forward(self, input, target):
        return self.loss(input, target)  # call the module, not .forward, so hooks fire
|
def weights_init(m):
    # He/Kaiming-style init for conv layers; BatchNorm starts as identity
    if isinstance(m, nn.Conv2d):
        n = (m.kernel_size[0] * m.kernel_size[1]) * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2.0 / n))
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
|
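# A minimal usage sketch (assumed): Module.apply walks submodules recursively,
# which is exactly how Model.setup below applies this initializer.
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
net.apply(weights_init)
|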
class Model():
    def __init__(self, args):
        self.cuda = args.cuda
        self.nfilters = args.nfilters
        self.nclasses = args.nclasses
        self.nchannels = args.nchannels
        self.nblocks = args.nblocks
        self.nlayers = args.nlayers
        self.level = args.level
        self.net_type = args.net_type
        self.avgpool = args.avgpool
    def setup(self, checkpoints):
        model = getattr(models, self.net_type)(self.nchannels, self.nfilters, self.nclasses, self.avgpool, self.level)
        criterion = losses.Classification()
        if checkpoints.latest('resume') is None:
            model.apply(weights_init)
        else:
            tmp = checkpoints.load(checkpoints.latest('resume'))
            model.load_state_dict(tmp)
        if self.cuda:
            model = model.cuda()
            criterion = criterion.cuda()
        return (model, criterion)
|
class NoiseLayer(nn.Module):
    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        self.noise = torch.randn(1, in_planes, 1, 1)
        self.level = level
        self.layers = nn.Sequential(nn.ReLU(True), nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1), nn.BatchNorm2d(out_planes))
    def forward(self, x):
        # regenerate the uniform noise buffer whenever the input shape changes;
        # comparing full shapes also catches a smaller final batch, which the
        # original per-dimension check (dims 1-3 only) missed
        if x.data.shape != self.noise.shape:
            self.noise = ((2 * torch.rand(x.data.shape)) - 1) * self.level
            self.noise = self.noise.to(x.device)  # was a hard-coded .cuda()
        x.data = x.data + self.noise
        x = self.layers(x)
        return x
|
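# A minimal sketch (assumed usage, relying on the device-aware noise fix
# above): the noise buffer is lazily resized to the incoming activation shape.
import torch

layer = NoiseLayer(in_planes=3, out_planes=8, level=0.1)
out = layer(torch.randn(2, 3, 32, 32))  # regenerates noise as (2, 3, 32, 32)
print(out.shape)                        # torch.Size([2, 8, 32, 32])
|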
class NoiseModel(nn.Module):
    def __init__(self, nblocks, nlayers, nchannels, nfilters, nclasses, level):
        super(NoiseModel, self).__init__()
        self.num = nfilters
        self.level = level
        layers = []
        layers.append(NoiseLayer(3, nfilters, self.level))
        for i in range(1, nlayers):
            layers.append(self._make_layer(nfilters, nfilters, nblocks, self.level))
            layers.append(nn.MaxPool2d(2, 2))
        self.features = nn.Sequential(*layers)
        self.classifier = nn.Linear(self.num, nclasses)
    def _make_layer(self, in_planes, out_planes, nblocks, level):
        layers = []
        for i in range(nblocks):
            layers.append(NoiseLayer(in_planes, out_planes, level))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.features(x)
        # assumes the pooling stages have reduced the feature map to 1x1
        x = x.view(-1, self.num)
        x = self.classifier(x)
        return x
|
def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding, as in the ResNet reference implementation
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
|
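# A minimal usage sketch (assumed): padding=1 keeps the spatial size at
# stride=1 and halves it at stride=2.
import torch

print(conv3x3(16, 32)(torch.randn(1, 16, 8, 8)).shape)            # (1, 32, 8, 8)
print(conv3x3(16, 32, stride=2)(torch.randn(1, 16, 8, 8)).shape)  # (1, 32, 4, 4)
|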