repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
imagefusion-rfn-nest | imagefusion-rfn-nest-main/test_40pairs.py | # -*- coding:utf-8 -*-
# @Author: Li Hui, Jiangnan University
# @Email: hui_li_jnu@163.com
# @File : test_40pairs.py
# @Time : 2020/8/14 17:11
# test phase
import os
import torch
from torch.autograd import Variable
from net import NestFuse_light2_nodense, Fusion_network, Fusion_strategy
import utils
from args_fusion import args
import numpy as np
def _report_model_size(model):
    """Print *model*'s parameter count in MB, assuming 4-byte (float32) weights."""
    para = sum([np.prod(list(p.size())) for p in model.parameters()])
    type_size = 4  # bytes per float32 parameter
    print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))


def load_model(path_auto, path_fusion, fs_type, flag_img):
    """Build the auto-encoder, RFN fusion network and static fusion strategy.

    Args:
        path_auto: path to the pretrained NestFuse auto-encoder weights.
        path_fusion: path to the RFN fusion-network weights.
        fs_type: fusion-strategy type ('res', 'add', 'avg', 'max', 'spa', 'nuclear').
        flag_img: True for RGB (3-channel) images, False for grayscale.

    Returns:
        (nest_model, fusion_model, fusion_strategy) with both networks in
        eval mode and moved to GPU.
    """
    # Channel count follows the image mode (idiomatic conditional instead of
    # the original `if flag_img is True:` identity test).
    nc = 3 if flag_img else 1
    input_nc = nc
    output_nc = nc
    nb_filter = [64, 112, 160, 208, 256]

    nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision=False)
    nest_model.load_state_dict(torch.load(path_auto))

    fusion_model = Fusion_network(nb_filter, fs_type)
    fusion_model.load_state_dict(torch.load(path_fusion))

    fusion_strategy = Fusion_strategy(fs_type)

    # The original duplicated this reporting code for each network.
    _report_model_size(nest_model)
    _report_model_size(fusion_model)

    nest_model.eval()
    fusion_model.eval()
    # NOTE(review): assumes a CUDA device is available; no CPU fallback here.
    nest_model.cuda()
    fusion_model.cuda()

    return nest_model, fusion_model, fusion_strategy
def run_demo(nest_model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path_root, name_ir, fs_type, use_strategy, flag_img, alpha):
    """Fuse one infrared/visible image pair and write the result(s) to disk.

    Args:
        nest_model: NestFuse auto-encoder providing encoder/decoder.
        fusion_model: RFN fusion network.
        fusion_strategy: static fusion-strategy callable.
        infrared_path: path of the infrared input image.
        visible_path: path of the visible input image.
        output_path_root: directory (with trailing slash) for the output.
        name_ir: base file name used to build the output name.
        fs_type: fusion-strategy type (kept for API compatibility; unused here).
        use_strategy: True -> static fusion strategy, False -> RFN network.
        flag_img: True for RGB input, False for grayscale.
        alpha: tag embedded into the output file name.
    """
    img_ir, h, w, c = utils.get_test_image(infrared_path, flag=flag_img)  # True for rgb
    img_vi, h, w, c = utils.get_test_image(visible_path, flag=flag_img)

    # BUG FIX: the original tested `c is 1`, which compares object identity and
    # only happens to work because CPython interns small ints; use equality.
    if c == 1:
        if args.cuda:
            img_ir = img_ir.cuda()
            img_vi = img_vi.cuda()
        img_ir = Variable(img_ir, requires_grad=False)
        img_vi = Variable(img_vi, requires_grad=False)
        # encoder
        en_r = nest_model.encoder(img_ir)
        en_v = nest_model.encoder(img_vi)
        # fusion net (static strategy or learned RFN)
        if use_strategy:
            f = fusion_strategy(en_r, en_v)
        else:
            f = fusion_model(en_r, en_v)
        # decoder
        img_fusion_list = nest_model.decoder_eval(f)
    else:
        # c > 1: the image was tiled into c blocks; fuse each block, then
        # reconstruct the full image from the fused tiles.
        img_fusion_blocks = []
        for i in range(c):
            img_vi_temp = img_vi[i]
            img_ir_temp = img_ir[i]
            if args.cuda:
                img_vi_temp = img_vi_temp.cuda()
                img_ir_temp = img_ir_temp.cuda()
            img_vi_temp = Variable(img_vi_temp, requires_grad=False)
            img_ir_temp = Variable(img_ir_temp, requires_grad=False)
            en_r = nest_model.encoder(img_ir_temp)
            en_v = nest_model.encoder(img_vi_temp)
            if use_strategy:
                f = fusion_strategy(en_r, en_v)
            else:
                f = fusion_model(en_r, en_v)
            img_fusion_temp = nest_model.decoder_eval(f)
            img_fusion_blocks.append(img_fusion_temp)
        img_fusion_list = utils.recons_fusion_images(img_fusion_blocks, h, w)

    # ########################### multi-outputs ##############################
    # Save every decoder output (a single image when deepsupervision is off).
    # The unused `output_count` counter from the original was removed.
    for img_fusion in img_fusion_list:
        file_name = 'fused_' + str(alpha) + '_' + name_ir
        output_path = output_path_root + file_name
        utils.save_image_test(img_fusion, output_path)
        print(output_path)
def main():
    """Fuse the 40 TNO/VOT IR-VIS test pairs with the trained RFN-Nest model."""
    # False - grayscale input
    flag_img = False
    # ################# gray scale ########################################
    test_path = "images/40_pairs_tno_vot/ir/"
    path_auto = args.resume_nestfuse
    output_path_root = "./outputs/alpha_1e4_40/"
    # BUG FIX: os.mkdir raises FileNotFoundError when the parent directory
    # ('./outputs') does not exist; makedirs(exist_ok=True) creates the whole
    # chain and makes the script safely re-runnable.
    os.makedirs(output_path_root, exist_ok=True)

    fs_type = 'res'  # res (RFN), add, avg, max, spa, nuclear
    use_strategy = False  # True - static strategy; False - RFN

    path_fusion_root = args.fusion_model

    with torch.no_grad():
        alpha_list = [700]
        w_all_list = [[6.0, 3.0]]
        for alpha in alpha_list:
            for w_all in w_all_list:
                w, w2 = w_all
                temp = 'rfnnest_' + str(alpha) + '_wir_' + str(w) + '_wvi_' + str(w2)
                output_path_list = 'fused_' + temp + '_40'
                output_path1 = output_path_root + output_path_list + '/'
                os.makedirs(output_path1, exist_ok=True)
                output_path = output_path1

                # load network
                path_fusion = path_fusion_root + str(w) + '/' + 'Final_epoch_2_alpha_' + str(alpha) + '_wir_' + str(
                    w) + '_wvi_' + str(w2) + '_ssim_vi.model'
                model, fusion_model, fusion_strategy = load_model(path_auto, path_fusion, fs_type, flag_img)

                imgs_paths_ir, names = utils.list_images(test_path)
                for name_ir, infrared_path in zip(names, imgs_paths_ir):
                    # Derive the visible-image path from the infrared one.
                    visible_path = infrared_path.replace('ir/', 'vis/')
                    if 'IR' in visible_path:
                        visible_path = visible_path.replace('IR', 'VIS')
                    else:
                        visible_path = visible_path.replace('i.', 'v.')
                    run_demo(model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path, name_ir, fs_type, use_strategy, flag_img, temp)
    print('Done......')


if __name__ == '__main__':
    main()
| 4,952 | 30.75 | 152 | py |
imagefusion-rfn-nest | imagefusion-rfn-nest-main/train_fusionnet.py | # Training a NestFuse network
# auto-encoder
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys
import time
from tqdm import tqdm, trange
import scipy.io as scio
import random
import torch
from torch.optim import Adam
from torch.autograd import Variable
import utils
from net import NestFuse_light2_nodense, Fusion_network
from args_fusion import args
import pytorch_msssim
EPSILON = 1e-5
def main():
    """Prepare the IR training-image list and launch one training run per
    (alpha, w1, w2) hyper-parameter combination."""
    # Collect infrared image paths, keep the first 80 000, and shuffle them.
    original_imgs_path, _ = utils.list_images(args.dataset_ir)
    train_num = 80000
    original_imgs_path = original_imgs_path[:train_num]
    random.shuffle(original_imgs_path)

    # True - RGB , False - gray
    img_flag = False

    alpha_list = [700]
    w_all_list = [[6.0, 3.0]]
    for w1, w2 in w_all_list:
        for alpha in alpha_list:
            train(original_imgs_path, img_flag, alpha, w1, w2)
def train(original_imgs_path, img_flag, alpha, w1, w2):
    """Train the RFN fusion network; the NestFuse auto-encoder stays frozen.

    Args:
        original_imgs_path: infrared image paths ('lwir' in each path; the
            visible counterpart is derived by replacing 'lwir' with 'visible').
        img_flag: True for RGB training images, False for grayscale.
        alpha: weight of the SSIM (detail) loss term.
        w1: feature-loss weight applied to the infrared features.
        w2: feature-loss weight applied to the visible features.
    """
    batch_size = args.batch_size

    # load network model
    nc = 1  # grayscale training images
    input_nc = nc
    output_nc = nc
    nb_filter = [64, 112, 160, 208, 256]
    f_type = 'res'

    with torch.no_grad():
        deepsupervision = False
        nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision)
        model_path = args.resume_nestfuse
        # load the pretrained auto-encoder; it is kept in eval mode throughout
        print('Resuming, initializing auto-encoder using weight from {}.'.format(model_path))
        nest_model.load_state_dict(torch.load(model_path))
        nest_model.eval()

    # fusion network (the only trained component)
    fusion_model = Fusion_network(nb_filter, f_type)
    if args.resume_fusion_model is not None:
        print('Resuming, initializing fusion net using weight from {}.'.format(args.resume_fusion_model))
        fusion_model.load_state_dict(torch.load(args.resume_fusion_model))
    optimizer = Adam(fusion_model.parameters(), args.lr)
    mse_loss = torch.nn.MSELoss()
    ssim_loss = pytorch_msssim.msssim

    if args.cuda:
        nest_model.cuda()
        fusion_model.cuda()

    tbar = trange(args.epochs)
    print('Start training.....')

    # creating save paths (shared root plus a per-w1 subdirectory)
    temp_path_model = os.path.join(args.save_fusion_model)
    temp_path_loss = os.path.join(args.save_loss_dir)
    if not os.path.exists(temp_path_model):
        os.mkdir(temp_path_model)
    if not os.path.exists(temp_path_loss):
        os.mkdir(temp_path_loss)

    temp_path_model_w = os.path.join(args.save_fusion_model, str(w1))
    temp_path_loss_w = os.path.join(args.save_loss_dir, str(w1))
    if not os.path.exists(temp_path_model_w):
        os.mkdir(temp_path_model_w)
    if not os.path.exists(temp_path_loss_w):
        os.mkdir(temp_path_loss_w)

    Loss_feature = []
    Loss_ssim = []
    Loss_all = []
    count_loss = 0
    all_ssim_loss = 0.
    all_fea_loss = 0.
    for e in tbar:
        print('Epoch %d.....' % e)
        # load training database
        image_set_ir, batches = utils.load_dataset(original_imgs_path, batch_size)
        fusion_model.train()
        count = 0
        for batch in range(batches):
            image_paths_ir = image_set_ir[batch * batch_size:(batch * batch_size + batch_size)]
            img_ir = utils.get_train_images(image_paths_ir, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)
            # visible counterpart of each IR image
            image_paths_vi = [x.replace('lwir', 'visible') for x in image_paths_ir]
            img_vi = utils.get_train_images(image_paths_vi, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)

            count += 1
            optimizer.zero_grad()
            img_ir = Variable(img_ir, requires_grad=False)
            img_vi = Variable(img_vi, requires_grad=False)

            if args.cuda:
                img_ir = img_ir.cuda()
                img_vi = img_vi.cuda()

            # get fusion image: frozen encoder -> trained fusion -> frozen decoder
            en_ir = nest_model.encoder(img_ir)
            en_vi = nest_model.encoder(img_vi)
            f = fusion_model(en_ir, en_vi)
            outputs = nest_model.decoder_eval(f)

            # detached copies used as loss targets
            x_ir = Variable(img_ir.data.clone(), requires_grad=False)
            x_vi = Variable(img_vi.data.clone(), requires_grad=False)

            ######################### LOSS FUNCTION #########################
            loss1_value = 0.  # detail (SSIM) loss, against the visible image
            loss2_value = 0.  # feature loss, against weighted encoder features
            for output in outputs:
                # rescale the decoder output to [0, 255] before SSIM
                output = (output - torch.min(output)) / (torch.max(output) - torch.min(output) + EPSILON)
                output = output * 255
                # ---------------------- LOSS IMAGES ----------------------
                # detail loss
                # ssim_loss_temp1 = ssim_loss(output, x_ir, normalize=True)
                ssim_loss_temp2 = ssim_loss(output, x_vi, normalize=True)
                loss1_value += alpha * (1 - ssim_loss_temp2)

                # feature loss: fused features should match a weighted sum of
                # the IR and visible features at each of the 4 scales
                g2_ir_fea = en_ir
                g2_vi_fea = en_vi
                g2_fuse_fea = f
                # w_ir = [3.5, 3.5, 3.5, 3.5]
                w_ir = [w1, w1, w1, w1]
                w_vi = [w2, w2, w2, w2]
                w_fea = [1, 10, 100, 1000]  # deeper scales weighted heavier
                for ii in range(4):
                    g2_ir_temp = g2_ir_fea[ii]
                    g2_vi_temp = g2_vi_fea[ii]
                    g2_fuse_temp = g2_fuse_fea[ii]
                    loss2_value += w_fea[ii] * mse_loss(g2_fuse_temp, w_ir[ii] * g2_ir_temp + w_vi[ii] * g2_vi_temp)

            loss1_value /= len(outputs)
            loss2_value /= len(outputs)

            # total loss
            total_loss = loss1_value + loss2_value
            total_loss.backward()
            optimizer.step()

            all_fea_loss += loss2_value.item()
            all_ssim_loss += loss1_value.item()
            if (batch + 1) % args.log_interval == 0:
                mesg = "{}\t Alpha: {} \tW-IR: {}\tEpoch {}:\t[{}/{}]\t ssim loss: {:.6f}\t fea loss: {:.6f}\t total: {:.6f}".format(
                    time.ctime(), alpha, w1, e + 1, count, batches,
                    all_ssim_loss / args.log_interval,
                    all_fea_loss / args.log_interval,
                    (all_fea_loss + all_ssim_loss) / args.log_interval
                )
                tbar.set_description(mesg)
                Loss_ssim.append(all_ssim_loss / args.log_interval)
                Loss_feature.append(all_fea_loss / args.log_interval)
                Loss_all.append((all_fea_loss + all_ssim_loss) / args.log_interval)
                count_loss = count_loss + 1
                all_ssim_loss = 0.
                all_fea_loss = 0.

            if (batch + 1) % (200 * args.log_interval) == 0:
                # save an intermediate checkpoint and the loss curves so far
                fusion_model.eval()
                fusion_model.cpu()
                save_model_filename = "Epoch_" + str(e) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".model"
                # NOTE(review): checkpoints go to temp_path_model while the
                # final model goes to temp_path_model_w — confirm intentional.
                save_model_path = os.path.join(temp_path_model, save_model_filename)
                torch.save(fusion_model.state_dict(), save_model_path)
                # save loss data
                # pixel loss
                loss_data_ssim = Loss_ssim
                loss_filename_path = temp_path_loss_w + "/loss_ssim_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
                scio.savemat(loss_filename_path, {'loss_ssim': loss_data_ssim})
                # feature loss
                loss_data_fea = Loss_feature
                loss_filename_path = temp_path_loss_w + "/loss_fea_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
                scio.savemat(loss_filename_path, {'loss_fea': loss_data_fea})
                # all loss
                loss_data = Loss_all
                loss_filename_path = temp_path_loss_w + "/loss_all_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
                scio.savemat(loss_filename_path, {'loss_all': loss_data})
                fusion_model.train()
                fusion_model.cuda()
                # BUG FIX: tqdm's set_description signature is
                # (desc, refresh=True); the original passed save_model_path as
                # `refresh`, silently dropping the path from the message.
                tbar.set_description("\nCheckpoint, trained model saved at " + save_model_path)

    # final loss curves
    # ssim loss
    loss_data_ssim = Loss_ssim
    loss_filename_path = temp_path_loss_w + "/Final_loss_ssim_epoch_" + str(
        args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
    scio.savemat(loss_filename_path, {'final_loss_ssim': loss_data_ssim})
    loss_data_fea = Loss_feature
    loss_filename_path = temp_path_loss_w + "/Final_loss_2_epoch_" + str(
        args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
    scio.savemat(loss_filename_path, {'final_loss_fea': loss_data_fea})
    loss_data = Loss_all
    loss_filename_path = temp_path_loss_w + "/Final_loss_all_epoch_" + str(
        args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
    scio.savemat(loss_filename_path, {'final_loss_all': loss_data})

    # save the final model into the per-w1 subdirectory
    fusion_model.eval()
    fusion_model.cpu()
    save_model_filename = "Final_epoch_" + str(args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(
        w1) + "_wvi_" + str(w2) + ".model"
    save_model_path = os.path.join(temp_path_model_w, save_model_filename)
    torch.save(fusion_model.state_dict(), save_model_path)

    print("\nDone, trained model saved at", save_model_path)
def check_paths(args):
    """Create the VGG-model and save directories if missing; exit on failure."""
    try:
        # Same order as before: vgg_model_dir first, then save_model_dir.
        for directory in (args.vgg_model_dir, args.save_model_dir):
            if not os.path.exists(directory):
                os.makedirs(directory)
    except OSError as e:
        print(e)
        sys.exit(1)


if __name__ == "__main__":
    main()
| 8,542 | 33.447581 | 181 | py |
imagefusion-rfn-nest | imagefusion-rfn-nest-main/pytorch_msssim/__init__.py | import torch
import torch.nn.functional as F
from math import exp
import numpy as np
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length *window_size*, normalized to sum to 1."""
    center = window_size // 2
    weights = [exp(-((i - center) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel=1):
    """Build a 2-D Gaussian window shaped (channel, 1, window_size, window_size)
    suitable as a grouped-convolution kernel (sigma fixed at 1.5)."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    """Compute SSIM between two image batches of shape (N, C, H, W).

    Args:
        img1, img2: input batches; img1 also determines the guessed value range.
        window_size: side length of the Gaussian window.
        window: precomputed window; built on the fly when None.
        size_average: True -> scalar mean; False -> per-image mean.
        full: when True also return the contrast-sensitivity term.
        val_range: dynamic range L; guessed from img1 when None.
    """
    if val_range is None:
        # Guess the dynamic range from img1: 255 for 8-bit style data, else 1;
        # shift the lower bound down for tanh-style inputs in [-1, 1].
        max_val = 255 if torch.max(img1) > 128 else 1
        min_val = -1 if torch.min(img1) < -0.5 else 0
        L = max_val - min_val
    else:
        L = val_range

    padd = 0
    _, channel, height, width = img1.size()
    if window is None:
        # Never use a window larger than the image itself.
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)

    # Local means via grouped Gaussian filtering.
    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)

    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2

    # Local (co)variances: E[x^2] - E[x]^2 pattern.
    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2

    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2

    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)  # contrast sensitivity

    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)

    ret = ssim_map.mean() if size_average else ssim_map.mean(1).mean(1).mean(1)
    return (ret, cs) if full else ret
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
    """Multi-scale SSIM over 5 dyadic scales (Wang et al., 2003).

    Args:
        img1, img2: image batches of shape (N, C, H, W); H and W must survive
            four 2x average-poolings with at least one valid window position.
        window_size: Gaussian window side length passed to :func:`ssim`.
        size_average: forwarded to :func:`ssim`.
        val_range: dynamic range forwarded to :func:`ssim` (guessed when None).
        normalize: rescale per-scale terms from [-1, 1] to [0, 1] to avoid NaNs
            early in training (not part of the original definition).

    Returns:
        Scalar MS-SSIM value.
    """
    device = img1.device
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
    levels = weights.size()[0]
    mssim = []
    mcs = []
    for _ in range(levels):
        sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
        mssim.append(sim)
        mcs.append(cs)
        # Move to the next (coarser) scale.
        img1 = F.avg_pool2d(img1, (2, 2))
        img2 = F.avg_pool2d(img2, (2, 2))

    mssim = torch.stack(mssim)
    mcs = torch.stack(mcs)

    # Normalize (to avoid NaNs during training unstable models, not compliant with original definition)
    if normalize:
        mssim = (mssim + 1) / 2
        mcs = (mcs + 1) / 2

    pow1 = mcs ** weights
    pow2 = mssim ** weights

    # From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    # MS-SSIM = prod_{j<M} cs_j^{w_j} * ssim_M^{w_M}.
    # BUG FIX: the original `torch.prod(pow1[:-1] * pow2[-1])` broadcast the
    # luminance term into the 4-element product, raising it to the 4th power
    # instead of the 1st.
    output = torch.prod(pow1[:-1]) * pow2[-1]
    return output
# Classes to re-use window
# Classes to re-use window
class SSIM(torch.nn.Module):
    """nn.Module wrapper around :func:`ssim` that caches the Gaussian window."""

    def __init__(self, window_size=11, size_average=True, val_range=None):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range
        # Start with a single-channel window; it is rebuilt lazily in forward()
        # whenever the channel count or dtype of the input changes.
        self.channel = 1
        self.window = create_window(window_size)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_hit = (channel == self.channel) and (self.window.dtype == img1.dtype)
        if cache_hit:
            window = self.window
        else:
            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
            self.window = window
            self.channel = channel
        return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
    """nn.Module wrapper around :func:`msssim`."""

    def __init__(self, window_size=11, size_average=True, channel=3):
        super(MSSSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel

    def forward(self, img1, img2):
        # TODO: store window between calls if possible
        return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
| 4,380 | 31.69403 | 118 | py |
tasksource | tasksource-main/src/tasksource/metadata/popularity.py | dataset_rank = {'glue': 0,
'super_glue': 12,
'tweet_eval': 23,
'blimp': 34,
'imdb': 101,
'wikitext': 102,
'squad': 106,
'trec': 107,
'openwebtext': 108,
'rotten_tomatoes': 109,
'anli': 110,
'adversarial_qa': 111,
'ai2_arc': 115,
'xsum': 117,
'amazon_reviews_multi': 118,
'ag_news': 125,
'yelp_review_full': 126,
'wino_bias': 127,
'piqa': 131,
'duorc': 132,
'quail': 134,
'trivia_qa': 135,
'cnn_dailymail': 143,
'common_gen': 146,
'sst': 147,
'conll2003': 150,
'financial_phrasebank': 151,
'babi_qa': 155,
'poem_sentiment': 163,
'dream': 164,
'paws': 165,
'emotion': 168,
'kilt_tasks': 169,
'sciq': 180,
'cos_e': 181,
'dbpedia_14': 183,
'newsgroup': 184,
'cosmos_qa': 244,
'squad_v2': 245,
'samsum': 246,
'amazon_polarity': 247,
'multi_news': 248,
'wiki_hop': 249,
'quartz': 251,
'qasc': 252,
'wiki_qa': 253,
'openbookqa': 254,
'ropes': 256,
'quoref': 257,
'snli': 258,
'app_reviews': 259,
'gigaword': 260,
'wiki_bio': 261,
'amazon_us_reviews': 262,
'scan': 308,
'race': 320,
'swag': 323,
'codah': 325,
'ccdv/arxiv-summarization': 331,
'subjqa': 333,
'universal_morphologies': 339,
'hans': 447,
'sst2': 448,
'guardian_authorship': 449,
'math_qa': 465,
'librispeech_asr': 466,
'hendrycks_test': 469,
'openai_humaneval': 526,
'ptb_text_only': 527,
'pubmed_qa': 528,
'head_qa': 531,
'ought/raft': 533,
'ade_corpus_v2': 544,
'cbt': 547,
'bookcorpus': 552,
'squadshifts': 553,
'story_cloze': 557,
'multi_nli': 559,
'qanta': 560,
'hate_speech18': 564,
'gem': 565,
'lex_glue': 599,
'deepmind/code_contests': 606,
'imagenet-1k': 607,
'blended_skill_talk': 608,
'sms_spam': 609,
'asset': 610,
'fever': 612,
'commonsense_qa': 615,
'scientific_papers': 616,
'evidence_infer_treatment': 618,
'hotpot_qa': 620,
'superb': 622,
'sick': 628,
'humicroedit': 629,
'snips_built_in_intents': 631,
'winograd_wsc': 632,
'bigbench': 634,
'multi_woz_v22': 801,
'lambada': 803,
'banking77': 804,
'hate_speech_offensive': 805,
'yahoo_answers_topics': 806,
'ccdv/cnn_dailymail': 807,
'hyperpartisan_news_detection': 810,
'gsm8k': 812,
'wikisql': 814,
'the_pile': 815,
'health_fact': 825,
'mdd': 826,
'web_questions': 830,
'ethos': 831,
'wnut_17': 833,
'medical_questions_pairs': 834,
'scitldr': 835,
'drop': 838,
'squad_adversarial': 839,
'e2e_nlg_cleaned': 841,
'onestop_english': 842,
'pragmeval': 843,
'relbert/analogy_questions': 863,
'nq_open': 869,
'daily_dialog': 870,
'mc_taco': 871,
'crows_pairs': 872,
'go_emotions': 873,
'ncbi_disease': 875,
'boolq': 876,
'movie_rationales': 877,
'climate_fever': 878,
'discovery': 879,
'lama': 881,
'ecthr_cases': 885,
'jfleg': 887,
'selqa': 888,
'acronym_identification': 892,
'scicite': 893,
'tab_fact': 894,
'wiki_asp': 896,
'enriched_web_nlg': 916,
'svhn': 918,
'docred': 920,
'conllpp': 921,
'liar': 922,
'multi_x_science_sum': 923,
'discofuse': 924,
'competition_math': 926,
'biosses': 927,
'jnlpba': 928,
'web_nlg': 929,
'qa_srl': 937,
'neural_code_search': 938,
'conv_ai_2': 940,
'craigslist_bargains': 941,
'qed': 942,
'conv_ai_3': 943,
'conv_ai': 944,
'turk': 945,
'covid_qa_castorini': 946,
'sem_eval_2014_task_1': 947,
'mwsc': 948,
'gutenberg_time': 949,
'billsum': 950,
'riddle_sense': 951,
'species_800': 952,
'hlgd': 953,
'definite_pronoun_resolution': 954,
'tmu_gfm_dataset': 955,
'relbert/semeval2012_relational_similarity_v4': 956,
'clinc_oos': 957,
'imppres': 960,
'mrqa': 976,
'cc_news': 977,
'lmqg/qag_tweetqa': 978,
'aeslc': 979,
'big_patent': 980,
'eli5': 990,
'scene_parse_150': 991,
'circa': 993,
'aqua_rat': 994,
'nlu_evaluation_data': 996,
'newspop': 997,
'relbert/lexical_relation_classification': 998,
'yahoo_answers_qa': 1003,
'emo': 1004,
'silicone': 1005,
'cord19': 1015,
'tweet_qa': 1018,
'meta_woz': 1019,
'md_gender_bias': 1021,
'art': 1031,
'google_wellformed_query': 1032,
'ambig_qa': 1033,
'taskmaster2': 1035,
'quac': 1042,
'freebase_qa': 1043,
'quora': 1044,
'numer_sense': 1045,
'narrativeqa': 1046,
'ccdv/pubmed-summarization': 1047,
'qa_zre': 1049,
'limit': 1050,
'tweets_hate_speech_detection': 1051,
'mocha': 1052,
'hatexplain': 1053,
'bing_coronavirus_query_set': 1054,
'great_code': 1055,
'medal': 1056,
'sent_comp': 1057,
'kelm': 1058,
'natural_questions': 1059,
'wiki_split': 1061,
'zest': 1062,
'cfq': 1063,
'multi_re_qa': 1071,
'stereoset': 1080,
'coqa': 1082,
'cuad': 1083,
'break_data': 1084,
'mbpp': 1089,
'knkarthick/dialogsum': 1091,
'wiki_auto': 1092,
'pile-of-law/pile-of-law': 1097,
'pg19': 1132,
'DFKI-SLT/few-nerd': 1133,
'wikicorpus': 1136,
'e2e_nlg': 1142,
'anton-l/superb': 1143,
'ghomasHudson/muld': 1144,
'Exr0n/wiki-entity-similarity': 1150,
'BeIR/nfcorpus': 1156,
'ccdv/govreport-summarization': 1158,
'woz_dialogue': 1159,
'reddit': 1164,
'EMBO/sd-nlp': 1165,
'empathetic_dialogues': 1170,
'BeIR/fiqa': 1171,
'generics_kb': 1173,
'swda': 1177,
'wikitablequestions': 1178,
'pubmed': 1183,
'chr_en': 1184,
'sharc': 1185,
'sharc_modified': 1186,
'BeIR/scifact': 1190,
'nell': 1192,
'patriziobellan/PET': 1196,
'EMBO/biolang': 1198,
'dynabench/qa': 1202,
'reddit_tifu': 1206,
'BeIR/scidocs': 1208,
'pec': 1210,
'tner/tweetner7': 1213,
'BeIR/arguana': 1214,
'multidoc2dial': 1216,
'taskmaster1': 1219,
'spider': 1221,
'adv_glue': 1222,
'allenai/mslr2022': 1228,
'conceptnet5': 1230,
'tyqiangz/multilingual-sentiments': 1233,
'newsqa': 1246,
'metashift': 1249,
'so_stacksample': 1250,
'doc2dial': 1253,
'search_qa': 1256,
'yhavinga/mc4_nl_cleaned': 1258,
'hope_edi': 1270,
'proto_qa': 1273,
'tuple_ie': 1276,
'simple_questions_v2': 1279,
'nlpaueb/finer-139': 1282,
'bookcorpusopen': 1283,
'tner/ontonotes5': 1284,
'crd3': 1285,
'ucberkeley-dlab/measuring-hate-speech': 1286,
'gap': 1287,
'recipe_nlg': 1288,
'schema_guided_dstc8': 1289,
'BeIR/beir': 1291,
'sagnikrayc/mctest': 1294,
'eurlex': 1296,
'corypaik/coda': 1297,
'bc2gm_corpus': 1298,
'ascent_kb': 1299,
'curiosity_dialogs': 1301,
'covid_qa_deepset': 1302,
'air_dialogue': 1303,
'taskmaster3': 1305,
'xsum_factuality': 1306,
'medical_dialog': 1308,
'BeIR/trec-covid': 1312,
'lhoestq/test': 1314,
'newsroom': 1315,
'tne': 1316,
'covid_qa_ucsd': 1317,
'fhamborg/news_sentiment_newsmtsc': 1319,
'prachathai67k': 1321,
'cardiffnlp/tweet_topic_multi': 1322,
'datacommons_factcheck': 1323,
'deal_or_no_dialog': 1325,
'ubuntu_dialogs_corpus': 1327,
'eu_regulatory_ir': 1329,
'scifact': 1331,
'wi_locness': 1333,
'relbert/relation_mapping': 1335,
'coastalcph/fairlex': 1336,
'asnq': 1340,
'peer_read': 1341,
'metaeval/linguisticprobing': 1343,
'jigsaw_unintended_bias': 1353,
'totto': 1354,
'irc_disentangle': 1355,
'med_hop': 1357,
'numeric_fused_head': 1359,
'ollie': 1361,
'per_sent': 1363,
'SocialGrep/ten-million-reddit-answers': 1364,
'lmqg/qg_squad': 1366,
's2orc': 1367,
'Hellisotherpeople/DebateSum': 1368,
'SocialGrep/reddit-crypto-aug-2021': 1369,
'jigsaw_toxicity_pred': 1371,
'GroNLP/ik-nlp-22_slp': 1372,
'SocialGrep/reddit-nonewnormal-complete': 1374,
'SocialGrep/reddit-wallstreetbets-aug-2021': 1376,
'SocialGrep/the-reddit-covid-dataset': 1378,
'SocialGrep/top-american-universities-on-reddit': 1380,
'BeIR/beir-corpus': 1382,
'SocialGrep/one-year-of-r-india': 1384,
'BritishLibraryLabs/EThOS-PhD-metadata': 1386,
'librispeech_lm': 1388,
'few_rel': 1389,
'arxiv_dataset': 1390,
'lc_quad': 1391,
'diplomacy_detection': 1392,
'lmqg/qa_squadshifts_pseudo': 1393,
'grail_qa': 1461,
'tner/wnut2017': 1462,
'demo-org/auditor_review': 1463,
'allenai/real-toxicity-prompts': 1464,
'BeIR/nfcorpus-qrels': 1465,
'onestop_qa': 1466,
'demelin/moral_stories': 1467,
'atomic': 1493,
'crawl_domain': 1494,
'BeIR/quora': 1495,
'Abirate/english_quotes': 1497,
'narrativeqa_manual': 1498,
'BeIR/fiqa-qrels': 1499,
'social_bias_frames': 1500,
'pkavumba/balanced-copa': 1501,
'eraser_multi_rc': 1502,
'sled-umich/TRIP': 1503,
'opinosis': 1504,
'PiC/phrase_sense_disambiguation': 1505,
'enwik8': 1506,
'sem_eval_2020_task_11': 1508,
'gooaq': 1509,
'linnaeus': 1510,
'hover': 1511,
'GonzaloA/fake_news': 1512,
'consumer-finance-complaints': 1513,
'ohsumed': 1514,
'casino': 1515,
'gfissore/arxiv-abstracts-2021': 1516,
'conv_questions': 1517,
'hate_offensive': 1518,
'sofc_materials_articles': 1519,
'wanyu/IteraTeR_human_sent': 1520,
'dialog_re': 1521,
'fake_news_english': 1522,
'dart': 1523,
'blog_authorship_corpus': 1524,
'msr_zhen_translation_parity': 1525,
'cryptonite': 1526,
'disfl_qa': 1527,
'olm/olm-CC-MAIN-2022-21-sampling-ratio-0.14775510204': 1528,
'olm/olm-CC-MAIN-2022-33-sampling-ratio-0.20': 1529,
'coarse_discourse': 1530,
'eth_py150_open': 1531,
'event2Mind': 1532,
'Paul/hatecheck': 1533,
'eli5_category': 1534,
'hippocorpus': 1535,
'the_pile_books3': 1536,
'coached_conv_pref': 1537,
'has_part': 1538,
'times_of_india_news_headlines': 1539,
'medmcqa': 1540,
'Babelscape/rebel-dataset': 1541,
'glucose': 1542,
'msr_text_compression': 1543,
'msr_genomics_kbcomp': 1544,
'SpeedOfMagic/ontonotes_english': 1545,
'msr_sqa': 1546,
'wiki_movies': 1547,
'hybrid_qa': 1548,
'metooma': 1549,
'multi_nli_mismatch': 1550,
'text2log': 1551,
'the_pile_stack_exchange': 1552,
're_dial': 1553,
'inquisitive_qg': 1554,
'SocialGrep/one-million-reddit-jokes': 1555,
'time_dial': 1556,
'BeIR/scifact-qrels': 1557,
'sede': 1558,
'mutual_friends': 1559,
'pass': 1560,
'allenai/multi_lexsum': 1561,
'youtube_caption_corrections': 1562,
'NbAiLab/norec_agg': 1563,
'DanL/scientific-challenges-and-directions-dataset': 1564,
'SocialGrep/one-million-reddit-questions': 1565,
'Motahar/github-issues': 1566,
'SocialGrep/the-2022-trucker-strike-on-reddit': 1567,
'allenai/qasper': 1568,
'CyranoB/polarity': 1569,
'SocialGrep/one-million-reddit-confessions': 1570,
'debatelab/deepa2': 1571,
'bhavnicksm/sentihood': 1572,
'debatelab/aaac': 1573,
'jgammack/SAE-door-abstracts': 1574,
'erwanlc/cocktails_recipe': 1575,
'erwanlc/cocktails_recipe_no_brand': 1576,
'BeIR/arguana-qrels': 1577,
'tner/fin': 1578,
'BeIR/scidocs-qrels': 1579,
'tner/bc5cdr': 1580,
'olm/olm-CC-MAIN-2022-27-sampling-ratio-0.16142697881': 1581,
'BeIR/fever': 1582,
'cardiffnlp/tweet_topic_single': 1584,
'speechcolab/gigaspeech': 1585,
'BeIR/webis-touche2020': 1586,
'aquamuse': 1588,
'olm/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295': 1590,
'tner/btc': 1591,
'truthful_qa': 1592,
'McGill-NLP/FaithDial': 1594,
'ekinakyurek/ftrace': 1595,
'tomasg25/scientific_lay_summarisation': 1597,
'tner/mit_restaurant': 1599,
'bigscience-biomedical/bioasq_task_b': 1600,
'strombergnlp/broad_twitter_corpus': 1619,
'tner/bionlp2004': 1620,
'metaeval/recast': 1621,
'the_pile_openwebtext2': 1629,
'taln-ls2n/inspec': 1630,
'lmqg/qa_squadshifts': 1631,
'BeIR/hotpotqa': 1636,
'jpwahle/machine-paraphrase-dataset': 1638,
'tner/mit_movie_trivia': 1639,
'tner/conll2003': 1640,
'OxAISH-AL-LLM/wiki_toxic': 1641,
'ccdv/WCEP-10': 1642,
'BeIR/trec-covid-qrels': 1646,
'g8a9/europarl_en-it': 1647,
'carblacac/twitter-sentiment-analysis': 1648,
'usc-isi/WikiConvert': 1649,
'visual_genome': 1650,
'florianbussmann/FUNSD-vu2020revising': 1660,
'Felix-ML/quoteli3': 1661,
'allenai/scico': 1662,
'drAbreu/bc4chemd_ner': 1663,
'tner/tweebank_ner': 1664,
'alisawuffles/WANLI': 1665,
'Team-PIXEL/rendered-bookcorpus': 1666,
'Team-PIXEL/rendered-wikipedia-english': 1667,
'wanyu/IteraTeR_full_sent': 1668,
'EMBO/BLURB': 1669,
'metaeval/crowdflower': 1676,
'AlexaAI/bold': 1685,
'metaeval/ethics': 1686,
'sileod/movie_recommendation': 1691,
'lmqg/qg_subjqa': 1692,
'copenlu/scientific-exaggeration-detection': 1699,
'esb/datasets': 1700,
'BeIR/msmarco': 1701,
'biwi_kinect_head_pose': 1703,
'BeIR/quora-qrels': 1704,
'wardenga/lsoie': 1705,
'nlphuji/vasr': 1707,
'BeIR/nq': 1708,
'BeIR/dbpedia-entity': 1710,
'sadrasabouri/ShahNegar': 1712,
'knkarthick/xsum': 1713,
'ColumbiaNLP/FLUTE': 1714,
'bigscience-biomedical/scitail': 1715,
'lmqg/qg_squadshifts': 1717,
'BeIR/climate-fever': 1722,
'PiC/phrase_retrieval': 1724,
'bdotloh/empathetic-dialogues-contexts': 1726,
'ccdv/mediasum': 1727,
'BeIR/msmarco-qrels': 1735,
'alexfabbri/answersumm': 1736,
'pszemraj/text2image-multi-prompt': 1737,
'shibing624/source_code': 1738,
'kensho/spgispeech': 1741,
'jamescalam/channel-metadata': 1742,
'EMBO/sd-nlp-non-tokenized': 1743,
'facebook/pmd': 1748,
'drt/kqa_pro': 1749,
'BeIR/fever-qrels': 1751,
'TheFusion21/PokemonCards': 1752,
'zeroshot/twitter-financial-news-sentiment': 1753,
'bigscience-biomedical/blurb': 1754,
'mteb/bucc-bitext-mining': 1759,
'pinecone/core-2020-05-10-deduplication': 1763,
'tals/vitaminc': 1764,
'BeIR/hotpotqa-qrels': 1765,
'gigant/ted_descriptions': 1766,
'jpwahle/autoencoder-paraphrase-dataset': 1767,
'beki/privy': 1768,
'Muennighoff/P3': 1770,
'jpwahle/dblp-discovery-dataset': 1771,
'taln-ls2n/kp20k': 1773,
'bigscience-biomedical/biosses': 1774,
'allenai/prosocial-dialog': 1776,
'pacovaldez/stackoverflow-questions': 1777,
'kasnerz/hitab': 1778,
'relbert/semeval2012_relational_similarity': 1779,
'sagnikrayc/snli-cf-kaushik': 1780,
'mwritescode/slither-audited-smart-contracts': 1781,
'BeIR/webis-touche2020-qrels': 1787,
'bigscience-biomedical/mednli': 1788,
'pinecone/movielens-recent-ratings': 1790,
'BeIR/dbpedia-entity-qrels': 1791,
'shanya/crd3': 1792,
'knkarthick/samsum': 1793,
'BeIR/climate-fever-qrels': 1794,
'BeIR/nq-qrels': 1795,
'sanchit-gandhi/librispeech_asr_dummy': 1796,
'taln-ls2n/semeval-2010-pre': 1797,
'Bingsu/openwebtext_20p': 1798,
'PolyAI/banking77': 1799,
'JulesBelveze/tldr_news': 1800,
'Freed-Wu/kodak': 1801,
'biglam/gutenberg-poetry-corpus': 1802,
'SocialGrep/reddit-r-bitcoin-data-for-jun-2022': 1803,
'taln-ls2n/kptimes': 1805,
'biglam/old_bailey_proceedings': 1806,
'launch/gov_report': 1807,
'knkarthick/AMI': 1810,
'voidful/NMSQA': 1811,
'DTU54DL/dmeo': 1812,
'FinanceInc/auditor_sentiment': 1813,
'jamescalam/unsplash-25k-photos': 1814,
'Tidrael/tsl_news': 1815,
'DTU54DL/common3k-train': 1816,
'okite97/news-data': 1817,
'lmqg/qa_squad': 1818,
'ConvLab/woz': 1819,
'ConvLab/camrest': 1820,
'ConvLab/metalwoz': 1821,
'kakaobrain/coyo-700m': 1822,
'taln-ls2n/kpbiomed': 1823,
'abhinavk/openpi_v2': 1826,
'mwong/fever-claim-related': 1831,
'ConvLab/tm1': 1832,
'joey234/nan-nli': 1833,
'ConvLab/tm2': 1834,
'ConvLab/tm3': 1835,
'ConvLab/kvret': 1836,
'ConvLab/sgd': 1837,
'relbert/semeval2012_relational_similarity_v5': 1838,
'cmudrc/wave-energy': 1839,
'llangnickel/long-covid-classification-data': 1840,
'webis/args_me': 1841,
'HuggingFaceM4/something_something_v2': 1844,
'ConvLab/dailydialog': 1845,
'huanggab/reddit_haiku': 1846,
'relbert/semeval2012_relational_similarity_v6': 1847,
'pszemraj/riddlesense_plusplus': 1848,
'rungalileo/20_Newsgroups_Fixed': 1849,
'DTU54DL/common-voice-test16k': 1850,
'lhoestq/custom_squad': 1851,
'merve/poetry': 1852,
'yoshitomo-matsubara/srsd-feynman_easy': 1853,
'nightingal3/fig-qa': 1854,
'matejklemen/vuamc': 1855,
'strombergnlp/twitter_pos': 1856,
'nlphuji/winogavil': 1858,
'DFKI-SLT/tacred': 1859,
'valurank/News_Articles_Categorization': 1861,
'nbroad/mediasum': 1862,
'asapp/slue': 1863,
'zbnsl/emoteModified': 1865,
'adsabs/WIESP2022-NER': 1866,
'arize-ai/ecommerce_reviews_with_language_drift': 1867,
'UCL-DARK/ludwig': 1868,
'Aunsiels/InfantBooks': 1874,
'openclimatefix/uk_pv': 1875,
'copenlu/fever_gold_evidence': 1876,
'rungalileo/mit_movies_fixed_connll_format': 1877,
'jamescalam/youtube-transcriptions': 1878,
'lmqg/qa_harvesting_from_wikipedia': 1879,
'qanastek/Biosses-BLUE': 1880,
'zeronix1020/Strawberry-Disease': 1881,
'dferndz/cSQuAD2': 1882,
'taln-ls2n/pubmed': 1883,
'BeIR/scidocs-generated-queries': 1884,
'jmhessel/newyorker_caption_contest': 1885,
'inverse-scaling/NeQA': 1915,
'DTU54DL/common-voice': 1916,
'turingbench/TuringBench': 1917,
'demelin/understanding_fables': 1937,
'RUCAIBox/Open-Dialogue': 1938,
'allenai/multinews_sparse_max': 1939,
'RamAnanth1/lex-fridman-podcasts': 1940,
'sled-umich/Conversation-Entailment': 1941,
'stevhliu/demo': 1942,
'svakulenk0/qrecc': 1943,
'arize-ai/movie_reviews_with_context_drift': 1944,
'launch/ampere': 1945,
'AnonymousSub/recipe_RL_data_roberta-base': 1946,
'dreamproit/bill_summary_us': 1947,
'bgstud/libri-whisper-raw': 1948,
'jpwahle/etpc': 1949,
'DTU54DL/common-native-proc': 1950,
'mbartolo/synQA': 1951,
'wanyu/IteraTeR_full_doc': 1952,
'wanyu/IteraTeR_human_doc': 1953,
'orieg/elsevier-oa-cc-by': 1954,
'climatebert/environmental_claims': 1955,
'SocialGrep/the-reddit-climate-change-dataset': 1956,
'KGraph/FB15k-237': 1958,
'KheemDH/data': 1959,
'mwong/fever-evidence-related': 1960,
'HuggingFaceM4/TGIF': 1961,
'BeIR/fever-generated-queries': 1962,
'nateraw/ade20k-tiny': 1963,
'BeIR/cqadupstack-qrels': 1964,
'knkarthick/highlightsum': 1965,
'RUCAIBox/Data-to-text-Generation': 1966,
'GateNLP/broad_twitter_corpus': 1967,
'Tidrael/finance-headlines': 1968,
'lmqg/qag_squad': 1969,
'pacovaldez/stackoverflow-questions-2016': 1970,
'BeIR/fiqa-generated-queries': 1971,
'BeIR/signal1m-generated-queries': 1972,
'MicPie/unpredictable_msdn-microsoft-com': 1973,
'zeroshot/twitter-financial-news-topic': 1974,
'inverse-scaling/quote-repetition': 1975,
'esc-bench/esc-diagnostic-backup': 1976,
'lmqg/qg_annotation': 1977,
'sileod/wep-probes': 1978,
'DTU54DL/common-voice-test3k': 1981,
'jakartaresearch/causalqa': 1982,
'copenlu/sufficient_facts': 2002,
'ConvLab/multiwoz21': 2005,
'arka0821/multi_document_summarization': 2006,
'strombergnlp/rumoureval_2019': 2007,
'rongzhangibm/NaturalQuestionsV2': 2008,
'Muennighoff/mbpp': 2009,
'RUCAIBox/Simplification': 2011,
'shubhamg2208/lexicap': 2012,
'olm/olm-wikipedia-20220701': 2013,
'esc-bench/esc-diagnostic-dataset': 2014,
'jpwahle/autoregressive-paraphrase-dataset': 2015,
'GabrielVidal/dead-by-daylight-perks': 2016,
'DTU54DL/common-proc-whisper': 2017,
'valurank/PoliticalBias': 2018,
'McGill-NLP/TopiOCQA': 2019,
'gsarti/magpie': 2020,
'BeIR/cqadupstack-generated-queries': 2021,
'MicPie/unpredictable_mmo-champion-com': 2022,
'RUCAIBox/Question-Generation': 2023,
'allenai/multinews_sparse_mean': 2024,
'demo-org/diabetes': 2025,
'StonyBrookNLP/tellmewhy': 2026,
'bergr7/weakly_supervised_ag_news': 2027,
'din0s/msmarco-nlgen': 2028,
'frankier/cross_domain_reviews': 2029,
'gart-labor/pumpnli': 2030,
'AndyChiang/cloth': 2031,
'olm/olm-CC-MAIN-2017-22-sampling-ratio-0.16178770949': 2032,
'bgstud/libri': 2033,
'DTU54DL/commonvoice_accent_test': 2034,
'lewtun/my-awesome-dataset': 2035,
'peixian/rtGender': 2036,
'pmc/open_access': 2039,
'uva-irlab/trec-cast-2019-multi-turn': 2043,
'DFKI-SLT/scidtb': 2044,
'surrey-nlp/PLOD-filtered': 2045,
'wanyu/IteraTeR_v2': 2046,
'strombergnlp/ipm_nel': 2047,
'HuggingFaceM4/charades': 2048,
'ncats/EpiSet4NER-v2': 2050,
'HuggingFaceM4/ActivitiyNet_Captions': 2051,
'sileod/discourse_marker_qa': 2052,
'yoshitomo-matsubara/srsd-feynman_medium': 2053,
'BeIR/nfcorpus-generated-queries': 2054,
'BeIR/trec-news-generated-queries': 2055,
'BeIR/robust04-generated-queries': 2056,
'BeIR/quora-generated-queries': 2057,
'valurank/Adult-content-dataset': 2058,
'launch/open_question_type': 2059,
'knkarthick/topicsum': 2060,
'yuningm/citesum': 2061,
'elihoole/asrs-aviation-reports': 2062,
'DeveloperOats/DBPedia_Classes': 2063,
'hoskinson-center/proof-pile': 2064,
'RUCAIBox/Summarization': 2065,
'RUCAIBox/Question-Answering': 2066,
'RUCAIBox/Story-Generation': 2067,
'RUCAIBox/Paraphrase': 2068,
'jakartaresearch/semeval-absa': 2069,
'tner/ttc_dummy': 2071,
'copenlu/citeworth': 2072,
'allenai/multinews_sparse_oracle': 2073,
'allenai/multixscience_sparse_oracle': 2074,
'allenai/multixscience_sparse_mean': 2075,
'allenai/multixscience_sparse_max': 2076,
'allenai/ms2_sparse_oracle': 2077,
'mschi/blogspot_raw': 2078,
'gaurikapse/civis-consultation-summaries': 2079,
'chenghao/cuad_qa': 2080,
'esc-bench/esc-datasets': 2081,
'olm/olm-wikipedia-20221001': 2082,
'allenai/wcep_dense_oracle': 2083,
'dennlinger/wiki-paragraphs': 2084,
'AndyChiang/dgen': 2085,
'esb/diagnostic-dataset': 2086,
'havens2/naacl2022': 2087,
'fkdosilovic/docee-event-classification': 2088,
'DTU54DL/demo-common-whisper': 2089,
'dferndz/cSQuAD1': 2090,
'jpcorb20/multidogo': 2091,
'julien-c/reactiongif': 2092,
'lara-martin/Scifi_TV_Shows': 2093,
'lukesjordan/worldbank-project-documents': 2094,
'mnemlaghi/widdd': 2095,
'mvarma/medwiki': 2096,
'nateraw/beans': 2098,
'nateraw/cats_vs_dogs': 2099,
'nateraw/food101': 2100,
'nateraw/sync_food101': 2101,
'ncats/EpiSet4BinaryClassification': 2102,
'ncats/EpiSet4NER-v1': 2103,
'peixian/equity_evaluation_corpus': 2104,
'rajeshradhakrishnan/malayalam_wiki': 2105,
'softcatala/open-source-english-catalan-corpus': 2106,
'toloka/CrowdSpeech': 2107,
'valurank/12-factor': 2108,
'valurank/PoliticalBias_AllSides_Txt': 2109,
'valurank/PoliticalBias_Sources': 2110,
'valurank/hate-multi': 2111,
'valurank/news-12factor': 2112,
'valurank/offensive-multi': 2113,
'webimmunization/COVID-19-vaccine-attitude-tweets': 2114,
'wpicard/nostradamus-propheties': 2115,
'yuanchuan/annotated_reference_strings': 2116,
'ruanchaves/stan_large': 2117,
'ruanchaves/stan_small': 2118,
'ruanchaves/boun': 2119,
'ruanchaves/dev_stanford': 2120,
'ruanchaves/test_stanford': 2121,
'ruanchaves/snap': 2122,
'z-uo/qasper-squad': 2123,
'SocialGrep/the-antiwork-subreddit-dataset': 2124,
'CLUTRR/v1': 2126,
'malteos/test2': 2132,
'TomTBT/pmc_open_access_xml': 2133,
'SocialGrep/the-reddit-dataset-dataset': 2137,
'SocialGrep/the-reddit-place-dataset': 2139,
'projecte-aina/gencata': 2141,
'mwong/climate-evidence-related': 2142,
'mwong/climate-claim-related': 2143,
'surrey-nlp/PLOD-unfiltered': 2144,
'SocialGrep/the-reddit-irl-dataset': 2145,
'Lexi/spanextract': 2147,
'mwong/climatetext-claim-related-evaluation': 2148,
'mwong/climatetext-evidence-related-evaluation': 2149,
'ylacombe/xsum_factuality': 2150,
'mwong/climatetext-climate_evidence-claim-related-evaluation': 2151,
'mwong/climatetext-claim-climate_evidence-related-evaluation': 2152,
'mwong/climatetext-evidence-claim-pair-related-evaluation': 2153,
'mwong/climatetext-claim-evidence-pair-related-evaluation': 2154,
'patrickvonplaten/librispeech_asr_self_contained': 2155,
'BritishLibraryLabs/web_archive_classification': 2158,
'albertxu/CrosswordQA': 2159,
'SocialGrep/the-reddit-nft-dataset': 2160,
'janck/bigscience-lama': 2162,
'strombergnlp/twitter_pos_vcb': 2163,
'Filippo/osdg_cd': 2164,
'Ukhushn/home-depot': 2165,
'pile-of-law/eoir_privacy': 2166,
'drAbreu/sd-nlp-2': 2168,
'Leyo/TGIF': 2173,
'strombergnlp/named_timexes': 2174,
'domenicrosati/TruthfulQA': 2175,
'Roh/ryanspeech': 2176,
'Leyo/ActivityNet_Captions': 2177,
'IsaacBot/SQuAD-single-sentence-QA': 2178,
'morteza/cogtext': 2179,
'wdc/products-2017': 2180,
'rajeshvarma/QA_on_SLA': 2196,
'statworx/haiku': 2197,
'rajistics/million-headlines': 2198,
'feyzaakyurek/BBNLI': 2199,
'launch/gov_report_qs': 2200,
'DFKI-SLT/wikitext_linked': 2202,
'dianalogan/Marketing-Budget-and-Actual-Sales-Dataset': 2204,
'mehnaazasad/arxiv-co-ga': 2205,
'JeremyAlain/123_test': 2206,
'BeIR/arguana-generated-queries': 2209,
'BeIR/climate-fever-generated-queries': 2210,
'BeIR/dbpedia-entity-generated-queries': 2211,
'wise-east/spolin': 2212,
'yoshitomo-matsubara/srsd-feynman_hard': 2213,
'florentgbelidji/edmunds-car-ratings': 2214,
'olivierdehaene/xkcd': 2215,
'rajistics/auditor_review': 2216,
'BeIR/scifact-generated-queries': 2217,
'BeIR/trec-covid-generated-queries': 2218,
'BeIR/webis-touche2020-generated-queries': 2219,
'BeIR/nq-generated-queries': 2220,
'BeIR/hotpotqa-generated-queries': 2221,
'BeIR/bioasq-generated-queries': 2222,
'icelab/ntrs_meta': 2223,
'iejMac/CLIP-Kinetics700': 2224,
'fever/feverous': 2225,
'Livingwithmachines/hmd-erwt-training': 2226,
'wkrl/cord': 2227,
'launch/reddit_qg': 2228,
'arize-ai/xtreme_en': 2229}
dataset_rank['Anthropic/model-written-evals']=13
dataset_rank['Anthropic/hh-rlhf']=14 | 24,310 | 28.290361 | 69 | py |
sm-vit | sm-vit-main/train.py | # coding=utf-8
from __future__ import absolute_import, division, print_function
wnb = False
if wnb:
import wandb
wandb.init(project="sm-vit", entity="xxx")
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import time
import torch
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
model_to_save = model.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
torch.save(model_to_save.state_dict(), model_checkpoint)
logger.info("Saved model checkpoint to [DIR: %s]", args.output_dir)
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
if args.dataset == "dogs":
num_classes = 120
elif args.dataset == "CUB":
num_classes=200
elif args.dataset == "nabirds":
num_classes = 555
else:
raise Exception(f'Unknown dataset "{args.dataset}"')
model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes, vis=True, smoothing_value=args.smoothing_value, dataset=args.dataset, \
coeff_max=args.coeff_max, contr_loss=args.contr_loss, focal_loss=args.focal_loss)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def valid(args, model, writer, test_loader, global_step):
# Validation!
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(epoch_iterator):
if wnb: wandb.log({"step": step})
batch = tuple(t.to(args.device) for t in batch)
if args.sm_vit:
x, y, mask = batch
else:
x, y = batch
with torch.no_grad():
if args.sm_vit:
logits = model(x, None, mask)[0]
else:
logits = model(x)[0]
eval_loss = loss_fct(logits, y)
if args.contr_loss:
eval_loss = eval_loss.mean()
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0 )
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0 )
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
logger.info("\n")
logger.info("Validation Results")
logger.info("Global Steps: %d" % global_step)
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
logger.info("Valid Accuracy: %2.5f" % accuracy)
writer.add_scalar("test/accuracy", scalar_value=accuracy, global_step=global_step)
if wnb: wandb.log({"acc_test": accuracy})
return accuracy
def train(args, model):
""" Train the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join("logs", args.name))
best_step=0
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Prepare dataset
train_loader, test_loader = get_loader(args)
# Prepare optimizer and scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr=args.learning_rate,
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
model, optimizer = amp.initialize(models=model,
optimizers=optimizer,
opt_level=args.fp16_opt_level)
amp._amp_state.loss_scalers[0]._loss_scale = 2**20
# Distributed training
if args.local_rank != -1:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
# Train!
start_time = time.time()
logger.info("***** Running training *****")
logger.info(" Total optimization steps = %d", args.num_steps)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
losses = AverageMeter()
global_step, best_acc = 0, 0
while True:
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
all_preds, all_label = [], []
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
if args.sm_vit:
x, y, mask = batch
loss, logits = model(x, y, mask)
else:
x, y = batch
loss, logits = model(x, y)
if args.contr_loss:
loss = loss.mean()
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0 )
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0 )
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val) )
if args.local_rank in [-1, 0]:
writer.add_scalar("train/loss", scalar_value=losses.val, global_step=global_step)
writer.add_scalar("train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
save_model(args, model)
best_acc = accuracy
best_step = global_step
logger.info("best accuracy so far: %f" % best_acc)
logger.info("best accuracy in step: %f" % best_step)
model.train()
if global_step % t_total == 0:
break
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
accuracy = torch.tensor(accuracy).to(args.device)
dist.barrier()
train_accuracy = reduce_mean(accuracy, args.nprocs)
train_accuracy = train_accuracy.detach().cpu().numpy()
writer.add_scalar("train/accuracy", scalar_value=train_accuracy, global_step=global_step)
if wnb: wandb.log({"acc_train": train_accuracy})
logger.info("train accuracy so far: %f" % train_accuracy)
logger.info("best valid accuracy in step: %f" % best_step)
losses.reset()
if global_step % t_total == 0:
break
if args.local_rank in [-1, 0]:
writer.close()
end_time = time.time()
logger.info("Best Accuracy: \t%f" % best_acc)
logger.info("Total Training Time: \t%f" % ((end_time - start_time) / 3600))
logger.info("End Training!")
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--name", required=True,
default="output",
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["CUB", "dogs", "nabirds"], default="CUB",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which ViT variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="models/pre_trained/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be saved.")
parser.add_argument("--img_size", default=400, type=int,
help="After-crop image resolution")
parser.add_argument("--resize_size", default=448, type=int,
help="Pre-crop image resolution")
parser.add_argument("--train_batch_size", default=16, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=16, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=200, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--num_workers", default=4, type=int,
help="Number of workers for dataset preparation.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training steps to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--smoothing_value', type=float, default=0.0,
help="Label smoothing value\n")
parser.add_argument('--sm_vit', action='store_true',
help="Whether to use SM-ViT")
parser.add_argument('--coeff_max', type=float, default=0.25,
help="Coefficient for attention guiding (see Eq. 3 in the SM-ViT paper). Best for CUB adn NABirds: '0.25', best for St Dogs: '0.3'.\n")
parser.add_argument('--low_memory', action='store_true',
help="Allows to use less memory (RAM) during input image feeding. False: Slower - Do image pre-processing for the whole dataset at the beginning and store the results in memory. True: Faster - Do pre-processing on-the-go.")
parser.add_argument('--contr_loss', action='store_true',
help="Whether to use contrastive loss")
parser.add_argument('--focal_loss', action='store_true',
help="Whether to use focal loss")
parser.add_argument('--data_root', type=str, default='./data', # Originall
help="Path to the dataset\n")
# '/l/users/20020067/Datasets/CUB_200_2011/CUB_200_2011/CUB_200_2011') # CUB
# '/l/users/20020067/Datasets/Stanford Dogs/Stanford_Dogs') # dogs
# '/l/users/20020067/Datasets/NABirds/NABirds') # NABirds
args = parser.parse_args()
#args.data_root = '{}/{}'.format(args.data_root, args.dataset) # for future development
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
args.nprocs = torch.cuda.device_count()
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
# Model & Tokenizer Setup
args, model = setup(args)
if wnb: wandb.watch(model)
# Training
train(args, model)
if __name__ == "__main__":
main()
| 17,894 | 39.763098 | 247 | py |
sm-vit | sm-vit-main/models/modeling.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
from os.path import join as pjoin
from re import X
from matplotlib.cbook import flatten
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
import torch.nn.functional as F
import models.configs as configs
debug_mode = False # For debug
if debug_mode: import random
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
logger = logging.getLogger(__name__)
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def swish(x):
return x * torch.sigmoid(x)
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class FocalLoss(torch.nn.Module):
def __init__(self, alpha=1, gamma=2, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduce = reduce
def forward(self, inputs, targets):
BCE_loss = torch.nn.CrossEntropyLoss()(inputs, targets)
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Attention(nn.Module):
def __init__(self, config, vis, coeff_max=0.25):
super(Attention, self).__init__()
self.coeff_max = coeff_max
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.out = Linear(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
self.softmax2 = Softmax(dim=-2)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if mask is not None:
if debug_mode:
print_info = True if (random.random() < 0.000001) else False
x = random.random()
if (x > 0.00005) and (x < 0.00007):
print_info = True
else:
print_info = False
else:
print_info = False
max_as = torch.max(attention_scores[:, :, 0, :], dim=2, keepdim=False)[0]
max_as = max_as.to(device='cuda')
if print_info:
print("mask before:", mask)
print("attn scores before:", attention_scores[:, :, 0, :])
print("attn scores max_min before:")
print(max_as, torch.min(attention_scores[:, :, 0, :], dim=2, keepdim=False)[0])
print(torch.topk(attention_scores[:, :, 0, :], 5, largest=True), torch.topk(attention_scores[:, :, 0, :], 5, largest=False))
mask_626 = torch.zeros(mask.size(0), (mask.size(1) + 1)) #, dtype=torch.float64) # dtype=torch.double)
mask_626 = mask_626.to(device='cuda')
mask_626[:, 1:] = mask[:, :]
mask_626[:, 0] = 0
if print_info: print("mask626:", mask_626)
# positive only, obj + (max * coeff):
attention_scores[:, :, 0, :] = \
torch.where( mask_626[:, None, :] < 0.5, \
torch.add( attention_scores[:, :, 0, :], \
torch.mul( max_as[:, :, None] , torch.tensor(self.coeff_max).cuda()) ), \
attention_scores[:, :, 0, :] #.float()
)
if print_info:
print("attn scores after:", attention_scores[:, :, 0, :])
print("attn scores max_min after:")
print(torch.max(attention_scores[:, :, 0, :]), torch.min(attention_scores[:, :, 0, :]))
print(torch.topk(attention_scores[:, :, 0, :], 5, largest=True), torch.topk(attention_scores[:, :, 0, :], 5, largest=False))
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights, self.softmax2(attention_scores)[:,:,:,0]
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size, in_channels=3):
super(Embeddings, self).__init__()
self.hybrid = None
img_size = _pair(img_size)
# EXPERIMENTAL. Overlapping patches:
overlap = False
if overlap: slide = 12 # 14
if config.patches.get("grid") is not None:
grid_size = config.patches["grid"]
patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1])
n_patches = (img_size[0] // 16) * (img_size[1] // 16)
self.hybrid = True
else:
patch_size = _pair(config.patches["size"])
if overlap:
n_patches = ((img_size[0] - patch_size[0]) // slide + 1) * ((img_size[1] - patch_size[1]) // slide + 1)
else:
n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
self.hybrid = False
if overlap:
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=(slide, slide) )
else:
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size )
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches+1, config.hidden_size))
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
x = self.patch_embeddings(x)
x = x.flatten(2)
x = x.transpose(-1, -2)
x = torch.cat((cls_tokens, x), dim=1)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class Block(nn.Module):
    """One pre-norm transformer encoder block: self-attention + MLP, each with
    a residual connection.  forward() also passes through the attention
    weights and per-token contribution scores produced by `Attention`."""

    def __init__(self, config, vis, coeff_max):
        super(Block, self).__init__()
        self.hidden_size = config.hidden_size
        self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn = Mlp(config)
        self.attn = Attention(config, vis, coeff_max)

    def forward(self, x, mask=None):
        # Attention sub-layer (pre-norm) with residual connection.
        residual = x
        x, weights, contribution = self.attn(self.attention_norm(x), mask)
        x = x + residual
        # Feed-forward sub-layer (pre-norm) with residual connection.
        residual = x
        x = self.ffn(self.ffn_norm(x)) + residual
        return x, weights, contribution

    def load_from(self, weights, n_block):
        """Copy pretrained checkpoint tensors for encoder block *n_block*.

        Checkpoint keys look like "Transformer/encoderblock_{n}/<name>/kernel";
        flat attention kernels are reshaped to (hidden, hidden) and transposed
        into torch layout.
        """
        ROOT = f"Transformer/encoderblock_{n_block}"
        with torch.no_grad():
            hidden = self.hidden_size
            # q/k/v/out projections.
            attn_pairs = ((self.attn.query, ATTENTION_Q),
                          (self.attn.key, ATTENTION_K),
                          (self.attn.value, ATTENTION_V),
                          (self.attn.out, ATTENTION_OUT))
            for module, name in attn_pairs:
                kernel = np2th(weights[pjoin(ROOT, name, "kernel")]).view(hidden, hidden).t()
                module.weight.copy_(kernel)
                module.bias.copy_(np2th(weights[pjoin(ROOT, name, "bias")]).view(-1))
            # Feed-forward layers.
            for module, name in ((self.ffn.fc1, FC_0), (self.ffn.fc2, FC_1)):
                module.weight.copy_(np2th(weights[pjoin(ROOT, name, "kernel")]).t())
                module.bias.copy_(np2th(weights[pjoin(ROOT, name, "bias")]).t())
            # Layer norms (the checkpoint names the weight "scale").
            for module, name in ((self.attention_norm, ATTENTION_NORM),
                                 (self.ffn_norm, MLP_NORM)):
                module.weight.copy_(np2th(weights[pjoin(ROOT, name, "scale")]))
                module.bias.copy_(np2th(weights[pjoin(ROOT, name, "bias")]))
class Encoder(nn.Module):
    """Stack of transformer Blocks followed by a final LayerNorm.

    forward() returns the encoded token sequence and, when ``vis`` is True,
    the list of per-layer attention weight tensors (an empty list otherwise).
    """

    def __init__(self, config, vis, coeff_max):
        super(Encoder, self).__init__()
        self.vis = vis
        self.layer = nn.ModuleList()
        self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
        # NOTE: the original wrapped each freshly-constructed Block in
        # copy.deepcopy, which is redundant for a brand-new module — removed.
        for _ in range(config.transformer["num_layers"]):
            self.layer.append(Block(config, vis, coeff_max))

    def forward(self, hidden_states, mask=None):
        """Run every block over ``hidden_states``; ``mask`` is forwarded to attention."""
        attn_weights = []  # per-layer attention maps, collected only when vis=True
        # (Dead `tokens` and `contributions` accumulators from the original were
        # removed: they were populated but never read or returned.)
        for layer_block in self.layer:
            hidden_states, weights, _ = layer_block(hidden_states, mask)
            if self.vis:
                attn_weights.append(weights)
        encoded = self.encoder_norm(hidden_states)
        return encoded, attn_weights
class Transformer(nn.Module):
    """Patch/position embedding followed by the transformer Encoder."""

    def __init__(self, config, img_size, vis, coeff_max):
        super(Transformer, self).__init__()
        self.embeddings = Embeddings(config, img_size=img_size)
        self.encoder = Encoder(config, vis, coeff_max)

    def forward(self, input_ids, mask=None):
        # Embed the input, then encode; the encoder already returns the
        # (encoded, attn_weights) pair callers expect.
        embedding_output = self.embeddings(input_ids)
        return self.encoder(embedding_output, mask)
class VisionTransformer(nn.Module):
    """ViT classifier: Transformer backbone + linear head on the class token.

    When ``labels`` is given, forward() returns (loss, logits); otherwise it
    returns (logits, attn_weights).  The loss is cross-entropy (optionally
    label-smoothed or focal), plus a contrastive term when ``contr_loss``.
    """
    def __init__(self, config, img_size=400, num_classes=200, smoothing_value=0, zero_head=False, vis=False, dataset='CUB', coeff_max=0.25, contr_loss=False, focal_loss=False):
        super(VisionTransformer, self).__init__()
        self.num_classes = num_classes
        self.zero_head = zero_head
        self.smoothing_value = smoothing_value
        self.classifier = config.classifier
        self.dataset=dataset
        self.contr_loss = contr_loss
        self.focal_loss = focal_loss
        self.transformer = Transformer(config, img_size, vis, coeff_max)
        self.head = Linear(config.hidden_size, num_classes)
    def forward(self, x, labels=None, mask=None):
        """Classify batch *x*; with *labels* also compute the training loss."""
        x, attn_weights = self.transformer(x, mask)
        # Classify from the first token (the class token prepended by Embeddings).
        logits = self.head(x[:, 0])
        if labels is not None:
            # Pick the loss: plain CE, label-smoothed CE, or focal loss.
            if self.smoothing_value == 0:
                loss_fct = CrossEntropyLoss()
            else:
                loss_fct = LabelSmoothing(self.smoothing_value)
            if self.focal_loss: # enforce another type of loss
                loss_fct = FocalLoss()
            ce_loss = loss_fct(logits.view(-1, self.num_classes), labels.view(-1))
            if self.contr_loss:
                # Contrastive term on the class-token features (see con_loss below).
                contrast_loss = con_loss(x[:, 0], labels.view(-1))
                loss = ce_loss + contrast_loss
            else:
                loss = ce_loss # FFVT
            return loss, logits
        else:
            return logits, attn_weights
    def load_from(self, weights):
        """Load a pretrained checkpoint (dict keyed by names like "head/kernel").

        Position embeddings are bilinearly resized (ndimage.zoom) when the
        pretrained grid size differs from this model's.
        """
        with torch.no_grad():
            if self.zero_head:
                nn.init.zeros_(self.head.weight)
                nn.init.zeros_(self.head.bias)
            else:
                self.head.weight.copy_(np2th(weights["head/kernel"]).t())
                self.head.bias.copy_(np2th(weights["head/bias"]).t())
            self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
            self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
            self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
            self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
            self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))
            posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
            posemb_new = self.transformer.embeddings.position_embeddings
            if posemb.size() == posemb_new.size():
                self.transformer.embeddings.position_embeddings.copy_(posemb)
            else:
                logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
                ntok_new = posemb_new.size(1)
                if self.classifier == "token":
                    # Keep the class-token embedding aside; resize only the grid part.
                    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
                    ntok_new -= 1
                else:
                    posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
                gs_old = int(np.sqrt(len(posemb_grid)))
                gs_new = int(np.sqrt(ntok_new))
                print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
                posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
                zoom = (gs_new / gs_old, gs_new / gs_old, 1)
                posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
                posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
                posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
                self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))
            # Delegate per-block loading to each child (skipping names starting
            # with 'ff'); the ModuleList children are named '0', '1', ...
            for bname, block in self.transformer.encoder.named_children():
                if bname.startswith('ff') == False:
                    for uname, unit in block.named_children():
                        unit.load_from(weights, n_block=uname)
            if self.transformer.embeddings.hybrid:
                self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
                gn_weight = np2th(weights["gn_root/scale"]).view(-1)
                gn_bias = np2th(weights["gn_root/bias"]).view(-1)
                self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
                self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)
                for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
                    for uname, unit in block.named_children():
                        unit.load_from(weights, n_block=bname, n_unit=uname)
def con_loss(features, labels):
    """Contrastive loss over a batch of feature vectors.

    Features are L2-normalized; same-label pairs are pulled together with cost
    ``1 - cos`` (self-pairs contribute 0 since cos(f, f) == 1), different-label
    pairs are pushed apart with hinge cost ``max(cos - 0.4, 0)``.  The sum is
    averaged over all B*B ordered pairs.

    Args:
        features: (B, D) float tensor.
        labels:   (B,) integer tensor of class labels.

    Returns:
        Scalar tensor loss.
    """
    B, _ = features.shape
    features = F.normalize(features)
    cos_matrix = features.mm(features.t())
    # Vectorized pairwise label comparison (was an O(B^2) Python loop of
    # torch.stack([labels == labels[i] for i in range(B)])).
    pos_label_matrix = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
    neg_label_matrix = 1 - pos_label_matrix
    pos_cos_matrix = 1 - cos_matrix
    neg_cos_matrix = (cos_matrix - 0.4).clamp_(min=0)  # hinge at the 0.4 margin
    loss = (pos_cos_matrix * pos_label_matrix).sum() + (neg_cos_matrix * neg_label_matrix).sum()
    loss /= (B * B)
    return loss
# Registry mapping model-name strings (as selected on the command line) to
# ready-built config objects from the local `configs` module.
CONFIGS = {
    'ViT-B_16': configs.get_b16_config(),
    'ViT-B_32': configs.get_b32_config(),
    'ViT-L_16': configs.get_l16_config(),
    'ViT-L_32': configs.get_l32_config(),
    'ViT-H_14': configs.get_h14_config(),
    'R50-ViT-B_16': configs.get_r50_b16_config(),
    'testing': configs.get_testing(),
}
# (dataset-row tail: 19,835 | 38.12426 | 176 | py)
# (next row: sm-vit | sm-vit-main/utils/data_utils.py)
import logging
import torch
from torchvision import transforms, datasets
from .dataset import *
from torch.utils.data import DataLoader, RandomSampler, DistributedSampler, SequentialSampler
from PIL import Image
from .autoaugment import AutoAugImageNetPolicy
import os
logger = logging.getLogger(__name__)
def get_loader(args):
    """Build the train/test DataLoaders for the dataset named by ``args.dataset``.

    Supported: 'dogs', 'CUB', 'nabirds' (SM-ViT-aware branches) and 'INat2017',
    'car', 'air' (plain branches).  With ``args.sm_vit`` the resize goes straight
    to ``args.img_size`` and horizontal flipping is *not* done here (it is done
    jointly with the segmentation mask in dataset.py).

    Returns:
        (train_loader, test_loader); ``test_loader`` is None when no test set exists.

    Raises:
        ValueError: if ``args.dataset`` matches no branch (previously this
        surfaced later as a confusing NameError on ``trainset``).
    """
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    # NOTE: two generic `transform_train`/`transform_test` pipelines used to be
    # built here but were never referenced by any branch below — removed.
    trainset = testset = None

    if args.dataset == 'dogs':
        if args.sm_vit:
            train_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                # transforms.RandomHorizontalFlip(), !!! FLIPPING in dataset.py !!!
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        else:
            train_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                transforms.RandomCrop((args.img_size, args.img_size)),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                transforms.CenterCrop((args.img_size, args.img_size)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        trainset = dogs(args.dataset,
                        root=args.data_root,
                        is_train=True,
                        cropped=False,
                        transform=train_transform,
                        download=False,
                        sm_vit=args.sm_vit,
                        low_memory=args.low_memory,
                        img_size=args.img_size
                        )
        testset = dogs(args.dataset,
                       root=args.data_root,
                       is_train=False,
                       cropped=False,
                       transform=test_transform,
                       download=False,
                       sm_vit=args.sm_vit,
                       low_memory=args.low_memory,
                       img_size=args.img_size
                       )

    elif args.dataset== "CUB":
        if args.sm_vit:
            train_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size),Image.BILINEAR),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                #transforms.RandomHorizontalFlip(), # !!! FLIPPING in dataset.py !!!
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size),Image.BILINEAR),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        else:
            train_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
                transforms.RandomCrop((args.img_size, args.img_size)),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                transforms.CenterCrop((args.img_size, args.img_size)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        # This branch only runs for dataset == "CUB", so resolve the class
        # directly instead of the original `eval(args.dataset)` (unsafe eval).
        trainset = CUB(args.dataset, root=args.data_root, is_train=True, \
                    transform=train_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
        testset = CUB(args.dataset, root=args.data_root, is_train=False, \
                    transform = test_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)

    elif args.dataset == 'nabirds':
        if args.sm_vit:
            train_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add (from FFVT) mb try?
                #transforms.RandomHorizontalFlip(), # !!! FLIPPING in dataset.py !!!
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        else:
            train_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                transforms.RandomCrop((args.img_size, args.img_size)),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
            test_transform=transforms.Compose([
                transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                transforms.CenterCrop((args.img_size, args.img_size)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])
        trainset = NABirds(args.dataset, root=args.data_root, is_train=True, \
                    transform=train_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
        testset = NABirds(args.dataset, root=args.data_root, is_train=False, \
                    transform=test_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)

    ### Not optimised datasets:
    if args.dataset == 'INat2017':
        train_transform=transforms.Compose([transforms.Resize((400, 400), Image.BILINEAR),
                                    transforms.RandomCrop((304, 304)),
                                    transforms.RandomHorizontalFlip(),
                                    AutoAugImageNetPolicy(),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        test_transform=transforms.Compose([transforms.Resize((400, 400), Image.BILINEAR),
                                    transforms.CenterCrop((304, 304)),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        trainset = INat2017(args.data_root, 'train', train_transform)
        testset = INat2017(args.data_root, 'val', test_transform)

    elif args.dataset == 'car':
        trainset = CarsDataset(os.path.join(args.data_root,'devkit/cars_train_annos.mat'),
                            os.path.join(args.data_root,'cars_train'),
                            os.path.join(args.data_root,'devkit/cars_meta.mat'),
                            # cleaned=os.path.join(data_dir,'cleaned.dat'),
                            transform=transforms.Compose([
                                    transforms.Resize((600, 600), Image.BILINEAR),
                                    transforms.RandomCrop((448, 448)),
                                    transforms.RandomHorizontalFlip(),
                                    AutoAugImageNetPolicy(),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
                            )
        testset = CarsDataset(os.path.join(args.data_root,'cars_test_annos_withlabels.mat'),
                            os.path.join(args.data_root,'cars_test'),
                            os.path.join(args.data_root,'devkit/cars_meta.mat'),
                            # cleaned=os.path.join(data_dir,'cleaned_test.dat'),
                            transform=transforms.Compose([
                                    transforms.Resize((600, 600), Image.BILINEAR),
                                    transforms.CenterCrop((448, 448)),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
                            )

    elif args.dataset== "air":
        train_transform=transforms.Compose([transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
                                    transforms.RandomCrop((args.img_size, args.img_size)),
                                    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add
                                    transforms.RandomHorizontalFlip(),
                                    #transforms.RandomVerticalFlip(),
                                    transforms.ToTensor(),
                                    #transforms.Normalize([0.8416, 0.867, 0.8233], [0.2852, 0.246, 0.3262])])
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        test_transform=transforms.Compose([
                                    transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
                                    transforms.CenterCrop((args.img_size, args.img_size)),
                                    #transforms.Resize((args.img_size, args.img_size)),
                                    transforms.ToTensor(),
                                    #transforms.Normalize([0.8416, 0.867, 0.8233], [0.2852, 0.246, 0.3262])])
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        trainset = FGVC_aircraft(root=args.data_root, is_train=True, transform=train_transform)
        testset = FGVC_aircraft(root=args.data_root, is_train=False, transform = test_transform)

    if trainset is None:
        raise ValueError(f"Unknown dataset: {args.dataset!r}")

    if args.local_rank == 0:
        torch.distributed.barrier()

    train_sampler = RandomSampler(trainset) if args.local_rank == -1 else DistributedSampler(trainset)
    test_sampler = SequentialSampler(testset)
    train_loader = DataLoader(trainset,
                              sampler=train_sampler,
                              batch_size=args.train_batch_size,
                              num_workers=args.num_workers,
                              pin_memory=True)
    test_loader = DataLoader(testset,
                             sampler=test_sampler,
                             batch_size=args.eval_batch_size,
                             num_workers=args.num_workers,
                             pin_memory=True) if testset is not None else None

    return train_loader, test_loader
# (dataset-row tail: 12,747 | 46.924812 | 124 | py)
# (next row: sm-vit | sm-vit-main/utils/dataset.py)
import os
import json
from os.path import join
import numpy as np
import scipy
from scipy import io
import scipy.misc
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url, list_dir, check_integrity, extract_archive, verify_str_arg
# My:
from torchvision import transforms
from torchvision.utils import save_image
import random
from torchvision.transforms import functional as F
import U2Net
from U2Net.u2net_test import mask_hw
from skimage import transform as transform_sk
import gc
#
class Generic_smvit_DS():
    def generic_preprocess(self, file_list, file_list_full, shape_hw_list, data_len, train_test_list=None):
        """Eagerly load every image and build its attention mask.

        Per sample: optionally downscale to ``self.max_res``, obtain a U2-Net
        mask and bounding box (``mask_hw``) when ``self.sm_vit``, or use CUB
        ground-truth bboxes/parts, then crop image and mask to the (padded)
        bbox.  Returns the parallel lists (img, mask).
        """
        img = []
        mask = []
        ## For other experiments:
        if self.ds_name != "CUB":
            self.gt_bbox = False
            self.gt_parts = False
        if self.gt_bbox:
            # CUB ground-truth bounding boxes: "x y w h" per line (float strings).
            bounding_boxes_file = open(os.path.join(self.root, 'bounding_boxes.txt'))
            bb_list = []
            for line in bounding_boxes_file:
                bb_list_x = line[:-1].split(' ')[-4]
                bb_list_y = line[:-1].split(' ')[-3]
                bb_list_w = line[:-1].split(' ')[-2]
                bb_list_h = line[:-1].split(' ')[-1]
                bb_list.append( [ int(bb_list_x.split('.')[0]),
                                int(bb_list_y.split('.')[0]),
                                int(bb_list_w.split('.')[0]),
                                int(bb_list_h.split('.')[0]) ]
                                )
            # Keep only the train (or test) split entries.
            bb_list = [x for i, x in zip(train_test_list, bb_list) if i]
        if self.gt_parts:
            # CUB part annotations: 15 parts per image, "present x y" per line.
            parts_file = open(os.path.join(self.root, 'parts/part_locs.txt'))
            PARTS_NUM = 15
            parts_list = []
            part_t = []
            part_count = 0
            for line in parts_file:
                part_t_raw_x = line[:-1].split(' ')[-3]
                part_t_raw_y = line[:-1].split(' ')[-2]
                part_t_pres = line[:-1].split(' ')[-1]
                part_t.append ( [ int(part_t_pres),
                                int(part_t_raw_x.split('.')[0]),
                                int(part_t_raw_y.split('.')[0]) ]
                                )
                part_count = part_count + 1
                if (part_count >= PARTS_NUM):
                    parts_list.append( part_t )
                    part_t = []
                    part_count = 0
            parts_list = [x for i, x in zip(train_test_list, parts_list) if i]
        ##
        print(f'[INFO] Pre-processing {self.mode} files...')
        if self.sm_vit:
            if self.full_ds:
                # One batched U2-Net pass over the whole file list.
                mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = \
                    mask_hw(full_ds=self.full_ds, img_path=file_list_full, shape_hw=shape_hw_list)
            else: # for debug
                img_path = os.path.join(self.root, self.base_folder, file_list)
                img_temp = scipy.misc.imread(img_path)
                h_max_temp = img_temp.shape[0] # y
                w_max_temp = img_temp.shape[1] # x
                mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n = \
                    mask_hw(full_ds=self.full_ds, img_path=img_path, shape_hw=(h_max_temp, w_max_temp))
                mask_temp, x, y, h, w = mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n
        for ind, file in enumerate(file_list[:data_len]):
            if self.debug: print(f"{self.mode} file:", file)
            img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, file))
            ## Downscale large images for memory efficiency
            if self.ds_name != "CUB":
                img_temp = (img_temp).astype(np.uint8)
                if (img_temp.shape[0] > self.max_res) or (img_temp.shape[1] > self.max_res):
                    if self.debug and ind < 10:
                        print("Before:", img_temp.shape[0], img_temp.shape[1])
                        img_name = ("test/img_before_tr" + str(ind) + ".png")
                        Image.fromarray(img_temp, mode='RGB').save(img_name)
                    # Scale the longer side down to max_res, keeping aspect ratio.
                    if img_temp.shape[0] > img_temp.shape[1]:
                        downscale_coef = img_temp.shape[0] / self.max_res
                    else:
                        downscale_coef = img_temp.shape[1] / self.max_res
                    img_temp = transform_sk.resize( img_temp, ( int((img_temp.shape[0] // downscale_coef)), int((img_temp.shape[1] // downscale_coef)) ), \
                        mode='constant', anti_aliasing=True, anti_aliasing_sigma=None, preserve_range=True )
                    if self.debug and ind < 10:
                        print("After:", img_temp.shape[0], img_temp.shape[1])
                        img_temp = (img_temp).astype(np.uint8)
                        img_name = ("test/img_after_tr" + str(ind) + ".png")
                        Image.fromarray(img_temp, mode='RGB').save(img_name)
                else:
                    if self.debug and ind < 10:
                        print("Normal:", img_temp.shape[0], img_temp.shape[1])
                        img_name = ("test/img_normal_tr" + str(ind) + ".png")
                        Image.fromarray(img_temp, mode='RGB').save(img_name)
            h_max = img_temp.shape[0] # y
            w_max = img_temp.shape[1] # x
            #ch_max = img_temp.shape[2] # ch
            if self.gt_bbox:
                x, y, w, h = bb_list[ind] # x - distance from top up left (width), y - distance from top up left (height)
            if self.gt_parts:
                parts = parts_list[ind] # list of 15 parts with [x, y] center corrdinates
                #mask_temp = np.zeros((int(h_max), int(w_max))) # Black mask
                mask_temp = np.ones((int(h_max), int(w_max)))
                p_part = 16*3 # padding around center point
                # Zero out (mark as foreground) a square window around each
                # present part center, clamped to the image borders.
                for part_n in range(len(parts)):
                    part = parts[part_n]
                    if part[0] != 0:
                        x_min_p = part[1] - p_part
                        if x_min_p < 0:
                            x_min_p = 0
                        x_max_p = part[1] + p_part
                        if x_max_p > w_max:
                            x_max_p = w_max
                        y_min_p = part[2] - p_part
                        if y_min_p < 0:
                            y_min_p = 0
                        y_max_p = part[2] + p_part
                        if y_max_p > h_max:
                            y_max_p = h_max
                        #mask_temp[int(y_min_p):int(y_max_p), int(x_min_p):int(x_max_p)] = 1 # Black mask
                        mask_temp[int(y_min_p):int(y_max_p), int(x_min_p):int(x_max_p)] = 0
            if self.sm_vit and self.full_ds:
                mask_temp = mask_u2n_list[ind]
                x = x_u2n_list[ind]
                y = y_u2n_list[ind]
                h = h_u2n_list[ind]
                w = w_u2n_list[ind]
            ## Image and Mask Padding:
            if self.sm_vit or self.gt_bbox:
                if self.padding:
                    p = 15 # extra space around bbox
                else:
                    p = 0
                x_min = x - p
                if x_min < 0:
                    x_min = 0
                x_max = x + w + p
                if x_max > w_max:
                    x_max = w_max
                y_min = y - p
                if y_min < 0:
                    y_min = 0
                y_max = y + h + p
                if y_max > h_max:
                    y_max = h_max
                if h_max <=1:
                    print("[WARNING] bad_h", h_max)
                if w_max <=1:
                    print("[WARNING] bad_w", w_max)
                if y_min >= y_max:
                    print("[WARNING] bad_y", "min:", y_min, "max:", y_max)
                    print("[WARNING] y:", y, "h:", h)
                if x_min >= x_max:
                    print("[WARNING] bad_x", "min:", x_min, "max:", x_max)
                    print("[WARNING] x:", x, "w:", w)
            ##
            ## Crop with bbox:
            if self.rand_crop:
                #prob_rcrop = 0.25 # 0.07 # 0.3 # 0.5
                #rand_crop_mask_temp = bool(random.random() < prob_rcrop)
                #if rand_crop_mask_temp:
                # Random crop of ~88% of the image (same window for image and mask).
                h_max_img = img_temp.shape[0]
                w_max_img = img_temp.shape[1]
                #h_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
                h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
                #w_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
                w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
                h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
                w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
                h_crop_max_img = h_crop_mid_img + h_crop_min_img
                w_crop_max_img = w_crop_mid_img + w_crop_min_img
                # Crop image with bbox:
                if len(img_temp.shape) == 3:
                    img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
                else:
                    img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
                # Crop mask with bbox:
                mask_temp = mask_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
            else:
                # Crop image with bbox:
                if len(img_temp.shape) == 3:
                    if self.gt_parts:
                        for j in range(3):
                            img_temp[:, :, j] = img_temp[:, :, j] * mask_temp # Black mask
                    else:
                        #test_img_temp = test_img_temp[int(y):int(y + h), int(x):int(x + w), :] # h, w, ch
                        img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max), :] # h, w, ch
                else:
                    if self.gt_parts:
                        img_temp[:, :] = img_temp[:, :] * mask_temp # Black mask:
                    else:
                        img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max)] # h, w
                # Crop mask with bbox:
                if self.sm_vit or self.gt_bbox:
                    mask_temp = mask_temp[int(y_min):int(y_max), int(x_min):int(x_max)]
            ##
            if ( (img_temp.shape[0] != mask_temp.shape[0]) or (img_temp.shape[1] != mask_temp.shape[1]) ):
                print("[WARNING] Image shape does not match mask shape for sample:", ind, ". \t" , "Found shapes:", img_temp.shape, mask_temp.shape)
            img.append(img_temp)
            mask.append(mask_temp)
        return img, mask
def generic_preprocess_lowMem(self, file_list, file_list_full, shape_hw_list):
print(f'[INFO] Pre-processing {self.mode} files in the low memory mode...')
if self.sm_vit:
if self.full_ds:
mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = \
mask_hw(full_ds=self.full_ds, img_path=file_list_full, shape_hw=shape_hw_list)
else: # for debug
img_path = os.path.join(self.root, self.base_folder, file_list)
img_temp = scipy.misc.imread(img_path)
h_max_temp = img_temp.shape[0] # y
w_max_temp = img_temp.shape[1] # x
mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n = \
mask_hw(full_ds=self.full_ds, img_path=img_path, shape_hw=(h_max_temp, w_max_temp))
mask_temp, x, y, h, w = mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n
# mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = mask_temp, x, y, h, w
return mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list
    def generic_getitem(self, index, img, mask):
        """Transform one pre-loaded (img, mask) pair into training tensors.

        Applies optional joint random cropping, the image transform (with
        horizontal flipping applied identically to image and mask when
        ``self.flip_mask_as_image``), then downsamples the mask to the ViT
        token grid (img_size // 16) and flattens it.
        """
        if self.is_train:
            if self.rand_crop_im_mask:
                # Joint ~88% random crop of image and mask (same window).
                h_max_img = img.shape[0]
                w_max_img = img.shape[1]
                h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
                w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
                h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
                w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
                h_crop_max_img = h_crop_mid_img + h_crop_min_img
                w_crop_max_img = w_crop_mid_img + w_crop_min_img
                # Crop image:
                if len(img.shape) == 3:
                    img = img[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
                else:
                    img = img[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
                # Crop mask:
                mask = mask[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
        # Grayscale images are replicated to 3 channels.
        if len(img.shape) == 2:
            img = np.stack([img] * 3, 2)
        if self.ds_name != "CUB":
            img = (img).astype(np.uint8)
        img = Image.fromarray(img, mode='RGB')
        if self.debug and index < 10:
            img_tem = transforms.ToTensor()(img)
            img_name = ("test/img_bef" + str(index) + ".png")
            save_image( img_tem, img_name)
        ## Image:
        if self.transform is not None:
            if self.is_train:
                if not self.flip_mask_as_image: # normal
                    img = self.transform(img)
                else:
                    # 50/50: either the plain transform, or a flipping variant;
                    # `flipped` tells the mask branch below to mirror too.
                    if random.random() < 0.5:
                        flipped = False
                        img = self.transform(img)
                    else:
                        flipped = True
                        transform_img_flip = transforms.Compose([
                            #transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
                            #transforms.RandomCrop((args.img_size, args.img_size)),
                            transforms.Resize((self.img_size, self.img_size),Image.BILINEAR), # my for bbox
                            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add (FFVT)
                            transforms.RandomHorizontalFlip(p=1.0), # !!! FLIPPING in dataset.py !!!
                            transforms.ToTensor(),
                            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                            ])
                        img = transform_img_flip(img)
            else:
                img = self.transform(img)
        if self.debug and index < 10:
            img_name = ("test/img_aft" + str(index) + ".png")
            save_image( img, img_name)
        ## Mask:
        if self.crop_mask:
            # Add a random ~84% crop frame to the mask (outside is background).
            h_max_im = mask.shape[0]
            w_max_im = mask.shape[1]
            h_crop_mid = int(h_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
            w_crop_mid = int(w_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
            cropped = np.ones_like(mask)
            if self.mid_val:
                cropped = cropped * 0.125 # (for 0.2)
            h_crop_min = random.randint(0, (h_max_im - h_crop_mid)) # 40) #, 400-360) #, h - th)
            w_crop_min = random.randint(0, (w_max_im - w_crop_mid)) # 40) #, 400-360) #, w - tw)
            h_crop_max = h_crop_mid + h_crop_min
            w_crop_max = w_crop_mid + w_crop_min
            cropped[int(h_crop_min):int(h_crop_max), int(w_crop_min):int(w_crop_max)] = 0
            mask = mask + cropped
            if self.mid_val:
                mask[mask > 1.1] = 1
            else:
                mask[mask > 1] = 1
        mask = (mask * 255).astype(np.uint8)
        mask = Image.fromarray(mask, mode='L')
        if self.debug and index < 10:
            mask_tem = transforms.ToTensor()(mask)
            img_name = ("test/mask_bef" + str(index) + ".png")
            save_image( mask_tem, img_name)
        # Mask is resized to the ViT patch grid (one value per token).
        mask_size = int(self.img_size // 16)
        if self.is_train:
            if not self.flip_mask_as_image: # normal
                transform_mask = transforms.Compose([
                    transforms.ToTensor(),
                    transforms.ToPILImage(), #(mode='1'),
                    # non-overlapped:
                    transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
                    transforms.ToTensor()
                    ])
            else:
                if flipped:
                    transform_mask = transforms.Compose([
                        transforms.ToTensor(),
                        transforms.ToPILImage(), #(mode='1'),
                        transforms.RandomHorizontalFlip(p=1.0),
                        # non-overlapped:
                        transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
                        transforms.ToTensor()
                        ])
                else:
                    transform_mask = transforms.Compose([
                        transforms.ToTensor(),
                        transforms.ToPILImage(), #(mode='1'),
                        # non-overlapped:
                        transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
                        transforms.ToTensor()
                        ])
        else:
            transform_mask = transforms.Compose([
                transforms.ToTensor(),
                transforms.ToPILImage(), #(mode='1'),
                # non-overlapped:
                transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
                transforms.ToTensor()])
        mask = transform_mask(mask)
        if self.debug and index < 10:
            img_name = ("test/mask_aft" + str(index) + ".png")
            save_image(mask, img_name)
        # Flatten to a 1-D per-token vector.
        mask = torch.flatten(mask)
        return img, mask
def generic_getitem_lowMem(self, index):
file_temp = self.file_list[index]
img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, file_temp))
## Downscale large images for memory efficiency
if self.ds_name != "CUB":
self.gt_bbox = False
self.gt_parts = False
img_temp = (img_temp).astype(np.uint8)
if (img_temp.shape[0] > self.max_res) or (img_temp.shape[1] > self.max_res):
if self.debug and index < 10:
print("Before:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_before_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
if img_temp.shape[0] > img_temp.shape[1]:
downscale_coef = img_temp.shape[0] / self.max_res
else:
downscale_coef = img_temp.shape[1] / self.max_res
img_temp = transform_sk.resize( img_temp, ( int((img_temp.shape[0] // downscale_coef)), int((img_temp.shape[1] // downscale_coef)) ), \
mode='constant', anti_aliasing=True, anti_aliasing_sigma=None, preserve_range=True )
if self.debug and index < 10:
print("After:", img_temp.shape[0], img_temp.shape[1])
img_temp = (img_temp).astype(np.uint8)
img_name = ("test/img_after_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
else:
if self.debug and index < 10:
print("Normal:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_normal_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
##
h_max = img_temp.shape[0] # y
w_max = img_temp.shape[1] # x
#ch_max = img_temp.shape[2] # ch
mask_temp = self.mask_u2n_list[index]
x, y, h, w = self.x_u2n_list[index], self.y_u2n_list[index], self.h_u2n_list[index], self.w_u2n_list[index]
## Image and Mask Padding:
if self.sm_vit or self.gt_bbox:
if self.padding:
p = 15 # extra space around bbox
else:
p = 0
x_min = x - p
if x_min < 0:
x_min = 0
x_max = x + w + p
if x_max > w_max:
x_max = w_max
y_min = y - p
if y_min < 0:
y_min = 0
y_max = y + h + p
if y_max > h_max:
y_max = h_max
if h_max <=1:
print("[WARNING] bad_h", h_max)
if w_max <=1:
print("[WARNING] bad_w", w_max)
if y_min >= y_max:
print("[WARNING] bad_y", "min:", y_min, "max:", y_max)
print("[WARNING] y:", y, "h:", h)
if x_min >= x_max:
print("[WARNING] bad_x", "min:", x_min, "max:", x_max)
print("[WARNING] x:", x, "w:", w)
##
## Crop with bbox:
if self.rand_crop:
#prob_rcrop = 0.25 # 0.07 # 0.3 # 0.5
#rand_crop_mask_temp = bool(random.random() < prob_rcrop)
#if rand_crop_mask_temp:
h_max_img = img_temp.shape[0]
w_max_img = img_temp.shape[1]
#h_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
#w_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
h_crop_max_img = h_crop_mid_img + h_crop_min_img
w_crop_max_img = w_crop_mid_img + w_crop_min_img
# Crop image with bbox:
if len(img_temp.shape) == 3:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
else:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
# Crop mask with bbox:
mask_temp = mask_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
else:
# Crop image with bbox:
if len(img_temp.shape) == 3:
if self.gt_parts:
for j in range(3):
img_temp[:, :, j] = img_temp[:, :, j] * mask_temp # Black mask
else:
#test_img_temp = test_img_temp[int(y):int(y + h), int(x):int(x + w), :] # h, w, ch
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max), :] # h, w, ch
else:
if self.gt_parts:
img_temp[:, :] = img_temp[:, :] * mask_temp # Black mask:
else:
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max)] # h, w
# Crop mask with bbox:
if self.sm_vit or self.gt_bbox:
mask_temp = mask_temp[int(y_min):int(y_max), int(x_min):int(x_max)]
##
if ( (img_temp.shape[0] != mask_temp.shape[0]) or (img_temp.shape[1] != mask_temp.shape[1]) ):
print("[WARNING] Image shape does not match mask shape for sample:", index, ". \t" , \
"Found shapes:", img_temp.shape, mask_temp.shape)
img = img_temp
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
if self.ds_name != "CUB":
img = (img).astype(np.uint8)
img = Image.fromarray(img, mode='RGB')
if self.debug and index < 10:
img_tem = transforms.ToTensor()(img)
img_name = ("test/img_bef" + str(index) + ".png")
save_image( img_tem, img_name)
## Image:
if self.transform is not None:
if self.is_train:
if not self.flip_mask_as_image: # normal
img = self.transform(img)
else:
if random.random() < 0.5:
flipped = False
img = self.transform(img)
else:
flipped = True
transform_img_flip = transforms.Compose([
#transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
#transforms.RandomCrop((args.img_size, args.img_size)),
transforms.Resize((self.img_size, self.img_size),Image.BILINEAR), # my for bbox
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add (FFVT)
transforms.RandomHorizontalFlip(p=1.0), # !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = transform_img_flip(img)
else:
img = self.transform(img)
if self.debug and index < 10:
img_name = ("test/img_aft" + str(index) + ".png")
save_image( img, img_name)
## Mask:
mask = mask_temp
if self.crop_mask:
h_max_im = mask.shape[0]
w_max_im = mask.shape[1]
h_crop_mid = int(h_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
w_crop_mid = int(w_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
cropped = np.ones_like(mask)
if self.mid_val:
cropped = cropped * 0.125 # (for 0.2)
h_crop_min = random.randint(0, (h_max_im - h_crop_mid)) # 40) #, 400-360) #, h - th)
w_crop_min = random.randint(0, (w_max_im - w_crop_mid)) # 40) #, 400-360) #, w - tw)
h_crop_max = h_crop_mid + h_crop_min
w_crop_max = w_crop_mid + w_crop_min
cropped[int(h_crop_min):int(h_crop_max), int(w_crop_min):int(w_crop_max)] = 0
mask = mask + cropped
if self.mid_val:
mask[mask > 1.1] = 1
else:
mask[mask > 1] = 1
mask = (mask * 255).astype(np.uint8)
mask = Image.fromarray(mask, mode='L')
if self.debug and index < 10:
mask_tem = transforms.ToTensor()(mask)
img_name = ("test/mask_bef" + str(index) + ".png")
save_image( mask_tem, img_name)
mask_size = int(self.img_size // 16)
if self.is_train:
if not self.flip_mask_as_image: # normal
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
if flipped:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
transforms.RandomHorizontalFlip(p=1.0),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()])
mask = transform_mask(mask)
if self.debug and index < 10:
img_name = ("test/mask_aft" + str(index) + ".png")
save_image(mask, img_name)
mask = torch.flatten(mask)
return img, mask
class CUB(Generic_smvit_DS):
    """CUB-200-2011 birds dataset with optional SM-ViT pre-processing.

    The heavy lifting (mask/bbox extraction, cropping, transforms) lives in
    the parent ``Generic_smvit_DS``; this class only parses the standard CUB
    metadata files and selects the train/test split.
    """

    def __init__(self, ds_name, root, is_train=True, data_len=None, transform=None, sm_vit=True, low_memory=True, img_size=400):
        self.ds_name = ds_name
        self.img_size = img_size
        self.max_res = int(self.img_size * 1.5)  # images above this resolution get downscaled by the parent
        # Pre-processing switches consumed by Generic_smvit_DS:
        self.full_ds = True              # pre-processing full dataset
        self.padding = True              # image and mask padding
        self.rand_crop = False           # if no other cropping
        self.flip_mask_as_image = True   # if False - turn on RandomHorizontalFlip in data_utils !!!
        self.rand_crop_im_mask = False   # randomly crop both image and mask
        self.crop_mask = False           # randomly crop mask only
        self.mid_val = False             # 3-state mask
        self.debug = False               # for debug info (dumps sample images into ./test)
        if self.debug:
            os.makedirs("./test")
        self.gt_bbox = False             # for other experiments
        self.gt_parts = False            # for other experiments
        self.sm_vit = sm_vit
        self.low_memory = low_memory
        # The three cropping modes are mutually exclusive (bools sum to at most 1).
        if (self.sm_vit + self.gt_bbox + self.gt_parts) > 1 :
            raise Exception("Only one cropping mode (SM-ViT, bbox, parts) can be chosen")
        self.root = root
        self.base_folder = "images"
        self.transform = transform
        self.is_train = is_train
        if self.is_train:
            self.mode = "Train"
        else:
            self.mode = "Test"
        # Standard CUB metadata: image paths, 1-based labels, train/test flags.
        img_txt_file = open(os.path.join(self.root, 'images.txt'))
        label_txt_file = open(os.path.join(self.root, 'image_class_labels.txt'))
        train_val_file = open(os.path.join(self.root, 'train_test_split.txt'))
        img_name_list = []
        for line in img_txt_file:
            img_name_list.append(line[:-1].split(' ')[-1])
        label_list = []
        for line in label_txt_file:
            label_list.append(int(line[:-1].split(' ')[-1]) - 1)  # shift labels to 0-based
        train_test_list = []
        for line in train_val_file:
            train_test_list.append(int(line[:-1].split(' ')[-1]))
        if self.is_train:
            self.file_list = [x for i, x in zip(train_test_list, img_name_list) if i]
            file_list_full = [ os.path.join(self.root, self.base_folder, x) for i, x in zip(train_test_list, img_name_list) if i]
        else:
            self.file_list = [x for i, x in zip(train_test_list, img_name_list) if not i]
            file_list_full = [ os.path.join(self.root, self.base_folder, x) for i, x in zip(train_test_list, img_name_list) if not i]
        if self.sm_vit:
            print(f"[INFO] Preparing {self.mode} shape_hw list...")
            # Collect (height, width) per image for the parent's preprocessing.
            # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
            # this code requires an old SciPy (or a shim) — confirm environment.
            shape_hw_list = []
            for img_name in self.file_list:
                img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, img_name))
                shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
                shape_hw_list.append(shape_hw_temp)
            if self.low_memory:
                # Keep only masks (presumably U2-Net-derived, per the `_u2n_`
                # naming) and bbox coords; images are re-read lazily per item.
                self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
                    super(CUB, self).generic_preprocess_lowMem(self.file_list,
                                                               file_list_full,
                                                               shape_hw_list
                                                               )
                del shape_hw_list
                del file_list_full
                gc.collect()  # free the temporary per-image lists right away
            else:
                # Materialise every pre-processed image and mask in memory.
                self.img, self.mask = \
                    super(CUB, self).generic_preprocess(self.file_list,
                                                        file_list_full,
                                                        shape_hw_list,
                                                        data_len,
                                                        train_test_list
                                                        )
        else:
            # Plain mode: raw images only, no masks.
            self.img = \
                [scipy.misc.imread(os.path.join(self.root, self.base_folder, file)) \
                    for file in self.file_list[:data_len]]
        if self.is_train:
            self.label = [x for i, x in zip(train_test_list, label_list) if i][:data_len]
        else:
            self.label = [x for i, x in zip(train_test_list, label_list) if not i][:data_len]
        self.imgname = [x for x in self.file_list[:data_len]]

    def __getitem__(self, index):
        """Return ``(img, target, mask)`` in SM-ViT mode, else ``(img, target)``."""
        if self.sm_vit:
            if self.low_memory:
                target, imgname = self.label[index], self.imgname[index]
                img, mask = super(CUB, self).generic_getitem_lowMem(index)
            else:
                img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
                img, mask = super(CUB, self).generic_getitem(index, img, mask)
            return img, target, mask
        else:
            img, target, imgname = self.img[index], self.label[index], self.imgname[index]
            if len(img.shape) == 2:
                # Grayscale image: replicate the single channel into RGB.
                img = np.stack([img] * 3, 2)
            img = Image.fromarray(img, mode='RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img, target

    def __len__(self):
        return len(self.label)
class dogs(Generic_smvit_DS): #(Dataset):
    """`Stanford Dogs <http://vision.stanford.edu/aditya86/ImageNetDogs/>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``omniglot-py`` exists.
        cropped (bool, optional): If true, the images will be cropped into the bounding box specified
            in the annotations
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset tar files from the internet and
            puts it in root directory. If the tar files are already downloaded, they are not
            downloaded again.
    """
    folder = 'dog'
    download_url_prefix = 'http://vision.stanford.edu/aditya86/ImageNetDogs'

    def __init__(self,
                 ds_name,
                 root,
                 is_train=True,
                 cropped=False,
                 transform=None,
                 target_transform=None,
                 download=False,
                 sm_vit=True,
                 low_memory=True,
                 img_size=400):
        self.ds_name = ds_name
        self.img_size = img_size
        self.max_res = int(self.img_size * 1.5)  # oversized images are downscaled in the shape pass below
        # Pre-processing switches consumed by Generic_smvit_DS:
        self.full_ds = True              # pre-processing full dataset
        self.padding = True              # image and mask padding
        self.rand_crop = False           # if no other cropping
        self.flip_mask_as_image = True   # if False - turn on RandomHorizontalFlip in data_utils !!!
        self.rand_crop_im_mask = False   # randomly crop both image and mask
        self.crop_mask = False           # randomly crop mask only
        self.mid_val = False             # 3-state mask
        self.debug = False               # for debug info (dumps sample images into ./test)
        if self.debug:
            os.makedirs("./test")
        self.sm_vit = sm_vit
        self.low_memory = low_memory
        # self.root = join(os.path.expanduser(root), self.folder)
        self.root = root
        self.base_folder = "Images"
        self.is_train = is_train
        if self.is_train:
            self.mode = "Train"
        else:
            self.mode = "Test"
        self.cropped = cropped
        self.transform = transform
        self.target_transform = target_transform
        if download:
            self.download()
        # split is a list of (annotation_path_without_ext, class_index) pairs.
        split = self.load_split()
        self.images_folder = join(self.root, 'Images')
        self.annotations_folder = join(self.root, 'Annotation')
        self._breeds = list_dir(self.images_folder)
        if self.cropped:
            # One entry per bounding box (an image may contain several dogs).
            self._breed_annotations = [[(annotation, box, idx)
                                        for box in self.get_boxes(join(self.annotations_folder, annotation))]
                                       for annotation, idx in split]
            self._flat_breed_annotations = sum(self._breed_annotations, [])
            self._flat_breed_images = [(annotation+'.jpg', idx) for annotation, box, idx in self._flat_breed_annotations]
        else:
            self._breed_images = [(annotation+'.jpg', idx) for annotation, idx in split]
            self._flat_breed_images = self._breed_images
        # Human-readable names for the 120 breeds, indexed by class id.
        self.classes = ["Chihuaha",
                        "Japanese Spaniel",
                        "Maltese Dog",
                        "Pekinese",
                        "Shih-Tzu",
                        "Blenheim Spaniel",
                        "Papillon",
                        "Toy Terrier",
                        "Rhodesian Ridgeback",
                        "Afghan Hound",
                        "Basset Hound",
                        "Beagle",
                        "Bloodhound",
                        "Bluetick",
                        "Black-and-tan Coonhound",
                        "Walker Hound",
                        "English Foxhound",
                        "Redbone",
                        "Borzoi",
                        "Irish Wolfhound",
                        "Italian Greyhound",
                        "Whippet",
                        "Ibizian Hound",
                        "Norwegian Elkhound",
                        "Otterhound",
                        "Saluki",
                        "Scottish Deerhound",
                        "Weimaraner",
                        "Staffordshire Bullterrier",
                        "American Staffordshire Terrier",
                        "Bedlington Terrier",
                        "Border Terrier",
                        "Kerry Blue Terrier",
                        "Irish Terrier",
                        "Norfolk Terrier",
                        "Norwich Terrier",
                        "Yorkshire Terrier",
                        "Wirehaired Fox Terrier",
                        "Lakeland Terrier",
                        "Sealyham Terrier",
                        "Airedale",
                        "Cairn",
                        "Australian Terrier",
                        "Dandi Dinmont",
                        "Boston Bull",
                        "Miniature Schnauzer",
                        "Giant Schnauzer",
                        "Standard Schnauzer",
                        "Scotch Terrier",
                        "Tibetan Terrier",
                        "Silky Terrier",
                        "Soft-coated Wheaten Terrier",
                        "West Highland White Terrier",
                        "Lhasa",
                        "Flat-coated Retriever",
                        "Curly-coater Retriever",
                        "Golden Retriever",
                        "Labrador Retriever",
                        "Chesapeake Bay Retriever",
                        "German Short-haired Pointer",
                        "Vizsla",
                        "English Setter",
                        "Irish Setter",
                        "Gordon Setter",
                        "Brittany",
                        "Clumber",
                        "English Springer Spaniel",
                        "Welsh Springer Spaniel",
                        "Cocker Spaniel",
                        "Sussex Spaniel",
                        "Irish Water Spaniel",
                        "Kuvasz",
                        "Schipperke",
                        "Groenendael",
                        "Malinois",
                        "Briard",
                        "Kelpie",
                        "Komondor",
                        "Old English Sheepdog",
                        "Shetland Sheepdog",
                        "Collie",
                        "Border Collie",
                        "Bouvier des Flandres",
                        "Rottweiler",
                        "German Shepard",
                        "Doberman",
                        "Miniature Pinscher",
                        "Greater Swiss Mountain Dog",
                        "Bernese Mountain Dog",
                        "Appenzeller",
                        "EntleBucher",
                        "Boxer",
                        "Bull Mastiff",
                        "Tibetan Mastiff",
                        "French Bulldog",
                        "Great Dane",
                        "Saint Bernard",
                        "Eskimo Dog",
                        "Malamute",
                        "Siberian Husky",
                        "Affenpinscher",
                        "Basenji",
                        "Pug",
                        "Leonberg",
                        "Newfoundland",
                        "Great Pyrenees",
                        "Samoyed",
                        "Pomeranian",
                        "Chow",
                        "Keeshond",
                        "Brabancon Griffon",
                        "Pembroke",
                        "Cardigan",
                        "Toy Poodle",
                        "Miniature Poodle",
                        "Standard Poodle",
                        "Mexican Hairless",
                        "Dingo",
                        "Dhole",
                        "African Hunting Dog"]
        data_len = None
        if self.sm_vit:
            print(f"[INFO] Preparing {self.mode} shape_hw list...")
            # First pass: read every image once to record its (h, w), already
            # clamped to max_res so the parent preprocessing sees final sizes.
            # NOTE(review): scipy.misc.imread requires an old SciPy (< 1.2).
            shape_hw_list = []
            self.file_list = []
            file_list_full = []
            for image_name, target_class in self._flat_breed_images:
                img_name = join(self.images_folder, image_name)
                img_temp = scipy.misc.imread(os.path.join(img_name))
                shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
                if (shape_hw_temp[0] > self.max_res) or (shape_hw_temp[1] > self.max_res):
                    # Scale the longer side down to max_res, keep aspect ratio.
                    if shape_hw_temp[0] > shape_hw_temp[1]:
                        downscale_coef = shape_hw_temp[0] / self.max_res
                    else:
                        downscale_coef = shape_hw_temp[1] / self.max_res
                    shape_hw_temp[0] = int(shape_hw_temp[0] // downscale_coef)
                    shape_hw_temp[1] = int(shape_hw_temp[1] // downscale_coef)
                shape_hw_list.append(shape_hw_temp)
                self.file_list.append(image_name)
                file_list_full.append(img_name)
            if self.low_memory:
                # Keep only masks and bbox coords; images are re-read per item.
                self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
                    super(dogs, self).generic_preprocess_lowMem(self.file_list,
                                                                file_list_full,
                                                                shape_hw_list
                                                                )
                del shape_hw_list
                del file_list_full
                gc.collect()  # free the temporary per-image lists right away
            else:
                self.img, self.mask = \
                    super(dogs, self).generic_preprocess(self.file_list,
                                                         file_list_full,
                                                         shape_hw_list,
                                                         data_len
                                                         )
            # _flat_breed_images holds (image_name, class_idx) pairs, so the
            # unpacking below keeps the class index (x) and drops the name (i).
            if self.is_train:
                self.label = [x for i, x in self._flat_breed_images][:data_len]
            else:
                self.label = [x for i, x in self._flat_breed_images][:data_len]
            self.imgname = [x for x in self.file_list[:data_len]]

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        if self.sm_vit:
            if self.low_memory:
                target, imgname = self.label[index], self.imgname[index]
                img, mask = super(dogs, self).generic_getitem_lowMem(index)
            else:
                img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
                img, mask = super(dogs, self).generic_getitem(index, img, mask)
            return img, target, mask
        else:
            # Plain mode: PIL load, optional bbox crop, optional transforms.
            image_name, target = self._flat_breed_images[index]
            image_path = join(self.images_folder, image_name)
            img = Image.open(image_path).convert('RGB')
            if self.cropped:
                img = img.crop(self._flat_breed_annotations[index][1])
            if self.transform:
                img = self.transform(img)
            if self.target_transform:
                target = self.target_transform(target)
            return img, target

    def __len__(self):
        return len(self._flat_breed_images)

    def download(self):
        """Download and extract the Stanford Dogs tarballs if not present."""
        import tarfile
        if os.path.exists(join(self.root, 'Images')) and os.path.exists(join(self.root, 'Annotation')):
            # 120 breed sub-directories in each => assume a complete download.
            if len(os.listdir(join(self.root, 'Images'))) == len(os.listdir(join(self.root, 'Annotation'))) == 120:
                print('Files already downloaded and verified')
                return
        for filename in ['images', 'annotation', 'lists']:
            tar_filename = filename + '.tar'
            url = self.download_url_prefix + '/' + tar_filename
            download_url(url, self.root, tar_filename, None)
            print('Extracting downloaded file: ' + join(self.root, tar_filename))
            with tarfile.open(join(self.root, tar_filename), 'r') as tar_file:
                tar_file.extractall(self.root)
            os.remove(join(self.root, tar_filename))

    @staticmethod
    def get_boxes(path):
        """Parse one annotation XML and return its [xmin, ymin, xmax, ymax] boxes."""
        import xml.etree.ElementTree
        e = xml.etree.ElementTree.parse(path).getroot()
        boxes = []
        for objs in e.iter('object'):
            boxes.append([int(objs.find('bndbox').find('xmin').text),
                          int(objs.find('bndbox').find('ymin').text),
                          int(objs.find('bndbox').find('xmax').text),
                          int(objs.find('bndbox').find('ymax').text)])
        return boxes

    def load_split(self):
        """Load the train or test split from the .mat files under ``splits/``."""
        if self.is_train:
            # split = scipy.io.loadmat(join(self.root, 'train_list.mat'))['annotation_list']
            # labels = scipy.io.loadmat(join(self.root, 'train_list.mat'))['labels']
            split = scipy.io.loadmat(join(self.root, 'splits/train_list.mat'))['annotation_list']
            labels = scipy.io.loadmat(join(self.root, 'splits/train_list.mat'))['labels']
        else:
            # split = scipy.io.loadmat(join(self.root, 'test_list.mat'))['annotation_list']
            # labels = scipy.io.loadmat(join(self.root, 'test_list.mat'))['labels']
            split = scipy.io.loadmat(join(self.root, 'splits/test_list.mat'))['annotation_list']
            labels = scipy.io.loadmat(join(self.root, 'splits/test_list.mat'))['labels']
        split = [item[0][0] for item in split]
        labels = [item[0]-1 for item in labels]  # .mat labels are 1-based
        return list(zip(split, labels))

    def stats(self):
        """Print and return the per-class sample counts."""
        counts = {}
        for index in range(len(self._flat_breed_images)):
            image_name, target_class = self._flat_breed_images[index]
            if target_class not in counts.keys():
                counts[target_class] = 1
            else:
                counts[target_class] += 1
        print("%d samples spanning %d classes (avg %f per class)"%(len(self._flat_breed_images), len(counts.keys()), float(len(self._flat_breed_images))/float(len(counts.keys()))))
        return counts
class NABirds(Generic_smvit_DS): #(Dataset):
    """`NABirds <https://dl.allaboutbirds.org/nabirds>`_ Dataset.
    Args:
        root (string): Root directory of the dataset.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    #base_folder = 'nabirds/images'

    def __init__(self, ds_name, root, is_train=True, data_len=None, transform=None, sm_vit=True, low_memory=True, img_size=448):
        self.ds_name = ds_name
        self.img_size = img_size
        self.max_res = int(self.img_size * 1.25) # 1.5
        # Pre-processing switches consumed by Generic_smvit_DS:
        self.full_ds = True              # pre-processing full dataset
        self.padding = True              # image and mask padding
        self.rand_crop = False           # if no other cropping
        self.flip_mask_as_image = True   # if False - turn on RandomHorizontalFlip in data_utils !!!
        self.rand_crop_im_mask = False   # randomly crop both image and mask
        self.crop_mask = False           # randomly crop mask only
        self.mid_val = False             # 3-state mask
        self.debug = False               # for debug info (dumps sample images into ./test)
        if self.debug:
            os.makedirs("./test")
        self.sm_vit = sm_vit
        self.low_memory = low_memory
        dataset_path = os.path.join(root)
        self.root = root
        self.base_folder = "images"
        self.loader = default_loader
        self.transform = transform
        self.is_train = is_train
        if self.is_train:
            self.mode = "Train"
        else:
            self.mode = "Test"
        # NABirds metadata is read with pandas (space-separated id/value files).
        image_paths = pd.read_csv(os.path.join(dataset_path, 'images.txt'),
                                  sep=' ', names=['img_id', 'filepath'])
        image_class_labels = pd.read_csv(os.path.join(dataset_path, 'image_class_labels.txt'),
                                         sep=' ', names=['img_id', 'target'])
        # Since the raw labels are non-continuous, map them to new ones
        self.label_map = get_continuous_class_map(image_class_labels['target'])
        train_test_split = pd.read_csv(os.path.join(dataset_path, 'train_test_split.txt'),
                                       sep=' ', names=['img_id', 'is_training_img'])
        data = image_paths.merge(image_class_labels, on='img_id')
        self.data = data.merge(train_test_split, on='img_id')
        # Load in the train / test split
        if self.is_train:
            self.data = self.data[self.data.is_training_img == 1]
        else:
            self.data = self.data[self.data.is_training_img == 0]
        # Load in the class data
        self.class_names = load_class_names(dataset_path)
        self.class_hierarchy = load_hierarchy(dataset_path)
        self.data_len = None
        if self.sm_vit:
            print(f"[INFO] Preparing {self.mode} shape_hw list...")
            # First pass: record per-image (h, w), clamped to max_res, so the
            # parent preprocessing sees the final sizes.
            # NOTE(review): scipy.misc.imread requires an old SciPy (< 1.2).
            shape_hw_list = []
            self.file_list = []
            file_list_full = []
            for sample in self.data.iloc:
                image_name = sample.filepath
                img_name_full = join(self.root, self.base_folder, image_name)
                img_temp = scipy.misc.imread(os.path.join(img_name_full))
                shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
                if (shape_hw_temp[0] > self.max_res) or (shape_hw_temp[1] > self.max_res):
                    # Scale the longer side down to max_res, keep aspect ratio.
                    if shape_hw_temp[0] > shape_hw_temp[1]:
                        downscale_coef = shape_hw_temp[0] / self.max_res
                    else:
                        downscale_coef = shape_hw_temp[1] / self.max_res
                    shape_hw_temp[0] = int(shape_hw_temp[0] // downscale_coef)
                    shape_hw_temp[1] = int(shape_hw_temp[1] // downscale_coef)
                shape_hw_list.append(shape_hw_temp)
                self.file_list.append(image_name)
                file_list_full.append(img_name_full)
            if self.low_memory:
                # Keep only masks and bbox coords; images are re-read per item.
                self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
                    super(NABirds, self).generic_preprocess_lowMem(self.file_list,
                                                                   file_list_full,
                                                                   shape_hw_list
                                                                   )
                del shape_hw_list
                del file_list_full
                gc.collect()  # free the temporary per-image lists right away
            else:
                self.img, self.mask = \
                    super(NABirds, self).generic_preprocess(self.file_list,
                                                            file_list_full,
                                                            shape_hw_list,
                                                            self.data_len
                                                            )
            # Targets are remapped through label_map to contiguous indices.
            if self.is_train:
                self.label = [ (self.label_map[x.target]) for x in self.data.iloc ][:self.data_len]
            else:
                self.label = [ (self.label_map[x.target]) for x in self.data.iloc ][:self.data_len]
            self.imgname = [x for x in self.file_list[:self.data_len]]

    def __getitem__(self, index):
        """Return ``(img, target, mask)`` in SM-ViT mode, else ``(img, target)``."""
        if self.sm_vit:
            if self.low_memory:
                target, imgname = self.label[index], self.imgname[index]
                img, mask = super(NABirds, self).generic_getitem_lowMem(index)
            else:
                img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
                img, mask = super(NABirds, self).generic_getitem(index, img, mask)
            return img, target, mask
        else:
            sample = self.data.iloc[index]
            path = os.path.join(self.root, self.base_folder, sample.filepath)
            target = self.label_map[sample.target]
            img = self.loader(path)
            if self.transform is not None:
                img = self.transform(img)
            return img, target

    def __len__(self):
        return len(self.data)
def get_continuous_class_map(class_labels):
    """Map (possibly sparse) raw label ids to contiguous indices ``0..N-1``.

    Args:
        class_labels: iterable of hashable raw label ids (duplicates allowed).

    Returns:
        dict mapping each distinct raw label to its contiguous index.

    Fix: the original enumerated a raw ``set``, whose iteration order is not
    guaranteed, so the label->index mapping could differ between runs.
    Sorting the distinct labels makes the mapping deterministic.
    """
    label_set = set(class_labels)
    return {k: i for i, k in enumerate(sorted(label_set))}
def load_class_names(dataset_path=''):
    """Read ``classes.txt`` under *dataset_path* and map class id -> name.

    Each line is ``<class_id> <name tokens...>``; the name may contain spaces.
    """
    class_names = {}
    classes_file = os.path.join(dataset_path, 'classes.txt')
    with open(classes_file) as handle:
        for raw_line in handle:
            tokens = raw_line.strip().split()
            class_names[tokens[0]] = ' '.join(tokens[1:])
    return class_names
def load_hierarchy(dataset_path=''):
    """Read ``hierarchy.txt`` under *dataset_path* and map child id -> parent id.

    Each line must contain exactly two whitespace-separated tokens.
    """
    hierarchy = {}
    with open(os.path.join(dataset_path, 'hierarchy.txt')) as handle:
        for raw_line in handle:
            child_id, parent_id = raw_line.strip().split()
            hierarchy[child_id] = parent_id
    return hierarchy
### Not optimised datasets:
class INat2017(VisionDataset):
    """`iNaturalist 2017 <https://github.com/visipedia/inat_comp/blob/master/2017/README.md>`_ Dataset.
    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'train_val_images/'
    # (url, archive filename, md5) for the image and annotation archives.
    file_list = {
        'imgs': ('https://storage.googleapis.com/asia_inat_data/train_val/train_val_images.tar.gz',
                 'train_val_images.tar.gz',
                 '7c784ea5e424efaec655bd392f87301f'),
        'annos': ('https://storage.googleapis.com/asia_inat_data/train_val/train_val2017.zip',
                  'train_val2017.zip',
                  '444c835f6459867ad69fcb36478786e7')
    }

    def __init__(self, root, split='train', transform=None, target_transform=None, download=False):
        super(INat2017, self).__init__(root, transform=transform, target_transform=target_transform)
        self.loader = default_loader
        self.split = verify_str_arg(split, "split", ("train", "val",))
        if self._check_exists():
            print('Files already downloaded and verified.')
        elif download:
            # Only download archives that are not already on disk, then extract.
            if not (os.path.exists(os.path.join(self.root, self.file_list['imgs'][1]))
                    and os.path.exists(os.path.join(self.root, self.file_list['annos'][1]))):
                print('Downloading...')
                self._download()
            print('Extracting...')
            extract_archive(os.path.join(self.root, self.file_list['imgs'][1]))
            extract_archive(os.path.join(self.root, self.file_list['annos'][1]))
        else:
            raise RuntimeError(
                'Dataset not found. You can use download=True to download it.')
        # COCO-style annotation JSON: parallel 'annotations' and 'images' lists.
        anno_filename = split + '2017.json'
        with open(os.path.join(self.root, anno_filename), 'r') as fp:
            all_annos = json.load(fp)
        self.annos = all_annos['annotations']
        self.images = all_annos['images']

    def __getitem__(self, index):
        # annos[index] corresponds to images[index] (parallel lists).
        path = os.path.join(self.root, self.images[index]['file_name'])
        target = self.annos[index]['category_id']
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target

    def __len__(self):
        return len(self.images)

    def _check_exists(self):
        """Return True if the extracted image folder is already present."""
        return os.path.exists(os.path.join(self.root, self.base_folder))

    def _download(self):
        """Download both archives and verify their md5 checksums."""
        for url, filename, md5 in self.file_list.values():
            download_url(url, root=self.root, filename=filename)
            if not check_integrity(os.path.join(self.root, filename), md5):
                raise RuntimeError("File not found or corrupted.")
class CarsDataset(Dataset):
    """Stanford Cars dataset backed by MATLAB (.mat) annotation files."""

    def __init__(self, mat_anno, data_dir, car_names, cleaned=None, transform=None):
        """
        Args:
            mat_anno (string): Path to the MATLAB annotation file.
            data_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.full_data_set = io.loadmat(mat_anno)
        # annotations come back as a 1xN struct array; take the row.
        self.car_annotations = self.full_data_set['annotations']
        self.car_annotations = self.car_annotations[0]
        if cleaned is not None:
            # Keep only annotations whose file name appears in the whitelist
            # (the annotation's last field is the image file name).
            cleaned_annos = []
            print("Cleaning up data set (only take pics with rgb chans)...")
            clean_files = np.loadtxt(cleaned, dtype=str)
            for c in self.car_annotations:
                if c[-1][0] in clean_files:
                    cleaned_annos.append(c)
            self.car_annotations = cleaned_annos
        self.car_names = scipy.io.loadmat(car_names)['class_names']
        self.car_names = np.array(self.car_names[0])
        self.data_dir = data_dir
        self.transform = transform

    def __len__(self):
        return len(self.car_annotations)

    def __getitem__(self, idx):
        img_name = os.path.join(self.data_dir, self.car_annotations[idx][-1][0])
        image = Image.open(img_name).convert('RGB')
        # Second-to-last annotation field is the class; labels on disk are
        # 1-based, so shift to 0-based.
        car_class = self.car_annotations[idx][-2][0][0]
        car_class = torch.from_numpy(np.array(car_class.astype(np.float32))).long() - 1
        # NOTE(review): Stanford Cars has 196 classes; `assert` is stripped
        # under -O, so this is a debug check only.
        assert car_class < 196
        if self.transform:
            image = self.transform(image)
        # return image, car_class, img_name
        return image, car_class

    def map_class(self, id):
        # `id` is a 1-based class id; index into the stored class-name array.
        id = np.ravel(id)
        ret = self.car_names[id - 1][0][0]
        return ret

    def show_batch(self, img_batch, class_batch):
        # Debug helper: render a CHW batch as HWC images with class titles.
        for i in range(img_batch.shape[0]):
            ax = plt.subplot(1, img_batch.shape[0], i + 1)
            title_str = self.map_class(int(class_batch[i]))
            img = np.transpose(img_batch[i, ...], (1, 2, 0))
            ax.imshow(img)
            ax.set_title(title_str.__str__(), {'fontsize': 5})
        plt.tight_layout()
def make_dataset(dir, image_ids, targets):
    """Pair every image id with its target as ``(path-to-jpg, target)`` tuples.

    Paths are built as ``<dir>/data/images/<id>.jpg`` with ``~`` expanded.
    """
    assert(len(image_ids) == len(targets))
    base_dir = os.path.expanduser(dir)
    return [
        (os.path.join(base_dir, 'data', 'images', '%s.jpg' % image_id), target)
        for image_id, target in zip(image_ids, targets)
    ]
def find_classes(classes_file):
    """Parse a classes file of ``<image_id> <class name...>`` lines.

    Args:
        classes_file: path to the text file to parse.

    Returns:
        ``(image_ids, targets, classes, class_to_idx)`` where ``targets``
        holds the integer class index of each image and ``classes`` is the
        sorted array of distinct class-name strings.

    Fix: the file is now opened with a context manager so the handle is
    closed even if parsing raises (the original open/close pair leaked the
    handle on error).
    """
    image_ids = []
    targets = []
    # split(' ')/join intentionally keep the trailing newline inside the class
    # name, exactly as the original parser did.
    with open(classes_file, 'r') as f:
        for line in f:
            split_line = line.split(' ')
            image_ids.append(split_line[0])
            targets.append(' '.join(split_line[1:]))
    # index class names
    classes = np.unique(targets)
    class_to_idx = {classes[i]: i for i in range(len(classes))}
    targets = [class_to_idx[c] for c in targets]
    return (image_ids, targets, classes, class_to_idx)
class FGVC_aircraft():
    """FGVC-Aircraft dataset reading ``data/train.txt`` / ``data/test.txt``.

    Each label-file line is ``<image_file> <1-based class id>``; labels are
    shifted to 0-based on load.

    Fixes over the original:
    - the two label files are opened with context managers (they were never
      closed);
    - ``__getitem__``'s train/test branches were byte-for-byte duplicates
      except for the source list, so the common body is shared now.
    """

    def __init__(self, root, is_train=True, data_len=None, transform=None):
        self.root = root
        self.is_train = is_train
        self.transform = transform
        # Train and test images live in the same folder; only the label
        # files differ.
        img_path = os.path.join(self.root, 'data', 'images')
        train_img_label = []
        with open(os.path.join(self.root, 'data', 'train.txt')) as train_label_file:
            for line in train_label_file:
                train_img_label.append([os.path.join(img_path, line[:-1].split(' ')[0]),
                                        int(line[:-1].split(' ')[1]) - 1])
        test_img_label = []
        with open(os.path.join(self.root, 'data', 'test.txt')) as test_label_file:
            for line in test_label_file:
                test_img_label.append([os.path.join(img_path, line[:-1].split(' ')[0]),
                                       int(line[:-1].split(' ')[1]) - 1])
        self.train_img_label = train_img_label[:data_len]
        self.test_img_label = test_img_label[:data_len]

    def __getitem__(self, index):
        """Return ``(img, target)`` for the split selected at construction."""
        records = self.train_img_label if self.is_train else self.test_img_label
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this
        # requires an old SciPy (or an imageio shim) — confirm environment.
        img, target = scipy.misc.imread(records[index][0]), records[index][1]
        if len(img.shape) == 2:
            # Grayscale image: replicate the single channel into RGB.
            img = np.stack([img] * 3, 2)
        img = Image.fromarray(img, mode='RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        if self.is_train:
            return len(self.train_img_label)
        else:
            return len(self.test_img_label)
| 67,209 | 39.659407 | 180 | py |
sm-vit | sm-vit-main/utils/scheduler.py | import logging
import math
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
    """Schedule that keeps the optimizer's base learning rate unchanged."""

    def __init__(self, optimizer, last_epoch=-1):
        def constant_factor(_step):
            # Multiplier applied to the base LR: always 1.
            return 1.0
        super(ConstantLRSchedule, self).__init__(optimizer, constant_factor, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
    """LR multiplier that ramps linearly 0 -> 1 over ``warmup_steps`` steps,
    then stays at 1 forever."""

    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Past warmup the multiplier is pinned at 1.
        if step >= self.warmup_steps:
            return 1.
        # Linear ramp during warmup (max() guards against warmup_steps == 0).
        return float(step) / float(max(1.0, self.warmup_steps))
class WarmupLinearSchedule(LambdaLR):
    """Linear warmup followed by linear decay.

    The LR multiplier ramps 0 -> 1 over `warmup_steps` steps, then decays
    1 -> 0 over the remaining `t_total - warmup_steps` steps.
    """

    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            # Warmup phase: linear ramp (max() guards warmup_steps == 0).
            return float(step) / float(max(1, self.warmup_steps))
        # Decay phase: fraction of the decay span still remaining, clamped at 0.
        remaining = float(self.t_total - step)
        decay_span = float(max(1.0, self.t_total - self.warmup_steps))
        return max(0.0, remaining / decay_span)
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup followed by cosine decay.

    The LR multiplier ramps 0 -> 1 over `warmup_steps` steps, then follows a
    cosine curve from 1 down to 0 over the remaining `t_total - warmup_steps`
    steps. With `cycles` != 0.5 the cosine completes more (or less) than half
    a period over the decay span.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            # Warmup phase: linear ramp.
            return float(step) / float(max(1.0, self.warmup_steps))
        # Fraction of the decay span completed so far, in [0, 1].
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        cosine = math.cos(math.pi * float(self.cycles) * 2.0 * progress)
        # Clamp at 0 so extra cycles never produce a negative factor.
        return max(0.0, 0.5 * (1. + cosine))
| 2,799 | 42.75 | 117 | py |
sm-vit | sm-vit-main/utils/dist_util.py | import torch.distributed as dist
def get_rank():
    """Return this process's distributed rank, or 0 when not distributed."""
    # Short-circuit: never query the rank unless the process group is up.
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
def get_world_size():
    """Return the number of distributed processes, or 1 when not distributed."""
    # Single-process fallback when the process group is unavailable/uninitialized.
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
def is_main_process():
    """Return True iff this process is rank 0 (the coordinating process)."""
    rank = get_rank()
    return rank == 0
def format_step(step):
    """Render a step descriptor as a log prefix.

    Strings pass through unchanged. For a sequence, up to the first three
    elements are formatted as epoch / training iteration / validation
    iteration (each followed by a trailing space); extras are ignored.
    """
    if isinstance(step, str):
        return step
    templates = (
        "Training Epoch: {} ",
        "Training Iteration: {} ",
        "Validation Iteration: {} ",
    )
    # zip() truncates to the shorter of the two, so short tuples are fine.
    return "".join(t.format(v) for t, v in zip(templates, step))
| 711 | 21.967742 | 56 | py |
sm-vit | sm-vit-main/U2Net/data_loader.py | # data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
    """Resize a sample's image and label to a fixed square size.

    Note: the resize always produces an (output_size x output_size) square.
    The original also computed aspect-ratio-preserving dimensions
    (new_h/new_w) that were never passed to resize; that dead code has been
    removed — behavior is unchanged.
    """

    def __init__(self, output_size):
        # Tuple is accepted by the assert but only an int produces a valid
        # resize shape below (same as the original code).
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        imidx, image, label = sample['imidx'], sample['image'], sample['label']

        # Image resize also maps intensities from [0,255] to [0,1].
        img = transform.resize(image, (self.output_size, self.output_size),
                               mode='constant', anti_aliasing=True,
                               anti_aliasing_sigma=None)
        # order=0 (nearest) + preserve_range keeps label values intact.
        lbl = transform.resize(label, (self.output_size, self.output_size),
                               mode='constant', order=0, preserve_range=True,
                               anti_aliasing=True, anti_aliasing_sigma=None)

        return {'imidx': imidx, 'image': img, 'label': lbl}
class Rescale(object):
    """Random vertical flip followed by an aspect-preserving rescale.

    With probability 0.5 the sample is flipped along the first (row) axis;
    the image is then resized so its shorter side equals `output_size`
    (for an int), or to the exact size given by a tuple.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        imidx = sample['imidx']
        image = sample['image']
        label = sample['label']

        # Coin flip: mirror image and label together along the row axis.
        if random.random() >= 0.5:
            image = image[::-1]
            label = label[::-1]

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # Scale the shorter side to output_size, preserving aspect ratio.
            if h > w:
                target = (self.output_size * h / w, self.output_size)
            else:
                target = (self.output_size, self.output_size * w / h)
        else:
            target = self.output_size
        target = (int(target[0]), int(target[1]))

        # Image resize rescales intensities to [0, 1]; the label resize keeps
        # the original values (nearest neighbour + preserve_range).
        img = transform.resize(image, target, mode='constant',
                               anti_aliasing=True, anti_aliasing_sigma=None)
        lbl = transform.resize(label, target, mode='constant', order=0,
                               preserve_range=True, anti_aliasing=True,
                               anti_aliasing_sigma=None)

        return {'imidx': imidx, 'image': img, 'label': lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
    """Convert ndarrays in sample to Tensors.

    The image is rescaled to [0, 1], normalized with ImageNet mean/std and
    converted to CHW layout; the label is max-normalized (unless empty) and
    transposed to CHW.
    """

    def __call__(self, sample):
        imidx, image, label = sample['imidx'], sample['image'], sample['label']

        tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
        image = image / np.max(image)
        # Avoid dividing by ~0 for empty (all-background) labels.
        if np.max(label) < 1e-6:
            label = label
        else:
            label = label / np.max(label)

        if image.shape[2] == 1:
            # Grayscale: replicate the single channel before normalizing.
            tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
            tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
            tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
        else:
            # ImageNet per-channel mean/std normalization.
            tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
            tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
            tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225

        # HWC -> CHW. (The original's zero-filled tmpLbl buffer and its
        # per-channel copy were dead stores — tmpLbl was unconditionally
        # rebound below — so they are removed here; behavior is unchanged.)
        tmpImg = tmpImg.transpose((2, 0, 1))
        tmpLbl = label.transpose((2, 0, 1))

        return {'imidx': torch.from_numpy(imidx),
                'image': torch.from_numpy(tmpImg),
                'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
    """Convert ndarrays in sample to Tensors, with a selectable color space.

    flag == 0 (default): RGB with ImageNet mean/std normalization.
    flag == 1: Lab color space, per-channel min-max scaling then standardization.
    flag == 2: six channels (RGB + Lab), each min-max scaled then standardized.
    The label is max-normalized (unless empty) and transposed to CHW.
    """

    def __init__(self, flag=0):
        self.flag = flag

    def __call__(self, sample):
        imidx, image, label = sample['imidx'], sample['image'], sample['label']

        # Avoid dividing by ~0 for empty (all-background) labels.
        if np.max(label) < 1e-6:
            label = label
        else:
            label = label / np.max(label)

        # change the color space
        if self.flag == 2:  # with rgb and Lab colors
            tmpImg = np.zeros((image.shape[0], image.shape[1], 6))
            tmpImgt = np.zeros((image.shape[0], image.shape[1], 3))
            if image.shape[2] == 1:
                # Grayscale: replicate the single channel.
                tmpImgt[:, :, 0] = image[:, :, 0]
                tmpImgt[:, :, 1] = image[:, :, 0]
                tmpImgt[:, :, 2] = image[:, :, 0]
            else:
                tmpImgt = image
            tmpImgtl = color.rgb2lab(tmpImgt)

            # nomalize image to range [0,1]
            tmpImg[:, :, 0] = (tmpImgt[:, :, 0] - np.min(tmpImgt[:, :, 0])) / (np.max(tmpImgt[:, :, 0]) - np.min(tmpImgt[:, :, 0]))
            tmpImg[:, :, 1] = (tmpImgt[:, :, 1] - np.min(tmpImgt[:, :, 1])) / (np.max(tmpImgt[:, :, 1]) - np.min(tmpImgt[:, :, 1]))
            tmpImg[:, :, 2] = (tmpImgt[:, :, 2] - np.min(tmpImgt[:, :, 2])) / (np.max(tmpImgt[:, :, 2]) - np.min(tmpImgt[:, :, 2]))
            tmpImg[:, :, 3] = (tmpImgtl[:, :, 0] - np.min(tmpImgtl[:, :, 0])) / (np.max(tmpImgtl[:, :, 0]) - np.min(tmpImgtl[:, :, 0]))
            tmpImg[:, :, 4] = (tmpImgtl[:, :, 1] - np.min(tmpImgtl[:, :, 1])) / (np.max(tmpImgtl[:, :, 1]) - np.min(tmpImgtl[:, :, 1]))
            tmpImg[:, :, 5] = (tmpImgtl[:, :, 2] - np.min(tmpImgtl[:, :, 2])) / (np.max(tmpImgtl[:, :, 2]) - np.min(tmpImgtl[:, :, 2]))

            # Standardize each of the six channels.
            tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
            tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
            tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])
            tmpImg[:, :, 3] = (tmpImg[:, :, 3] - np.mean(tmpImg[:, :, 3])) / np.std(tmpImg[:, :, 3])
            tmpImg[:, :, 4] = (tmpImg[:, :, 4] - np.mean(tmpImg[:, :, 4])) / np.std(tmpImg[:, :, 4])
            tmpImg[:, :, 5] = (tmpImg[:, :, 5] - np.mean(tmpImg[:, :, 5])) / np.std(tmpImg[:, :, 5])

        elif self.flag == 1:  # with Lab color
            tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
            if image.shape[2] == 1:
                tmpImg[:, :, 0] = image[:, :, 0]
                tmpImg[:, :, 1] = image[:, :, 0]
                tmpImg[:, :, 2] = image[:, :, 0]
            else:
                tmpImg = image
            tmpImg = color.rgb2lab(tmpImg)

            # Min-max scale, then standardize, each Lab channel.
            tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.min(tmpImg[:, :, 0])) / (np.max(tmpImg[:, :, 0]) - np.min(tmpImg[:, :, 0]))
            tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.min(tmpImg[:, :, 1])) / (np.max(tmpImg[:, :, 1]) - np.min(tmpImg[:, :, 1]))
            tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.min(tmpImg[:, :, 2])) / (np.max(tmpImg[:, :, 2]) - np.min(tmpImg[:, :, 2]))

            tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
            tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
            tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])

        else:  # with rgb color
            tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
            image = image / np.max(image)
            if image.shape[2] == 1:
                # Grayscale: replicate the single channel before normalizing.
                tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
                tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
                tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
            else:
                # ImageNet per-channel mean/std normalization.
                tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
                tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
                tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225

        # HWC -> CHW. (The original's zero-filled tmpLbl buffer and its
        # per-channel copy were dead stores — tmpLbl was unconditionally
        # rebound below — so they are removed here; behavior is unchanged.)
        tmpImg = tmpImg.transpose((2, 0, 1))
        tmpLbl = label.transpose((2, 0, 1))

        return {'imidx': torch.from_numpy(imidx),
                'image': torch.from_numpy(tmpImg),
                'label': torch.from_numpy(tmpLbl)}
class SalObjDataset(Dataset):
    """Salient-object dataset pairing image files with (optional) mask files.

    When `lbl_name_list` is empty, an all-zero mask of the image's shape is
    substituted (inference mode). Samples are dicts with 'imidx', 'image'
    (HxWxC) and 'label' (HxWx1), optionally passed through `transform`.
    """

    def __init__(self, img_name_list, lbl_name_list, transform=None):
        self.image_name_list = img_name_list
        self.label_name_list = lbl_name_list
        self.transform = transform

    def __len__(self):
        return len(self.image_name_list)

    def __getitem__(self, idx):
        image = io.imread(self.image_name_list[idx])
        imidx = np.array([idx])

        # No labels supplied -> zero mask matching the image's shape.
        if len(self.label_name_list) == 0:
            label_3 = np.zeros(image.shape)
        else:
            label_3 = io.imread(self.label_name_list[idx])

        # Collapse a possibly multi-channel mask to a single plane.
        label = np.zeros(label_3.shape[0:2])
        if len(label_3.shape) == 3:
            label = label_3[:, :, 0]
        elif len(label_3.shape) == 2:
            label = label_3

        # Ensure both arrays end up HxWxC.
        if len(image.shape) == 3 and len(label.shape) == 2:
            label = label[:, :, np.newaxis]
        elif len(image.shape) == 2 and len(label.shape) == 2:
            image = image[:, :, np.newaxis]
            label = label[:, :, np.newaxis]

        sample = {'imidx': imidx, 'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
sm-vit | sm-vit-main/U2Net/u2net_test.py | import os
from re import X
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
from .data_loader import RescaleT
from .data_loader import ToTensor
from .data_loader import ToTensorLab
from .data_loader import SalObjDataset
from .model import U2NET # full size version 173.6 MB
from .model import U2NETP # small version u2net 4.7 MB
#import cv2
import copy
from torchvision.utils import save_image
# normalize the predicted SOD probability map
def normPRED(d):
    """Min-max normalize a prediction tensor to the [0, 1] range."""
    lo = torch.min(d)
    hi = torch.max(d)
    return (d - lo) / (hi - lo)
def save_output(image_name, pred, d_dir):
    """Resize a predicted saliency map to the source image's size and save it as PNG.

    The output filename is the source basename with its extension replaced
    by '.png', written into directory prefix `d_dir`.
    """
    heat = pred.squeeze().cpu().data.numpy()
    im = Image.fromarray(heat * 255).convert('RGB')

    img_name = image_name.split(os.sep)[-1]
    # Re-read the source only to recover its spatial size (PIL wants (w, h)).
    image = io.imread(image_name)
    imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)
    pb_np = np.array(imo)

    # Strip the extension while keeping any interior dots in the name.
    stem_parts = img_name.split(".")[0:-1]
    imidx = stem_parts[0]
    for piece in stem_parts[1:]:
        imidx = imidx + "." + piece

    imo.save(d_dir + imidx + '.png')
def mask_hw(full_ds=True, img_path=None, shape_hw=None):
    """Run U^2-Net saliency inference and derive a background mask + bounding box per image.

    Parameters
    ----------
    full_ds : when True, `img_path` is a list of image paths and `shape_hw`
        a parallel list of per-image shapes; when False, `img_path` is a
        glob pattern (or None to use the bundled U2Net/images folder).
    img_path : list of paths, glob pattern, or None (see above).
    shape_hw : per-image shapes; each entry is indexed as [0]=height,
        [1]=width below — NOTE(review): confirm ordering against the caller.

    Returns
    -------
    (mask_out_np_list, start_x_list, start_y_list, h_list, w_list): per image,
    a float mask array (0 = foreground, 1 = background), the bounding box
    top-left corner and its height/width.
    """
    print_info = False

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  # u2netp

    if img_path is None:
        # Fallback: run on the images bundled with the repo.
        image_dir = os.path.join(os.getcwd(), 'U2Net/images')
        img_name_list = glob.glob(image_dir + os.sep + '*')
        if print_info: print("local image")
        if print_info: print(img_name_list)
    else:
        if full_ds:
            # Caller supplies explicit path and shape lists.
            img_name_list = img_path
            shape_hw_list = shape_hw
        else:
            img_name_list = glob.glob(img_path)
            if print_info: print(img_path)

    # Pretrained weights are expected relative to the current working directory.
    model_dir = os.path.join(os.getcwd(), 'U2Net/model/pre_trained', model_name + '.pth')

    # --------- 2. dataloader ---------
    # No labels: SalObjDataset substitutes zero masks; inputs resized to 320.
    test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,
                                        lbl_name_list=[],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=4)  # 1

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        net = U2NETP(3, 1)
    if torch.cuda.is_available():
        net.load_state_dict(torch.load(model_dir))
        net.cuda()
    else:
        net.load_state_dict(torch.load(model_dir, map_location='cpu'))
    net.eval()

    # --------- 4. inference for each image ---------
    mask_out_np_list = []
    start_x_list = []
    start_y_list = []
    h_list = []
    w_list = []

    bad_mask_count = 0
    refined_mask_count = 0

    for i_test, data_test in enumerate(test_salobj_dataloader):
        if print_info: print("U2N:", i_test, img_name_list[i_test])
        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if full_ds:
            shape_hw_i = shape_hw_list[i_test]

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        with torch.no_grad():
            # d1 is the fused saliency output; d2..d7 are side outputs.
            d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        THRESHOLD = 0.8  # 0.5 # 0.8 # 0.5 #0.8 # for the original mask (better not smaller than 0.7 cuz artifacts)
        THRESHOLD_resize = 0.2  # 0.1 # 0.2 # for the resized mask
        THRESHOLD_deep = 0.1  # for the non-detected mask

        pred = pred[0, :, :]
        pred_cpu = pred.cpu()
        out_img = pred_cpu.detach().numpy()
        # Keep an un-thresholded copy in case detection fails and we retry
        # with the looser THRESHOLD_deep below.
        out_img_refine = copy.deepcopy(out_img)

        # BACKGROUND REMOVAL
        out_img[out_img > THRESHOLD] = 1
        out_img[out_img <= THRESHOLD] = 0
        out_img = (out_img * 255).astype(np.uint8)
        out_img = Image.fromarray(out_img, mode='L')

        # BOUNDING BOX CREATION
        # Resize the 320x320 binary mask back to the original image size.
        transform_mask = transforms.Compose([
            transforms.ToTensor(),
            transforms.ToPILImage(mode='L'),  # (mode='1'),
            # transforms.Resize((image.shape[1],image.shape[0]), Image.BILINEAR),
            transforms.Resize((shape_hw_i[0], shape_hw_i[1]), Image.BILINEAR),  # shape_hw (0 - height, 1 - width)
            transforms.ToTensor(),
        ])
        out_img = transform_mask(out_img)
        out_img = out_img[0, :, :]

        # mask_out: inverted mask (0 = object, 1 = background).
        mask_out = out_img
        mask_out = mask_out.cpu()
        mask_out = torch.where(mask_out > THRESHOLD_resize, torch.tensor(0.),
                               torch.tensor(1.))
        mask_out_np = mask_out.detach().numpy()

        # out_layer: non-inverted mask (1 = object) used for box extraction.
        out_layer = out_img
        out_layer = out_layer.cpu()
        out_layer = torch.where(out_layer > THRESHOLD_resize, torch.tensor(1.),
                                torch.tensor(0.))
        out_layer = out_layer.detach().numpy()

        # Per row/column: first and last foreground pixel; sentinel values
        # (size+1 / 0) for empty rows so min/max below ignore them.
        x_starts = [np.where(out_layer[i] == 1)[0][0] if len(np.where(out_layer[i] == 1)[0]) != 0
                    else out_layer.shape[0] + 1 for i in range(out_layer.shape[0])]
        x_ends = [np.where(out_layer[i] == 1)[0][-1] if len(np.where(out_layer[i] == 1)[0]) != 0
                  else 0 for i in range(out_layer.shape[0])]
        y_starts = [np.where(out_layer.T[i] == 1)[0][0] if len(np.where(out_layer.T[i] == 1)[0]) != 0
                    else out_layer.T.shape[0] + 1 for i in range(out_layer.T.shape[0])]
        y_ends = [np.where(out_layer.T[i] == 1)[0][-1] if len(np.where(out_layer.T[i] == 1)[0]) != 0
                  else 0 for i in range(out_layer.T.shape[0])]

        startx = min(x_starts)
        endx = max(x_ends)
        starty = min(y_starts)
        endy = max(y_ends)
        start = (startx, starty)
        end = (endx, endy)

        ## For cases when U2N couldn't detect mask:
        # [DONE] 1.1 if (end - start) < 30-50px -> decrease the THRESHOLD
        # [DONE] 1.2 if (start>end) or end==0 ? -> decrease the THRESHOLD
        # [DONE] 2.1 if no mask found anyway -> create center crop mask (x, y) +-10 %
        # [DONE] 2.2 + restore h,w from (0,0) to (x, y) +-10 %
        w_temp = end[0] - start[0]
        h_temp = end[1] - start[1]

        # Sanity check: count of confident foreground pixels.
        mask_px = np.count_nonzero(out_layer > 0.9)  # (expected to be == 1.0)
        if print_info: print("Mask px old:", mask_px)

        # Detection considered failed: degenerate box, tiny mask, or tiny extent.
        if (end[0] <= start[0]) or (end[1] <= start[1]) or (mask_px < 5000) or (w_temp < 50) or (h_temp < 50):
            if print_info: print("[WARNING] Mask was not detected by U2N for image", img_name_list[i_test])
            if print_info: print("Trying to refine image and then detect mask again.")
            if print_info: print("Old x (start, end):", startx, endx)
            if print_info: print("Old y (start, end):", starty, endy)

            # img_dir = ("test/" + str(i_test))
            # if not os.path.exists(img_dir):
            #     os.makedirs(img_dir, exist_ok=True)
            # img_name = ("test/" + str(i_test) + "/1mask_init" + str(i_test) + ".png")
            # img_temp = transforms.ToTensor()(out_img_refine)
            # save_image(img_temp, img_name)

            # img_name = ("test/" + str(i_test) + "/2mask_old" + str(i_test) + ".png")
            # img_temp = transforms.ToTensor()(mask_out_np)
            # save_image(img_temp, img_name)

            # Retry with the much looser THRESHOLD_deep on the raw prediction.
            out_img_refine[out_img_refine > THRESHOLD_deep] = 1
            out_img_refine[out_img_refine <= THRESHOLD_deep] = 0

            out_img_refine = (out_img_refine * 255).astype(np.uint8)
            out_img_refine = Image.fromarray(out_img_refine, mode='L')

            transform_mask = transforms.Compose([
                transforms.ToTensor(),
                transforms.ToPILImage(mode='L'),  # (mode='1'),
                # transforms.Resize((image.shape[1],image.shape[0]), Image.BILINEAR),
                transforms.Resize((shape_hw_i[0], shape_hw_i[1]), Image.BILINEAR),  # shape_hw (0 - height, 1 - width)
                transforms.ToTensor(),
            ])
            out_img_refine = transform_mask(out_img_refine)
            out_img_refine = out_img_refine[0, :, :]

            out_layer_refine = out_img_refine
            out_layer_refine = out_layer_refine.cpu()
            out_layer_refine = torch.where(out_img_refine > THRESHOLD_resize, torch.tensor(1.),
                                           torch.tensor(0.))
            out_layer_refine = out_layer_refine.detach().numpy()

            # Same per-row/column box extraction as above, on the refined mask.
            x_starts = [np.where(out_layer_refine[i] == 1)[0][0] if len(np.where(out_layer_refine[i] == 1)[0]) != 0
                        else out_layer_refine.shape[0] + 1 for i in range(out_layer_refine.shape[0])]
            x_ends = [np.where(out_layer_refine[i] == 1)[0][-1] if len(np.where(out_layer_refine[i] == 1)[0]) != 0
                      else 0 for i in range(out_layer_refine.shape[0])]
            y_starts = [np.where(out_layer_refine.T[i] == 1)[0][0] if len(np.where(out_layer_refine.T[i] == 1)[0]) != 0
                        else out_layer_refine.T.shape[0] + 1 for i in range(out_layer_refine.T.shape[0])]
            y_ends = [np.where(out_layer_refine.T[i] == 1)[0][-1] if len(np.where(out_layer_refine.T[i] == 1)[0]) != 0
                      else 0 for i in range(out_layer_refine.T.shape[0])]

            startx = min(x_starts)
            endx = max(x_ends)
            starty = min(y_starts)
            endy = max(y_ends)
            start = (startx, starty)
            end = (endx, endy)
            if print_info: print("New x (start, end):", startx, endx)
            if print_info: print("New y (start, end):", starty, endy)

            w_temp = end[0] - start[0]
            h_temp = end[1] - start[1]

            mask_px = np.count_nonzero(out_layer_refine > 0.9)  # (expected to be == 1.0)
            if print_info: print("Mask px new:", mask_px)

            if (end[0] <= start[0]) or (end[1] <= start[1]) or (mask_px < 5000) or (w_temp < 50) or (h_temp < 50):
                # Still no usable mask: fall back to a centered box covering
                # 80% of the image (10% margin on every side).
                if print_info: print("[WARNING] Mask was not deteted by U2N even after refining.")
                if print_info: print("Changing mask size (0, 0) to img size (", shape_hw_i[1], shape_hw_i[0], ") -10 p/c boundaries: ")

                if print_info: print("Old x (start, end):", startx, endx)
                startx = shape_hw_i[1] * 0.1
                endx = shape_hw_i[1] * 0.9  # w -> x
                if print_info: print("New x (start, end):", startx, endx)

                if print_info: print("Old y (start, end):", starty, endy)
                starty = shape_hw_i[0] * 0.1
                endy = shape_hw_i[0] * 0.9  # h -> y
                if print_info: print("New y (start, end):", starty, endy)

                start = (startx, starty)
                end = (endx, endy)

                # Synthetic mask: background everywhere except the center box.
                mask_out_np = np.ones((int(shape_hw_i[0]), int(shape_hw_i[1])))
                mask_out_np[int(starty):int(endy), int(startx):int(endx)] = 0

                # img_name = ("test/" + str(i_test) + "/4mask_new2_" + str(i_test) + ".png")
                # img_temp = transforms.ToTensor()(mask_out_np)
                # save_image(img_temp, img_name)

                bad_mask_count += 1
            else:
                # Refinement succeeded: use the inverted refined mask.
                mask_out_refine = out_img_refine
                mask_out_refine = mask_out_refine.cpu()
                mask_out_refine = torch.where(mask_out_refine > THRESHOLD_resize, torch.tensor(0.),
                                              torch.tensor(1.))

                # img_name = ("test/" + str(i_test) + "/3mask_new1_" + str(i_test) + ".png")
                # #mask_tem = transforms.ToTensor()(mask)
                # save_image(mask_out_refine, img_name)

                mask_out_np = mask_out_refine.detach().numpy()

                refined_mask_count += 1

        # Final box extent from the (possibly updated) corners.
        w = end[0] - start[0]
        h = end[1] - start[1]

        # save results to test_results folder
        # if not os.path.exists(prediction_dir):
        #     os.makedirs(prediction_dir, exist_ok=True)
        # save_output(img_name_list[i_test], mask_out, prediction_dir)

        # Free the network outputs before the next iteration.
        del d1, d2, d3, d4, d5, d6, d7

        if print_info: print(start[0], start[1], h, w)

        mask_out_np_list.append(mask_out_np)
        start_x_list.append(start[0])
        start_y_list.append(start[1])
        h_list.append(h)
        w_list.append(w)

        # Lightweight progress indicator for long runs.
        if i_test % 1000 == 0:
            print(i_test)

    print("Refined masks total:", refined_mask_count)
    print("Bad masks total:", bad_mask_count)

    return mask_out_np_list, start_x_list, start_y_list, h_list, w_list
if __name__ == "__main__":
    # Script entry point: run the mask-extraction pass with default arguments
    # (falls back to the bundled U2Net/images folder).
    mask_hw()
sm-vit | sm-vit-main/U2Net/model/u2net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class REBNCONV(nn.Module):
    """3x3 (optionally dilated) Conv -> BatchNorm -> ReLU block."""

    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(REBNCONV, self).__init__()
        # Padding equals the dilation rate, so spatial size is preserved.
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate)
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu_s1(self.bn_s1(self.conv_s1(x)))
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src, tar):
    """Bilinearly resize `src` so its spatial (H, W) matches `tar`'s.

    Fix vs. original: `F.upsample` is deprecated (it merely forwards to
    `F.interpolate`). `align_corners=False` is passed explicitly — that is
    the value the deprecated call effectively used, so the output is
    numerically identical, just without the deprecation warning.
    """
    return F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=False)
### RSU-7 ###
class RSU7(nn.Module):  # UNet07DRES(nn.Module):
    """Residual U-block of depth 7: a small U-Net whose decoder output is
    added to the input projection (residual connection)."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU7, self).__init__()

        # Input projection; its output is also the residual branch.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder: six conv stages separated by 2x2 max-pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Bottleneck: dilated conv instead of further downsampling.
        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder: each stage consumes the concat of the upsampled deeper
        # feature with the same-scale encoder feature.
        self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        x_in = self.rebnconvin(x)

        e1 = self.rebnconv1(x_in)
        e2 = self.rebnconv2(self.pool1(e1))
        e3 = self.rebnconv3(self.pool2(e2))
        e4 = self.rebnconv4(self.pool3(e3))
        e5 = self.rebnconv5(self.pool4(e4))
        e6 = self.rebnconv6(self.pool5(e5))
        e7 = self.rebnconv7(e6)

        d6 = self.rebnconv6d(torch.cat((e7, e6), 1))
        d5 = self.rebnconv5d(torch.cat((_upsample_like(d6, e5), e5), 1))
        d4 = self.rebnconv4d(torch.cat((_upsample_like(d5, e4), e4), 1))
        d3 = self.rebnconv3d(torch.cat((_upsample_like(d4, e3), e3), 1))
        d2 = self.rebnconv2d(torch.cat((_upsample_like(d3, e2), e2), 1))
        d1 = self.rebnconv1d(torch.cat((_upsample_like(d2, e1), e1), 1))

        return d1 + x_in
### RSU-6 ###
class RSU6(nn.Module):  # UNet06DRES(nn.Module):
    """Residual U-block of depth 6 (see RSU7; one pooling level fewer)."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU6, self).__init__()

        # Input projection / residual branch.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder: five conv stages with 2x2 max-pooling in between.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder stages (concat of deeper feature + skip connection).
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        x_in = self.rebnconvin(x)

        e1 = self.rebnconv1(x_in)
        e2 = self.rebnconv2(self.pool1(e1))
        e3 = self.rebnconv3(self.pool2(e2))
        e4 = self.rebnconv4(self.pool3(e3))
        e5 = self.rebnconv5(self.pool4(e4))
        e6 = self.rebnconv6(e5)

        d5 = self.rebnconv5d(torch.cat((e6, e5), 1))
        d4 = self.rebnconv4d(torch.cat((_upsample_like(d5, e4), e4), 1))
        d3 = self.rebnconv3d(torch.cat((_upsample_like(d4, e3), e3), 1))
        d2 = self.rebnconv2d(torch.cat((_upsample_like(d3, e2), e2), 1))
        d1 = self.rebnconv1d(torch.cat((_upsample_like(d2, e1), e1), 1))

        return d1 + x_in
### RSU-5 ###
class RSU5(nn.Module):  # UNet05DRES(nn.Module):
    """Residual U-block of depth 5 (see RSU7; two pooling levels fewer)."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU5, self).__init__()

        # Input projection / residual branch.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder: four conv stages with 2x2 max-pooling in between.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder stages (concat of deeper feature + skip connection).
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        x_in = self.rebnconvin(x)

        e1 = self.rebnconv1(x_in)
        e2 = self.rebnconv2(self.pool1(e1))
        e3 = self.rebnconv3(self.pool2(e2))
        e4 = self.rebnconv4(self.pool3(e3))
        e5 = self.rebnconv5(e4)

        d4 = self.rebnconv4d(torch.cat((e5, e4), 1))
        d3 = self.rebnconv3d(torch.cat((_upsample_like(d4, e3), e3), 1))
        d2 = self.rebnconv2d(torch.cat((_upsample_like(d3, e2), e2), 1))
        d1 = self.rebnconv1d(torch.cat((_upsample_like(d2, e1), e1), 1))

        return d1 + x_in
### RSU-4 ###
class RSU4(nn.Module):  # UNet04DRES(nn.Module):
    """Residual U-block of depth 4 (see RSU7; three pooling levels fewer)."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4, self).__init__()

        # Input projection / residual branch.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder: three conv stages with 2x2 max-pooling in between.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder stages (concat of deeper feature + skip connection).
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        x_in = self.rebnconvin(x)

        e1 = self.rebnconv1(x_in)
        e2 = self.rebnconv2(self.pool1(e1))
        e3 = self.rebnconv3(self.pool2(e2))
        e4 = self.rebnconv4(e3)

        d3 = self.rebnconv3d(torch.cat((e4, e3), 1))
        d2 = self.rebnconv2d(torch.cat((_upsample_like(d3, e2), e2), 1))
        d1 = self.rebnconv1d(torch.cat((_upsample_like(d2, e1), e1), 1))

        return d1 + x_in
### RSU-4F ###
class RSU4F(nn.Module):  # UNet04FRES(nn.Module):
    """Dilation-only residual block: no pooling/upsampling, the receptive
    field grows via dilation rates 1/2/4/8 instead."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4F, self).__init__()

        # Input projection / residual branch.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # "Encoder": increasing dilation in place of downsampling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)

        # "Decoder": mirrored dilation rates; no spatial resampling needed.
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        x_in = self.rebnconvin(x)

        e1 = self.rebnconv1(x_in)
        e2 = self.rebnconv2(e1)
        e3 = self.rebnconv3(e2)
        e4 = self.rebnconv4(e3)

        d3 = self.rebnconv3d(torch.cat((e4, e3), 1))
        d2 = self.rebnconv2d(torch.cat((d3, e2), 1))
        d1 = self.rebnconv1d(torch.cat((d2, e1), 1))

        return d1 + x_in
##### U^2-Net ####
class U2NET(nn.Module):
    """Full-size U^2-Net for salient object detection.

    A 6-stage encoder / 5-stage decoder built from RSU blocks. Returns seven
    sigmoid saliency maps: the fused output d0 followed by the six side
    outputs d1..d6, all at the input resolution.

    Fix vs. original: uses ``torch.sigmoid`` instead of the deprecated
    ``F.sigmoid`` (removed in recent PyTorch); outputs are identical.
    Submodule attribute names are unchanged, so existing checkpoints still
    load via ``load_state_dict``.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super(U2NET, self).__init__()

        # ----- encoder -----
        self.stage1 = RSU7(in_ch, 32, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage2 = RSU6(64, 32, 128)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage3 = RSU5(128, 64, 256)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage4 = RSU4(256, 128, 512)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage5 = RSU4F(512, 256, 512)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage6 = RSU4F(512, 256, 512)

        # ----- decoder -----
        self.stage5d = RSU4F(1024, 256, 512)
        self.stage4d = RSU4(1024, 128, 256)
        self.stage3d = RSU5(512, 64, 128)
        self.stage2d = RSU6(256, 32, 64)
        self.stage1d = RSU7(128, 16, 64)

        # ----- side outputs: one 3x3 conv per decoder stage (+ bottleneck) -----
        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
        self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
        self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
        self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)

        # Fuses the six side outputs into the final map d0.
        self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1)

    def forward(self, x):
        # ----- encoder -----
        hx1 = self.stage1(x)
        hx2 = self.stage2(self.pool12(hx1))
        hx3 = self.stage3(self.pool23(hx2))
        hx4 = self.stage4(self.pool34(hx3))
        hx5 = self.stage5(self.pool45(hx4))
        hx6 = self.stage6(self.pool56(hx5))

        # -------------------- decoder --------------------
        # Each stage: upsample the deeper feature, concat with the skip.
        hx5d = self.stage5d(torch.cat((_upsample_like(hx6, hx5), hx5), 1))
        hx4d = self.stage4d(torch.cat((_upsample_like(hx5d, hx4), hx4), 1))
        hx3d = self.stage3d(torch.cat((_upsample_like(hx4d, hx3), hx3), 1))
        hx2d = self.stage2d(torch.cat((_upsample_like(hx3d, hx2), hx2), 1))
        hx1d = self.stage1d(torch.cat((_upsample_like(hx2d, hx1), hx1), 1))

        # side output (all upsampled to d1's — i.e. the input's — resolution)
        d1 = self.side1(hx1d)
        d2 = _upsample_like(self.side2(hx2d), d1)
        d3 = _upsample_like(self.side3(hx3d), d1)
        d4 = _upsample_like(self.side4(hx4d), d1)
        d5 = _upsample_like(self.side5(hx5d), d1)
        d6 = _upsample_like(self.side6(hx6), d1)

        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))

        return (torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2),
                torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5),
                torch.sigmoid(d6))
### U^2-Net small ###
class U2NETP(nn.Module):
    """Small variant of U^2-Net.

    Same six-stage RSU encoder / five-stage decoder topology as the full
    model, but every stage is slimmed to 16 mid / 64 output channels.
    forward() returns 7 sigmoid saliency maps: the fused map d0 followed
    by the six side outputs d1..d6, all at d1's resolution.
    """

    def __init__(self,in_ch=3,out_ch=1):
        super(U2NETP,self).__init__()

        # ----- encoder: RSU blocks with 2x max-pool between stages -----
        self.stage1 = RSU7(in_ch,16,64)
        self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)

        self.stage2 = RSU6(64,16,64)
        self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)

        self.stage3 = RSU5(64,16,64)
        self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)

        self.stage4 = RSU4(64,16,64)
        self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)

        self.stage5 = RSU4F(64,16,64)
        self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)

        self.stage6 = RSU4F(64,16,64)

        # ----- decoder: each stage consumes cat(upsampled deeper, skip) -----
        self.stage5d = RSU4F(128,16,64)
        self.stage4d = RSU4(128,16,64)
        self.stage3d = RSU5(128,16,64)
        self.stage2d = RSU6(128,16,64)
        self.stage1d = RSU7(128,16,64)

        # per-stage side-output heads plus the 1x1 fusion conv
        self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
        self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
        self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
        self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
        self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
        self.side6 = nn.Conv2d(64,out_ch,3,padding=1)

        self.outconv = nn.Conv2d(6*out_ch,out_ch,1)

    def forward(self,x):
        """Run encoder, decoder and side heads; see class docstring."""
        hx = x

        #stage 1
        hx1 = self.stage1(hx)
        hx = self.pool12(hx1)

        #stage 2
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)

        #stage 3
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)

        #stage 4
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)

        #stage 5
        hx5 = self.stage5(hx)
        hx = self.pool56(hx5)

        #stage 6 (bridge)
        hx6 = self.stage6(hx)
        hx6up = _upsample_like(hx6,hx5)

        #decoder
        hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
        hx5dup = _upsample_like(hx5d,hx4)

        hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
        hx4dup = _upsample_like(hx4d,hx3)

        hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
        hx3dup = _upsample_like(hx3d,hx2)

        hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
        hx2dup = _upsample_like(hx2d,hx1)

        hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))

        #side output
        d1 = self.side1(hx1d)

        d2 = self.side2(hx2d)
        d2 = _upsample_like(d2,d1)

        d3 = self.side3(hx3d)
        d3 = _upsample_like(d3,d1)

        d4 = self.side4(hx4d)
        d4 = _upsample_like(d4,d1)

        d5 = self.side5(hx5d)
        d5 = _upsample_like(d5,d1)

        d6 = self.side6(hx6)
        d6 = _upsample_like(d6,d1)

        d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))

        # fix: torch.sigmoid replaces the long-deprecated F.sigmoid alias
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
| 14,719 | 26.984791 | 118 | py |
sm-vit | sm-vit-main/U2Net/model/u2net_refactor.py | import torch
import torch.nn as nn
import math
__all__ = ['U2NET_full', 'U2NET_lite']
def _upsample_like(x, size):
return nn.Upsample(size=size, mode='bilinear', align_corners=False)(x)
def _size_map(x, height):
# {height: size} for Upsample
size = list(x.shape[-2:])
sizes = {}
for h in range(1, height):
sizes[h] = size
size = [math.ceil(w / 2) for w in size]
return sizes
class REBNCONV(nn.Module):
    """3x3 Conv -> BatchNorm -> ReLU block.

    `dilate` widens both padding and dilation together, so the output
    always keeps the input's spatial size.
    """

    def __init__(self, in_ch=3, out_ch=3, dilate=1):
        super().__init__()
        # padding == dilation keeps H x W unchanged for a 3x3 kernel
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dilate, dilation=1 * dilate)
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv_s1(x)
        out = self.bn_s1(out)
        return self.relu_s1(out)
class RSU(nn.Module):
    """Generic ReSidual U-block.

    `height` sets the depth of the internal mini U-Net. When `dilated` is
    True the block never pools/upsamples; it widens the receptive field
    with dilated convolutions instead (the RSU-xF variant).
    Output = input-projection + miniUNet(input-projection).
    """

    def __init__(self, name, height, in_ch, mid_ch, out_ch, dilated=False):
        super(RSU, self).__init__()
        self.name = name
        self.height = height
        self.dilated = dilated
        self._make_layers(height, in_ch, mid_ch, out_ch, dilated)

    def forward(self, x):
        sizes = _size_map(x, self.height)  # spatial (H, W) per pyramid level
        x = self.rebnconvin(x)             # project input to out_ch channels

        # U-Net like symmetric encoder-decoder structure
        def unet(x, height=1):
            if height < self.height:
                x1 = getattr(self, f'rebnconv{height}')(x)
                if not self.dilated and height < self.height - 1:
                    # downsample before recursing, except at the deepest pair
                    x2 = unet(getattr(self, 'downsample')(x1), height + 1)
                else:
                    x2 = unet(x1, height + 1)

                # decoder conv fuses the deeper result with this level's skip
                x = getattr(self, f'rebnconv{height}d')(torch.cat((x2, x1), 1))
                # dilated blocks never changed resolution, so no upsampling
                return _upsample_like(x, sizes[height - 1]) if not self.dilated and height > 1 else x
            else:
                # bottom of the U: a single (dilated) conv
                return getattr(self, f'rebnconv{height}')(x)

        return x + unet(x)  # residual connection

    def _make_layers(self, height, in_ch, mid_ch, out_ch, dilated=False):
        # input projection and the pooling op shared by all levels
        self.add_module('rebnconvin', REBNCONV(in_ch, out_ch))
        self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))

        # level 1 is special: its decoder conv restores out_ch channels
        self.add_module(f'rebnconv1', REBNCONV(out_ch, mid_ch))
        self.add_module(f'rebnconv1d', REBNCONV(mid_ch * 2, out_ch))

        # intermediate levels; dilation grows 2**(i-1) only in the dilated variant
        for i in range(2, height):
            dilate = 1 if not dilated else 2 ** (i - 1)
            self.add_module(f'rebnconv{i}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
            self.add_module(f'rebnconv{i}d', REBNCONV(mid_ch * 2, mid_ch, dilate=dilate))

        # deepest conv (no matching decoder conv)
        dilate = 2 if not dilated else 2 ** (height - 1)
        self.add_module(f'rebnconv{height}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
class U2NET(nn.Module):
    """Config-driven U^2-Net: a 6-level U-Net whose stages are RSU blocks.

    `cfgs` (see U2NET_full / U2NET_lite) declares every encoder/decoder
    stage and its optional side-output head. forward() returns
    [fused map, side1..side6], each passed through sigmoid.
    """

    def __init__(self, cfgs, out_ch):
        super(U2NET, self).__init__()
        self.out_ch = out_ch
        self._make_layers(cfgs)

    def forward(self, x):
        sizes = _size_map(x, self.height)
        maps = []  # storage for maps; side() appends, fuse() reorders

        # side saliency map
        def unet(x, height=1):
            # recursive outer U: encode down to stage6, decode on the way back
            if height < 6:
                x1 = getattr(self, f'stage{height}')(x)
                x2 = unet(getattr(self, 'downsample')(x1), height + 1)
                x = getattr(self, f'stage{height}d')(torch.cat((x2, x1), 1))
                side(x, height)
                return _upsample_like(x, sizes[height - 1]) if height > 1 else x
            else:
                # deepest stage: no decoder partner
                x = getattr(self, f'stage{height}')(x)
                side(x, height)
                return _upsample_like(x, sizes[height - 1])

        def side(x, h):
            # side output saliency map (before sigmoid), upsampled to input size
            x = getattr(self, f'side{h}')(x)
            x = _upsample_like(x, sizes[1])
            maps.append(x)

        def fuse():
            # fuse saliency probability maps
            # maps were appended deepest-first; reverse so index 1 is side1
            maps.reverse()
            x = torch.cat(maps, 1)
            x = getattr(self, 'outconv')(x)
            maps.insert(0, x)  # fused map goes first
            return [torch.sigmoid(x) for x in maps]

        unet(x)
        maps = fuse()
        return maps

    def _make_layers(self, cfgs):
        # cfgs holds encoder + decoder stages, so height = (len + 1) / 2
        self.height = int((len(cfgs) + 1) / 2)
        self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
        for k, v in cfgs.items():
            # build rsu block
            self.add_module(k, RSU(v[0], *v[1]))
            if v[2] > 0:
                # build side layer; v[0][-1] is the stage digit in the RSU name
                self.add_module(f'side{v[0][-1]}', nn.Conv2d(v[2], self.out_ch, 3, padding=1))
        # build fuse layer (1x1 conv over the concatenated side maps)
        self.add_module('outconv', nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1))
def U2NET_full():
    """Construct the full-size U^2-Net with a single-channel output."""
    # (stage key, RSU name, (height L, in_ch, mid_ch, out_ch[, dilated]),
    #  side-output channels; -1 = no side head)
    spec = [
        ('stage1', 'En_1', (7, 3, 32, 64), -1),
        ('stage2', 'En_2', (6, 64, 32, 128), -1),
        ('stage3', 'En_3', (5, 128, 64, 256), -1),
        ('stage4', 'En_4', (4, 256, 128, 512), -1),
        ('stage5', 'En_5', (4, 512, 256, 512, True), -1),
        ('stage6', 'En_6', (4, 512, 256, 512, True), 512),
        ('stage5d', 'De_5', (4, 1024, 256, 512, True), 512),
        ('stage4d', 'De_4', (4, 1024, 128, 256), 256),
        ('stage3d', 'De_3', (5, 512, 64, 128), 128),
        ('stage2d', 'De_2', (6, 256, 32, 64), 64),
        ('stage1d', 'De_1', (7, 128, 16, 64), 64),
    ]
    cfgs = {stage: [name, args, side] for stage, name, args, side in spec}
    return U2NET(cfgs=cfgs, out_ch=1)
def U2NET_lite():
    """Construct the lightweight U^2-Net (all stages 16 mid / 64 out channels)."""
    # (stage key, RSU name, (height L, in_ch, mid_ch, out_ch[, dilated]),
    #  side-output channels; -1 = no side head)
    spec = [
        ('stage1', 'En_1', (7, 3, 16, 64), -1),
        ('stage2', 'En_2', (6, 64, 16, 64), -1),
        ('stage3', 'En_3', (5, 64, 16, 64), -1),
        ('stage4', 'En_4', (4, 64, 16, 64), -1),
        ('stage5', 'En_5', (4, 64, 16, 64, True), -1),
        ('stage6', 'En_6', (4, 64, 16, 64, True), 64),
        ('stage5d', 'De_5', (4, 128, 16, 64, True), 64),
        ('stage4d', 'De_4', (4, 128, 16, 64), 64),
        ('stage3d', 'De_3', (5, 128, 16, 64), 64),
        ('stage2d', 'De_2', (6, 128, 16, 64), 64),
        ('stage1d', 'De_1', (7, 128, 16, 64), 64),
    ]
    cfgs = {stage: [name, args, side] for stage, name, args, side in spec}
    return U2NET(cfgs=cfgs, out_ch=1)
| 6,097 | 35.08284 | 101 | py |
HighOrderAtten | HighOrderAtten-master/image_model/download_model.py | """
Download the VGG and deep residual model to extract image features.
Version: 1.0
Contributor: Jiasen Lu
"""
import os
import argparse
import json
def download_VGG():
    """Fetch the VGG-19 Caffe weights and deploy prototxt via wget."""
    weights_url = 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel'
    prototxt_url = 'https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/bb2b4fe0a9bb0669211cf3d0bc949dfdda173e9e/VGG_ILSVRC_19_layers_deploy.prototxt'
    print('Downloading VGG model from ' + weights_url)
    for url in (weights_url, prototxt_url):
        os.system('wget ' + url)
def download_deep_residual():
    """Fetch the ResNet-200 Torch model and its transforms script via wget."""
    model_url = 'https://d2j0dndfm35trm.cloudfront.net/resnet-200.t7'
    transforms_url = 'https://raw.githubusercontent.com/facebook/fb.resnet.torch/master/datasets/transforms.lua'
    print('Downloading deep residual model from ' + model_url)
    for url in (model_url, transforms_url):
        os.system('wget ' + url)
def main(params):
    """Dispatch to the requested downloader ('VGG'; anything else -> deep residual)."""
    downloader = download_VGG if params['download'] == 'VGG' else download_deep_residual
    downloader()
if __name__ == "__main__":
    # CLI entry point: choose which model family to download.
    parser = argparse.ArgumentParser()
    parser.add_argument('--download', default='VGG', help='VGG or Residual')

    # input json
    args = parser.parse_args()
    params = vars(args)
    # NOTE: Python 2 print statements below; this script targets Python 2.
    print 'parsed input parameters:'
    print json.dumps(params, indent = 2)
    main(params)
| 1,337 | 35.162162 | 169 | py |
HighOrderAtten | HighOrderAtten-master/data/prepro_vqa.py | '''
Preoricess a raw json dataset into hdf5/json files.
Caption: Use NLTK or split function to get tokens.
'''
from random import shuffle, seed
import sys
import os.path
import argparse
import numpy as np
import scipy.io
import pdb
import h5py
from nltk.tokenize import word_tokenize
import json
import re
import math
def tokenize(sentence):
    """Split `sentence` on punctuation/whitespace delimiters.

    Delimiters are kept as tokens (the regex group captures them), but
    empty strings, single spaces and newlines are dropped.
    """
    parts = re.split(r"([-.\"',:? !\$#@~()*&\^%;\[\]/\\\+<>\n=])", sentence)
    return [tok for tok in parts if tok not in ('', ' ', '\n')]
def prepro_question(dataset, params):
    # Tokenize every question in place, storing the token list under
    # 'processed_tokens'. Uses NLTK's word_tokenize (lowercased) when
    # params['token_method'] == 'nltk', else the regex tokenize() above.
    # Returns the same (mutated) dataset list.
    # preprocess all the question
    print 'example processed tokens:'
    for i,ques in enumerate(dataset):
        s = ques['question']
        if params['token_method'] == 'nltk':
            txt = word_tokenize(str(s).lower())
        else:
            txt = tokenize(s)
        ques['processed_tokens'] = txt
        if i < 10: print txt
        if i % 1000 == 0:
            # lightweight progress indicator; \r overwrites the same line
            sys.stdout.write("processing %d/%d (%.2f%% done)   \r" %  (i, len(dataset), i*100.0/len(dataset)) )
            sys.stdout.flush()
    return dataset
def build_vocab_question(dataset, params):
    # Build the question vocabulary from 'processed_tokens', keeping only
    # words seen more than params['word_count_threshold'] times; rarer
    # words map to the special 'UNK' token. Mutates dataset in place by
    # adding 'final_question' and returns (dataset, vocab).
    # build vocabulary for question and answers.
    count_thr = params['word_count_threshold']

    # count up the number of words
    counts = {}
    for ques in dataset:
        for w in ques['processed_tokens']:
            counts[w] = counts.get(w, 0) + 1
    cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
    print 'top words and their counts:'
    print '\n'.join(map(str,cw[:20]))

    # print some stats
    total_words = sum(counts.itervalues())
    print 'total words:', total_words
    bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
    vocab = [w for w,n in counts.iteritems() if n > count_thr]
    bad_count = sum(counts[w] for w in bad_words)
    print 'number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))
    print 'number of words in vocab would be %d' % (len(vocab), )
    print 'number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)

    # lets now produce the final annotation
    # additional special UNK token we will use below to map infrequent words to
    print 'inserting the special UNK token'
    vocab.append('UNK')

    # rewrite each question with rare words replaced by 'UNK'
    for ques in dataset:
        txt = ques['processed_tokens']
        question = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
        ques['final_question'] = question

    return dataset, vocab
def apply_vocab_question(dataset, wtoi):
    """Project test questions onto an existing vocabulary.

    Any token of 'processed_tokens' missing from `wtoi` becomes 'UNK'.
    Mutates each entry in place (adds 'final_question') and returns the
    same dataset list.
    """
    for entry in dataset:
        entry['final_question'] = [
            tok if tok in wtoi else 'UNK'
            for tok in entry['processed_tokens']
        ]
    return dataset
def get_top_answers(dataset, params):
    # Return the params['num_ans'] most frequent answer strings (most
    # frequent first). Raises IndexError if there are fewer distinct
    # answers than num_ans -- presumably never happens on real VQA data.
    counts = {}
    for ques in dataset:
        ans = ques['ans']
        counts[ans] = counts.get(ans, 0) + 1

    cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
    print 'top answer and their counts:'
    print '\n'.join(map(str,cw[:20]))

    vocab = []
    for i in range(params['num_ans']):
        vocab.append(cw[i][1])
    # the slice is redundant (vocab already has num_ans items) but harmless
    return vocab[:params['num_ans']]
def encode_question(dataset, params, wtoi):
    """Encode tokenized questions as fixed-width uint32 index matrices.

    Returns (label_arrays, label_length, question_id):
      - label_arrays: (N, max_length) word indices, zero-padded/truncated
      - label_length: true (clipped) token count per question
      - question_id:  the 'ques_id' of each entry
    """
    max_length = params['max_length']
    n = len(dataset)
    label_arrays = np.zeros((n, max_length), dtype='uint32')
    label_length = np.zeros(n, dtype='uint32')
    question_id = np.zeros(n, dtype='uint32')

    for row, entry in enumerate(dataset):
        words = entry['final_question']
        question_id[row] = entry['ques_id']
        label_length[row] = min(max_length, len(words))  # record the clipped length
        for col, w in enumerate(words[:max_length]):
            label_arrays[row, col] = wtoi[w]

    return label_arrays, label_length, question_id
def encode_answer(dataset, atoi, num_ans):
    """Map each entry's ground-truth answer string to its 1-based index.

    Answers outside the vocabulary all share the sentinel slot num_ans+1.
    Returns a uint32 array of length len(dataset).
    """
    unknown = num_ans + 1  # out-of-vocabulary answers share one sentinel slot
    encoded = np.zeros(len(dataset), dtype='uint32')
    for row, entry in enumerate(dataset):
        encoded[row] = atoi.get(entry['ans'], unknown)
    return encoded
def encode_mc_answer(dataset, atoi, num_ans):
    """Encode the 18 multiple-choice candidates of every question.

    Each row holds the 1-based vocabulary index of each candidate (the
    sentinel num_ans+1 for out-of-vocabulary ones); unused slots stay 0.
    Returns a (N, 18) uint32 array.
    """
    unknown = num_ans + 1
    encoded = np.zeros((len(dataset), 18), dtype='uint32')  # 18 MC slots per question
    for row, entry in enumerate(dataset):
        for col, choice in enumerate(entry['MC_ans']):
            encoded[row, col] = atoi.get(choice, unknown)
    return encoded
def filter_question(dataset, atoi):
    # Keep only questions whose ground-truth answer is in the top-answer
    # vocabulary `atoi`; returns a new list (dataset is not mutated).
    new_dataset = []
    for i, ques in enumerate(dataset):
        if ques['ans'] in atoi:
            new_dataset.append(ques)
    print 'question number reduce from %d to %d '%(len(dataset), len(new_dataset))
    return new_dataset
def get_unqiue_img(dataset):
    """Deduplicate image paths and build question<->image index tables.

    Returns (unique_img, img_pos, ques_pos, ques_pos_len):
      - unique_img:   distinct image paths, in first-seen order
      - img_pos:      per question, the 1-based index of its image
                      (1-based because the consumer is torch)
      - ques_pos:     per image, up to 3 one-based question indices
      - ques_pos_len: per image, how many of those slots are used
    """
    count_img = {}
    for entry in dataset:
        path = entry['img_path']
        count_img[path] = count_img.get(path, 0) + 1

    unique_img = list(count_img)
    imgtoi = {path: pos + 1 for pos, path in enumerate(unique_img)}  # 1-based for torch

    img_pos = np.zeros(len(dataset), dtype='uint32')
    ques_pos_tmp = {}
    for row, entry in enumerate(dataset):
        idx = imgtoi.get(entry['img_path'])
        img_pos[row] = idx
        ques_pos_tmp.setdefault(idx - 1, []).append(row + 1)

    img_N = len(ques_pos_tmp)
    # NOTE(review): 3 columns assumes at most 3 questions per image -- confirm
    ques_pos = np.zeros((img_N, 3), dtype='uint32')
    ques_pos_len = np.zeros(img_N, dtype='uint32')
    for idx, rows in ques_pos_tmp.items():
        ques_pos_len[idx] = len(rows)
        for col, q in enumerate(rows):
            ques_pos[idx][col] = q

    return unique_img, img_pos, ques_pos, ques_pos_len
def main(params):
    # End-to-end preprocessing driver.
    # Train branch (input_json == ''): build answer/word vocabularies from
    # the raw train json and write all *_train datasets. Otherwise reuse
    # the vocabularies from an existing prepro json. In both cases the
    # test split is tokenized, encoded and written, followed by the
    # companion json with the vocab tables and image lists.
    # create output h5 file for training set.
    f = h5py.File(params['output_h5'], "w")

    if params['input_json']=='':
        dataset_train = json.load(open(params['input_train_json'], 'r'))
        #dataset_train = dataset_train[:5000]
        #dataset_test = dataset_test[:5000]

        # get top answers
        top_ans = get_top_answers(dataset_train, params)
        atoi = {w:i+1 for i,w in enumerate(top_ans)}
        atoi['error'] = params['num_ans']+1
        itoa = {i+1:w for i,w in enumerate(top_ans)}
        itoa[params['num_ans']+1] = 'error'

        # filter question, which isn't in the top answers.
        dataset_train = filter_question(dataset_train, atoi)

        # tokenization and preprocessing training question
        dataset_train = prepro_question(dataset_train, params)

        # create the vocab for question
        dataset_train, vocab = build_vocab_question(dataset_train, params)
        itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
        wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
        ques_train, ques_length_train, question_id_train = encode_question(dataset_train, params, wtoi)

        # get the unique image for train
        unique_img_train, img_pos_train, ques_pos_train, ques_pos_len_train = get_unqiue_img(dataset_train)

        # get the answer encoding.
        ans_train = encode_answer(dataset_train, atoi, params['num_ans'])
        MC_ans_train = encode_mc_answer(dataset_train, atoi, params['num_ans'])

        N_train = len(dataset_train)
        split_train = np.zeros(N_train)  # all zeros: every row is 'train'

        f.create_dataset("ques_train", dtype='uint32', data=ques_train)
        f.create_dataset("answers", dtype='uint32', data=ans_train)
        f.create_dataset("ques_id_train", dtype='uint32', data=question_id_train)
        f.create_dataset("img_pos_train", dtype='uint32', data=img_pos_train)
        f.create_dataset("ques_pos_train", dtype='uint32', data=ques_pos_train)
        f.create_dataset("ques_pos_len_train", dtype='uint32', data=ques_pos_len_train)
        f.create_dataset("split_train", dtype='uint32', data=split_train)
        f.create_dataset("ques_len_train", dtype='uint32', data=ques_length_train)
        f.create_dataset("MC_ans_train", dtype='uint32', data=MC_ans_train)
    else:
        # reuse vocabularies built by a previous run
        loaded_train_data = json.load(open(params['input_json'], 'r'))
        itow = loaded_train_data['ix_to_word']
        wtoi = {v: k for k, v in itow.iteritems()}
        itoa = loaded_train_data['ix_to_ans']
        atoi = {v: k for k, v in itoa.iteritems()}
        unique_img_train = loaded_train_data['unique_img_train']

    dataset_test = json.load(open(params['input_test_json'], 'r'))

    # tokenization and preprocessing testing question
    dataset_test = prepro_question(dataset_test, params)
    dataset_test = apply_vocab_question(dataset_test, wtoi)
    ques_test, ques_length_test, question_id_test = encode_question(dataset_test, params, wtoi)

    # get the unique image for test
    unique_img_test, img_pos_test, ques_pos_test, ques_pos_len_test = get_unqiue_img(dataset_test)

    if not params['test']:
        ans_test = encode_answer(dataset_test, atoi, params['num_ans']) #also comment line 238
    MC_ans_test = encode_mc_answer(dataset_test, atoi, params['num_ans'])

    # get the split
    N_test = len(dataset_test)
    # since the train image is already suffled, we just use the last val_num image as validation
    # train = 0, val = 1, test = 2
    #split_train[N_train - params['val_num']: N_train] = 1
    split_test = np.zeros(N_test)
    split_test[:] = 2

    f.create_dataset("ques_test", dtype='uint32', data=ques_test)
    if not params['test']:
        f.create_dataset("ans_test", dtype='uint32', data=ans_test)
    f.create_dataset("ques_id_test", dtype='uint32', data=question_id_test)
    f.create_dataset("img_pos_test", dtype='uint32', data=img_pos_test)
    f.create_dataset("ques_pos_test", dtype='uint32', data=ques_pos_test)
    f.create_dataset("ques_pos_len_test", dtype='uint32', data=ques_pos_len_test)
    f.create_dataset("split_test", dtype='uint32', data=split_test)
    f.create_dataset("ques_len_test", dtype='uint32', data=ques_length_test)
    f.create_dataset("MC_ans_test", dtype='uint32', data=MC_ans_test)

    f.close()
    print 'wrote ', params['output_h5']

    # create output json file
    out = {}
    out['ix_to_word'] = itow # encode the (1-indexed) vocab
    out['ix_to_ans'] = itoa
    out['unique_img_train'] = unique_img_train
    # NOTE(review): 'uniuqe_img_test' is a typo key baked into the file
    # format; downstream readers rely on it, so it must not be renamed.
    out['uniuqe_img_test'] = unique_img_test
    json.dump(out, open(params['output_json'], 'w'))
    print 'wrote ', params['output_json']
if __name__ == "__main__":
    # CLI entry point: gather preprocessing options and hand off to main().
    parser = argparse.ArgumentParser()

    # input json
    parser.add_argument('--input_train_json', default='vqa_raw_train.json', help='input json file to process into hdf5')
    parser.add_argument('--input_test_json', default='vqa_raw_test.json', help='input json file to process into hdf5')
    parser.add_argument('--num_ans', default=3000, type=int, help='number of top answers for the final classifications.')
    parser.add_argument('--input_json', default='' ,help='input existing train perprocess, usefull to process a new test file')
    parser.add_argument('--output_json', default='vqa_data_prepro.json', help='output json file')
    parser.add_argument('--output_h5', default='vqa_data_prepro.h5', help='output h5 file')

    # options
    parser.add_argument('--max_length', default=15, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
    parser.add_argument('--word_count_threshold', default=0, type=int, help='only words that occur more than this number of times will be put in vocab')
    parser.add_argument('--token_method', default='nltk', help='token method, nltk is much more slower.')
    # NOTE(review): --test's help string is a copy-paste of --token_method's;
    # the flag actually skips answer encoding for unlabeled test sets.
    parser.add_argument('--test', default=0 ,type=int, help='token method, nltk is much more slower.')

    args = parser.parse_args()
    params = vars(args) # convert to ordinary dict
    print 'parsed input parameters:'
    print json.dumps(params, indent = 2)
    main(params)
| 11,897 | 37.882353 | 153 | py |
MCEdit-Unified | MCEdit-Unified-master/renderer.py | """Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
renderer.py
What is going on in this file?
Here is an attempt to show the relationships between classes and
their responsibilities
MCRenderer:
has "position", "origin", optionally "viewFrustum"
Loads chunks near position+origin, draws chunks offset by origin
Calls visible on viewFrustum to exclude chunks
(+) ChunkRenderer
Has "chunkPosition", "invalidLayers", "lists"
One per chunk and detail level.
Creates display lists from BlockRenderers
(*) BlockRenderer
Has "vertexArrays"
One per block type, plus one for low detail and one for Entity
BlockRender documentation
Each Block renderer renders a particular block types or entities.
The block renderer that is chosen to draw that block type(by ID)
is the block renderer class that is lowest in the list within the
makeRenderstates method. Each blockRenderer is assigned a materialIndex
and the blockMaterial parameter indicates what material index each block
in the chunk is therefore what block renderer is used to render it.
Vertex arrays are arrays of vertices(groups of six elements) and
every group of 4 vertices is a quad that will be drawn.
Before the vertex arrays will be drawn `.ravel()` will be called
(flattened to one dimension arrays).
The vertex arrays will draw quads and each vertex elements
with the format:
0:3 index - xyz values
3:5 index - st(texture coordinates) values
5 index - rgba(colour) value
Note: each element of rgba value is a uint8 type(the 4 colour
elements makes up 32 bits) to view/change the values use
`.view('uint8')` to change the view of the array into uint8 type.
To implement a block renderer either makeVertices or makeFaceVertices
needs to be implemented. The base class BlockRenderer implements
makeVertices in terms of makeFaceVertices, by iterating over the different
face directions.
The makeVertices function is called on the block renderer to get a
list of vertexArrays that will draw the blocks for a 16x16x16 chunk.
parameters:
all parameters are in xzy order
facingBlockIndices:
list of 6, (16, 16, 16) numpy boolean arrays
each array corresponds to the blocks within the chunk that
has it face exposed in that direction. The direction is the
index into the list defined by the constants in pymclevel/faces.py
This is used to only draw exposed faces
blocks:
(16, 16, 16) numpy array of the id of blocks in the chunk
blockMaterials:
(16, 16, 16) numpy array of the material index of each block in the chunk
each material refers to a different block renderer to get the
material index for this block renderer `self.materialIndex`
blockData:
(16, 16, 16) numpy array of the metadata value of each block
in the chunk
areaBlockLights:
(18, 18, 18) numpy array of light value(max of block light and
skylight) of the chunk and 1 block 'border' around it.
texMap:
function that takes id, data value and directions
and returns texture coordinates
returns a list of vertex arrays in the form of float32 numpy arrays.
For this chunk.
The makeFaceVertices gets an vertexArray for a particular face.
parameters:
all parameters are in xzy order
direction:
the face defined by constants in pymclevel/faces.py
materialIndices:
list of (x, z, y) indices of blocks in this chunks that
is of this material(in blocktypes).
exposedFaceIndices:
list of (x, z, y) indices of blocks that has an exposed face
in the direction `direction`.
blocks:
(16, 16, 16) numpy array of the id of blocks in the chunk
blockData:
(16, 16, 16) numpy array of the metadata value of each block
in the chunk
blockLights:
(16, 16, 16) numpy array of light values(max of block light and
skylight) of the blocks in the chunk chunk.
facingBlockLight:
(16, 16, 16) numpy array of light values(max of block light and
skylight) of the blocks just in front of the face.
i.e.
if direction = pymclevel.faces.FaceXDecreasing
facingBlockLight[1, 0, 0] refers to the light level
at position (0, 0, 0) within the chunk.
texMap:
function that takes id, data value and directions
and returns texture coordinates
returns a list of vertex arrays in the form of float32 numpy arrays.
Fields
blocktypes / getBlocktypes(mats)
list of block ids the block renderer handles
detailLevels
what detail level the renderer render at
layer
what layer is this block renderer in
renderstate
the render state this block renderer uses
Models:
There are also several functions that make it easy to translate
json models to block renderer.
makeVertexTemplatesFromJsonModel:
creates a template from information that is in json models
rotateTemplate:
rotate templates. This is equivalent to the rotation in block states files.
makeVerticesFromModel:
creates function based on templates to be used for makeVertices function in block renderer.
Helper functions:
self.MaterialIndices(blockMaterial):
Given blockMaterial(parameter in makeVertices) it return a list of
(x, z, y) indices of blocks in the chunk that are of this block renderer
material(blocktypes).
self.makeTemplate(direction, blockIndices):
get a vertex array filled with default values for face `direction`
and for the block relating to `blockIndices`
makeVertexTemplates(xmin=0, ymin=0, zmin=0, xmax=1, ymax=1, zmax=1):
returns a numpy array with dimensions (6, 4, 6) filled with values to create
a vertex array for a cube.
For Entities:
renderer's for entities are similar to blocks but:
- they extend EntityRendererGeneric class
- they are added to the list in calcFacesForChunkRenderer method
- makeChunkVertices(chunk) where chunk is a chunk object
is called rather than makeVertices
there is also a helper method _computeVertices(positions, colors, offset, chunkPosition):
parameters:
positions
locations of entity
colors
colors of entity boxes
offset
whether to offset the box
chunkPosition
chunk position of the chunk
creates a vertex array that draws entity boxes
"""
from collections import defaultdict, deque
from datetime import datetime, timedelta
from depths import DepthOffset
from glutils import gl, Texture
from albow.resource import _2478aq_heot
import logging
import numpy
from OpenGL import GL
import pymclevel
from pymclevel.materials import alphaMaterials, pocketMaterials
import sys
from config import config
# import time
def get_materials():
    """Refresh the module-level material palettes from pymclevel.

    Fix: the original assignments only created function-local names, so
    calling this was a no-op. Declaring the names global makes the
    rebinding visible to the rest of this module, which reads the
    module-level `alphaMaterials` / `pocketMaterials` imported at the top.
    """
    global alphaMaterials, pocketMaterials
    alphaMaterials = pymclevel.materials.alphaMaterials
    pocketMaterials = pymclevel.materials.pocketMaterials
def chunkMarkers(chunkSet):
    """ Returns a mapping { size: [position, ...] } for different powers of 2
    as size.
    """
    # Greedily merges aligned 2x2 groups of chunk coordinates into squares
    # of the next power-of-two size, repeating until no full group remains.
    sizedChunks = defaultdict(list)
    size = 1

    def all4(cx, cz):
        # the four corners of the size-aligned square containing (cx, cz)
        cx &= ~size
        cz &= ~size
        return [(cx, cz), (cx + size, cz), (cx + size, cz + size), (cx, cz + size)]

    # lastsize = 6
    size = 1
    while True:
        nextsize = size << 1
        chunkSet = set(chunkSet)
        while len(chunkSet):
            # peek at an arbitrary member without removing it
            cx, cz = chunkSet.pop()
            chunkSet.add((cx, cz))

            o = all4(cx, cz)
            others = set(o).intersection(chunkSet)
            if len(others) == 4:
                # complete aligned square: promote its origin to the next size
                sizedChunks[nextsize].append(o[0])  # Possibly cache append?
                for c in others:
                    chunkSet.discard(c)
            else:
                # incomplete square: its members stay at the current size
                for c in others:
                    sizedChunks[size].append(c)  # Possibly cache append?
                    chunkSet.discard(c)

        if len(sizedChunks[nextsize]):
            # promoted squares become the work set for the next round
            # (note: this __getitem__ on the defaultdict also creates an
            # empty entry for the final, never-filled size)
            chunkSet = set(sizedChunks[nextsize])
            sizedChunks[nextsize] = []
            size <<= 1
        else:
            break

    return sizedChunks
class ChunkRenderer(object):
    # Renders one chunk at one detail level: owns the chunk's BlockRenderers
    # and turns their vertex arrays into cached GL display lists, grouped by
    # renderstate (see the module docstring).
    maxlod = 2
    minlod = 0

    def __init__(self, renderer, chunkPosition):
        self.renderer = renderer           # owning MCRenderer
        self.blockRenderers = []           # per-material BlockRenderer list
        self.detailLevel = 0               # current LOD (0 = full detail)
        self.invalidLayers = set(Layer.AllLayers)  # layers needing recalculation
        self.chunkPosition = chunkPosition
        self.bufferSize = 0                # total vertex-buffer bytes, for stats
        self.renderstateLists = None       # {renderstate: [display-list id, ...]}

    @property
    def visibleLayers(self):
        return self.renderer.visibleLayers

    def forgetDisplayLists(self, states=None):
        # Free the GL display lists for the given renderstates (or all of
        # them) and flag this chunk for redisplay.
        if self.renderstateLists is not None:
            # print "Discarded {0}, gained {1} bytes".format(self.chunkPosition,self.bufferSize)
            for k in states or self.renderstateLists.iterkeys():
                a = self.renderstateLists.get(k, [])
                # print a
                for i in a:
                    gl.glDeleteLists(i, 1)

            if states:
                # NOTE(review): `states` is an iterable of keys but is used
                # here as a single dict key -- looks like a latent bug in the
                # partial-invalidate path; confirm against callers.
                del self.renderstateLists[states]
            else:
                self.renderstateLists = None

            self.needsRedisplay = True
            self.renderer.discardMasterList()

    def debugDraw(self):
        # Immediate-mode draw of every block renderer, bypassing display lists.
        for blockRenderer in self.blockRenderers:
            blockRenderer.drawArrays(self.chunkPosition, False)

    def makeDisplayLists(self):
        # Rebuild the display lists for every visible block renderer at the
        # current detail level, bucketed by renderstate.
        if not self.needsRedisplay:
            return
        self.forgetDisplayLists()
        if not self.blockRenderers:
            return

        lists = defaultdict(list)

        showRedraw = self.renderer.showRedraw

        # when highlighting redraws, skip the color array so redrawn chunks
        # stand out; otherwise enable per-vertex color
        if not (showRedraw and self.needsBlockRedraw):
            GL.glEnableClientState(GL.GL_COLOR_ARRAY)

        renderers = self.blockRenderers

        for blockRenderer in renderers:
            # skip renderers that don't apply at this LOD or whose layer is hidden
            if self.detailLevel not in blockRenderer.detailLevels:
                continue
            if blockRenderer.layer not in self.visibleLayers:
                continue

            l = blockRenderer.makeArrayList(self.chunkPosition, self.needsBlockRedraw and showRedraw)
            lists[blockRenderer.renderstate].append(l)

        if not (showRedraw and self.needsBlockRedraw):
            GL.glDisableClientState(GL.GL_COLOR_ARRAY)

        self.needsRedisplay = False
        self.renderstateLists = lists

    @property
    def needsBlockRedraw(self):
        return Layer.Blocks in self.invalidLayers

    def invalidate(self, layers=None):
        # Mark the given layers (default: all) stale. Block renderers whose
        # layer was invalidated are dropped, except the Blocks layer's own
        # renderers which are kept until recalculated.
        if layers is None:
            layers = Layer.AllLayers

        if layers:
            layers = set(layers)
            self.invalidLayers.update(layers)
            blockRenderers = [br for br in self.blockRenderers
                              if br.layer is Layer.Blocks
                              or br.layer not in layers]
            if len(blockRenderers) < len(self.blockRenderers):
                self.forgetDisplayLists()
            self.blockRenderers = blockRenderers

            if self.renderer.showRedraw and Layer.Blocks in layers:
                self.needsRedisplay = True

    def calcFaces(self):
        # Generator: recompute this chunk's vertex data, yielding between
        # work units so the caller can spread the cost over frames.
        minlod = self.renderer.detailLevelForChunk(self.chunkPosition)

        minlod = min(minlod, self.maxlod)
        if self.detailLevel != minlod:
            # LOD changed: throw away cached lists and re-mark Blocks stale
            self.forgetDisplayLists()
            self.detailLevel = minlod
            self.invalidLayers.add(Layer.Blocks)

            # discard the standard detail renderers
            if minlod > 0:
                blockRenderers = []
                append = blockRenderers.append
                for br in self.blockRenderers:
                    if br.detailLevels != (0,):
                        append(br)
                self.blockRenderers = blockRenderers

        if self.renderer.chunkCalculator:
            for _ in self.renderer.chunkCalculator.calcFacesForChunkRenderer(self):
                yield

        else:
            # NOTE(review): raise StopIteration inside a generator is a
            # Python 2 idiom; under Python 3 (PEP 479) it would become a
            # RuntimeError. Fine here since this codebase targets Python 2.
            raise StopIteration

    def vertexArraysDone(self):
        # Called once all block renderers have fresh vertex arrays: tally
        # buffer sizes, apply global alpha, and schedule redisplay.
        bufferSize = 0
        for br in self.blockRenderers:
            bufferSize += br.bufferSize()
            if self.renderer.alpha != 0xff:
                br.setAlpha(self.renderer.alpha)
        self.bufferSize = bufferSize
        self.invalidLayers = set()
        self.needsRedisplay = True
        self.renderer.invalidateMasterList()

    # class-level default; instances flip this per-frame
    needsRedisplay = False

    @property
    def done(self):
        # A chunk is done when no layer remains to recalculate.
        return not self.invalidLayers
# Index helpers into the 24-byte vertex records described in the module
# docstring: five float32s (x, y, z, s, t) followed by a packed RGBA value.
_XYZ = numpy.s_[..., 0:3]    # position floats
_ST = numpy.s_[..., 3:5]     # texture-coordinate floats
_XYZST = numpy.s_[..., :5]   # position + texcoords together
# The colour slices index the same array after .view('uint8'):
# 5 floats * 4 bytes = byte offset 20 for R, G, B, A.
_RGBA = numpy.s_[..., 20:24]
_RGB = numpy.s_[..., 20:23]
_A = numpy.s_[..., 23]
def makeVertexTemplatesFromJsonModel(fromVertices, toVertices, uv):
    """
    This is similar to makeVertexTemplates but is a more convenient
    when reading off of the json model files.
    :param fromVertices: from corner of the cuboid, in 0..16 model units
    :param toVertices: to corner of the cuboid, in 0..16 model units
    :param uv: keywords uv map; one entry per face name
               ("east"/"west"/"up"/"down"/"south"/"north"), each a
               4-sequence of texture coordinates -- presumably
               (u1, v1, u2, v2); confirm against the json model spec
    :return: template for a cube
    """
    # model units are 1/16 of a block
    xmin = fromVertices[0] / 16.
    xmax = toVertices[0] / 16.
    ymin = fromVertices[1] / 16.
    ymax = toVertices[1] / 16.
    zmin = fromVertices[2] / 16.
    zmax = toVertices[2] / 16.
    # Each row is one vertex: (x, y, z, u, v, shade). The sixth column is a
    # per-face brightness byte later copied into the RGB bytes of the vertex
    # array (see makeVerticesFromModel), giving simple directional shading.
    return numpy.array([
        # FaceXIncreasing:
        [[xmax, ymin, zmax, uv["east"][0], uv["east"][3], 0x0b],
         [xmax, ymin, zmin, uv["east"][2], uv["east"][3], 0x0b],
         [xmax, ymax, zmin, uv["east"][2], uv["east"][1], 0x0b],
         [xmax, ymax, zmax, uv["east"][0], uv["east"][1], 0x0b],
         ],
        # FaceXDecreasing:
        [[xmin, ymin, zmin, uv["west"][0], uv["west"][3], 0x0b],
         [xmin, ymin, zmax, uv["west"][2], uv["west"][3], 0x0b],
         [xmin, ymax, zmax, uv["west"][2], uv["west"][1], 0x0b],
         [xmin, ymax, zmin, uv["west"][0], uv["west"][1], 0x0b]],
        # FaceYIncreasing:
        [[xmin, ymax, zmin, uv["up"][0], uv["up"][1], 0x11],  # ne
         [xmin, ymax, zmax, uv["up"][0], uv["up"][3], 0x11],  # nw
         [xmax, ymax, zmax, uv["up"][2], uv["up"][3], 0x11],  # sw
         [xmax, ymax, zmin, uv["up"][2], uv["up"][1], 0x11]],  # se
        # FaceYDecreasing:
        [[xmin, ymin, zmin, uv["down"][0], uv["down"][3], 0x08],
         [xmax, ymin, zmin, uv["down"][2], uv["down"][3], 0x08],
         [xmax, ymin, zmax, uv["down"][2], uv["down"][1], 0x08],
         [xmin, ymin, zmax, uv["down"][0], uv["down"][1], 0x08]],
        # FaceZIncreasing:
        [[xmin, ymin, zmax, uv["south"][0], uv["south"][3], 0x0d],
         [xmax, ymin, zmax, uv["south"][2], uv["south"][3], 0x0d],
         [xmax, ymax, zmax, uv["south"][2], uv["south"][1], 0x0d],
         [xmin, ymax, zmax, uv["south"][0], uv["south"][1], 0x0d]],
        # FaceZDecreasing:
        [[xmax, ymin, zmin, uv["north"][0], uv["north"][3], 0x0d],
         [xmin, ymin, zmin, uv["north"][2], uv["north"][3], 0x0d],
         [xmin, ymax, zmin, uv["north"][2], uv["north"][1], 0x0d],
         [xmax, ymax, zmin, uv["north"][0], uv["north"][1], 0x0d],
         ],
    ])
def rotateTemplate(template, x=0, y=0):
    """
    Rotate template around the x-axis and then around the y-axis.
    Both angles must be multiples of 90.
    TODO: Add ability for multiples of 45

    Fix: uses `range` instead of the Python-2-only `xrange` (removed in
    Python 3); for these small loop counts the two are equivalent, so
    Python 2 behavior is unchanged.

    :param template: vertex template array with rows (..., x, y, z, s, t, shade)
    :param x: degrees to rotate about the x-axis (multiple of 90)
    :param y: degrees to rotate about the y-axis (multiple of 90)
    :return: a rotated copy; the input array is not modified
    """
    template = template.copy()
    for _ in range(0, x, 90):
        # y -> -z and z -> y; the -/*/+ 0.5 steps reflect about the
        # cube's center plane so the box stays inside the unit cell.
        template[..., (1, 2)] = template[..., (2, 1)]
        template[..., 2] -= 0.5
        template[..., 2] *= -1
        template[..., 2] += 0.5
    for _ in range(0, y, 90):
        # z -> -x and x -> z
        template[..., (0, 2)] = template[..., (2, 0)]
        template[..., 0] -= 0.5
        template[..., 0] *= -1
        template[..., 0] += 0.5
    return template
def makeVerticesFromModel(templates, dataMask=0):
    """
    Returns a function that creates vertex arrays.
    This produces vertex arrays based on the passed
    templates. This doesn't cull any faces based on
    if they are exposed.
    :param templates: list of templates to draw
    :param dataMask: mask to mask the data
    """
    if isinstance(templates, list):
        templates = numpy.array(templates)
    # A single (6 faces, 4 verts, 6 floats) cube becomes a one-element list.
    if templates.shape == (6, 4, 6):
        templates = numpy.array([templates])
    # Normalize to 5 dims: (element, data value, face, vertex, component).
    if len(templates.shape) == 4:
        templates = templates[numpy.newaxis, ...]
    elements = templates.shape[0]

    # Closure used as a BlockRenderer.makeVertices generator; `self` is the
    # renderer instance it gets bound to.
    def makeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        mask = self.getMaterialIndices(blockMaterials)
        blockIndices = mask.nonzero()
        yield
        # Block data selects which template variant each cell uses.
        data = blockData[mask]
        data &= dataMask
        self.vertexArrays = []
        append = self.vertexArrays.append
        for i in xrange(elements):
            vertexArray = numpy.zeros((len(blockIndices[0]), 6, 4, 6), dtype='float32')
            for indicies in xrange(3):
                dimension = (0, 2, 1)[indicies]
                vertexArray[..., indicies] = blockIndices[dimension][:, numpy.newaxis,
                                                                     numpy.newaxis]  # xxx swap z with y using ^
            vertexArray[..., 0:5] += templates[i, data][..., 0:5]
            # Offset template texcoords by each block's texture-atlas position.
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices] & 15)[..., numpy.newaxis, :]
            # Template component 5 is the face shade; scale it by block light.
            vertexArray.view('uint8')[_RGB] = templates[i, data][..., 5][..., numpy.newaxis]
            vertexArray.view('uint8')[_A] = 0xFF
            vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][blockIndices][
                ..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
            vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
            yield
            append(vertexArray)

    return makeVertices
def makeVertexTemplates(xmin=0, ymin=0, zmin=0, xmax=1, ymax=1, zmax=1):
    """Return the (6, 4, 6) face templates for the axis-aligned box
    [xmin..xmax, ymin..ymax, zmin..zmax].

    Faces are ordered FaceXIncreasing, FaceXDecreasing, FaceYIncreasing,
    FaceYDecreasing, FaceZIncreasing, FaceZDecreasing; each vertex row is
    (x, y, z, s, t, shade) with texture coords on a 16-units-per-block
    scale (t measured downward from 16).
    """
    # Texture-space coordinates for each box extent.
    sxmin, sxmax = xmin * 16, xmax * 16
    szmin, szmax = zmin * 16, zmax * 16
    tymin, tymax = 16 - (ymin * 16), 16 - (ymax * 16)
    tzmin, tzmax = 16 - (zmin * 16), 16 - (zmax * 16)
    faceXIncreasing = [
        [xmax, ymin, zmax, szmin, tymin, 0x0b],
        [xmax, ymin, zmin, szmax, tymin, 0x0b],
        [xmax, ymax, zmin, szmax, tymax, 0x0b],
        [xmax, ymax, zmax, szmin, tymax, 0x0b],
    ]
    faceXDecreasing = [
        [xmin, ymin, zmin, szmin, tymin, 0x0b],
        [xmin, ymin, zmax, szmax, tymin, 0x0b],
        [xmin, ymax, zmax, szmax, tymax, 0x0b],
        [xmin, ymax, zmin, szmin, tymax, 0x0b],
    ]
    faceYIncreasing = [
        [xmin, ymax, zmin, sxmin, tzmax, 0x11],  # ne
        [xmin, ymax, zmax, sxmin, tzmin, 0x11],  # nw
        [xmax, ymax, zmax, sxmax, tzmin, 0x11],  # sw
        [xmax, ymax, zmin, sxmax, tzmax, 0x11],  # se
    ]
    faceYDecreasing = [
        [xmin, ymin, zmin, sxmin, tzmax, 0x08],
        [xmax, ymin, zmin, sxmax, tzmax, 0x08],
        [xmax, ymin, zmax, sxmax, tzmin, 0x08],
        [xmin, ymin, zmax, sxmin, tzmin, 0x08],
    ]
    faceZIncreasing = [
        [xmin, ymin, zmax, sxmin, tymin, 0x0d],
        [xmax, ymin, zmax, sxmax, tymin, 0x0d],
        [xmax, ymax, zmax, sxmax, tymax, 0x0d],
        [xmin, ymax, zmax, sxmin, tymax, 0x0d],
    ]
    faceZDecreasing = [
        [xmax, ymin, zmin, sxmin, tymin, 0x0d],
        [xmin, ymin, zmin, sxmax, tymin, 0x0d],
        [xmin, ymax, zmin, sxmax, tymax, 0x0d],
        [xmax, ymax, zmin, sxmin, tymax, 0x0d],
    ]
    return numpy.array([faceXIncreasing, faceXDecreasing,
                        faceYIncreasing, faceYDecreasing,
                        faceZIncreasing, faceZDecreasing])
# Bytes per interleaved vertex: x, y, z, s, t, packed RGBA — six 4-byte float32s.
elementByteLength = 24
def createPrecomputedVertices():
    """Precompute, for each face direction, a (16, 16, height, 4, 6) float32
    vertex array covering every cell of a 16x16x<height> subchunk, with
    positions, texture coords, and packed shade color taken from
    faceVertexTemplates."""
    height = 16
    precomputedVertices = [numpy.zeros(shape=(16, 16, height, 4, 6),  # x,y,z,s,t,rg, ba
                                       dtype='float32') for d in faceVertexTemplates]
    # Broadcastable per-axis cell coordinates (note array axes are x, z, y).
    xArray = numpy.arange(16)[:, numpy.newaxis, numpy.newaxis, numpy.newaxis]
    zArray = numpy.arange(16)[numpy.newaxis, :, numpy.newaxis, numpy.newaxis]
    yArray = numpy.arange(height)[numpy.newaxis, numpy.newaxis, :, numpy.newaxis]
    for dir in xrange(len(faceVertexTemplates)):
        precomputedVertices[dir][_XYZ][..., 0] = xArray
        precomputedVertices[dir][_XYZ][..., 1] = yArray
        precomputedVertices[dir][_XYZ][..., 2] = zArray
        precomputedVertices[dir][_XYZ] += faceVertexTemplates[dir][..., 0:3]  # xyz
        precomputedVertices[dir][_ST] = faceVertexTemplates[dir][..., 3:5]  # s
        precomputedVertices[dir].view('uint8')[_RGB] = faceVertexTemplates[dir][..., 5, numpy.newaxis]
        precomputedVertices[dir].view('uint8')[_A] = 0xff
    return precomputedVertices


# Unit-cube face templates shared by the renderers below.
faceVertexTemplates = makeVertexTemplates()
class ChunkCalculator(object):
    """Builds renderable vertex data for chunks, dispatching each block to a
    BlockRenderer subclass by material class."""
    cachedTemplate = None
    cachedTemplateHeight = 0
    # 16x16x16 volume of full (15) light, used when a chunk has no light arrays.
    whiteLight = numpy.array([[[15] * 16] * 16] * 16, numpy.uint8)
    precomputedVertices = createPrecomputedVertices()
    def __init__(self, level):
        # Make sure the materials module is initialised before reading block IDs.
        if not hasattr(alphaMaterials, 'Stone'):
            get_materials()
        self.stoneid = stoneid = alphaMaterials.Stone.ID
        # Common surface blocks render as stone so ore stands out in
        # hidden-ore mode (see calcHighDetailFaces / hiddenOreMaterials).
        self.hiddenOreMaterials[alphaMaterials.Dirt.ID] = stoneid
        self.hiddenOreMaterials[alphaMaterials.Grass.ID] = stoneid
        self.hiddenOreMaterials[alphaMaterials.Sand.ID] = stoneid
        self.hiddenOreMaterials[alphaMaterials.Gravel.ID] = stoneid
        self.hiddenOreMaterials[alphaMaterials.Netherrack.ID] = stoneid
        self.level = level
        self.makeRenderstates(level.materials)
        # del xArray, zArray, yArray
        # Empty array with the same rank/dtype as the precomputed vertices.
        self.nullVertices = numpy.zeros((0,) * len(self.precomputedVertices[0].shape),
                                        dtype=self.precomputedVertices[0].dtype)
        config.settings.fastLeaves.addObserver(self)
        config.settings.roughGraphics.addObserver(self)
    class renderstatePlain(object):
        """GL state: nothing to set up or tear down."""
        @classmethod
        def bind(cls):
            pass

        @classmethod
        def release(cls):
            pass

    class renderstateVines(object):
        """GL state: two-sided (no culling), alpha-tested."""
        @classmethod
        def bind(cls):
            GL.glDisable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_ALPHA_TEST)

        @classmethod
        def release(cls):
            GL.glEnable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_ALPHA_TEST)

    class renderstateLowDetail(object):
        """GL state: two-sided and untextured."""
        @classmethod
        def bind(cls):
            GL.glDisable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_TEXTURE_2D)

        @classmethod
        def release(cls):
            GL.glEnable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_TEXTURE_2D)

    class renderstateAlphaTest(object):
        """GL state: alpha-tested only (default for BlockRenderer)."""
        @classmethod
        def bind(cls):
            GL.glEnable(GL.GL_ALPHA_TEST)

        @classmethod
        def release(cls):
            GL.glDisable(GL.GL_ALPHA_TEST)

    class _renderstateAlphaBlend(object):
        """Shared base: alpha blending enabled."""
        @classmethod
        def bind(cls):
            GL.glEnable(GL.GL_BLEND)

        @classmethod
        def release(cls):
            GL.glDisable(GL.GL_BLEND)

    class renderstateWater(_renderstateAlphaBlend):
        pass

    class renderstateIce(_renderstateAlphaBlend):
        pass

    class renderstateEntity(object):
        """GL state: untextured, blended, depth test disabled."""
        @classmethod
        def bind(cls):
            GL.glDisable(GL.GL_DEPTH_TEST)
            # GL.glDisable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glEnable(GL.GL_BLEND)

        @classmethod
        def release(cls):
            GL.glEnable(GL.GL_DEPTH_TEST)
            # GL.glEnable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glDisable(GL.GL_BLEND)

    # Every render state, bound/released around the renderers that use it.
    renderstates = (
        renderstatePlain,
        renderstateVines,
        renderstateLowDetail,
        renderstateAlphaTest,
        renderstateIce,
        renderstateWater,
        renderstateEntity,
    )
    def makeRenderstates(self, materials):
        """Build the block-renderer class list for this material set and
        derive the per-block-ID material index maps used to classify blocks.

        materialMap maps block ID -> renderer material index (0 = air,
        1 = generic); exposedMaterialMap additionally gives transparent
        blocks their own indices so faces behind them stay visible.
        """
        self.blockRendererClasses = [
            GenericBlockRenderer,
            LeafBlockRenderer,
            PlantBlockRenderer,
            TorchBlockRenderer,
            WaterBlockRenderer,
            SlabBlockRenderer,
        ]
        # The extended renderer set only applies to these game flavors.
        if materials.name in ("Alpha", "Pocket"):
            self.blockRendererClasses += [
                RailBlockRenderer,
                LadderBlockRenderer,
                SnowBlockRenderer,
                CarpetBlockRenderer,
                CactusBlockRenderer,
                PaneBlockRenderer,
                CakeBlockRenderer,
                DaylightBlockRenderer,
                StandingSignRenderer,
                WallSignBlockRenderer,
                LeverBlockRenderer,
                BedBlockRenderer,
                EnchantingBlockRenderer,
                RedstoneBlockRenderer,
                IceBlockRenderer,
                DoorRenderer,
                ButtonRenderer,
                TrapDoorRenderer,
                FenceBlockRenderer,
                FenceGateBlockRenderer,
                StairBlockRenderer,
                RepeaterBlockRenderer,
                VineBlockRenderer,
                PlateBlockRenderer,
                EndRodRenderer,
                # button, floor plate, door -> 1-cube features
                # lever, sign, wall sign, stairs -> 2-cube features
                # fence
                # portal
            ]
        self.materialMap = materialMap = numpy.zeros((pymclevel.materials.id_limit,), 'uint8')
        materialMap[1:] = 1  # generic blocks
        materialCount = 2
        for br in self.blockRendererClasses[1:]:  # skip generic blocks
            # materialMap[br.getBlocktypes(materials)] = materialCount
            materialMap[br(self).getBlocktypes(materials)] = materialCount
            br.materialIndex = materialCount
            materialCount += 1
        self.exposedMaterialMap = numpy.array(materialMap)
        self.addTransparentMaterials(self.exposedMaterialMap, materialCount)
def addTransparentMaterials(self, mats, materialCount):
logging.debug("renderer::ChunkCalculator: Dynamically adding transparent materials.")
for b in self.level.materials:
yaml = getattr(b, 'yaml', None)
if yaml is not None and yaml.get('opacity', 1) < 1:
logging.debug("Adding '%s'" % b)
mats[b.ID] = materialCount
materialCount += 1
logging.debug("renderer::ChunkCalculator: Transparent materials added.")
# don't show boundaries between dirt,grass,sand,gravel,or stone.
# This hiddenOreMaterial definition shall be delayed after the level is loaded, in order to get the exact ones from the game versionned data.
hiddenOreMaterials = numpy.arange(pymclevel.materials.id_limit, dtype='uint16')
roughMaterials = numpy.ones((pymclevel.materials.id_limit,), dtype='uint8')
roughMaterials[0] = 0
def calcFacesForChunkRenderer(self, cr):
if not cr.invalidLayers:
return
lod = cr.detailLevel
cx, cz = cr.chunkPosition
level = cr.renderer.level
try:
chunk = level.getChunk(cx, cz)
except Exception as e:
if "Session lock lost" in e.message:
yield
return
logging.warn(u"Error reading chunk: %s", e)
traceback.print_exc()
yield
return
yield
brs = []
append = brs.append
classes = (
TileEntityRenderer,
MonsterRenderer,
ItemRenderer,
TileTicksRenderer,
TerrainPopulatedRenderer,
ChunkBorderRenderer,
LowDetailBlockRenderer,
OverheadBlockRenderer,
)
existingBlockRenderers = dict(((type(b), b) for b in cr.blockRenderers))
for blockRendererClass in classes:
if cr.detailLevel not in blockRendererClass.detailLevels:
continue
if blockRendererClass.layer not in cr.visibleLayers:
continue
if blockRendererClass.layer not in cr.invalidLayers:
if blockRendererClass in existingBlockRenderers:
append(existingBlockRenderers[blockRendererClass])
continue
br = blockRendererClass(self)
br.detailLevel = cr.detailLevel
for _ in br.makeChunkVertices(chunk):
yield
append(br)
blockRenderers = []
# Recalculate high detail blocks if needed, otherwise retain the high detail renderers
if lod == 0 and Layer.Blocks in cr.invalidLayers:
for _ in self.calcHighDetailFaces(cr, blockRenderers):
yield
else:
blockRenderers.extend(br for br in cr.blockRenderers if not isinstance(br, classes))
# Add the layer renderers
blockRenderers.extend(brs)
cr.blockRenderers = blockRenderers
cr.vertexArraysDone()
raise StopIteration
    @staticmethod
    def getNeighboringChunks(chunk):
        """Return {face: chunk} for the four horizontal neighbors of `chunk`,
        substituting a ZeroChunk for any neighbor that is absent or
        unreadable."""
        cx, cz = chunk.chunkPosition
        level = chunk.world
        neighboringChunks = {}
        for dir, dx, dz in ((pymclevel.faces.FaceXDecreasing, -1, 0),
                            (pymclevel.faces.FaceXIncreasing, 1, 0),
                            (pymclevel.faces.FaceZDecreasing, 0, -1),
                            (pymclevel.faces.FaceZIncreasing, 0, 1)):
            if not level.containsChunk(cx + dx, cz + dz):
                neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
            else:
                try:
                    neighboringChunks[dir] = level.getChunk(cx + dx, cz + dz)
                except (EnvironmentError, pymclevel.mclevelbase.ChunkNotPresent, pymclevel.mclevelbase.ChunkMalformed):
                    # Treat unreadable neighbors the same as missing ones.
                    neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
        return neighboringChunks
@staticmethod
def getAreaBlocks(chunk, neighboringChunks):
chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
areaBlocks = numpy.zeros((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint16)
areaBlocks[1:-1, 1:-1, 1:-1] = chunk.Blocks
zeros = numpy.zeros((16, 16, 128), dtype=areaBlocks.dtype)
nb_fxd = neighboringChunks[pymclevel.faces.FaceXDecreasing].Blocks
if nb_fxd.shape[2] == chunkHeight / 2:
nb_fxd = numpy.concatenate((nb_fxd, zeros), axis=2)
areaBlocks[:1, 1:-1, 1:-1] = nb_fxd[-1:, :chunkLength,
:chunkHeight]
nb_fxi = neighboringChunks[pymclevel.faces.FaceXIncreasing].Blocks
if nb_fxi.shape[2] == chunkHeight / 2:
nb_fxi = numpy.concatenate((nb_fxi, zeros), axis=2)
areaBlocks[-1:, 1:-1, 1:-1] = nb_fxi[:1, :chunkLength,
:chunkHeight]
nb_fzd = neighboringChunks[pymclevel.faces.FaceZDecreasing].Blocks
if nb_fzd.shape[2] == chunkHeight / 2:
nb_fzd = numpy.concatenate((nb_fzd, zeros), axis=2)
areaBlocks[1:-1, :1, 1:-1] = nb_fzd[:chunkWidth, -1:,
:chunkHeight]
nb_fzi = neighboringChunks[pymclevel.faces.FaceZIncreasing].Blocks
if nb_fzi.shape[2] == chunkHeight / 2:
nb_fzi = numpy.concatenate((nb_fzi, zeros), axis=2)
areaBlocks[1:-1, -1:, 1:-1] = nb_fzi[:chunkWidth, :1,
:chunkHeight]
return areaBlocks
@staticmethod
def getFacingBlockIndices(areaBlocks, areaBlockMats):
facingBlockIndices = [None] * 6
exposedFacesX = (areaBlockMats[:-1, 1:-1, 1:-1] != areaBlockMats[1:, 1:-1, 1:-1])
facingBlockIndices[pymclevel.faces.FaceXDecreasing] = exposedFacesX[:-1]
facingBlockIndices[pymclevel.faces.FaceXIncreasing] = exposedFacesX[1:]
exposedFacesZ = (areaBlockMats[1:-1, :-1, 1:-1] != areaBlockMats[1:-1, 1:, 1:-1])
facingBlockIndices[pymclevel.faces.FaceZDecreasing] = exposedFacesZ[:, :-1]
facingBlockIndices[pymclevel.faces.FaceZIncreasing] = exposedFacesZ[:, 1:]
exposedFacesY = (areaBlockMats[1:-1, 1:-1, :-1] != areaBlockMats[1:-1, 1:-1, 1:])
facingBlockIndices[pymclevel.faces.FaceYDecreasing] = exposedFacesY[:, :, :-1]
facingBlockIndices[pymclevel.faces.FaceYIncreasing] = exposedFacesY[:, :, 1:]
return facingBlockIndices
def getAreaBlockLights(self, chunk, neighboringChunks):
chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
lights = chunk.BlockLight
skyLight = chunk.SkyLight
finalLight = self.whiteLight
if lights is not None:
finalLight = lights
if skyLight is not None:
finalLight = numpy.maximum(skyLight, lights)
areaBlockLights = numpy.ones((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint8)
areaBlockLights[:] = 15
areaBlockLights[1:-1, 1:-1, 1:-1] = finalLight
zeros = numpy.zeros((16, 16, 128), dtype=areaBlockLights.dtype)
skyLight, blockLight = neighboringChunks[pymclevel.faces.FaceXDecreasing].SkyLight, neighboringChunks[pymclevel.faces.FaceXDecreasing].BlockLight
if skyLight.shape[2] == chunkHeight / 2:
skyLight = numpy.concatenate((skyLight, zeros), axis=2)
blockLight = numpy.concatenate((blockLight, zeros), axis=2)
numpy.maximum(skyLight[-1:, :chunkLength, :chunkHeight],
blockLight[-1:, :chunkLength, :chunkHeight],
areaBlockLights[0:1, 1:-1, 1:-1])
skyLight, blockLight = neighboringChunks[pymclevel.faces.FaceXIncreasing].SkyLight, neighboringChunks[pymclevel.faces.FaceXIncreasing].BlockLight
if skyLight.shape[2] == chunkHeight / 2:
skyLight = numpy.concatenate((skyLight, zeros), axis=2)
blockLight = numpy.concatenate((blockLight, zeros), axis=2)
numpy.maximum(skyLight[:1, :chunkLength, :chunkHeight],
blockLight[:1, :chunkLength, :chunkHeight],
areaBlockLights[-1:, 1:-1, 1:-1])
skyLight, blockLight = neighboringChunks[pymclevel.faces.FaceZDecreasing].SkyLight, neighboringChunks[pymclevel.faces.FaceZDecreasing].BlockLight
if skyLight.shape[2] == chunkHeight / 2:
skyLight = numpy.concatenate((skyLight, zeros), axis=2)
blockLight = numpy.concatenate((blockLight, zeros), axis=2)
numpy.maximum(skyLight[:chunkWidth, -1:, :chunkHeight],
blockLight[:chunkWidth, -1:, :chunkHeight],
areaBlockLights[1:-1, 0:1, 1:-1])
skyLight, blockLight = neighboringChunks[pymclevel.faces.FaceZIncreasing].SkyLight, neighboringChunks[pymclevel.faces.FaceZIncreasing].BlockLight
if skyLight.shape[2] == chunkHeight / 2:
skyLight = numpy.concatenate((skyLight, zeros), axis=2)
blockLight = numpy.concatenate((blockLight, zeros), axis=2)
numpy.maximum(skyLight[:chunkWidth, :1, :chunkHeight],
blockLight[:chunkWidth, :1, :chunkHeight],
areaBlockLights[1:-1, -1:, 1:-1])
fxd = neighboringChunks[pymclevel.faces.FaceXDecreasing]
fxi = neighboringChunks[pymclevel.faces.FaceXIncreasing]
fzd = neighboringChunks[pymclevel.faces.FaceZDecreasing]
fzi = neighboringChunks[pymclevel.faces.FaceZIncreasing]
fxd_skyLight = fxd.SkyLight
fxi_skyLight = fxi.SkyLight
fzd_skyLight = fzd.SkyLight
fzi_skyLight = fzi.SkyLight
fxd_blockLight = fxd.BlockLight
fxi_blockLight = fxi.BlockLight
fzd_blockLight = fzd.BlockLight
fzi_blockLight = fzi.BlockLight
if fxd_skyLight.shape[2] == chunkHeight / 2:
fxd_skyLight = numpy.concatenate((fxd_skyLight, zeros), axis=2)
fxd_blockLight = numpy.concatenate((fxd_blockLight, zeros), axis=2)
if fxi_skyLight.shape[2] == chunkHeight / 2:
fxi_skyLight = numpy.concatenate((fxi_skyLight, zeros), axis=2)
fxi_blockLight = numpy.concatenate((fxi_blockLight, zeros), axis=2)
if fzd_skyLight.shape[2] == chunkHeight / 2:
fzd_skyLight = numpy.concatenate((fzd_skyLight, zeros), axis=2)
fzd_blockLight = numpy.concatenate((fzd_blockLight, zeros), axis=2)
if fzi_skyLight.shape[2] == chunkHeight / 2:
fzi_skyLight = numpy.concatenate((fzi_skyLight, zeros), axis=2)
fzi_blockLight = numpy.concatenate((fzi_blockLight, zeros), axis=2)
numpy.maximum(fxd_skyLight[-1:, :chunkLength, :chunkHeight],
fxd_blockLight[-1:, :chunkLength, :chunkHeight],
areaBlockLights[0:1, 1:-1, 1:-1])
numpy.maximum(fxi_skyLight[:1, :chunkLength, :chunkHeight],
fxi_blockLight[:1, :chunkLength, :chunkHeight],
areaBlockLights[-1:, 1:-1, 1:-1])
numpy.maximum(fzd_skyLight[:chunkWidth, -1:, :chunkHeight],
fzd_blockLight[:chunkWidth, -1:, :chunkHeight],
areaBlockLights[1:-1, 0:1, 1:-1])
numpy.maximum(fzi_skyLight[:chunkWidth, :1, :chunkHeight],
fzi_blockLight[:chunkWidth, :1, :chunkHeight],
areaBlockLights[1:-1, -1:, 1:-1])
minimumLight = 4
numpy.clip(areaBlockLights, minimumLight, 16, areaBlockLights)
return areaBlockLights
    def calcHighDetailFaces(self, cr, blockRenderers):
        """ calculate the geometry for a chunk renderer from its blockMats, data,
        and lighting array. fills in the cr's blockRenderers with verts
        for each block facing and material"""
        # chunkBlocks and chunkLights shall be indexed [x,z,y] to follow infdev's convention
        cx, cz = cr.chunkPosition
        level = cr.renderer.level
        chunk = level.getChunk(cx, cz)
        # if isinstance(chunk, pymclevel.level.FakeChunk):
        # return
        neighboringChunks = self.getNeighboringChunks(chunk)
        areaBlocks = self.getAreaBlocks(chunk, neighboringChunks)
        yield
        areaBlockLights = self.getAreaBlockLights(chunk, neighboringChunks)
        yield
        # Slab cells copy the light value of the cell above them so their
        # exposed tops aren't lit as if fully enclosed.
        allSlabs = set([b.ID for b in alphaMaterials.allBlocks if "Slab" in b.name])
        for slab in allSlabs:
            slabs = areaBlocks == slab
            if slabs.any():
                areaBlockLights[slabs] = areaBlockLights[:, :, 1:][slabs[:, :, :-1]]
            yield
        # Pick the material map that decides which faces count as exposed.
        showHiddenOres = cr.renderer.showHiddenOres
        if showHiddenOres:
            facingMats = self.hiddenOreMaterials[areaBlocks]
        else:
            facingMats = self.exposedMaterialMap[areaBlocks]
        yield
        if self.roughGraphics:
            areaBlockMats = self.roughMaterials[areaBlocks]
        else:
            areaBlockMats = self.materialMap[areaBlocks]
        facingBlockIndices = self.getFacingBlockIndices(areaBlocks, facingMats)
        yield
        for _ in self.computeGeometry(chunk, areaBlockMats, facingBlockIndices, areaBlockLights, cr, blockRenderers):
            yield
def computeGeometry(self, chunk, areaBlockMats, facingBlockIndices, areaBlockLights, chunkRenderer, blockRenderers):
blocks, blockData = chunk.Blocks, chunk.Data
blockData &= 0xf
blockMaterials = areaBlockMats[1:-1, 1:-1, 1:-1]
if self.roughGraphics:
blockMaterials.clip(0, 1, blockMaterials)
else:
# Special case for doors
#
# Each part of a door itself does not have all of the information required
# to render, as direction/whether its open is on the lower part and the hinge
# side is on the upper part. So here we combine the metadata of the bottom part
# with the top to form 0-32 metadata(which would be used in door renderer).
#
copied = False
for door in DoorRenderer.blocktypes:
doors = blocks == door
if doors.any():
if not copied:
# copy if required but only once
blockData = blockData.copy()
copied = True
# only accept lower part one block below upper part
valid = doors[:, :, :-1] & doors[:, :, 1:] & (blockData[:, :, :-1] < 8) & (blockData[:, :, 1:] >= 8)
mask = valid.nonzero()
upper_mask = (mask[0], mask[1], mask[2]+1)
blockData[mask] += (blockData[upper_mask] - 8) * 16
blockData[upper_mask] = blockData[mask] + 8
sx = sz = slice(0, 16)
asx = asz = slice(0, 18)
for y in xrange(0, chunk.world.Height, 16):
sy = slice(y, y + 16)
asy = slice(y, y + 18)
for _ in self.computeCubeGeometry(
y,
blockRenderers,
blocks[sx, sz, sy],
blockData[sx, sz, sy],
chunk.materials,
blockMaterials[sx, sz, sy],
[f[sx, sz, sy] for f in facingBlockIndices],
areaBlockLights[asx, asz, asy],
chunkRenderer):
yield
    def computeCubeGeometry(self, y, blockRenderers, blocks, blockData, materials, blockMaterials, facingBlockIndices,
                            areaBlockLights, chunkRenderer):
        """Generator: run every applicable BlockRenderer over one 16-high
        section, appending the renderers that produced geometry."""
        # Count how many cells each material index occupies so renderers
        # with no matching blocks can be skipped outright.
        materialCounts = numpy.bincount(blockMaterials.ravel())
        append = blockRenderers.append

        def texMap(blocks, blockData=0, direction=slice(None)):
            return materials.blockTextures[blocks, blockData, direction]  # xxx slow

        for blockRendererClass in self.blockRendererClasses:
            mi = blockRendererClass.materialIndex
            if mi >= len(materialCounts) or materialCounts[mi] == 0:
                continue
            blockRenderer = blockRendererClass(self)
            blockRenderer.y = y
            blockRenderer.materials = materials
            for _ in blockRenderer.makeVertices(facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights,
                                                texMap):
                yield
            append(blockRenderer)
        yield
def makeTemplate(self, direction, blockIndices):
return self.precomputedVertices[direction][numpy.where(blockIndices)]
class Layer:
    """String constants naming the togglable render layers."""
    Blocks = "Blocks"
    Entities = "Entities"
    Monsters = "Monsters"
    Items = "Items"
    TileEntities = "TileEntities"
    TileTicks = "TileTicks"
    TerrainPopulated = "TerrainPopulated"
    ChunkBorder = "ChunkBorder"
    AllLayers = (Blocks, Entities, Monsters, Items, TileEntities, TileTicks, TerrainPopulated, ChunkBorder)
class BlockRenderer(object):
    """Base class for per-material chunk geometry builders and drawers.

    Subclasses provide `blocktypes` (or override getBlocktypes) and a
    `makeFaceVertices` method, called from makeVertices, to produce their
    vertex arrays.
    """
    # Only built at full detail unless a subclass overrides this.
    detailLevels = (0,)
    layer = Layer.Blocks

    # For each face, the slice selecting every cell's neighbor on that side
    # within the padded (w+2, l+2, h+2) area arrays.
    directionOffsets = {
        pymclevel.faces.FaceXDecreasing: numpy.s_[:-2, 1:-1, 1:-1],
        pymclevel.faces.FaceXIncreasing: numpy.s_[2:, 1:-1, 1:-1],
        pymclevel.faces.FaceYDecreasing: numpy.s_[1:-1, 1:-1, :-2],
        pymclevel.faces.FaceYIncreasing: numpy.s_[1:-1, 1:-1, 2:],
        pymclevel.faces.FaceZDecreasing: numpy.s_[1:-1, :-2, 1:-1],
        pymclevel.faces.FaceZIncreasing: numpy.s_[1:-1, 2:, 1:-1],
    }
    renderstate = ChunkCalculator.renderstateAlphaTest
    used = False

    def __init__(self, cc):
        self.makeTemplate = cc.makeTemplate
        self.chunkCalculator = cc
        self.vertexArrays = []
        self.materials = cc.level.materials
        pass

    def getBlocktypes(self, mats):
        # Default: the class-level blocktypes; subclasses may override.
        return self.blocktypes

    def setAlpha(self, alpha):
        "alpha is an unsigned byte value"
        for a in self.vertexArrays:
            a.view('uint8')[_RGBA][..., 3] = alpha

    def bufferSize(self):
        # float32 elements -> bytes
        return sum(a.size for a in self.vertexArrays) * 4

    def getMaterialIndices(self, blockMaterials):
        return blockMaterials == self.materialIndex

    def makeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one vertex array per face direction via
        self.makeFaceVertices (supplied by subclasses)."""
        arrays = []
        append = arrays.append
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            vertexArray = self.makeFaceVertices(direction, materialIndices, exposedFaceIndices, blocks, blockData,
                                                blockLight, facingBlockLight, texMap)
            yield
            if len(vertexArray):
                append(vertexArray)
        self.vertexArrays = arrays

    def makeArrayList(self, chunkPosition, showRedraw):
        # Record the draw calls into a GL display list and return its id.
        l = gl.glGenLists(1)
        GL.glNewList(l, GL.GL_COMPILE)
        self.drawArrays(chunkPosition, showRedraw)
        GL.glEndList()
        return l

    def drawArrays(self, chunkPosition, showRedraw):
        cx, cz = chunkPosition
        y = getattr(self, "y", 0)
        with gl.glPushMatrix(GL.GL_MODELVIEW):
            GL.glTranslate(cx << 4, y, cz << 4)
            if showRedraw:
                # Tint freshly-recalculated chunks red.
                GL.glColor(1.0, 0.25, 0.25, 1.0)
            self.drawVertices()

    def drawVertices(self):
        if self.vertexArrays:
            for buf in self.vertexArrays:
                self.drawFaceVertices(buf)

    def drawFaceVertices(self, buf):
        if not len(buf):
            return
        stride = elementByteLength
        # Interleaved layout: 3 float32 position, 2 float32 texcoord, 4-byte RGBA.
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
class EntityRendererGeneric(BlockRenderer):
    """Draws translucent marker boxes (wire outline plus offset fill) for
    entity-like data."""
    renderstate = ChunkCalculator.renderstateEntity
    # Markers stay visible at every detail level.
    detailLevels = (0, 1, 2)

    def drawFaceVertices(self, buf):
        if not len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDepthMask(False)
        # Pass 1: wire outline.
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        GL.glLineWidth(2.0)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        # Pass 2: filled quads, depth-tested and polygon-offset to avoid
        # fighting the terrain.
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        GL.glPolygonOffset(DepthOffset.TerrainWire, DepthOffset.TerrainWire)
        with gl.glEnable(GL.GL_POLYGON_OFFSET_FILL, GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glDepthMask(True)

    @staticmethod
    def _computeVertices(positions, colors, offset=False, chunkPosition=(0, 0)):
        """Build a (len(positions)*6, 4, 6) vertex array of unit boxes at the
        given world positions, translated into chunk-local coordinates."""
        cx, cz = chunkPosition
        x = cx << 4
        z = cz << 4
        vertexArray = numpy.zeros(shape=(len(positions), 6, 4, 6), dtype='float32')
        if positions:
            positions = numpy.array(positions)
            positions[:, (0, 2)] -= (x, z)
            if offset:
                # Center the box on the position instead of cornering it.
                positions -= 0.5
            vertexArray.view('uint8')[_RGBA] = colors
            vertexArray[_XYZ] = positions[:, numpy.newaxis, numpy.newaxis, :]
            vertexArray[_XYZ] += faceVertexTemplates[_XYZ]
        vertexArray.shape = (len(positions) * 6, 4, 6)
        return vertexArray
class TileEntityRenderer(EntityRendererGeneric):
    layer = Layer.TileEntities

    def makeChunkVertices(self, chunk):
        """Generator: build yellow marker boxes for the chunk's tile entities."""
        tilePositions = []
        append = tilePositions.append
        for i, ent in enumerate(chunk.TileEntities):
            if i % 10 == 0:
                # Yield every 10 entities to keep the caller responsive.
                yield
            if 'x' not in ent:
                continue
            append(pymclevel.TileEntity.pos(ent))
        tiles = self._computeVertices(tilePositions, (0xff, 0xff, 0x33, 0x44), chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [tiles]
class BaseEntityRenderer(EntityRendererGeneric):
    # Shared base for the entity-marker renderers below (monsters, items, ...).
    pass
class MonsterRenderer(BaseEntityRenderer):
    layer = Layer.Entities  # xxx Monsters
    # Entity ids excluded from the red "monster" markers.
    notMonsters = {"Item", "XPOrb", "Painting", "ItemFrame", "ArmorStand"}

    def makeChunkVertices(self, chunk):
        """Generator: red marker boxes for every entity not in notMonsters."""
        monsterPositions = []
        append = monsterPositions.append
        # Versioned game data may override the exclusion list.
        notMonsters = self.chunkCalculator.level.defsIds.mcedit_defs.get('notMonsters', self.notMonsters)
        for i, ent in enumerate(chunk.Entities):
            if i % 10 == 0:
                yield
            id = ent["id"].value
            if id in notMonsters:
                continue
            pos = pymclevel.Entity.pos(ent)
            pos[1] += 0.5
            append(pos)
        monsters = self._computeVertices(monsterPositions,
                                        (0xff, 0x22, 0x22, 0x44),
                                        offset=True,
                                        chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [monsters]
class EntityRenderer(BaseEntityRenderer):
    @staticmethod
    def makeChunkVertices(chunk):
        # Produces no geometry; the single yield keeps the generator protocol.
        yield
class ItemRenderer(BaseEntityRenderer):
    layer = Layer.Items

    def makeChunkVertices(self, chunk):
        """Generator: build colored marker boxes for item-like entities."""
        entityPositions = []
        entityColors = []
        # Fallback RGBA colors by entity id, used when the versioned defs
        # provide no mapcolor.
        colorMap = {
            "Item": (0x22, 0xff, 0x22, 0x5f),
            "XPOrb": (0x88, 0xff, 0x88, 0x5f),
            "Painting": (134, 96, 67, 0x5f),
            "ItemFrame": (134, 96, 67, 0x5f),
            "ArmorStand": (0x22, 0xff, 0x22, 0x5f),
        }
        pos_append = entityPositions.append
        color_append = entityColors.append
        defsIds = self.chunkCalculator.level.defsIds
        mcedit_defs = defsIds.mcedit_defs
        mcedit_ids = defsIds.mcedit_ids
        for i, ent in enumerate(chunk.Entities):
            if i % 10 == 0:
                yield
            # Let get the color from the versionned data, and use the 'old' way as fallback
            color = mcedit_defs.get(mcedit_ids.get(ent["id"].value), {}).get("mapcolor")
            if color is None:
                color = colorMap.get(ent["id"].value)
            if color is None:
                continue
            pos = pymclevel.Entity.pos(ent)
            # Wall-mounted entities keep their exact position; others are
            # raised half a block.
            noRenderDelta = mcedit_defs.get('noRenderDelta', ("Painting", "ItemFrame"))
            if ent["id"].value not in noRenderDelta:
                pos[1] += 0.5
            pos_append(pos)
            color_append(color)
        entities = self._computeVertices(entityPositions,
                                         numpy.array(entityColors, dtype='uint8')[:, numpy.newaxis, numpy.newaxis],
                                         offset=True, chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [entities]
class TileTicksRenderer(EntityRendererGeneric):
    layer = Layer.TileTicks

    def makeChunkVertices(self, chunk):
        # White marker boxes at every pending tile tick's x/y/z, if the
        # chunk carries a TileTicks list at all.
        if hasattr(chunk, "TileTicks"):
            self.vertexArrays.append(self._computeVertices([[tick[j].value for j in "xyz"] for i, tick in enumerate(chunk.TileTicks)],
                                                           (0xff, 0xff, 0xff, 0x44),
                                                           chunkPosition=chunk.chunkPosition))
        yield
class TerrainPopulatedRenderer(EntityRendererGeneric):
    layer = Layer.TerrainPopulated
    # Full-chunk (16x256x16) box template tinted with `color`.
    vertexTemplate = numpy.zeros((6, 4, 6), 'float32')
    vertexTemplate[_XYZ] = faceVertexTemplates[_XYZ]
    vertexTemplate[_XYZ] *= (16, 256, 16)
    color = (255, 200, 155)
    vertexTemplate.view('uint8')[_RGBA] = color + (72,)

    def drawFaceVertices(self, buf):
        if not len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDepthMask(False)
        # Filled pass: two-sided, depth-tested.
        GL.glDisable(GL.GL_CULL_FACE)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glEnable(GL.GL_CULL_FACE)
        # Wire passes: thin outline drawn unconditionally, thicker one
        # depth-tested.
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        GL.glLineWidth(1.0)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(2.0)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(1.0)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        GL.glDepthMask(True)

    def makeChunkVertices(self, chunk):
        """Generator: draw the chunk box only when TerrainPopulated is false,
        hiding side faces shared with neighbors that are also unpopulated."""
        neighbors = self.chunkCalculator.getNeighboringChunks(chunk)

        def getpop(ch):
            return getattr(ch, "TerrainPopulated", True)

        pop = getpop(chunk)
        yield
        if pop:
            return
        visibleFaces = [
            getpop(neighbors[pymclevel.faces.FaceXIncreasing]),
            getpop(neighbors[pymclevel.faces.FaceXDecreasing]),
            True,
            True,
            getpop(neighbors[pymclevel.faces.FaceZIncreasing]),
            getpop(neighbors[pymclevel.faces.FaceZDecreasing]),
        ]
        visibleFaces = numpy.array(visibleFaces, dtype='bool')
        verts = self.vertexTemplate[visibleFaces]
        self.vertexArrays.append(verts)
        yield
class ChunkBorderRenderer(EntityRendererGeneric):
    """Draws a tinted 16x256x16 outline box around the chunk."""
    layer = Layer.ChunkBorder
    color = (0, 210, 225)
    vertexTemplate = numpy.zeros((6, 4, 6), 'float32')
    vertexTemplate[_XYZ] = faceVertexTemplates[_XYZ]
    vertexTemplate[_XYZ] *= (16, 256, 16)
    vertexTemplate.view('uint8')[_RGBA] = color + (150,)

    def makeChunkVertices(self, chunk):
        # All six faces of the chunk box are always shown.
        visibleFaces = [
            True,
            True,
            True,
            True,
            True,
            True,
        ]
        yield
        visibleFaces = numpy.array(visibleFaces, dtype='bool')
        verts = self.vertexTemplate[visibleFaces]
        self.vertexArrays.append(verts)
        yield

    def drawFaceVertices(self, buf):
        if not len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        # Two depth-tested wire passes: thin, then thicker for emphasis.
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        GL.glLineWidth(1)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(2.0)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(1.0)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
class LowDetailBlockRenderer(BlockRenderer):
    """Renders the heightmap top-block view used at detail level 1."""
    renderstate = ChunkCalculator.renderstateLowDetail
    detailLevels = (1,)
    def drawFaceVertices(self, buf):
        # Low-detail vertices are 16 bytes: x, y, z float32 plus packed RGBA
        # at byte offset 12 — no texture coordinates.
        if not len(buf):
            return
        stride = 16
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, numpy.ravel(buf.ravel()))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype='uint8').ravel()[12:]))
        GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
def setAlpha(self, alpha):
for va in self.vertexArrays:
va.view('uint8')[..., -1] = alpha
def makeChunkVertices(self, ch):
step = 1
level = ch.world
vertexArrays = []
blocks = ch.Blocks
heightMap = ch.HeightMap
heightMap = heightMap[::step, ::step]
blocks = blocks[::step, ::step]
if 0 in blocks.shape:
return
chunkWidth, chunkLength, chunkHeight = blocks.shape
blockIndices = numpy.zeros((chunkWidth, chunkLength, chunkHeight), bool)
gridaxes = list(numpy.indices((chunkWidth, chunkLength)))
h = numpy.swapaxes(heightMap - 1, 0, 1)[:chunkWidth, :chunkLength]
numpy.clip(h, 0, chunkHeight - 1, out=h)
gridaxes = (gridaxes[0], gridaxes[1], h)
depths = numpy.zeros((chunkWidth, chunkLength), dtype='uint16')
depths[1:-1, 1:-1] = reduce(numpy.minimum, (h[1:-1, :-2], h[1:-1, 2:], h[:-2, 1:-1]), h[2:, 1:-1])
yield
try:
topBlocks = blocks[gridaxes]
nonAirBlocks = (topBlocks != 0)
blockIndices[gridaxes] = nonAirBlocks
h += 1
numpy.clip(h, 0, chunkHeight - 1, out=h)
overblocks = blocks[gridaxes][nonAirBlocks].ravel()
except ValueError as e:
raise ValueError(str(e.args) + "Chunk shape: {0}".format(blockIndices.shape), sys.exc_info()[-1])
if nonAirBlocks.any():
blockTypes = blocks[blockIndices]
flatcolors = level.materials.flatColors[blockTypes, ch.Data[blockIndices] & 0xf][:, numpy.newaxis, :]
x, z, y = blockIndices.nonzero()
yield
vertexArray = numpy.zeros((len(x), 4, 4), dtype='float32')
vertexArray[_XYZ][..., 0] = x[:, numpy.newaxis]
vertexArray[_XYZ][..., 1] = y[:, numpy.newaxis]
vertexArray[_XYZ][..., 2] = z[:, numpy.newaxis]
va0 = numpy.array(vertexArray)
va0[..., :3] += faceVertexTemplates[pymclevel.faces.FaceYIncreasing, ..., :3]
overmask = overblocks > 0
flatcolors[overmask] = level.materials.flatColors[:, 0][overblocks[overmask]][:, numpy.newaxis]
if self.detailLevel == 2:
heightfactor = (y / float(2.0 * ch.world.Height)) + 0.5
flatcolors[..., :3] = flatcolors[..., :3].astype(float) * heightfactor[:, numpy.newaxis, numpy.newaxis]
_RGBA = numpy.s_[..., 12:16]
va0.view('uint8')[_RGBA] = flatcolors
va0[_XYZ][:, :, 0] *= step
va0[_XYZ][:, :, 2] *= step
yield
if self.detailLevel == 2:
self.vertexArrays = [va0]
return
va1 = numpy.array(vertexArray)
va1[..., :3] += faceVertexTemplates[pymclevel.faces.FaceXIncreasing, ..., :3]
va1[_XYZ][:, (0, 1), 1] = depths[nonAirBlocks].ravel()[:, numpy.newaxis] # stretch to floor
va1[_XYZ][:, (1, 2), 0] -= 1.0 # turn diagonally
va1[_XYZ][:, (2, 3), 1] -= 0.5 # drop down to prevent intersection pixels
va1[_XYZ][:, :, 0] *= step
va1[_XYZ][:, :, 2] *= step
flatcolors = flatcolors.astype(float) * 0.8
va1.view('uint8')[_RGBA] = flatcolors
grassmask = topBlocks[nonAirBlocks] == 2
# color grass sides with dirt's color
va1.view('uint8')[_RGBA][grassmask] = level.materials.flatColors[:, 0][[3]][:, numpy.newaxis]
va2 = numpy.array(va1)
va2[_XYZ][:, (1, 2), 0] += step
va2[_XYZ][:, (0, 3), 0] -= step
vertexArrays = [va1, va2, va0]
self.vertexArrays = vertexArrays
class OverheadBlockRenderer(LowDetailBlockRenderer):
    # Same heightmap renderer at detail level 2: top faces only, height-shaded.
    detailLevels = (2,)
class GenericBlockRenderer(BlockRenderer):
    """Fallback renderer for ordinary full-cube blocks: one textured quad per
    exposed face, lit by the neighboring block's light value."""
    renderstate = ChunkCalculator.renderstateAlphaTest
    materialIndex = 1

    def makeGenericVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one vertex array per face direction.

        facingBlockIndices gives, per direction, which cells have that face
        exposed; areaBlockLights is the chunk's light data padded by one cell.
        """
        vertexArrays = []
        append = vertexArrays.append
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            # A face is lit by the block it is looking into.
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            blockIndices = materialIndices & exposedFaceIndices
            theseBlocks = blocks[blockIndices]
            bdata = blockData[blockIndices]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(theseBlocks, bdata, direction)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            if self.materials.name in ("Alpha", "Pocket"):
                if direction == pymclevel.faces.FaceYIncreasing:
                    # Tint grass block tops with the grass color.
                    grass = theseBlocks == alphaMaterials.Grass.ID
                    vertexArray.view('uint8')[_RGB][grass] = vertexArray.view('uint8')[_RGB][grass].astype(float) * self.grassColor
            yield
            append(vertexArray)
        self.vertexArrays = vertexArrays

    grassColor = grassColorDefault = [0.39, 0.71, 0.23]  # 62C743
    makeVertices = makeGenericVertices
class LeafBlockRenderer(BlockRenderer):
    """Renders leaf blocks, tinting each species with its own leaf color.

    With fastLeaves enabled, leaves are drawn opaque and only exposed faces
    are generated; otherwise every leaf block's faces are drawn with the
    transparent texture variant.
    """

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["LEAVES"]]

    @property
    def renderstate(self):
        # Opaque leaves need no alpha test; transparent leaves do.
        if self.chunkCalculator.fastLeaves:
            return ChunkCalculator.renderstatePlain
        else:
            return ChunkCalculator.renderstateAlphaTest

    def makeLeafVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build per-face vertex arrays for all leaf blocks.

        Species masks (leaves/pines/...) computed here are reused in the loop
        below to apply each species' tint color.
        """
        arrays = []
        append = arrays.append
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        if self.materials.name in ("Alpha", "Pocket"):
            if not self.chunkCalculator.fastLeaves:
                # Transparent leaves: draw every leaf block, not just exposed faces.
                blockIndices = materialIndices
                data = blockData[blockIndices]
                data &= 0x3  # ignore decay states
                leaves = (data == alphaMaterials.Leaves.blockData)
                pines = (data == alphaMaterials.PineLeaves.blockData)
                birches = (data == alphaMaterials.BirchLeaves.blockData)
                jungle = (data == alphaMaterials.JungleLeaves.blockData)
                acacia = (data == alphaMaterials.AcaciaLeaves.blockData)
                darkoak = (data == alphaMaterials.DarkOakLeaves.blockData)
                texes = texMap(blocks[blockIndices], [0], 0)
        else:
            blockIndices = materialIndices
            texes = texMap(blocks[blockIndices], [0], 0)
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            if self.materials.name in ("Alpha", "Pocket"):
                if self.chunkCalculator.fastLeaves:
                    # Opaque leaves: only exposed faces, recompute masks per direction.
                    blockIndices = materialIndices & exposedFaceIndices
                    data = blockData[blockIndices]
                    data &= 0x3  # ignore decay states
                    leaves = (data == alphaMaterials.Leaves.blockData)
                    pines = (data == alphaMaterials.PineLeaves.blockData)
                    birches = (data == alphaMaterials.BirchLeaves.blockData)
                    jungle = (data == alphaMaterials.JungleLeaves.blockData)
                    acacia = (data == alphaMaterials.AcaciaLeaves.blockData)
                    darkoak = (data == alphaMaterials.DarkOakLeaves.blockData)
                    texes = texMap(blocks[blockIndices], data, 0)
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texes[:, numpy.newaxis]
            if not self.chunkCalculator.fastLeaves:
                # Shift one tile in the atlas — presumably to the transparent
                # leaf texture variant; TODO confirm against the texture sheet.
                vertexArray[_ST] -= (0x10, 0x0)
            vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            if self.materials.name in ("Alpha", "Pocket"):
                # Apply each species' tint.
                vertexArray.view('uint8')[_RGB][leaves] = vertexArray.view('uint8')[_RGB][leaves].astype(float) * self.leafColor
                vertexArray.view('uint8')[_RGB][pines] = vertexArray.view('uint8')[_RGB][pines].astype(float) * self.pineLeafColor
                vertexArray.view('uint8')[_RGB][birches] = vertexArray.view('uint8')[_RGB][birches].astype(float) * self.birchLeafColor
                vertexArray.view('uint8')[_RGB][jungle] = vertexArray.view('uint8')[_RGB][jungle].astype(float) * self.jungleLeafColor
                vertexArray.view('uint8')[_RGB][acacia] = vertexArray.view('uint8')[_RGB][acacia].astype(float) * self.acaciaLeafColor
                vertexArray.view('uint8')[_RGB][darkoak] = vertexArray.view('uint8')[_RGB][darkoak].astype(float) * self.darkoakLeafColor
            yield
            append(vertexArray)
        self.vertexArrays = arrays

    leafColor = leafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.]  # 48b518
    pineLeafColor = pineLeafColorDefault = [0x61 / 255., 0x99 / 255., 0x61 / 255.]  # 0x619961
    birchLeafColor = birchLeafColorDefault = [0x80 / 255., 0xa7 / 255., 0x55 / 255.]  # 0x80a755
    jungleLeafColor = jungleLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.]  # 48b518
    acaciaLeafColor = acaciaLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.]  # 48b518
    darkoakLeafColor = darkoakLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.]  # 48b518
    makeVertices = makeLeafVertices
class PlantBlockRenderer(BlockRenderer):
    """Renders cross-shaped plants (flowers, saplings, crops, stems) as two
    intersecting diagonal quads."""

    @classmethod
    def getBlocktypes(cls, mats):
        # blocktypes = [6, 37, 38, 39, 40, 59, 83]
        # if mats.name != "Classic": blocktypes += [31, 32]  # shrubs, tall grass
        # if mats.name == "Alpha": blocktypes += [115]  # nether wart
        blocktypes = [b.ID for b in mats if b.type in ("DECORATION_CROSS", "NETHER_WART", "CROPS", "STEM")]
        return blocktypes

    renderstate = ChunkCalculator.renderstateAlphaTest

    def makePlantVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build the four diagonal quads forming each plant's X shape."""
        arrays = []
        append = arrays.append
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        theseBlocks = blocks[blockIndices]
        bdata = blockData[blockIndices]
        texes = texMap(blocks[blockIndices], bdata, 0)
        # Plants are lit by their own cell's block light (unpadded interior).
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        colorize = None
        if self.materials.name != "Classic":  # so hacky, someone more competent fix this
            # Tall grass (nonzero data) and most tall-flower parts get the leaf tint.
            colorize = (theseBlocks == self.materials.TallGrass.ID) & (bdata != 0)
            colorize2 = (theseBlocks == self.materials.TallFlowers.ID) & (bdata != 0) & (
                bdata != 1) & (bdata != 4) & (bdata != 5)
        for direction in (
                pymclevel.faces.FaceXIncreasing, pymclevel.faces.FaceXDecreasing, pymclevel.faces.FaceZIncreasing,
                pymclevel.faces.FaceZDecreasing):
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                # NOTE(review): returns rather than continues — every direction
                # yields the same vertex count here, so all-or-nothing is fine.
                return
            # Slide two of the quad's verts across the block to form a diagonal.
            if direction == pymclevel.faces.FaceXIncreasing:
                vertexArray[_XYZ][..., 1:3, 0] -= 1
            if direction == pymclevel.faces.FaceXDecreasing:
                vertexArray[_XYZ][..., 1:3, 0] += 1
            if direction == pymclevel.faces.FaceZIncreasing:
                vertexArray[_XYZ][..., 1:3, 2] -= 1
            if direction == pymclevel.faces.FaceZDecreasing:
                vertexArray[_XYZ][..., 1:3, 2] += 1
            vertexArray[_ST] += texes[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] = 0xf  # ignore precomputed directional light
            vertexArray.view('uint8')[_RGB] *= lights
            if colorize is not None:
                vertexArray.view('uint8')[_RGB][colorize] = vertexArray.view('uint8')[_RGB][colorize].astype(float) * LeafBlockRenderer.leafColor
                vertexArray.view('uint8')[_RGB][colorize2] = vertexArray.view('uint8')[_RGB][colorize2].astype(float) * LeafBlockRenderer.leafColor
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makePlantVertices
class TorchBlockRenderer(BlockRenderer):
    """Renders torches as a thin post, leaning against a wall when the data
    value says the torch is wall-mounted."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["TORCH"]]

    renderstate = ChunkCalculator.renderstateAlphaTest
    # Per-face vertex offsets (one row of 4 offsets per face) that squeeze the
    # unit cube into a 2/16-wide post for a floor-standing torch.
    torchOffsetsStraight = [
        [  # FaceXIncreasing
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
        ],
        [  # FaceXDecreasing
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
        ],
        [  # FaceYIncreasing
            (7 / 16., -6 / 16., 7 / 16.),
            (7 / 16., -6 / 16., -7 / 16.),
            (-7 / 16., -6 / 16., -7 / 16.),
            (-7 / 16., -6 / 16., 7 / 16.),
        ],
        [  # FaceYDecreasing
            (7 / 16., 0., 7 / 16.),
            (-7 / 16., 0., 7 / 16.),
            (-7 / 16., 0., -7 / 16.),
            (7 / 16., 0., -7 / 16.),
        ],
        [  # FaceZIncreasing
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.)
        ],
        [  # FaceZDecreasing
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.)
        ],
    ]
    # Base offsets for wall-mounted torches (raised 3/16); the per-orientation
    # lean is applied by the in-place adjustments below.
    torchOffsetsSouth = [
        [  # FaceXIncreasing
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
        ],
        [  # FaceXDecreasing
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
        ],
        [  # FaceYIncreasing
            (7 / 16., -3 / 16., 7 / 16.),
            (7 / 16., -3 / 16., -7 / 16.),
            (-7 / 16., -3 / 16., -7 / 16.),
            (-7 / 16., -3 / 16., 7 / 16.),
        ],
        [  # FaceYDecreasing
            (7 / 16., 3 / 16., 7 / 16.),
            (-7 / 16., 3 / 16., 7 / 16.),
            (-7 / 16., 3 / 16., -7 / 16.),
            (7 / 16., 3 / 16., -7 / 16.),
        ],
        [  # FaceZIncreasing
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.)
        ],
        [  # FaceZDecreasing
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
        ],
    ]
    torchOffsetsNorth = torchOffsetsWest = torchOffsetsEast = torchOffsetsSouth
    # Index by the torch's data value (orientation); values past 5 fall back
    # to the straight (floor) shape.
    torchOffsets = [
        torchOffsetsStraight,
        torchOffsetsSouth,
        torchOffsetsNorth,
        torchOffsetsWest,
        torchOffsetsEast,
        torchOffsetsStraight,
    ] + [torchOffsetsStraight] * 10
    torchOffsets = numpy.array(torchOffsets, dtype='float32')
    # Lean each wall-mounted orientation toward its wall: shift the bottom
    # face fully, the post sides partially, and tilt the top.
    torchOffsets[1][..., 3, :, 0] -= 0.5
    torchOffsets[1][..., 0:2, 0:2, 0] -= 0.5
    torchOffsets[1][..., 4:6, 0:2, 0] -= 0.5
    torchOffsets[1][..., 0:2, 2:4, 0] -= 0.1
    torchOffsets[1][..., 4:6, 2:4, 0] -= 0.1
    torchOffsets[1][..., 2, :, 0] -= 0.25
    torchOffsets[2][..., 3, :, 0] += 0.5
    torchOffsets[2][..., 0:2, 0:2, 0] += 0.5
    torchOffsets[2][..., 4:6, 0:2, 0] += 0.5
    torchOffsets[2][..., 0:2, 2:4, 0] += 0.1
    torchOffsets[2][..., 4:6, 2:4, 0] += 0.1
    torchOffsets[2][..., 2, :, 0] += 0.25
    torchOffsets[3][..., 3, :, 2] -= 0.5
    torchOffsets[3][..., 0:2, 0:2, 2] -= 0.5
    torchOffsets[3][..., 4:6, 0:2, 2] -= 0.5
    torchOffsets[3][..., 0:2, 2:4, 2] -= 0.1
    torchOffsets[3][..., 4:6, 2:4, 2] -= 0.1
    torchOffsets[3][..., 2, :, 2] -= 0.25
    torchOffsets[4][..., 3, :, 2] += 0.5
    torchOffsets[4][..., 0:2, 0:2, 2] += 0.5
    torchOffsets[4][..., 4:6, 0:2, 2] += 0.5
    torchOffsets[4][..., 0:2, 2:4, 2] += 0.1
    torchOffsets[4][..., 4:6, 2:4, 2] += 0.1
    torchOffsets[4][..., 2, :, 2] += 0.25
    # Texture coords for the torch's tip and base.
    upCoords = ((7, 6), (7, 8), (9, 8), (9, 6))
    downCoords = ((7, 14), (7, 16), (9, 16), (9, 14))

    def makeTorchVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build all six faces of every torch, offset and leaned by
        the per-data-value tables above, drawn at full brightness."""
        blockIndices = self.getMaterialIndices(blockMaterials)
        torchOffsets = self.torchOffsets[blockData[blockIndices]]
        texes = texMap(blocks[blockIndices], blockData[blockIndices])
        yield
        arrays = []
        append = arrays.append
        for direction in xrange(6):
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                return
            vertexArray.view('uint8')[_RGBA] = 0xff  # torches render fullbright
            vertexArray[_XYZ] += torchOffsets[:, direction]
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_ST] = self.upCoords
            if direction == pymclevel.faces.FaceYDecreasing:
                vertexArray[_ST] = self.downCoords
            vertexArray[_ST] += texes[:, numpy.newaxis, direction]
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makeTorchVertices
class LeverBlockRenderer(BlockRenderer):
    """Renders levers from JSON-model-style templates: a base plate plus the
    handle, each rotated per the lever's 4-bit orientation data value."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:lever"].ID]

    # Base plate: box from (5,0,4) to (11,3,12) with per-face texture rects.
    leverBaseTemplate = makeVertexTemplatesFromJsonModel((5, 0, 4), (11, 3, 12), {
        "down": (10, 0, 16, 8),
        "up": (10, 0, 16, 8),
        "north": (10, 8, 16, 11),
        "south": (10, 8, 16, 11),
        "west": (2, 0, 10, 3),
        "east": (2, 0, 10, 3)
    })
    # One rotated base template per data value 0-14; 15-16 are empty fillers.
    leverBaseTemplates = numpy.array([
        rotateTemplate(leverBaseTemplate, x=180, y=90),
        rotateTemplate(leverBaseTemplate, x=90, y=90),
        rotateTemplate(leverBaseTemplate, x=90, y=270),
        rotateTemplate(leverBaseTemplate, x=90, y=180),
        rotateTemplate(leverBaseTemplate, x=270, y=180),
        leverBaseTemplate,
        rotateTemplate(leverBaseTemplate, y=90),
        rotateTemplate(leverBaseTemplate, x=180),
        rotateTemplate(leverBaseTemplate, x=180, y=90),
        rotateTemplate(leverBaseTemplate, x=90, y=90),
        rotateTemplate(leverBaseTemplate, x=270, y=90),
        rotateTemplate(leverBaseTemplate, x=270),
        rotateTemplate(leverBaseTemplate, x=270, y=180),
        leverBaseTemplate,
        rotateTemplate(leverBaseTemplate, y=90),
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])
    # Handle: box from (7,1,7) to (9,11,9).
    leverTemplate = makeVertexTemplatesFromJsonModel((7, 1, 7), (9, 11, 9), {
        "down": (7, 6, 9, 8),
        "up": (7, 6, 9, 8),
        "north": (7, 6, 9, 16),
        "south": (7, 6, 9, 16),
        "west": (7, 6, 9, 16),
        "east": (7, 6, 9, 16)
    })
    leverTemplates = numpy.array([
        rotateTemplate(leverTemplate, x=180),
        rotateTemplate(leverTemplate, x=90, y=90),
        rotateTemplate(leverTemplate, x=90, y=270),
        rotateTemplate(leverTemplate, x=90, y=180),
        rotateTemplate(leverTemplate, x=270, y=180),
        leverTemplate,
        rotateTemplate(leverTemplate, y=90),
        rotateTemplate(leverTemplate, x=180),
        rotateTemplate(leverTemplate, x=180),
        rotateTemplate(leverTemplate, x=90, y=90),
        rotateTemplate(leverTemplate, x=270, y=90),
        rotateTemplate(leverTemplate, x=270),
        rotateTemplate(leverTemplate, x=270, y=180),
        leverTemplate,
        leverTemplate,
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])
    # 15 masks off the 'powered' bit so on/off levers share templates.
    makeVertices = makeVerticesFromModel([leverBaseTemplates, leverTemplates], 15)
class RailBlockRenderer(BlockRenderer):
    """Renders rails as a single flat quad just above the ground, choosing the
    straight/corner/ascending texture and slope from the rail's data value."""
    renderstate = ChunkCalculator.renderstateAlphaTest

    def __init__(self, *args, **kwargs):
        BlockRenderer.__init__(self, *args, **kwargs)
        # Texture quads for each of the 16 data values; entries past the known
        # shapes get a placeholder ("unknown") texture.
        self.railTextures = numpy.array([
            [(0, 128), (0, 144), (16, 144), (16, 128)],  # east-west
            [(0, 128), (16, 128), (16, 144), (0, 144)],  # north-south
            [(0, 128), (16, 128), (16, 144), (0, 144)],  # south-ascending
            [(0, 128), (16, 128), (16, 144), (0, 144)],  # north-ascending
            [(0, 128), (0, 144), (16, 144), (16, 128)],  # east-ascending
            [(0, 128), (0, 144), (16, 144), (16, 128)],  # west-ascending
            [(0, 112), (0, 128), (16, 128), (16, 112)],  # northeast corner
            [(0, 128), (16, 128), (16, 112), (0, 112)],  # southeast corner
            [(16, 128), (16, 112), (0, 112), (0, 128)],  # southwest corner
            [(16, 112), (0, 112), (0, 128), (16, 128)],  # northwest corner
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
            [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        ], dtype='float32')
        # Convert absolute atlas coordinates into offsets relative to the rail
        # block's own texture origin, since texMap's result is added back in.
        self.railTextures -= self.materials.blockTextures[self.materials.Rail.ID, 0, 0]

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["SIMPLE_RAIL"]]

    # Per-corner height bumps that tilt the quad for ascending rails.
    railOffsets = numpy.array([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 1, 1],  # south-ascending
        [1, 1, 0, 0],  # north-ascending
        [1, 0, 0, 1],  # east-ascending
        [0, 1, 1, 0],  # west-ascending
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
    ], dtype='float32')

    def makeRailVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one upward-facing quad per rail block."""
        direction = pymclevel.faces.FaceYIncreasing
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        bdata = blockData[blockIndices]
        railBlocks = blocks[blockIndices]
        tex = texMap(railBlocks, bdata, pymclevel.faces.FaceYIncreasing)[:, numpy.newaxis, :]
        # disable 'powered' or 'pressed' bit for powered and detector rails
        bdata[railBlocks != self.materials.Rail.ID] = bdata[railBlocks != self.materials.Rail.ID].astype(int) & ~0x8
        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return
        vertexArray[_ST] = self.railTextures[bdata]
        vertexArray[_ST] += tex
        vertexArray[_XYZ][..., 1] -= 0.9  # sit the quad just above the block below
        vertexArray[_XYZ][..., 1] += self.railOffsets[bdata]
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = makeRailVertices
class LadderBlockRenderer(BlockRenderer):
    """Renders ladders as a single quad pressed against the wall the ladder's
    data value says it is attached to."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:ladder"].ID]

    # Per-data-value vertex offsets that rotate/press the top-face template
    # against the attachment wall; data values 0-1 and 6+ are unused fillers.
    ladderOffsets = numpy.array([
        [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
        [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
        [(0, -1, 0.9), (0, 0, -0.1), (0, 0, -0.1), (0, -1, 0.9)],  # facing east
        [(0, 0, 0.1), (0, -1, -.9), (0, -1, -.9), (0, 0, 0.1)],  # facing west
        [(.9, -1, 0), (.9, -1, 0), (-.1, 0, 0), (-.1, 0, 0)],  # north
        [(0.1, 0, 0), (0.1, 0, 0), (-.9, -1, 0), (-.9, -1, 0)],  # south
    ] + [[(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)]] * 10, dtype='float32')
    # Matching texture quads, wound to face outward for each attachment side.
    ladderTextures = numpy.array([
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(64, 96), (64, 80), (48, 80), (48, 96), ],  # e
        [(48, 80), (48, 96), (64, 96), (64, 80), ],  # w
        [(48, 96), (64, 96), (64, 80), (48, 80), ],  # n
        [(64, 80), (48, 80), (48, 96), (64, 96), ],  # s
    ] + [[(0, 192), (0, 208), (16, 208), (16, 192)]] * 10, dtype='float32')

    def ladderVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one wall quad per ladder block."""
        blockIndices = self.getMaterialIndices(blockMaterials)
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        yield
        bdata = blockData[blockIndices]
        vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
        if not len(vertexArray):
            return
        vertexArray[_ST] = self.ladderTextures[bdata]
        vertexArray[_XYZ] += self.ladderOffsets[bdata]
        vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = ladderVertices
class WallSignBlockRenderer(BlockRenderer):
    """Renders wall-mounted signs from a JSON-model template rotated per the
    sign's facing data value."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:wall_sign"].ID]

    # Sign board: box from (0,4.5,0) to (16,13.5,2) with per-face texture rects.
    wallSignTemplate = makeVertexTemplatesFromJsonModel((0, 4.5, 0), (16, 13.5, 2), {
        "down": (0, 11, 18, 13),
        "up": (0, 6, 16, 8),
        "north": (0, 4, 16, 13),
        "south": (0, 4, 16, 13),
        "west": (0, 4, 2, 13),
        "east": (10, 4, 12, 13)
    })
    # I don't know how this system works and how it should be structured, but this seems to do the job
    wallSignTemplates = numpy.array([
        wallSignTemplate,
        wallSignTemplate,
        rotateTemplate(wallSignTemplate, y=180),
        wallSignTemplate,
        rotateTemplate(wallSignTemplate, y=90),
        rotateTemplate(wallSignTemplate, y=270),
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])
    # 7 masks the data value down to the facing bits.
    makeVertices = makeVerticesFromModel(wallSignTemplates, 7)
class StandingSignRenderer(BlockRenderer):
    """Renders free-standing signs as a board plus a post, both from JSON-model
    templates (no rotation per data value — all orientations share one shape)."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:standing_sign"].ID]

    # Sign board: box from (0,7,7) to (16,16,9).
    signTemplate = makeVertexTemplatesFromJsonModel((0, 7, 7), (16, 16, 9), {
        "down": (0, 14, 16, 16),
        "up": (0, 12, 16, 14),
        "north": (0, 7, 16, 16),
        "south": (0, 7, 16, 16),
        "west": (0, 7, 2, 16),
        "east": (14, 7, 16, 16)
    })
    signTemplates = numpy.array([
        signTemplate,
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])
    # Post: box from (7,0,7) to (9,7,9).
    postTemplate = makeVertexTemplatesFromJsonModel((7, 0, 7), (9, 7, 9), {
        "down": (7, 0, 9, 6),
        "up": (7, 0, 9, 6),
        "north": (7, 0, 9, 6),
        "south": (7, 0, 9, 6),
        "west": (7, 0, 9, 6),
        "east": (7, 0, 9, 6),
    })
    postTemplates = numpy.array([
        postTemplate,
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])
    makeVertices = makeVerticesFromModel([signTemplates, postTemplates])
class SnowBlockRenderer(BlockRenderer):
    """Renders snow layers as a thin slab 2/16 of a block high."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:snow_layer"].ID]

    def makeSnowVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build faces for every snow layer, lowering the top face
        and shortening the sides to the layer's height."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            # Top faces are always drawn (the block above never hides them);
            # other faces only where exposed.
            if direction != pymclevel.faces.FaceYIncreasing:
                blockIndices = materialIndices & exposedFaceIndices
            else:
                blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], 0)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.875  # drop the top to 2/16 height
            if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
                # Shorten side faces and shift their texture to match.
                vertexArray[_XYZ][..., 2:4, 1] -= 0.875
                vertexArray[_ST][..., 2:4, 1] += 14
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makeSnowVertices
class CarpetBlockRenderer(BlockRenderer):
    """Renders carpets and lily pads as a very thin slab (1/16 block high)."""

    @classmethod
    def getBlocktypes(cls, mats):
        # Separate before implementing layers
        return [mats["minecraft:carpet"].ID, mats["minecraft:waterlily"].ID]

    def makeCarpetVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build faces for every carpet block, flattening the top
        face down and shortening the side faces to the slab's height."""
        carpetMask = self.getMaterialIndices(blockMaterials)
        vertexArrays = []
        yield
        for face, exposed in enumerate(facingBlockIndices):
            # Top faces are always drawn; all other faces only where exposed.
            if face == pymclevel.faces.FaceYIncreasing:
                mask = carpetMask
            else:
                mask = carpetMask & exposed
            faceLight = areaBlockLights[self.directionOffsets[face]]
            lights = faceLight[mask][..., numpy.newaxis, numpy.newaxis]
            verts = self.makeTemplate(face, mask)
            if not len(verts):
                continue
            verts[_ST] += texMap(blocks[mask], blockData[mask], 0)[:, numpy.newaxis, 0:2]
            verts.view('uint8')[_RGB] *= lights
            if face == pymclevel.faces.FaceYIncreasing:
                verts[_XYZ][..., 1] -= 0.937  # drop the top to 1/16 height
            if face not in (pymclevel.faces.FaceYIncreasing, pymclevel.faces.FaceYDecreasing):
                # Shorten side faces and shift their texture to match.
                verts[_XYZ][..., 2:4, 1] -= 0.937
                verts[_ST][..., 2:4, 1] += 15
            vertexArrays.append(verts)
        yield
        self.vertexArrays = vertexArrays

    makeVertices = makeCarpetVertices
class CactusBlockRenderer(BlockRenderer):
    """Renders cactus blocks with the four side faces pulled in by roughly one
    pixel (1/16 block), matching the cactus texture's inset silhouette."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:cactus"].ID]

    def makeCactusVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build all six faces of every cactus block (faces are
        drawn regardless of exposure because of the inset geometry)."""
        cactusMask = self.getMaterialIndices(blockMaterials)
        vertexArrays = []
        yield
        for face, exposed in enumerate(facingBlockIndices):
            mask = cactusMask
            faceLight = areaBlockLights[self.directionOffsets[face]]
            lights = faceLight[mask][..., numpy.newaxis, numpy.newaxis]
            verts = self.makeTemplate(face, mask)
            if not len(verts):
                continue
            verts[_ST] += texMap(blocks[mask], blockData[mask], face)[:, numpy.newaxis, 0:2]
            verts.view('uint8')[_RGB] *= lights
            # Inset each side face toward the block center.
            if face == pymclevel.faces.FaceXIncreasing:
                verts[_XYZ][..., 0] -= 0.063
            elif face == pymclevel.faces.FaceXDecreasing:
                verts[_XYZ][..., 0] += 0.063
            elif face == pymclevel.faces.FaceZIncreasing:
                verts[_XYZ][..., 2] -= 0.063
            elif face == pymclevel.faces.FaceZDecreasing:
                verts[_XYZ][..., 2] += 0.063
            vertexArrays.append(verts)
        yield
        self.vertexArrays = vertexArrays

    makeVertices = makeCactusVertices
class PaneBlockRenderer(BlockRenderer):
    """Renders glass panes and iron bars as zero-thickness planes centered in
    the block; add more faces here to give the pane real width."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["SOLID_PANE"]]

    def makePaneVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build all six faces of each pane, collapsing the four
        side faces onto the block's center plane."""
        paneMask = self.getMaterialIndices(blockMaterials)
        vertexArrays = []
        yield
        for face, exposed in enumerate(facingBlockIndices):
            mask = paneMask
            faceLight = areaBlockLights[self.directionOffsets[face]]
            lights = faceLight[mask][..., numpy.newaxis, numpy.newaxis]
            verts = self.makeTemplate(face, mask)
            if not len(verts):
                continue
            verts[_ST] += texMap(blocks[mask], blockData[mask], face)[:, numpy.newaxis, 0:2]
            verts.view('uint8')[_RGB] *= lights
            # Move each side face halfway in, onto the center plane.
            if face == pymclevel.faces.FaceXIncreasing:
                verts[_XYZ][..., 0] -= 0.5
            elif face == pymclevel.faces.FaceXDecreasing:
                verts[_XYZ][..., 0] += 0.5
            elif face == pymclevel.faces.FaceZIncreasing:
                verts[_XYZ][..., 2] -= 0.5
            elif face == pymclevel.faces.FaceZDecreasing:
                verts[_XYZ][..., 2] += 0.5
            vertexArrays.append(verts)
        yield
        self.vertexArrays = vertexArrays

    makeVertices = makePaneVertices
class PlateBlockRenderer(BlockRenderer):  # suggestions to make this the proper shape are appreciated
    """Renders pressure plates as a thin full-width slab 1/16 block high."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["PRESSURE_PLATE"]]

    def makePlateVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build all six faces of each plate, flattened to 1/16."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], 0)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.937  # drop the top to 1/16 height
            if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
                # Shorten side faces and shift their texture to match.
                vertexArray[_XYZ][..., 2:4, 1] -= 0.937
                vertexArray[_ST][..., 2:4, 1] += 15
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makePlateVertices
class EnchantingBlockRenderer(BlockRenderer):
    """Renders enchanting tables and end portal frames as a 3/4-height block.

    Note: the end portal frame's side sprite has been lowered 1 pixel to use
    this renderer; the eye needs a separate renderer.
    """

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:enchanting_table"].ID, mats["minecraft:end_portal_frame"].ID]

    def makeEnchantingVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build each exposed face, lowering only the top by 1/4."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            # Top faces are always drawn; others only where exposed.
            if direction != pymclevel.faces.FaceYIncreasing:
                blockIndices = materialIndices & exposedFaceIndices
            else:
                blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.25  # drop the top to 3/4 height
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makeEnchantingVertices
class DaylightBlockRenderer(BlockRenderer):
    """Renders daylight sensors (both states) as a slab 6/16 of a block high."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:daylight_detector"].ID, mats.DaylightSensorOn.ID]

    def makeDaylightVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build faces for every sensor, lowering the top and
        shortening the sides to the slab's height."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            # Top faces are always drawn; others only where exposed.
            if direction != pymclevel.faces.FaceYIncreasing:
                blockIndices = materialIndices & exposedFaceIndices
            else:
                blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.625  # drop the top to 6/16 height
            if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
                # Shorten side faces and shift their texture to match.
                vertexArray[_XYZ][..., 2:4, 1] -= 0.625
                vertexArray[_ST][..., 2:4, 1] += 10
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makeDaylightVertices
class BedBlockRenderer(BlockRenderer):
    """Renders beds as a half-ish-height block (top face lowered to 9/16)."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:bed"].ID]

    def makeBedVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build each bed face, lowering only the top face."""
        bedMask = self.getMaterialIndices(blockMaterials)
        vertexArrays = []
        yield
        for face, exposed in enumerate(facingBlockIndices):
            # Top faces are always drawn; all other faces only where exposed.
            mask = bedMask if face == pymclevel.faces.FaceYIncreasing else bedMask & exposed
            faceLight = areaBlockLights[self.directionOffsets[face]]
            verts = self.makeTemplate(face, mask)
            if not len(verts):
                continue
            verts[_ST] += texMap(blocks[mask], blockData[mask], face)[:, numpy.newaxis, 0:2]
            verts.view('uint8')[_RGB] *= faceLight[mask][..., numpy.newaxis, numpy.newaxis]
            if face == pymclevel.faces.FaceYIncreasing:
                verts[_XYZ][..., 1] -= 0.438
            vertexArrays.append(verts)
        yield
        self.vertexArrays = vertexArrays

    makeVertices = makeBedVertices
class CakeBlockRenderer(BlockRenderer):  # Only shows whole cakes
    """Renders cakes as a half-height block with sides inset by ~1/16; eaten
    states are not represented."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:cake"].ID]

    def makeCakeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build all six faces of each cake, lowered and inset."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.5  # drop the top to half height
            # Inset each side face toward the block center.
            if direction == pymclevel.faces.FaceXIncreasing:
                vertexArray[_XYZ][..., 0] -= 0.063
            if direction == pymclevel.faces.FaceXDecreasing:
                vertexArray[_XYZ][..., 0] += 0.063
            if direction == pymclevel.faces.FaceZIncreasing:
                vertexArray[_XYZ][..., 2] -= 0.063
            if direction == pymclevel.faces.FaceZDecreasing:
                vertexArray[_XYZ][..., 2] += 0.063
            append(vertexArray)
        yield
        self.vertexArrays = arrays

    makeVertices = makeCakeVertices
class RepeaterBlockRenderer(BlockRenderer):  # Sticks would be nice
    """Renders thin-slice blocks (repeaters and similar) as a 2/16-high plate."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [block.ID for block in mats.blocksByType["THINSLICE"]]

    def makeRepeaterVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build vertex arrays for all thin-slice blocks."""
        materialIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        append = arrays.append
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            blockIndices = materialIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                # drop the top face so the plate is 1 - 0.875 = 2/16 high
                vertexArray[_XYZ][..., 1] -= 0.875
            if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
                # shorten side faces and slide their texture window 14 px
                vertexArray[_XYZ][..., 2:4, 1] -= 0.875
                vertexArray[_ST][..., 2:4, 1] += 14
            append(vertexArray)
            yield
        self.vertexArrays = arrays

    makeVertices = makeRepeaterVertices
class RedstoneBlockRenderer(BlockRenderer):
    """Renders redstone wire as a flat top-face quad tinted red by power level."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [mats["minecraft:redstone_wire"].ID]

    def redstoneVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one quad per wire, sitting 0.1 above the block below."""
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
        if not len(vertexArray):
            return
        # texture index (55, 0, 0) — presumably the redstone dust tile; TODO confirm
        vertexArray[_ST] += self.materials.blockTextures[55, 0, 0]
        vertexArray[_XYZ][..., 1] -= 0.9

        # brightness from the wire's power level (0-15), shifted into high bits
        bdata = blockData[blockIndices]
        bdata <<= 3
        # bdata &= 0xe0
        bdata[bdata > 0] |= 0x80

        vertexArray.view('uint8')[_RGBA][..., 0] = bdata[..., numpy.newaxis]
        # zero green and blue so powered wire reads as red only
        vertexArray.view('uint8')[_RGBA][..., 0:3] = vertexArray.view('uint8')[_RGBA][..., 0:3] * [1, 0, 0]
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = redstoneVertices
# button, floor plate, door -> 1-cube features
class DoorRenderer(BlockRenderer):
    """Renders doors from JSON-style box models.

    doorTemplates holds one orientation per packed door state (mask 31 in
    makeVerticesFromModel): hinge side, facing, open flag, and upper/lower half.
    """

    @classmethod
    def getBlocktypes(cls, mats):
        cls.blocktypes = [block.ID for block in mats.blocksByType["DOOR"]]
        return cls.blocktypes

    # 3/16-thick panel against one cell wall, left-hinged texture layout
    doorTemplate = makeVertexTemplatesFromJsonModel(
        (0, 0, 0), (3, 16, 16),
        {
            "down": (13, 0, 16, 16),
            # TODO handle faces that should not appear
            "up": (13, 0, 16, 16),
            "north": (3, 0, 0, 16),
            "south": (0, 0, 3, 16),
            "west": (0, 0, 16, 16),
            "east": (16, 0, 0, 16)
        }
    )
    # right-hinged variant: west/east texture windows mirrored
    doorRHTemplate = makeVertexTemplatesFromJsonModel(
        (0, 0, 0), (3, 16, 16),
        {
            "down": (13, 0, 16, 16),
            # TODO handle faces that should not appear
            "up": (13, 0, 16, 16),
            "north": (3, 0, 0, 16),
            "south": (0, 0, 3, 16),
            "west": (16, 0, 0, 16),
            "east": (0, 0, 16, 16)
        }
    )

    doorTemplates = numpy.array([
        # lower hinge left
        doorTemplate,
        rotateTemplate(doorTemplate, y=90),
        rotateTemplate(doorTemplate, y=180),
        rotateTemplate(doorTemplate, y=270),
        rotateTemplate(doorRHTemplate, y=90),
        rotateTemplate(doorRHTemplate, y=180),
        rotateTemplate(doorRHTemplate, y=270),
        doorRHTemplate,
        # upper hinge left
        doorTemplate,
        rotateTemplate(doorTemplate, y=90),
        rotateTemplate(doorTemplate, y=180),
        rotateTemplate(doorTemplate, y=270),
        rotateTemplate(doorRHTemplate, y=90),
        rotateTemplate(doorRHTemplate, y=180),
        rotateTemplate(doorRHTemplate, y=270),
        doorRHTemplate,
        # lower hinge right
        doorRHTemplate,
        rotateTemplate(doorRHTemplate, y=90),
        rotateTemplate(doorRHTemplate, y=180),
        rotateTemplate(doorRHTemplate, y=270),
        rotateTemplate(doorTemplate, y=270),
        doorTemplate,
        rotateTemplate(doorTemplate, y=90),
        rotateTemplate(doorTemplate, y=180),
        # upper hinge right
        doorRHTemplate,
        rotateTemplate(doorRHTemplate, y=90),
        rotateTemplate(doorRHTemplate, y=180),
        rotateTemplate(doorRHTemplate, y=270),
        rotateTemplate(doorTemplate, y=270),
        doorTemplate,
        rotateTemplate(doorTemplate, y=90),
        rotateTemplate(doorTemplate, y=180),
    ])

    makeVertices = makeVerticesFromModel(doorTemplates, 31)
class ButtonRenderer(BlockRenderer):
    """Renders buttons: a small box, rotated per mounting face, with a
    flatter variant for the pressed state (data bit 3)."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [a.ID for a in mats.blocksByType["BUTTON"]]

    # unpressed: 6x2x4 box centered on the mounting face
    buttonTemplate = makeVertexTemplatesFromJsonModel((5, 0, 6), (11, 2, 10), {
        "down": (5, 6, 11, 10),
        "up": (5, 10, 11, 6),
        "north": (5, 14, 11, 16),
        "south": (5, 14, 11, 16),
        "west": (6, 14, 10, 16),
        "east": (6, 14, 10, 16)
    })
    # pressed: same footprint, only 1/16 thick
    buttonTemplatePressed = makeVertexTemplatesFromJsonModel((5, 0, 6), (11, 1, 10), {
        "down": (5, 6, 11, 10),
        "up": (5, 10, 11, 6),
        "north": (5, 15, 11, 16),
        "south": (5, 15, 11, 16),
        "west": (6, 15, 10, 16),
        "east": (6, 15, 10, 16)
    })

    # indexed by 4 data bits (mask 15): facing 0-5, then unused padding,
    # then the same facings pressed
    buttonTemplates = numpy.array([
        rotateTemplate(buttonTemplate, 180, 0),
        rotateTemplate(buttonTemplate, 90, 90),
        rotateTemplate(buttonTemplate, 90, 270),
        rotateTemplate(buttonTemplate, 90, 180),
        rotateTemplate(buttonTemplate, 90, 0),
        buttonTemplate,
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6)),
        rotateTemplate(buttonTemplatePressed, 180, 0),
        rotateTemplate(buttonTemplatePressed, 90, 90),
        rotateTemplate(buttonTemplatePressed, 90, 270),
        rotateTemplate(buttonTemplatePressed, 90, 180),
        rotateTemplate(buttonTemplatePressed, 90, 0),
        buttonTemplatePressed,
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6)),
    ])

    makeVertices = makeVerticesFromModel(buttonTemplates, 15)
class TrapDoorRenderer(BlockRenderer):
    """Renders trapdoors: closed bottom/top slabs, or an upright panel when
    open (data bit 2), oriented by bits 0-1."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [a.ID for a in mats.blocksByType["TRAPDOOR"]]

    # open: a 3/16-thick upright panel against one cell wall
    openTemplate = makeVertexTemplatesFromJsonModel((0, 0, 13), (16, 16, 16), {
        "down": (0, 13, 16, 16),
        "up": (0, 16, 16, 13),
        "north": (0, 0, 16, 16),
        "south": (0, 0, 16, 16),
        "west": (16, 0, 13, 16),
        "east": (13, 0, 16, 16)
    })
    # closed against the ceiling
    topTemplate = makeVertexTemplatesFromJsonModel((0, 13, 0), (16, 16, 16), {
        "down": (0, 0, 16, 16),
        "up": (0, 0, 16, 16),
        "north": (0, 16, 16, 13),
        "south": (0, 16, 16, 13),
        "west": (0, 16, 16, 13),
        "east": (0, 16, 16, 13)
    })
    # closed against the floor
    # NOTE(review): side texture windows match topTemplate even though this box
    # spans y 0..3 rather than 13..16 — possibly intentional, verify in-game.
    bottomTemplate = makeVertexTemplatesFromJsonModel((0, 0, 0), (16, 3, 16), {
        "down": (0, 0, 16, 16),
        "up": (0, 0, 16, 16),
        "north": (0, 16, 16, 13),
        "south": (0, 16, 16, 13),
        "west": (0, 16, 16, 13),
        "east": (0, 16, 16, 13)
    })

    # indexed by 4 data bits (mask 15): bits 0-1 facing, bit 2 open, bit 3 top-half
    trapDoorTemplates = numpy.array([
        bottomTemplate,
        bottomTemplate,
        bottomTemplate,
        bottomTemplate,
        openTemplate,
        rotateTemplate(openTemplate, y=180),
        rotateTemplate(openTemplate, y=270),
        rotateTemplate(openTemplate, y=90),
        topTemplate,
        topTemplate,
        topTemplate,
        topTemplate,
        openTemplate,
        rotateTemplate(openTemplate, y=180),
        rotateTemplate(openTemplate, y=270),
        rotateTemplate(openTemplate, y=90),
    ])

    makeVertices = makeVerticesFromModel(trapDoorTemplates, 15)
class FenceBlockRenderer(BlockRenderer):
    """Renders fence posts as a centered quarter-width, full-height box.

    The post spans x and z in [3/8, 5/8] and y in [0, 1]; connecting arms
    between neighboring fences are not modeled.
    """

    fenceTemplates = makeVertexTemplates(3 / 8., 0, 3 / 8., 5 / 8., 1, 5 / 8.)
    makeVertices = makeVerticesFromModel(fenceTemplates)

    @classmethod
    def getBlocktypes(cls, mats):
        # Every block the material set classifies as a fence.
        return [block.ID for block in mats.blocksByType["FENCE"]]
class FenceGateBlockRenderer(BlockRenderer):
    """Renders fence gates.

    Closed gates (data bit 4 clear) draw as one slab across the cell, oriented
    by data bit 0.  Open gates draw as two stub posts selected by data bits
    0-1 from openFenceTemplates.
    """

    # two closed orientations: spanning x or spanning z
    closedFenceTemplates = numpy.array([
        makeVertexTemplates(0, 0, 3 / 8., 1, .8, 5 / 8.),
        makeVertexTemplates(3 / 8., 0, 0, 5 / 8., .8, 1)])

    # [facing][post]: the two stubs left standing when the gate is open
    openFenceTemplates = numpy.array([
        [makeVertexTemplates(0, 0, 3 / 8., 1 / 8., .8, 1),
         makeVertexTemplates(7 / 8., 0, 3 / 8., 1, .8, 1)],
        [makeVertexTemplates(0, 0, 0, 5 / 8., .8, 1 / 8.),
         makeVertexTemplates(0, 0, 7 / 8., 5 / 8., .8, 1)],
        [makeVertexTemplates(0, 0, 0, 1 / 8., .8, 5 / 8.),
         makeVertexTemplates(7 / 8., 0, 0, 1, .8, 5 / 8.)],
        [makeVertexTemplates(3 / 8., 0, 0, 1, .8, 1 / 8.),
         makeVertexTemplates(3 / 8., 0, 7 / 8., 1, .8, 1)]])

    @classmethod
    def getBlocktypes(cls, mats):
        # BUGFIX: previously returned [a.ID for a in mats.AllStairs] — a
        # copy-paste from StairBlockRenderer below — so this renderer claimed
        # stair blocks while fence gates went unclaimed.
        # TODO(review): confirm "FENCE_GATE" is the blocksByType key, matching
        # the "FENCE"/"DOOR"/"BUTTON"/"TRAPDOOR" keys used by sibling renderers.
        return [a.ID for a in mats.blocksByType["FENCE_GATE"]]

    def fenceGateVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: emit the closed-gate slab, then the two open-gate posts."""
        fenceMask = self.getMaterialIndices(blockMaterials)
        # split gates by the open flag (data bit 4)
        closedGateMask = fenceMask.copy()
        closedGateMask[blockData & 4 == 4] = 0
        openGateMask = fenceMask.copy()
        openGateMask[blockData & 4 == 0] = 0

        closedGateIndices = closedGateMask.nonzero()
        openGateIndices = openGateMask.nonzero()

        closedGateData = blockData[closedGateMask]
        closedGateData &= 1
        openGateData = blockData[openGateMask]
        openGateData &= 3

        yield
        # closed gate
        vertexArray = numpy.zeros((len(closedGateIndices[0]), 6, 4, 6), dtype='float32')
        for indicies in xrange(3):
            dimension = (0, 2, 1)[indicies]

            vertexArray[..., indicies] = closedGateIndices[dimension][:, numpy.newaxis,
                                                                      numpy.newaxis]  # xxx swap z with y using ^

        vertexArray[..., 0:5] += self.closedFenceTemplates[closedGateData][..., 0:5]
        vertexArray[_ST] += texMap(blocks[closedGateIndices], 0)[..., numpy.newaxis, :]

        vertexArray.view('uint8')[_RGB] = self.closedFenceTemplates[closedGateData][..., 5][..., numpy.newaxis]
        vertexArray.view('uint8')[_A] = 0xFF
        vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][closedGateIndices][
            ..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
        vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
        yield
        self.vertexArrays = [vertexArray]
        append = self.vertexArrays.append

        # open gate: one pass per stub post
        for i in xrange(2):
            vertexArray = numpy.zeros((len(openGateIndices[0]), 6, 4, 6), dtype='float32')
            for indicies in xrange(3):
                dimension = (0, 2, 1)[indicies]

                vertexArray[..., indicies] = openGateIndices[dimension][:, numpy.newaxis,
                                                                        numpy.newaxis]  # xxx swap z with y using ^

            vertexArray[..., 0:5] += self.openFenceTemplates[openGateData, i][..., 0:5]
            vertexArray[_ST] += texMap(blocks[openGateIndices], 0)[..., numpy.newaxis, :]

            vertexArray.view('uint8')[_RGB] = self.openFenceTemplates[openGateData, i] \
                [..., 5][..., numpy.newaxis]
            vertexArray.view('uint8')[_A] = 0xFF
            vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][openGateIndices][
                ..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
            vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
            yield
            append(vertexArray)

    makeVertices = fenceGateVertices
class StairBlockRenderer(BlockRenderer):
    """Renders stairs as a half-height slab plus a half-width riser."""

    @classmethod
    def getBlocktypes(cls, mats):
        return [a.ID for a in mats.AllStairs]

    # South - FaceXIncreasing
    # North - FaceXDecreasing
    # West - FaceZIncreasing
    # East - FaceZDecreasing
    stairTemplates = numpy.array([makeVertexTemplates(**kw) for kw in [
        # South - FaceXIncreasing
        {"xmin": 0.5},
        # North - FaceXDecreasing
        {"xmax": 0.5},
        # West - FaceZIncreasing
        {"zmin": 0.5},
        # East - FaceZDecreasing
        {"zmax": 0.5},
        # Slabtype
        {"ymax": 0.5},
    ]
    ])

    def stairVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: emit the riser part, then the slab part, for every stair."""
        arrays = []
        append = arrays.append
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        stairBlocks = blocks[materialIndices]
        stairData = blockData[materialIndices]
        # bit 2 = upside-down stair; bits 0-1 = facing
        stairTop = (stairData >> 2).astype(bool)
        stairData &= 3

        # note the axis order of the chunk arrays: x, z, y
        x, z, y = materialIndices.nonzero()

        for _ in ("slab", "step"):
            vertexArray = numpy.zeros((len(x), 6, 4, 6), dtype='float32')
            for i in xrange(3):
                vertexArray[_XYZ][..., i] = (x, y, z)[i][:, numpy.newaxis, numpy.newaxis]

            if _ == "step":
                # half-slab template, raised by 0.5 for upside-down stairs
                vertexArray[_XYZST] += self.stairTemplates[4][..., :5]
                vertexArray[_XYZ][..., 1][stairTop] += 0.5
            else:
                # half-width riser selected by facing
                vertexArray[_XYZST] += self.stairTemplates[stairData][..., :5]

            vertexArray[_ST] += texMap(stairBlocks, 0)[..., numpy.newaxis, :]

            vertexArray.view('uint8')[_RGB] = self.stairTemplates[4][numpy.newaxis, ..., 5, numpy.newaxis]
            vertexArray.view('uint8')[_RGB] *= 0xf
            vertexArray.view('uint8')[_A] = 0xff
            vertexArray.shape = (len(x) * 6, 4, 6)
            yield
            append(vertexArray)

        self.vertexArrays = arrays

    makeVertices = stairVertices
class VineBlockRenderer(BlockRenderer):
    """Renders vines as single quads hung on the faces their data bits select."""

    SouthBit = 1  # FaceZIncreasing
    WestBit = 2  # FaceXDecreasing
    NorthBit = 4  # FaceZDecreasing
    EastBit = 8  # FaceXIncreasing

    renderstate = ChunkCalculator.renderstateVines

    def __init__(self, *args, **kwargs):
        BlockRenderer.__init__(self, *args, **kwargs)
        self.blocktypes = [self.materials["minecraft:vine"].ID]

    def vineFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
                         facingBlockLight, texMap):
        """Build this face's quads for vines whose data bit attaches them here."""
        bdata = blockData[blockIndices]
        blockIndices = numpy.array(blockIndices)
        # keep only vines whose attachment bit matches this face
        if direction == pymclevel.faces.FaceZIncreasing:
            blockIndices[blockIndices] = (bdata & 1).astype(bool)
        elif direction == pymclevel.faces.FaceXDecreasing:
            blockIndices[blockIndices] = (bdata & 2).astype(bool)
        elif direction == pymclevel.faces.FaceZDecreasing:
            blockIndices[blockIndices] = (bdata & 4).astype(bool)
        elif direction == pymclevel.faces.FaceXIncreasing:
            blockIndices[blockIndices] = (bdata & 8).astype(bool)
        else:
            # vines never attach to top/bottom faces
            return []
        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return vertexArray

        vertexArray[_ST] += texMap(self.blocktypes[0], [0], direction)[:, numpy.newaxis, 0:2]

        lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        vertexArray.view('uint8')[_RGB] *= lights
        # tint with the same color leaves use
        vertexArray.view('uint8')[_RGB] = vertexArray.view('uint8')[_RGB].astype(float) * LeafBlockRenderer.leafColor

        # pull the quad 1/16 block away from the wall it hangs on
        if direction == pymclevel.faces.FaceZIncreasing:
            vertexArray[_XYZ][..., 2] -= 0.0625
        if direction == pymclevel.faces.FaceXDecreasing:
            vertexArray[_XYZ][..., 0] += 0.0625
        if direction == pymclevel.faces.FaceZDecreasing:
            vertexArray[_XYZ][..., 2] += 0.0625
        if direction == pymclevel.faces.FaceXIncreasing:
            vertexArray[_XYZ][..., 0] -= 0.0625

        return vertexArray

    makeFaceVertices = vineFaceVertices
class SlabBlockRenderer(BlockRenderer):
    """Renders single (non-double) half-slabs; data bit 3 places the slab in
    the top half of the cell."""

    def __init__(self, *args, **kwargs):
        BlockRenderer.__init__(self, *args, **kwargs)
        materials = self.materials
        # every slab id whose name does not contain "double"
        self.blocktypes = list(set(a.ID for a in materials.AllSlabs if "double" not in a.name.lower()))

    def slabFaceVertices(self, direction, blockIndices, facingBlockLight, blocks, blockData, blockLight,
                         areaBlockLights, texMap):
        # NOTE(review): parameter names differ from the other makeFaceVertices
        # implementations (facingBlockLight / areaBlockLights look swapped);
        # callers pass positionally, so behavior follows the positions used here.
        lights = areaBlockLights[blockIndices][..., numpy.newaxis, numpy.newaxis]
        bdata = blockData[blockIndices]
        top = (bdata >> 3).astype(bool)  # bit 3: slab occupies the top half
        bdata &= 7
        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return vertexArray

        vertexArray[_ST] += texMap(blocks[blockIndices], bdata, direction)[:, numpy.newaxis, 0:2]
        vertexArray.view('uint8')[_RGB] *= lights
        if direction == pymclevel.faces.FaceYIncreasing:
            vertexArray[_XYZ][..., 1] -= 0.5

        if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
            # halve side faces and shift their texture window 8 px
            vertexArray[_XYZ][..., 2:4, 1] -= 0.5
            vertexArray[_ST][..., 2:4, 1] += 8

        # raise top-half slabs back up
        vertexArray[_XYZ][..., 1][top] += 0.5

        return vertexArray

    makeFaceVertices = slabFaceVertices
# 1.9 renderers
class EndRodRenderer(BlockRenderer):
    """Renders end rods as a thin shaft plus a base handle, rotated into
    place by the block's facing value (data mask 7)."""

    def __init__(self, *args, **kwargs):
        BlockRenderer.__init__(self, *args, **kwargs)
        self.blocktypes = [self.materials["minecraft:end_rod"].ID]

    # the 2x15x2 shaft
    rodTemplate = makeVertexTemplatesFromJsonModel((7, 1, 7), (9, 16, 9), {
        "down": (4, 2, 2, 0),
        "up": (2, 0, 4, 2),
        "north": (0, 0, 2, 15),
        "south": (0, 0, 2, 15),
        "west": (0, 0, 2, 15),
        "east": (0, 0, 2, 15)
    })

    # one orientation per facing value 0-5; entries 6-7 are unused padding
    rodTemplates = numpy.array([
        rotateTemplate(rodTemplate, x=180),
        rodTemplate,
        rotateTemplate(rodTemplate, x=90),
        rotateTemplate(rodTemplate, y=180, x=90),
        rotateTemplate(rodTemplate, y=270, x=90),
        rotateTemplate(rodTemplate, y=90, x=90),
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])

    # the 4x1x4 base the shaft stands on
    handleTemplate = makeVertexTemplatesFromJsonModel((6, 0, 6), (10, 1, 10), {
        "down": (6, 6, 2, 2),
        "up": (2, 2, 6, 6),
        "north": (2, 6, 6, 7),
        "south": (2, 6, 6, 7),
        "west": (2, 6, 6, 7),
        "east": (2, 6, 6, 7)
    })

    handleTemplates = numpy.array([
        rotateTemplate(handleTemplate, x=180),
        handleTemplate,
        rotateTemplate(handleTemplate, x=90),
        rotateTemplate(handleTemplate, y=180, x=90),
        rotateTemplate(handleTemplate, y=270, x=90),
        rotateTemplate(handleTemplate, y=90, x=90),
        numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
    ])

    makeVertices = makeVerticesFromModel([rodTemplates, handleTemplates], 7)
class WaterBlockRenderer(BlockRenderer):
    """Renders still and flowing water using the translucent water renderstate."""

    renderstate = ChunkCalculator.renderstateWater

    def __init__(self, *args, **kwargs):
        BlockRenderer.__init__(self, *args, **kwargs)
        materials = self.materials
        self.waterID = materials["minecraft:water"].ID
        self.blocktypes = [materials["minecraft:flowing_water"].ID, self.waterID]

    # NOTE(review): duplicates the __init__ logic above — both paths record
    # waterID and the same blocktype list.
    @classmethod
    def getBlocktypes(cls, mats):
        cls.waterID = mats["minecraft:water"].ID
        return [mats["minecraft:flowing_water"].ID, cls.waterID]

    def waterFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
                          facingBlockLight, texMap):
        # only draw faces bordering non-water blocks; all use the still-water tile
        blockIndices = blockIndices & exposedFaceIndices
        vertexArray = self.makeTemplate(direction, blockIndices)
        vertexArray[_ST] += texMap(self.waterID, 0, 0)[numpy.newaxis, numpy.newaxis]
        vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        return vertexArray

    makeFaceVertices = waterFaceVertices
class IceBlockRenderer(BlockRenderer):
    """Draws ice with the translucent ice renderstate, one quad per exposed face."""

    renderstate = ChunkCalculator.renderstateIce

    @classmethod
    def getBlocktypes(cls, mats):
        cls.iceID = mats["minecraft:ice"].ID
        return [cls.iceID]

    def iceFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
                        facingBlockLight, texMap):
        # restrict to faces that actually border non-ice blocks
        visible = blockIndices & exposedFaceIndices
        shade = facingBlockLight[visible][..., numpy.newaxis, numpy.newaxis]
        quads = self.makeTemplate(direction, visible)
        quads[_ST] += texMap(self.iceID, 0, 0)[numpy.newaxis, numpy.newaxis]
        quads.view('uint8')[_RGB] *= shade
        return quads

    makeFaceVertices = iceFaceVertices
from glutils import DisplayList
class MCRenderer(object):
isPreviewer = False
    def __init__(self, level=None, alpha=1.0):
        """Create a renderer for *level*; alpha (0.0-1.0) is terrain opacity."""
        self.render = True
        self.origin = (0, 0, 0)
        self.rotation = 0

        self.bufferUsage = 0

        self.invalidChunkQueue = deque()
        self._chunkWorker = None
        self.chunkRenderers = {}
        self.loadableChunkMarkers = DisplayList()
        self.visibleLayers = set(Layer.AllLayers)
        self.masterLists = None

        # store opacity as a 0-255 byte
        alpha *= 255
        self.alpha = (int(alpha) & 0xff)

        self.chunkStartTime = datetime.now()
        self.oldChunkStartTime = self.chunkStartTime

        self.oldPosition = None

        self.chunkSamples = [timedelta(0, 0, 0)] * 15

        self.chunkIterator = None

        # invalidate/redraw whenever any of these settings change
        config.settings.fastLeaves.addObserver(self)
        config.settings.roughGraphics.addObserver(self)
        config.settings.showHiddenOres.addObserver(self)
        config.settings.vertexBufferLimit.addObserver(self)
        config.settings.drawEntities.addObserver(self)
        config.settings.drawTileEntities.addObserver(self)
        config.settings.drawTileTicks.addObserver(self)
        config.settings.drawUnpopulatedChunks.addObserver(self, "drawTerrainPopulated")
        config.settings.drawChunkBorders.addObserver(self, "drawChunkBorder")
        config.settings.drawMonsters.addObserver(self)
        config.settings.drawItems.addObserver(self)
        config.settings.showChunkRedraw.addObserver(self, "showRedraw")
        config.settings.spaceHeight.addObserver(self)
        config.settings.targetFPS.addObserver(self, "targetFPS")

        # per-ore visibility toggles; the ore id is bound as a default argument
        for ore in config.settings.hiddableOres.get():
            config.settings["showOre{}".format(ore)].addObserver(self, callback=lambda x, id=ore: self.showOre(id, x))

        # assigning triggers the `level` property setter below
        self.level = level
        if self.level.__class__.__name__ in ("FakeLevel", "MCSchematic"):
            self.toggleLayer(False, 'ChunkBorder')

    chunkClass = ChunkRenderer
    calculatorClass = ChunkCalculator

    minViewDistance = 2  # chunks
    _viewDistance = 8

    needsRedraw = True
def toggleLayer(self, val, layer):
if val:
self.visibleLayers.add(layer)
else:
self.visibleLayers.discard(layer)
for cr in self.chunkRenderers.itervalues():
cr.invalidLayers.add(layer)
self.loadNearbyChunks()
    def layerProperty(layer, default=True):  # @NoSelf
        """Class-level factory: a property stored on ``_draw<layer>`` that
        calls toggleLayer only when the value actually changes."""
        attr = intern("_draw" + layer)

        def _get(self):
            return getattr(self, attr, default)

        def _set(self, val):
            if val != _get(self):
                setattr(self, attr, val)
                self.toggleLayer(val, layer)

        return property(_get, _set)

    drawEntities = layerProperty(Layer.Entities)
    drawTileEntities = layerProperty(Layer.TileEntities)
    drawTileTicks = layerProperty(Layer.TileTicks)
    drawMonsters = layerProperty(Layer.Monsters)
    drawItems = layerProperty(Layer.Items)
    drawTerrainPopulated = layerProperty(Layer.TerrainPopulated)
    drawChunkBorder = layerProperty(Layer.ChunkBorder)

    def inSpace(self):
        """True when the camera is far above or below the level's build range."""
        if self.level is None:
            return True
        h = self.position[1]
        if self.level.dimNo == 1:
            # NOTE(review): obfuscated call of unknown purpose, run only in
            # dimension 1 (the End) — left untouched.
            _2478aq_heot(h)
        return ((h > self.level.Height + self.spaceHeight) or
                (h <= -self.spaceHeight))
def chunkDistance(self, cpos):
camx, camy, camz = self.position
# if the renderer is offset into the world somewhere, adjust for that
ox, oy, oz = self.origin
camx -= ox
camz -= oz
camcx = int(numpy.floor(camx)) >> 4
camcz = int(numpy.floor(camz)) >> 4
cx, cz = cpos
return max(abs(cx - camcx), abs(cz - camcz))
    overheadMode = False

    def detailLevelForChunk(self, cpos):
        """Return 0 for full detail, 1 for low detail (far away or in space),
        2 for overhead mode."""
        if self.overheadMode:
            return 2
        if self.isPreviewer:
            w, l, h = self.level.bounds.size
            # small previews always render at full detail
            if w + l < 256:
                return 0
        distance = self.chunkDistance(cpos) - self.viewDistance
        if distance > 0 or self.inSpace():
            return 1
        return 0

    def getViewDistance(self):
        return self._viewDistance

    def setViewDistance(self, vd):
        # force an even value and clamp to [minViewDistance, configured max]
        vd = int(vd) & 0xfffe
        vd = min(max(vd, self.minViewDistance), config.settings.maxViewDistance.get())
        if vd != self._viewDistance:
            self._viewDistance = vd
            self.viewDistanceChanged()

    viewDistance = property(getViewDistance, setViewDistance, None, "View Distance")
@property
def effectiveViewDistance(self):
if self.inSpace():
return self.viewDistance * 4
else:
return self.viewDistance * 2
    def viewDistanceChanged(self):
        """Rebuild the loaded chunk set after the view distance changes."""
        self.oldPosition = None  # xxx force loadVisibleChunks to act
        self.discardMasterList()
        self.loadNearbyChunks()
        self.discardChunksOutsideViewDistance()

    maxWorkFactor = 64
    minWorkFactor = 1
    workFactor = 2

    chunkCalculator = None

    _level = None

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, level):
        """ this probably warrants creating a new renderer """
        self.stopWork()

        self._level = level
        self.oldPosition = None
        self.position = (0, 0, 0)
        self.chunkCalculator = None

        self.invalidChunkQueue = deque()

        self.discardAllChunks()

        self.loadableChunkMarkers.invalidate()

        if level:
            self.chunkCalculator = self.calculatorClass(self.level)

            self.oldPosition = None
            self.loadNearbyChunks()

    position = (0, 0, 0)
def loadChunksStartingFrom(self, wx, wz, distance=None): # world position
if None is self.level:
return
if self.level.saving:
return
if distance is None:
d = self.effectiveViewDistance
else:
d = distance
self.chunkIterator = self.iterateChunks(wx, wz, d * 2)
    def iterateChunks(self, x, z, d):
        """Yield chunk coords in an expanding square spiral centered on the
        chunk containing world position (x, z), out to about d chunks
        (unbounded in overhead mode)."""
        cx = x >> 4
        cz = z >> 4

        yield (cx, cz)

        step = dir = 1

        while True:
            for i in xrange(step):
                cx += dir
                yield (cx, cz)

            for i in xrange(step):
                cz += dir
                yield (cx, cz)

            step += 1
            if step > d and not self.overheadMode:
                # Python 2 generator idiom; under PEP 479 (3.7+) this would
                # have to be a plain `return`.
                raise StopIteration

            dir = -dir

    chunkIterator = None

    @property
    def chunkWorker(self):
        # lazily (re)create the background work iterator
        if self._chunkWorker is None:
            self._chunkWorker = self.makeWorkIterator()
        return self._chunkWorker

    def stopWork(self):
        # dropping the iterator abandons any in-progress chunk work
        self._chunkWorker = None
    def discardAllChunks(self):
        """Drop every chunk renderer and its display lists; forces a reload."""
        self.bufferUsage = 0
        self.forgetAllDisplayLists()
        self.chunkRenderers = {}
        self.oldPosition = None  # xxx force reload

    def discardChunksInBox(self, box):
        self.discardChunks(box.chunkPositions)

    def discardChunksOutsideViewDistance(self):
        """Discard chunk renderers lying outside the current view square."""
        if self.overheadMode:
            return
        # print "discardChunksOutsideViewDistance"
        d = self.effectiveViewDistance
        cx = (self.position[0] - self.origin[0]) / 16
        cz = (self.position[2] - self.origin[2]) / 16

        origin = (cx - d, cz - d)
        size = d * 2

        if not len(self.chunkRenderers):
            return
        (ox, oz) = origin
        # vectorized bounds test across all loaded chunk positions
        chunks = numpy.fromiter(self.chunkRenderers.iterkeys(), dtype='i,i', count=len(self.chunkRenderers))
        chunks.dtype = 'int32'
        chunks.shape = len(self.chunkRenderers), 2

        if size:
            outsideChunks = chunks[:, 0] < ox - 1
            outsideChunks |= chunks[:, 0] > ox + size
            outsideChunks |= chunks[:, 1] < oz - 1
            outsideChunks |= chunks[:, 1] > oz + size
            chunks = chunks[outsideChunks]

        self.discardChunks(chunks)

    def discardChunks(self, chunks):
        for cx, cz in chunks:
            self.discardChunk(cx, cz)
        self.oldPosition = None  # xxx force reload

    def discardChunk(self, cx, cz):
        " discards the chunk renderer for this chunk and compresses the chunk "
        if (cx, cz) in self.chunkRenderers:
            self.bufferUsage -= self.chunkRenderers[cx, cz].bufferSize
            self.chunkRenderers[cx, cz].forgetDisplayLists()
            del self.chunkRenderers[cx, cz]
    _fastLeaves = False

    @property
    def fastLeaves(self):
        return self._fastLeaves

    @fastLeaves.setter
    def fastLeaves(self, val):
        # changing the setting invalidates every chunk's geometry
        if self._fastLeaves != bool(val):
            self.discardAllChunks()

        self._fastLeaves = bool(val)

    _roughGraphics = False

    @property
    def roughGraphics(self):
        return self._roughGraphics

    @roughGraphics.setter
    def roughGraphics(self, val):
        if self._roughGraphics != bool(val):
            self.discardAllChunks()

        self._roughGraphics = bool(val)

    _showHiddenOres = False

    @property
    def showHiddenOres(self):
        return self._showHiddenOres

    @showHiddenOres.setter
    def showHiddenOres(self, val):
        if self._showHiddenOres != bool(val):
            self.discardAllChunks()

        self._showHiddenOres = bool(val)

    def showOre(self, ore, show):
        """Render an ore as itself, or substitute material 1 to hide it."""
        ChunkCalculator.hiddenOreMaterials[ore] = ore if show else 1
        if self.showHiddenOres:
            self.discardAllChunks()
    def invalidateChunk(self, cx, cz, layers=None):
        " marks the chunk for regenerating vertex data and display lists "
        if (cx, cz) in self.chunkRenderers:
            self.chunkRenderers[(cx, cz)].invalidate(layers)

            self.invalidChunkQueue.append((cx, cz))  # xxx encapsulate

    def invalidateChunksInBox(self, box, layers=None):
        # If the box is at the edge of any chunks, expanding by 1 makes sure the neighboring chunk gets redrawn.
        box = box.expand(1)

        self.invalidateChunks(box.chunkPositions, layers)

    def invalidateEntitiesInBox(self, box):
        self.invalidateChunks(box.chunkPositions, [Layer.Entities])

    def invalidateTileTicksInBox(self, box):
        self.invalidateChunks(box.chunkPositions, [Layer.TileTicks])

    def invalidateChunks(self, chunks, layers=None):
        for (cx, cz) in chunks:
            self.invalidateChunk(cx, cz, layers)

        self.stopWork()
        self.discardMasterList()
        self.loadNearbyChunks()

    def invalidateAllChunks(self, layers=None):
        self.invalidateChunks(self.chunkRenderers.iterkeys(), layers)

    def forgetAllDisplayLists(self):
        for cr in self.chunkRenderers.itervalues():
            cr.forgetDisplayLists()

    def invalidateMasterList(self):
        self.discardMasterList()

    shouldRecreateMasterList = True

    def discardMasterList(self):
        # the list is rebuilt lazily by createMasterLists on the next draw
        self.shouldRecreateMasterList = True
@property
def shouldDrawAll(self):
box = self.level.bounds
return self.isPreviewer and box.width + box.length < 256
distanceToChunkReload = 32.0
def cameraMovedFarEnough(self):
if self.shouldDrawAll:
return False
if self.oldPosition is None:
return True
cPos = self.position
oldPos = self.oldPosition
cameraDelta = self.distanceToChunkReload
return any([abs(x - y) > cameraDelta for x, y in zip(cPos, oldPos)])
    def loadVisibleChunks(self):
        """ loads nearby chunks if the camera has moved beyond a certain distance """
        # print "loadVisibleChunks"
        if self.cameraMovedFarEnough():
            # throttle reloads to at most once per half second
            if datetime.now() - self.lastVisibleLoad > timedelta(0, 0.5):
                self.discardChunksOutsideViewDistance()
                self.loadNearbyChunks()

                self.oldPosition = self.position
                self.lastVisibleLoad = datetime.now()

    lastVisibleLoad = datetime.now()

    def loadNearbyChunks(self):
        if None is self.level:
            return
        # print "loadNearbyChunks"
        cameraPos = self.position

        if self.shouldDrawAll:
            self.loadAllChunks()
        else:
            # subtract self.origin to load nearby chunks correctly for preview renderers
            self.loadChunksStartingFrom(int(cameraPos[0]) - self.origin[0], int(cameraPos[2]) - self.origin[2])

    def loadAllChunks(self):
        """Queue every chunk in the level, spiraling from its center."""
        box = self.level.bounds

        self.loadChunksStartingFrom(box.origin[0] + box.width / 2, box.origin[2] + box.length / 2,
                                    max(box.width, box.length))
    _floorTexture = None

    @property
    def floorTexture(self):
        # lazily build the 2x2 checker texture used for loadable-chunk markers
        if self._floorTexture is None:
            self._floorTexture = Texture(self.makeFloorTex)
        return self._floorTexture

    @staticmethod
    def makeFloorTex():
        """Upload a 2x2 translucent white checkerboard as the bound GL texture."""
        color0 = (0xff, 0xff, 0xff, 0x22)
        color1 = (0xff, 0xff, 0xff, 0x44)

        img = numpy.array([color0, color1, color1, color0], dtype='uint8')

        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)

        GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, 2, 2, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, img)
    def invalidateChunkMarkers(self):
        self.loadableChunkMarkers.invalidate()

    def _drawLoadableChunkMarkers(self):
        """Draw translucent textured quads over every loadable chunk."""
        if self.level.chunkCount:
            chunkSet = set(self.level.allChunks)

            sizedChunks = chunkMarkers(chunkSet)

            GL.glPushAttrib(GL.GL_FOG_BIT)
            GL.glDisable(GL.GL_FOG)

            GL.glEnable(GL.GL_BLEND)
            GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
            GL.glPolygonOffset(DepthOffset.ChunkMarkers, DepthOffset.ChunkMarkers)
            GL.glEnable(GL.GL_DEPTH_TEST)

            GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glColor(1.0, 1.0, 1.0, 1.0)

            self.floorTexture.bind()
            # markers are grouped by square size; draw each group as one batch
            for size, chunks in sizedChunks.iteritems():
                if not len(chunks):
                    continue
                chunks = numpy.array(chunks, dtype='float32')

                # expand each marker origin into a 4-corner quad in world units
                chunkPosition = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
                chunkPosition[:, :, (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
                chunkPosition[:, :, (0, 2)] *= size
                chunkPosition[:, :, (0, 2)] += chunks[:, numpy.newaxis, :]
                chunkPosition *= 16
                GL.glVertexPointer(3, GL.GL_FLOAT, 0, chunkPosition.ravel())
                GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, (chunkPosition[..., (0, 2)] * 16).ravel())
                GL.glDrawArrays(GL.GL_QUADS, 0, len(chunkPosition) * 4)

            GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glDisable(GL.GL_BLEND)
            GL.glDisable(GL.GL_DEPTH_TEST)
            GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
            GL.glPopAttrib()

    def drawLoadableChunkMarkers(self):
        if not self.isPreviewer or isinstance(self.level, pymclevel.MCInfdevOldLevel):
            self.loadableChunkMarkers.call(self._drawLoadableChunkMarkers)
    needsImmediateRedraw = False
    viewingFrustum = None

    if "-debuglists" in sys.argv:
        # debug mode: skip master lists and draw each chunk individually
        def createMasterLists(self):
            pass

        def callMasterLists(self):
            for cr in self.chunkRenderers.itervalues():
                cr.debugDraw()
    else:
        def createMasterLists(self):
            """Gather per-chunk display lists into one array per renderstate,
            rebuilding at most ~80 chunks' lists per frame."""
            if self.shouldRecreateMasterList:
                lists = {}
                chunkLists = defaultdict(list)
                chunksPerFrame = 80
                shouldRecreateAgain = False

                for ch in self.chunkRenderers.itervalues():
                    if chunksPerFrame:
                        if ch.needsRedisplay:
                            chunksPerFrame -= 1
                        ch.makeDisplayLists()
                    else:
                        # budget exhausted — finish remaining chunks next frame
                        shouldRecreateAgain = True

                    if ch.renderstateLists:
                        for rs in ch.renderstateLists:
                            chunkLists[rs] += ch.renderstateLists[rs]

                for rs in chunkLists:
                    if len(chunkLists[rs]):
                        # concatenate into a flat uint32 array for glCallLists
                        lists[rs] = numpy.array(chunkLists[rs], dtype='uint32').ravel()

                self.masterLists = lists
                self.shouldRecreateMasterList = shouldRecreateAgain
                self.needsImmediateRedraw = shouldRecreateAgain

        def callMasterLists(self):
            """Bind each renderstate and execute its batched display lists."""
            for renderstate in self.chunkCalculator.renderstates:
                if renderstate not in self.masterLists:
                    continue

                if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
                    GL.glEnable(GL.GL_BLEND)
                renderstate.bind()

                GL.glCallLists(self.masterLists[renderstate])

                renderstate.release()
                if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
                    GL.glDisable(GL.GL_BLEND)
errorLimit = 10
def draw(self):
    # Render all chunk display lists plus the loadable-chunk markers.
    self.needsRedraw = False
    # Nothing to draw until a level, a chunk calculator, and rendering exist.
    if not self.level:
        return
    if not self.chunkCalculator:
        return
    if not self.render:
        return

    if self.level.materials.name in ("Pocket", "Alpha"):
        # These terrain atlases use half-scale texture coordinates; scale
        # the texture matrix down here and restore it at the end.
        GL.glMatrixMode(GL.GL_TEXTURE)
        GL.glScalef(1 / 2., 1 / 2., 1 / 2.)

    with gl.glPushMatrix(GL.GL_MODELVIEW):
        dx, dy, dz = self.origin
        GL.glTranslate(dx, dy, dz)

        GL.glEnable(GL.GL_CULL_FACE)
        GL.glEnable(GL.GL_DEPTH_TEST)

        self.level.materials.terrainTexture.bind()
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
        # Previewers draw with a different polygon depth offset so the
        # preview does not fight the main renderer for the same depth.
        offset = DepthOffset.PreviewRenderer if self.isPreviewer else DepthOffset.Renderer
        GL.glPolygonOffset(offset, offset)
        GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)

        self.createMasterLists()
        try:
            self.callMasterLists()
        except GL.GLError as e:
            # Tolerate a limited number of GL errors before going silent.
            if self.errorLimit:
                self.errorLimit -= 1
                traceback.print_exc()
                print e

        GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)

        GL.glDisable(GL.GL_CULL_FACE)
        GL.glDisable(GL.GL_DEPTH_TEST)

        GL.glDisable(GL.GL_TEXTURE_2D)
        GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
        self.drawLoadableChunkMarkers()

    if self.level.materials.name in ("Pocket", "Alpha"):
        # Undo the half-scale applied above.
        GL.glMatrixMode(GL.GL_TEXTURE)
        GL.glScalef(2., 2., 2.)
# Flag consulted elsewhere when a render error has already been reported.
renderErrorHandled = False

def addDebugInfo(self, addDebugString):
    # Append renderer statistics to the debug readout: buffer usage in MB,
    # work-queue length, whether the loading iterator is running ([LR]),
    # and the number of live chunk renderers.
    addDebugString("BU: {0} MB, ".format(
        self.bufferUsage / 1000000,
    ))

    addDebugString("WQ: {0}, ".format(len(self.invalidChunkQueue)))
    if self.chunkIterator:
        addDebugString("[LR], ")
    addDebugString("CR: {0}, ".format(len(self.chunkRenderers), ))
def next(self):
    # Advance the background chunk worker one work unit (Python 2 iterator
    # protocol); StopIteration propagates when the worker is exhausted.
    self.chunkWorker.next()
def makeWorkIterator(self):
    ''' does chunk face and vertex calculation work. returns a generator that can be
    iterated over for smaller work units.'''
    # FIX (PEP 479): this generator previously ended itself with
    # 'raise StopIteration' and by letting StopIteration leak out of
    # chunkIterator.next(); both become RuntimeError inside generators on
    # Python 3.7+. A bare 'return' (and catching the iterator's
    # StopIteration) is byte-for-byte equivalent behavior on Python 2.
    try:
        while True:
            if self.level is None:
                return
            # Drop a pathologically long invalidation queue instead of
            # grinding through it chunk by chunk.
            if len(self.invalidChunkQueue) > 1024:
                self.invalidChunkQueue.clear()

            if len(self.invalidChunkQueue):
                # Invalidated chunks take priority over fresh loads.
                c = self.invalidChunkQueue[0]
                for _ in self.workOnChunk(c):
                    yield
                self.invalidChunkQueue.popleft()
            elif self.chunkIterator is None:
                return
            else:
                try:
                    c = next(self.chunkIterator)
                except StopIteration:
                    return
                if self.vertexBufferLimit:
                    # Stay under 90% of the vertex buffer limit by
                    # discarding the chunk renderer farthest from 'c'.
                    while self.bufferUsage > (0.9 * (self.vertexBufferLimit << 20)):
                        deadChunk = None
                        deadDistance = self.chunkDistance(c)
                        for cr in self.chunkRenderers.itervalues():
                            dist = self.chunkDistance(cr.chunkPosition)
                            if dist > deadDistance:
                                deadChunk = cr
                                deadDistance = dist
                        if deadChunk is not None:
                            self.discardChunk(*deadChunk.chunkPosition)
                        else:
                            # Nothing farther than 'c' to evict; give up and
                            # skip working on this chunk (while-else below
                            # only runs when the loop was NOT broken).
                            break
                    else:
                        for _ in self.workOnChunk(c):
                            yield
                else:
                    for _ in self.workOnChunk(c):
                        yield

            yield
    finally:
        # Always drop worker state so a fresh iterator is built next time.
        self._chunkWorker = None
        if self.chunkIterator:
            self.chunkIterator = None

# Vertex buffer budget in megabytes (see the 90%-full check above).
vertexBufferLimit = 384
def getChunkRenderer(self, c):
    """Return the renderer registered for chunk position `c`, or build a
    fresh one via self.chunkClass.

    A freshly built renderer is deliberately NOT cached here; it is
    registered by chunkDone once its face calculation completes.
    """
    try:
        return self.chunkRenderers[c]
    except KeyError:
        return self.chunkClass(self, c)
def calcFacesForChunkRenderer(self, cr):
    # Generator: recompute the faces of one chunk renderer, yielding once
    # per unit of work so the caller can interleave other tasks.
    # The old buffer size is subtracted now; chunkDone adds the new size.
    self.bufferUsage -= cr.bufferSize
    calc = cr.calcFaces()
    work = 0
    for _ in calc:
        yield
        work += 1

    self.chunkDone(cr, work)
def workOnChunk(self, c):
    """Generator: compute faces and vertices for chunk position `c`,
    yielding every `MCRenderer.workFactor` work units.

    Chunks outside the viewing frustum (when one is set) are skipped.
    Any exception during face calculation is logged and the chunk skipped.
    """
    work = 0

    if self.level.containsChunk(*c):
        cr = self.getChunkRenderer(c)
        if self.viewingFrustum:
            # if not self.viewingFrustum.visible(numpy.array([[c[0] * 16 + 8, 64, c[1] * 16 + 8, 1.0]]), 64).any():
            if not self.viewingFrustum.visible1([c[0] * 16 + 8, self.level.Height / 2, c[1] * 16 + 8, 1.0],
                                                self.level.Height / 2):
                # FIX (PEP 479): was 'raise StopIteration', which is an
                # error inside generators on Python 3.7+; a bare return
                # ends the generator identically on Python 2.
                return

        faceInfoCalculator = self.calcFacesForChunkRenderer(cr)
        try:
            for _ in faceInfoCalculator:
                work += 1
                if (work % MCRenderer.workFactor) == 0:
                    yield

            self.invalidateMasterList()

        except Exception as e:
            traceback.print_exc()
            fn = c
            logging.info(u"Skipped chunk {f}: {e}".format(e=e, f=fn))

# Countdown used by chunkDone before setting needsRedraw.
redrawChunks = 0
def chunkDone(self, chunkRenderer, work):
    # Register a finished chunk renderer and update buffer/timing stats.
    self.chunkRenderers[chunkRenderer.chunkPosition] = chunkRenderer
    self.bufferUsage += chunkRenderer.bufferSize
    # print "Chunk {0} used {1} work units".format(chunkRenderer.chunkPosition, work)
    if not self.needsRedraw:
        # Batch redraws: only flag needsRedraw every few completed chunks.
        if self.redrawChunks:
            self.redrawChunks -= 1
            if not self.redrawChunks:
                self.needsRedraw = True
        else:
            self.redrawChunks = 2
    if work > 0:
        # Record how long this chunk took in the rolling sample window.
        self.oldChunkStartTime = self.chunkStartTime
        self.chunkStartTime = datetime.now()
        self.chunkSamples.pop(0)
        self.chunkSamples.append(self.chunkStartTime - self.oldChunkStartTime)
class PreviewRenderer(MCRenderer):
    # Renderer variant used for previews; isPreviewer selects the preview
    # polygon depth offset and gates the loadable-chunk markers in MCRenderer.
    isPreviewer = True
def rendermain():
    # Standalone benchmark: load "World1", time chunk loading, then time
    # 200 rendered frames and report FPS. Intended to run under cProfile
    # (see the __main__ guard below).
    renderer = MCRenderer()
    renderer.level = pymclevel.mclevel.loadWorld("World1")
    renderer.viewDistance = 6
    renderer.detailLevelForChunk = lambda *x: 0
    start = datetime.now()

    renderer.loadVisibleChunks()

    try:
        while True:
            # for i in range(100):
            # Drive the chunk worker until it is exhausted.
            renderer.next()
    except StopIteration:
        pass
    except Exception as e:
        traceback.print_exc()
        print repr(e)

    duration = datetime.now() - start
    perchunk = duration / len(renderer.chunkRenderers)
    print "Duration: {0} ({1} chunks per second, {2} per chunk, {3} chunks)".format(duration,
                                                                                   1000000.0 / perchunk.microseconds,
                                                                                   perchunk,
                                                                                   len(renderer.chunkRenderers))

    # display.init( (640, 480), OPENGL | DOUBLEBUF )
    # NOTE(review): GLDisplayContext is imported but never instantiated
    # here -- presumably the import has side effects or a context already
    # exists; confirm before removing.
    from utilities.gl_display_context import GLDisplayContext
    from OpenGL import GLU
    import pygame

    # distance = 4000
    # Set up a simple fixed camera looking down from y=366.
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glLoadIdentity()
    GLU.gluPerspective(35, 640.0 / 480.0, 0.5, 4000.0)
    h = 366
    pos = (0, h, 0)
    look = (0.0001, h - 1, 0.0001)
    up = (0, 1, 0)
    GL.glMatrixMode(GL.GL_MODELVIEW)
    GL.glLoadIdentity()
    GLU.gluLookAt(pos[0], pos[1], pos[2],
                  look[0], look[1], look[2],
                  up[0], up[1], up[2])

    GL.glClearColor(0.0, 0.0, 0.0, 1.0)

    framestart = datetime.now()
    frames = 200
    for i in xrange(frames):
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        renderer.draw()
        pygame.display.flip()

    delta = datetime.now() - framestart
    seconds = delta.seconds + delta.microseconds / 1000000.0
    print "{0} frames in {1} ({2} per frame, {3} FPS)".format(frames, delta, delta / frames, frames / seconds)

    # Wait for a mouse click before exiting so the window can be inspected.
    while True:
        evt = pygame.event.poll()
        if evt.type == pygame.MOUSEBUTTONDOWN:
            break
            # time.sleep(3.0)
import traceback
if __name__ == "__main__":
    import cProfile

    # Profile a full benchmark run; stats are written to 'mcedit.profile'.
    cProfile.run("rendermain()", "mcedit.profile")
| 146,617 | 35.572213 | 153 | py |
MCEdit-Unified | MCEdit-Unified-master/resource_packs.py | # -*- coding: utf-8 -*-
#!# If the comman line parameter '--debug-packs' is given, the logging level is set to debug.
#!# Otherwise, it is set to critical.
from PIL import Image
import zipfile
import directories
import os
import shutil
from config import config
from cStringIO import StringIO
import locale
import traceback
from utilities.misc import Singleton
# Platform default text encoding, falling back to UTF-8 when undetectable.
DEF_ENC = locale.getdefaultlocale()[1]
if DEF_ENC is None:
    DEF_ENC = "UTF-8"

try:
    # Raise the open-file limit: parsing packs keeps many image buffers
    # alive at once. The 'resource' module is POSIX-only, hence the guard.
    import resource  # @UnresolvedImport

    resource.setrlimit(resource.RLIMIT_NOFILE, (500,-1))
except:
    pass

#!# Debugging .zip resource pack not loaded bug.
import logging

# 50 == logging.CRITICAL (quiet); '--debug-packs' switches to 10 == DEBUG.
level = 50
if '--debug-packs' in os.sys.argv:
    level = 10
log = logging.getLogger(__name__)
log.setLevel(level)
#!#
def step(slot):
    """Convert a texture-slot index to a pixel offset in the terrain atlas.

    Each slot is 16 pixels wide, so the offset is simply ``slot * 16``.

    :param slot: Texture slot
    :type slot: int
    """
    return slot * 16
'''
Empty comment lines like:
#
are for texture spaces that I don't know what should go there
'''
textureSlots = {
# Start Top Row
"grass_top": (step(0),step(0)),
"stone": (step(1),step(0)),
"dirt": (step(2),step(0)),
"grass_side": (step(3),step(0)),
"planks_oak": (step(4),step(0)),
"stone_slab_side": (step(5),step(0)),
"stone_slab_top": (step(6),step(0)),
"brick": (step(7),step(0)),
"tnt_side": (step(8),step(0)),
"tnt_top": (step(9),step(0)),
"tnt_bottom": (step(10),step(0)),
"web": (step(11),step(0)),
"flower_rose": (step(12),step(0)),
"flower_dandelion": (step(13),step(0)),
#
"sapling_oak": (step(15),step(0)),
"flower_blue_orchid": (step(16),step(0)),
"flower_allium": (step(17),step(0)),
"flower_houstonia": (step(18),step(0)),
"flower_tulip_red": (step(19),step(0)),
"sapling_roofed_oak": (step(20),step(0)),
# End Top Row
# Start Second Row
"cobblestone": (step(0),step(1)),
"bedrock": (step(1),step(1)),
"sand": (step(2),step(1)),
"gravel": (step(3),step(1)),
"log_oak": (step(4),step(1)),
"log_oak_top": (step(5),step(1)),
"iron_block": (step(6),step(1)),
"gold_block": (step(7),step(1)),
"diamond_block": (step(8),step(1)),
"emerald_block": (step(9),step(1)),
#
"red_sand": (step(11),step(1)),
"mushroom_red": (step(12),step(1)),
"mushroom_brown": (step(13),step(1)),
"sapling_jungle": (step(14),step(1)),
"fire_layer_0": (step(15),step(1)),
"flower_tulip_orange": (step(16),step(1)),
"flower_tulip_white": (step(17),step(1)),
"flower_tulip_pink": (step(18),step(1)),
"flower_oxeye_daisy": (step(19),step(1)),
"sapling_acacia": (step(20),step(1)),
# End Second Row
# Start Third Row
"gold_ore": (step(0),step(2)),
"iron_ore": (step(1),step(2)),
"coal_ore": (step(2),step(2)),
"bookshelf": (step(3),step(2)),
"cobblestone_mossy": (step(4),step(2)),
"obsidian": (step(5),step(2)),
#
"tallgrass": (step(7),step(2)),
#
"beacon": (step(9),step(2)),
"dropper_front_horizontal": (step(10),step(2)),
"crafting_table_top": (step(11),step(2)),
"furnace_front_off": (step(12),step(2)),
"furnace_side": (step(13),step(2)),
"dispenser_front_horizontal": (step(14),step(2)),
"fire_layer_1": (step(15),step(2)),
#
#
#
#
"daylight_detector_side": (step(20),step(2)),
# End Third Row
# Start Fourth Row
"sponge": (step(0), step(3)),
"glass": (step(1), step(3)),
"diamond_ore": (step(2), step(3)),
"redstone_ore": (step(3), step(3)),
#
#
"stonebrick": (step(6), step(3)),
"deadbush": (step(7), step(3)),
"fern": (step(8), step(3)),
"dirt_podzol_top": (step(9), step(3)),
"dirt_podzol_side": (step(10), step(3)),
"crafting_table_side": (step(11), step(3)),
"crafting_table_front": (step(12), step(3)),
"furnace_front_on": (step(13), step(3)),
"furnace_top": (step(14), step(3)),
"sapling_spruce": (step(15), step(3)),
#
#
#
#
# End Fourth Row
# Start Fifth Row
"wool_colored_white": (step(0), step(4)),
"mob_spawner": (step(1), step(4)),
"snow": (step(2), step(4)),
"ice": (step(3), step(4)),
"grass_side_snowed": (step(4), step(4)),
"cactus_top": (step(5), step(4)),
"cactus_side": (step(6), step(4)),
"cactus_bottom": (step(7), step(4)),
"clay": (step(8), step(4)),
"reeds": (step(9), step(4)),
"jukebox_side": (step(10), step(4)),
"jukebox_top": (step(11), step(4)),
"waterlily": (step(12), step(4)),
"mycelium_side": (step(13), step(4)),
"mycelium_top": (step(14), step(4)),
"sapling_birch": (step(15), step(4)),
#
#
"dropper_front_vertical": (step(18), step(4)),
"daylight_detector_inverted_top": (step(19), step(4)),
# End Fifth Row
# Start Sixth Row
"torch_on": (step(0), step(5)),
"door_wood_upper": (step(1), step(5)),
"door_iron_upper": (step(2), step(5)),
"ladder": (step(3), step(5)),
"trapdoor": (step(4), step(5)),
"iron_bars": (step(5), step(5)),
"farmland_wet": (step(6), step(5)),
"farmland_dry": (step(7), step(5)),
"wheat_stage_0": (step(8), step(5)),
"wheat_stage_1": (step(9), step(5)),
"wheat_stage_2": (step(10), step(5)),
"wheat_stage_3": (step(11), step(5)),
"wheat_stage_4": (step(12), step(5)),
"wheat_stage_5": (step(13), step(5)),
"wheat_stage_6": (step(14), step(5)),
"wheat_stage_7": (step(15), step(5)),
#
#
"dispenser_front_vertical": (step(18), step(5)),
#
# End Sixth Row
# Start Seventh Row
"lever": (step(0), step(6)),
"door_wood_lower": (step(1), step(6)),
"door_iron_lower": (step(2), step(6)),
"redstone_torch_on": (step(3), step(6)),
"stonebrick_mossy": (step(4), step(6)),
"stonebrick_cracked": (step(5), step(6)),
"pumpkin_top": (step(6), step(6)),
"netherrack": (step(7), step(6)),
"soul_sand": (step(8), step(6)),
"glowstone": (step(9), step(6)),
"piston_top_sticky": (step(10), step(6)),
"piston_top_normal": (step(11), step(6)),
"piston_side": (step(12), step(6)),
"piston_bottom": (step(13), step(6)),
"piston_inner": (step(14), step(6)),
"pumpkin_stem_disconnected": (step(15), step(6)),
#
#
#
# End Seventh Row
# Start Eighth Row
"rail_normal_turned": (step(0),step(7)),
"wool_colored_black": (step(1),step(7)),
"wool_colored_gray": (step(2),step(7)),
"redstone_torch_off": (step(3),step(7)),
"log_spruce": (step(4),step(7)),
"log_birch": (step(5),step(7)),
"pumpkin_side": (step(6),step(7)),
"pumpkin_face_off": (step(7),step(7)),
"pumpkin_face_on": (step(8),step(7)),
"cake_top": (step(9),step(7)),
"cake_side": (step(10),step(7)),
"cake_inner": (step(11),step(7)),
"cake_bottom": (step(12),step(7)),
"mushroom_block_skin_red": (step(13),step(7)),
"mushroom_block_skin_brown": (step(14),step(7)),
"pumpkin_stem_connected": (step(15),step(7)),
#
#
"repeater_off_west": (step(18),step(7)),
#
# End Eighth Row
# Start Ninth Row
"rail_normal": (step(0),step(8)),
"wool_colored_red": (step(1),step(8)),
"wool_colored_magenta": (step(2),step(8)),
"repeater_off_south": (step(3),step(8)),
"leaves_spruce": (step(4),step(8)),
#
"bed_feet_top": (step(6),step(8)),
"bed_head_top": (step(7),step(8)),
"melon_side": (step(8),step(8)),
"melon_top": (step(9),step(8)),
#
#
#
"mushroom_block_skin_stem": (step(13),step(8)),
"mushroom_block_inside": (step(14),step(8)),
"vine": (step(15),step(8)),
#
#
"repeater_off_north": (step(18),step(8)),
#
# End Ninth Row
# Start Tenth Row
"lapis_block": (step(0),step(9)),
"wool_colored_green": (step(1),step(9)),
"wool_colored_lime": (step(2),step(9)),
"repeater_on_south": (step(3),step(9)),
#
"bed_feet_end": (step(5),step(9)),
"bed_feet_side": (step(6),step(9)),
"bed_head_side": (step(7),step(9)),
"bed_head_end": (step(8),step(9)),
"log_jungle": (step(9),step(9)),
"cauldron_side": (step(10),step(9)),
"cauldron_bottom": (step(11),step(9)),
"brewing_stand_base": (step(12),step(9)),
"brewing_stand": (step(13),step(9)),
"endframe_top": (step(14),step(9)),
"endframe_side": (step(15),step(9)),
"double_plant_sunflower_bottom": (step(16),step(9)),
#
"repeater_off_east": (step(18),step(9)),
"structure_block_data": (step(19),step(9)),
"structure_block_corner": (step(20),step(9)),
# End Tenth Row
# Start Eleventh Row
"lapis_ore": (step(0),step(10)),
"wool_colored_brown": (step(1),step(10)),
"wool_colored_yellow": (step(2),step(10)),
"rail_golden": (step(3),step(10)),
"redstone_dust_cross": (step(4),step(10)),
#
"enchanting_table_top": (step(6),step(10)),
"dragon_egg": (step(7),step(10)),
"cocoa_stage_2": (step(8),step(10)),
"cocoa_stage_1": (step(9),step(10)),
"cocoa_stage_0": (step(10),step(10)),
"emerald_ore": (step(11),step(10)),
"trip_wire_source": (step(12),step(10)),
"trip_wire": (step(13),step(10)),
"endframe_eye": (step(14),step(10)),
"end_stone": (step(15),step(10)),
"double_plant_syringa_bottom": (step(16),step(10)),
"double_plant_syringa_top": (step(17),step(10)),
"repeater_on_west": (step(18),step(10)),
"structure_block_save": (step(19),step(10)),
"structure_block_load": (step(20),step(10)),
# End Eleventh Row
# Start Twelfth Row
"sandstone_top": (step(0),step(11)),
"wool_colored_blue": (step(1),step(11)),
"wool_colored_light_blue": (step(2),step(11)),
"rail_golden_powered": (step(3),step(11)),
#
"redstone_dust_line": (step(5),step(11)),
"enchanting_table_side": (step(6),step(11)),
"enchanting_table_bottom": (step(7),step(11)),
"command_block": (step(8),step(11)),
"itemframe_backround": (step(9),step(11)),
"flower_pot": (step(10),step(11)),
"comparator_off_south": (step(11),step(11)),
"comparator_on_south": (step(12),step(11)),
"daylight_detector_top": (step(13),step(11)),
"redstone_block": (step(14),step(11)),
"quartz_ore": (step(15),step(11)),
"double_plant_grass_bottom": (step(16),step(11)),
"double_plant_grass_top": (step(17),step(11)),
"repeater_on_north": (step(18),step(11)),
"command_block_back": (step(19),step(11)),
"command_block_conditional": (step(20),step(11)),
"command_block_front": (step(21),step(11)),
"command_block_side": (step(22),step(11)),
"bone_block_top": (step(19),step(11)),
# End Twelfth Row
# Start Thirteenth Row
"sandstone_normal": (step(0),step(12)),
"wool_colored_purple": (step(1),step(12)),
"wool_colored_pink": (step(2),step(12)),
"rail_detector": (step(3),step(12)),
"leaves_jungle": (step(4),step(12)),
#
"planks_spruce": (step(6),step(12)),
"planks_jungle": (step(7),step(12)),
"carrots_stage_0": (step(8),step(12)),
"carrots_stage_1": (step(9),step(12)),
"carrots_stage_2": (step(10),step(12)),
"carrots_stage_3": (step(11),step(12)),
"potatoes_stage_3": (step(12),step(12)),
#
"piston_right": (step(14),step(12)),
"piston_down": (step(15),step(12)),
"double_plant_fern_bottom": (step(16),step(12)),
"double_plant_fern_top": (step(17),step(12)),
"repeater_on_east": (step(18),step(12)),
"repeating_command_block_back": (step(19),step(12)),
"repeating_command_block_conditional": (step(20),step(12)),
"repeating_command_block_front": (step(21),step(12)),
"repeating_command_block_side": (step(22),step(12)),
"bone_block_side": (step(19),step(12)),
# End Thirteenth Row
# Start Fourteenth Row
"sandstone_bottom": (step(0),step(13)),
"wool_colored_cyan": (step(1),step(13)),
"wool_colored_orange": (step(2),step(13)),
"redstone_lamp_off": (step(3),step(13)),
"redstone_lamp_on": (step(4),step(13)),
"stonebrick_carved": (step(5),step(13)),
"planks_birch": (step(6),step(13)),
"anvil_base": (step(7),step(13)),
"anvil_top_damaged_1": (step(8),step(13)),
"quatrz_block_top": (step(9),step(13)),
"rail_activator": (step(10),step(13)),
"rail_activator_powered": (step(11),step(13)),
"coal_block": (step(12),step(13)),
"log_acacia_top": (step(13),step(13)),
"piston_left": (step(14),step(13)),
"magma": (step(18),step(13)),
#
"double_plant_rose_bottom": (step(16),step(13)),
"double_plant_rose_top": (step(17),step(13)),
"chain_command_block_back": (step(19),step(11)),
"chain_command_block_conditional": (step(20),step(11)),
"chain_command_block_front": (step(21),step(11)),
"chain_command_block_side": (step(22),step(11)),
# End Fourteenth Row
# Start Fifteenth Row
"nether_brick": (step(0),step(14)),
"wool_colored_silver": (step(1),step(14)),
"nether_wart_stage_0": (step(2),step(14)),
"nether_wart_stage_1": (step(3),step(14)),
"nether_wart_stage_2": (step(4),step(14)),
"sandstone_carved": (step(5),step(14)),
"sandstone_smooth": (step(6),step(14)),
"anvil_top_damaged_0": (step(7),step(14)),
"anvil_top_damaged_2": (step(8),step(14)),
"log_spruce_top": (step(9),step(14)),
"log_birch_top": (step(10),step(14)),
"log_jungle_top": (step(11),step(14)),
"log_big_oak_top": (step(12),step(14)),
"lava_still": (step(13),step(14)),
#
#
"double_plant_paeonia_bottom": (step(16),step(14)),
"double_plant_paeonia_top": (step(17),step(14)),
"nether_wart_block": (step(18),step(14)),
# End Fifteenth Row
# Start Sixteenth Row
"planks_acacia": (step(0),step(15)),
"planks_big_oak": (step(1),step(15)),
#
"log_acacia": (step(3),step(15)),
"log_big_oak": (step(4),step(15)),
"hardened_clay": (step(5),step(15)),
"portal": (step(6),step(15)),
#
"quatrz_block_chiseled": (step(8),step(15)),
"quartz_block_chiseled_top": (step(9),step(15)),
"quartz_block_lines": (step(10),step(15)),
"quartz_block_lines_top": (step(11),step(15)),
#
#
#
#
#
"slime": (step(17),step(15)),
"red_nether_brick": (step(18),step(15)),
# End Sixteenth Row
# Start Seventeenth Row
"ice_packed": (step(0),step(16)),
"hay_block_side": (step(1),step(16)),
"hay_block_top": (step(2),step(16)),
"iron_trapdoor": (step(3),step(16)),
"stone_granite": (step(4),step(16)),
"stone_grantie_smooth": (step(5),step(16)),
"stone_diorite": (step(6),step(16)),
"stone_diorite_smooth": (step(7),step(16)),
"stone_andesite": (step(8),step(16)),
"stone_andesite_smooth": (step(9),step(16)),
#
#
#
#
#
#
#
#
#
"frosted_ice_0": (step(19), step(16)),
# End Seventeenth Row
# Start Eighteenth Row
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
"prismarine_bricks": (step(16),step(17)),
"prismarine_dark": (step(17),step(17)),
"prismarine_rough": (step(18),step(17)),
"purpur_pillar": (step(19),step(17)),
# End Eighteenth Row
# Start Nineteenth Row
"hardened_clay_stained_white": (step(0),step(18)),
"hardened_clay_stained_orange": (step(1),step(18)),
"hardened_clay_stained_magenta": (step(2),step(18)),
"hardened_clay_stained_light_blue": (step(3),step(18)),
"hardened_clay_stained_yellow": (step(4),step(18)),
"hardened_clay_stained_lime": (step(5),step(18)),
"hardened_clay_stained_pink": (step(6),step(18)),
"hardened_clay_stained_gray": (step(7),step(18)),
"hardened_clay_stained_silver": (step(8),step(18)),
"hardened_clay_stained_cyan": (step(9),step(18)),
"hardened_clay_stained_purple": (step(10),step(18)),
"hardened_clay_stained_blue": (step(11),step(18)),
"hardened_clay_stained_brown": (step(12),step(18)),
"hardened_clay_stained_green": (step(13),step(18)),
"hardened_clay_stained_red": (step(14),step(18)),
"hardened_clay_stained_black": (step(15),step(18)),
"sponge_wet": (step(16),step(18)),
"sea_lantern": (step(17),step(18)),
"end_bricks": (step(18),step(18)),
"purpur_pillar_top": (step(19),step(18)),
# End Nineteenth Row
# Start Twentieth Row
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
"hay_block_side_rotated": (step(16),step(19)),
"quartz_block_lines_rotated": (step(17),step(19)),
"purpur_block": (step(18),step(19)),
# End Twentieth Row
# Start Twentyfirst Row
"red_sandstone_bottom": (step(0),step(20)),
"red_sandstone_carved": (step(1),step(20)),
"red_sandstone_normal": (step(2),step(20)),
"red_sandstone_smooth": (step(3),step(20)),
"red_sandstone_top": (step(4),step(20)),
"door_spruce_upper": (step(5),step(20)),
"door_birch_upper": (step(6),step(20)),
"door_jungle_upper": (step(7),step(20)),
"door_acacia_upper": (step(8),step(20)),
"door_dark_oak_upper": (step(9),step(20)),
#
#
#
#
#
#
#
#
"chorus_plant": (step(13),step(20)),
"chorus_flower_dead": (step(14),step(20)),
"chorus_flower": (step(15),step(20)),
"end_rod": (step(16),step(20)),
# End Twentyfirst Row
# Start MISC
# Start More Bed Textures
"bed_head_side_flipped": (step(1),step(21)),
"bed_feet_side_flipped": (step(2),step(21)),
"bed_head_top_flipped": (step(1),step(22)),
"bed_feet_top_flipped": (step(2),step(22)),
"bed_feet_top_bottom": (step(3),step(21)),
"bed_head_top_bottom": (step(3),step(22)),
"bed_feet_top_top": (step(4),step(22)),
"bed_head_top_top": (step(4),step(21)),
# End More Bed Textures
# Start Comparator Block
}
class MultipartTexture(object):
    """Dispatcher for textures that are assembled from several block textures.

    Instantiating this class inspects every subclass: handlers flagged with
    ``runAnyway`` are collected in ``runAnyways`` (always executed), the rest
    are indexed in ``texture_dict`` by the texture name they replace.
    """

    def __init__(self, texture_objects):
        self.subclasses = list(self.__class__.__subclasses__())
        self.runAnyways = []
        self.texture_dict = {}
        for handler_cls in self.subclasses:
            handler = handler_cls(texture_objects)
            if handler.runAnyway:
                self.runAnyways.append(handler)
            else:
                self.texture_dict[handler.target] = handler
class LeverTexture(MultipartTexture):
    """Builds the lever texture by patching cobblestone onto its base."""
    target = "lever"
    runAnyway = False

    def __init__(self, texture_objects):
        self.texture_objects = texture_objects

    def parse_texture(self):
        """Return the composited lever texture, or None when either source
        texture is missing from the pack."""
        if "lever" not in self.texture_objects or "cobblestone" not in self.texture_objects:
            return None
        lever = self.texture_objects["lever"].copy()
        stone = self.texture_objects["cobblestone"].copy()
        # (crop box on cobblestone, paste box on lever) pairs forming the base.
        patches = (
            ((5, 4, 11, 12), (10, 0, 16, 8)),
            ((5, 0, 11, 3), (10, 8, 16, 11)),
            ((4, 0, 12, 3), (2, 0, 10, 3)),
        )
        for crop_box, paste_box in patches:
            lever.paste(stone.crop(crop_box), paste_box)
        return lever
class StandingSignTexture(MultipartTexture):
    """Synthesizes a standing-sign texture from oak planks and an oak log."""
    target = ""
    runAnyway = True
    # Atlas slot where the synthesized texture is pasted by parse_terrain_png.
    position = (step(20), step(5))

    def __init__(self, texture_objects):
        self.texture_objects = texture_objects

    def parse_texture(self):
        """Return the composited sign texture in RGBA mode, or None when
        either source texture is missing from the pack."""
        for required in ("planks_oak", "log_oak"):
            if required not in self.texture_objects:
                return None
        planks = self.texture_objects["planks_oak"].copy()
        log_tex = self.texture_objects["log_oak"].copy()
        # The lower nine pixel rows of the planks become the sign board.
        board = planks.crop((0, 7, 16, 16))
        log_tex.paste(board, (0, 7, 16, 16))
        if log_tex.mode != "RGBA":
            log_tex = log_tex.convert("RGBA")
        return log_tex
class IResourcePack(object):
    '''
    Sets all base variables for a Resource Pack
    '''

    def __init__(self):
        # Subclasses must set self._pack_name BEFORE calling this initializer.
        self.__stop = False  # set True when parsing fails; halts further work
        texture_path = os.path.join(directories.parentDir, "textures", self._pack_name)
        self.texture_path = texture_path
        self._isEmpty = False
        self._too_big = False
        self.big_textures_counted = 0
        self.big_textures_max = 10
        self.block_image = {}  # texture name -> PIL.Image, filled by subclasses
        self.propogated_textures = []  # atlas slots this pack actually replaced
        self.all_texture_slots = []
        #self.old_terrain = Image.open(os.path.join(directories.getDataDir(), 'terrain.png'))
        self.old_terrain = Image.open(directories.getDataFile('terrain.png'))
        # Precompute every 16px-aligned slot position in the terrain atlas.
        for texx in xrange(0,33):
            for texy in xrange(0,33):
                self.all_texture_slots.append((step(texx),step(texy)))
        self._terrain_name = self._pack_name.replace(" ", "_")+".png"
        #self._terrain_path = os.path.join("terrain-textures", self._terrain_name.replace(" ", "_"))
        self._terrain_path = directories.getDataFile(u'terrain-textures', self._terrain_name.replace(u' ', u'_'))

    @property
    def pack_name(self):
        '''
        The name of the Resource Pack
        '''
        return self._pack_name

    @property
    def terrain_name(self):
        '''
        Name of the parsed texture PNG file
        '''
        return self._terrain_name

    # NOTE(review): unlike the accessors above, this one is NOT decorated
    # with @property, so callers invoke it as a method -- confirm whether the
    # asymmetry is intentional before changing it (it would alter the API).
    def terrain_path(self):
        '''
        Path to the parsed PNG file
        '''
        return self._terrain_path

    @property
    def isEmpty(self):
        '''
        Returns true if the Resource Pack doesn't replace the minimum amount of textures
        '''
        return self._isEmpty

    @property
    def tooBig(self):
        '''
        Returns true if the Resource Pack has a greater resolution than 32x32
        '''
        return self._too_big

    def parse_terrain_png(self):
        '''
        Parses each block texture into a usable PNG file like terrain.png
        '''
        multiparts = MultipartTexture(self.block_image)
        log.debug("Parsing terrain.png")
        new_terrain = Image.new("RGBA", (512, 512), None)
        for tex in self.block_image.keys():
            if not self.__stop and tex in textureSlots.keys():
                try:
                    if tex not in multiparts.texture_dict:
                        image = self.block_image[tex]
                    else:
                        # Multipart textures are composited from several sources.
                        image = multiparts.texture_dict[tex].parse_texture()
                    if image is None:
                        continue
                    log.debug(" Image is %s"%tex)
                    log.debug(" Image mode: %s"%image.mode)
                    if image.mode != "RGBA":
                        try:
                            image = image.convert("RGBA")
                            log.debug(" Image converted to RGBA.")
                        except Exception as ee:
                            print "* * *", tex, ee
                    slot = textureSlots[tex]
                    try:
                        # Use the image itself as the paste mask (alpha).
                        new_terrain.paste(image, slot, image)
                    except Exception as eee:
                        print "* * * new_terrain error:", eee
                    self.propogated_textures.append(slot)
                    log.debug(" Image pasted and propagated.")
                except Exception as e:
                    try:
                        # Print the resource pack 'raw' name.
                        print "An Exception occurred while trying to parse textures for {}".format(self._pack_name)
                        log.debug("An Exception occurred while trying to parse textures for {}".format(self._pack_name))
                    except:
                        # If for any reason that fails, print its 'representation'.
                        print "An Exception occurred while trying to parse textures for {}".format(repr(self._pack_name))
                        log.debug("An Exception occurred while trying to parse textures for {}".format(repr(self._pack_name)))
                    traceback.print_stack()
                    print "Exception Message: "+str(e)
                    log.debug("Exception Message: "+str(e))
                    print "Exception type: "+str(type(e))
                    log.debug("Exception type: "+str(type(e)))
                    print e
                    # Any failure aborts parsing and marks the pack empty.
                    self.__stop = True
                    self._isEmpty = True
                    log.debug("Parsing stopped.")
                    log.debug("Resource pack considered as empty.")
                    pass
        # runAnyway handlers paste at a fixed atlas position regardless of
        # which textures the pack replaced.
        for runAnyway in multiparts.runAnyways:
            parsed_texture = runAnyway.parse_texture()
            if parsed_texture is not None:
                new_terrain.paste(parsed_texture, runAnyway.position, parsed_texture)
                self.propogated_textures.append(runAnyway.position)
        copy = self.old_terrain.copy()
        log.debug("Correcting textures...")
        # Fill every slot the pack did not replace from the stock terrain.png.
        for t in self.all_texture_slots:
            if t not in self.propogated_textures:
                old_tex = copy.crop((t[0],t[1],t[0]+16,t[1]+16))
                new_terrain.paste(old_tex, t, old_tex)
        log.debug(" Done.")
        log.debug("Saving %s."%self._terrain_path)
        new_terrain.save(self._terrain_path)
        log.debug(" Done.")
        try:
            # Remove a stray copy possibly left in the working directory.
            os.remove(self._pack_name.replace(" ", "_")+".png")
        except:
            pass
        if not self.propogated_textures:
            # Nothing was replaced: discard the output and flag the pack empty.
            os.remove(self._terrain_path)
            self._isEmpty = True
            log.debug("No propagated textures.\nTexture pack considered as empty.")
            #print u"{} did not replace any textures".format(self._pack_name)
        del self.block_image
        if hasattr(self, 'fps'):
            # Close the per-texture buffers opened by ZipResourcePack.
            log.debug(" Closing file descriptors.")
            for f in self.fps:
                f.close()
            log.debug(" Done")
        log.debug("Parsing terrain.png ended.")

    def handle_too_big_packs(self):
        '''
        Removes the parsed PNG file
        '''
        self._too_big = True
        log.debug("Resource pack is too big.")
        try:
            os.remove(self._terrain_path)
        except:
            pass
        del self.block_image
class ZipResourcePack(IResourcePack):
    '''
    Represents a single Resource Pack that is in a zip file
    '''

    def __init__(self, zipfileFile, noEncConvert=False):
        self.zipfile = zipfileFile
        log.debug("Zip file: %s"%zipfileFile)
        # Pack name is the archive base name without its extension.
        self._pack_name = os.path.splitext(os.path.split(zipfileFile)[-1])[0]
        log.debug("Pack name: %s"%self._pack_name)
        # Define a list of opened textures file objects to be cleaned when operations are finished.
        self.fps = []
        IResourcePack.__init__(self)
        # Only parse the archive when no cached terrain PNG exists yet.
        if not os.path.exists(self._terrain_path):
            try:
                self.open_pack()
            except Exception as e:
                # NOTE(review): "'seek' not in e" tests membership against the
                # exception's args, not its message -- presumably meant
                # "'seek' not in str(e)"; confirm before changing.
                if 'seek' not in e:
                    print "Error while trying to load one of the resource packs: {}".format(e)

    def open_pack(self):
        '''
        Opens the zip file and puts texture data into a dictionary, where the key is the texture file name, and the value is a PIL.Image instance
        '''
        zfile = zipfile.ZipFile(self.zipfile)
        self.fps = []
        for name in zfile.infolist():
            # Skip non-PNG members and macOS resource-fork files ("._*").
            if name.filename.endswith(".png") and not name.filename.split(os.path.sep)[-1].startswith("._"):
                filename = "assets/minecraft/textures/blocks"
                # Only block textures whose base name is a known atlas slot.
                if name.filename.startswith(filename) and name.filename.replace(filename+"/", "").replace(".png","") in textureSlots:
                    log.debug(" Is a possible texture.")
                    block_name = os.path.normpath(name.filename).split(os.path.sep)[-1]
                    block_name = block_name.split(".")[0]
                    log.debug(" Block name: %s"%block_name)
                    log.debug(" Opening %s"%name)
                    fp = zfile.open(name)
                    #!# Sending this 'fp' file descriptor to PIL.Image does not work, because such
                    #!# descriptors are not seekable.
                    #!# But, reading the fd data and writing it to a temporary file seem to work...
                    # NOTE(review): ZipExtFile may not expose seekable()/readable()
                    # on older Python 2 releases -- verify this debug line is safe.
                    log.debug(" Done. (%s, seekable: %s, readable: %s)"%(type(fp), fp.seekable(), fp.readable()))
                    log.debug(" Saving fp data to temp file.")
                    # Copy the archive member into a seekable in-memory buffer.
                    fp1 = StringIO()
                    fp1.write(fp.read())
                    fp.close()
                    fp1.seek(0)
                    log.debug(" Done.")
                    try:
                        possible_texture = Image.open(fp1)
                        log.debug(" File descriptor for %s opened."%block_name)
                        log.debug(" Is %s."%repr(possible_texture.size))
                    except Exception as e:
                        # NOTE(review): on failure 'possible_texture' stays unbound
                        # (or keeps the previous member's image) yet is used just
                        # below -- verify this is tolerable.
                        log.debug(" Can't open descriptor for %s"%block_name)
                        log.debug(" System said:")
                        log.debug(" %s"%repr(e))
                    if possible_texture.size == (16, 16):
                        self.block_image[block_name] = possible_texture
                        # Pre-rotated/flipped variants for orientation-dependent blocks.
                        if block_name.startswith("repeater_") or block_name.startswith("comparator_"):
                            self.block_image[block_name+"_west"] = possible_texture.rotate(-90)
                            self.block_image[block_name+"_north"] = possible_texture.rotate(180)
                            self.block_image[block_name+"_east"] = possible_texture.rotate(90)
                            self.block_image[block_name+"_south"] = possible_texture
                        if block_name == "piston_side":
                            self.block_image["piston_up"] = possible_texture
                            self.block_image["piston_left"] = possible_texture.rotate(90)
                            self.block_image["piston_down"] = possible_texture.rotate(180)
                            self.block_image["piston_right"] = possible_texture.rotate(-90)
                        if block_name == "hay_block_side":
                            self.block_image["hay_block_side_rotated"] = possible_texture.rotate(-90)
                        if block_name == "quartz_block_lines":
                            self.block_image["quartz_block_lines_rotated"] = possible_texture.rotate(-90)
                        if block_name.startswith("bed_"):
                            if block_name == "bed_head_side":
                                self.block_image["bed_head_side_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                            if block_name == "bed_feet_side":
                                self.block_image["bed_feet_side_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                            if block_name == "bed_head_top":
                                self.block_image["bed_head_top_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                                self.block_image["bed_head_top_bottom"] = possible_texture.rotate(-90)
                                self.block_image["bed_head_top_top"] = possible_texture.rotate(90)
                            if block_name == "bed_feet_top":
                                self.block_image["bed_feet_top_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                                self.block_image["bed_feet_top_bottom"] = possible_texture.rotate(-90)
                                self.block_image["bed_feet_top_top"] = possible_texture.rotate(90)
                        log.debug(" Is loaded.")
                    else:
                        # Non-16x16 textures: downscale 32x32, count very large
                        # ones toward the "too big" limit, otherwise crop.
                        if possible_texture.size == (32, 32):
                            self.block_image[block_name] = possible_texture.resize((16, 16))
                            log.debug(" Is loaded.")
                        elif possible_texture.size == (64, 64) or possible_texture.size == (128, 128) or possible_texture.size == (256, 256):
                            self.big_textures_counted += 1
                            log.debug(" Is too big.")
                        else:
                            self.block_image[block_name] = possible_texture.crop((0,0,16,16))
                            log.debug(" Is loaded.")
                    self.fps.append(fp1)
        if self.big_textures_counted >= self.big_textures_max:
            self.handle_too_big_packs()
        else:
            try:
                self.parse_terrain_png()
            except Exception:
                traceback.print_exc()
                log.warning('Encountered an Exception while parsing terrain texture')
                self._too_big = True
                pass
class FolderResourcePack(IResourcePack):
    '''
    A Resource Pack stored as a plain (unzipped) folder inside the
    Minecraft profile's "resourcepacks" directory.
    '''

    def __init__(self, folder, noEncConvert=False):
        # 'noEncConvert' is accepted only for signature compatibility with
        # ZipResourcePack; folder packs need no encoding conversion.
        self._folder = folder
        self._pack_name = self._folder.replace(" ", "_")
        IResourcePack.__init__(self)
        self._full_path = os.path.join(directories.getMinecraftProfileDirectory(directories.getSelectedProfile()),
                                       "resourcepacks", self._folder)
        self.texture_path = os.path.join(directories.parentDir, "textures", self._pack_name)
        # _terrain_path is presumably prepared by IResourcePack.__init__ --
        # only parse the pack if its terrain atlas is not built yet.
        if not os.path.exists(self._terrain_path):
            self.add_textures()

    def add_textures(self):
        '''
        Scrapes the pack's block texture folder and fills self.block_image
        with 16x16 tiles (plus rotated/flipped derived tiles), using exactly
        the same dictionary structure as ZipResourcePack.
        '''
        base_path = os.path.join(self._full_path, "assets", "minecraft", "textures", "blocks")
        if os.path.exists(base_path):
            for tex_file in os.listdir(base_path):
                # Skip non-PNGs, OS X metadata files, and unknown texture names.
                if not tex_file.endswith(".png") or tex_file.startswith("._"):
                    continue
                block_name = tex_file[:-4]
                if block_name not in textureSlots:
                    continue
                possible_texture = Image.open(os.path.join(base_path, tex_file))
                if possible_texture.size == (16, 16):
                    self.block_image[block_name] = possible_texture
                    # Orientation-dependent blocks get pre-rotated variants.
                    if block_name.startswith("repeater_") or block_name.startswith("comparator_"):
                        self.block_image[block_name + "_west"] = possible_texture.rotate(-90)
                        self.block_image[block_name + "_north"] = possible_texture.rotate(180)
                        self.block_image[block_name + "_east"] = possible_texture.rotate(90)
                        self.block_image[block_name + "_south"] = possible_texture
                    if block_name == "piston_side":
                        self.block_image["piston_up"] = possible_texture
                        self.block_image["piston_left"] = possible_texture.rotate(90)
                        self.block_image["piston_down"] = possible_texture.rotate(180)
                        self.block_image["piston_right"] = possible_texture.rotate(-90)
                    if block_name == "hay_block_side":
                        self.block_image["hay_block_side_rotated"] = possible_texture.rotate(-90)
                    if block_name == "quartz_block_lines":
                        self.block_image["quartz_block_lines_rotated"] = possible_texture.rotate(-90)
                    if block_name.startswith("bed_"):
                        if block_name == "bed_head_side":
                            self.block_image["bed_head_side_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                        if block_name == "bed_feet_side":
                            self.block_image["bed_feet_side_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                        if block_name == "bed_head_top":
                            self.block_image["bed_head_top_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                            self.block_image["bed_head_top_bottom"] = possible_texture.rotate(-90)
                            self.block_image["bed_head_top_top"] = possible_texture.rotate(90)
                        if block_name == "bed_feet_top":
                            self.block_image["bed_feet_top_flipped"] = possible_texture.transpose(Image.FLIP_LEFT_RIGHT)
                            self.block_image["bed_feet_top_bottom"] = possible_texture.rotate(-90)
                            self.block_image["bed_feet_top_top"] = possible_texture.rotate(90)
                elif possible_texture.size == (32, 32):
                    # Downscale to the expected 16x16 tile size.
                    # Bug fix: the size checks below were separate 'if's, so a
                    # 32x32 texture fell into the final 'else' and had its
                    # resized tile clobbered by an unscaled top-left crop.
                    # ZipResourcePack uses if/elif/else here; now we do too.
                    self.block_image[block_name] = possible_texture.resize((16, 16))
                elif possible_texture.size in ((64, 64), (128, 128), (256, 256)):
                    # Too high-resolution to use; count it so the pack can be
                    # rejected when the limit is exceeded.
                    self.big_textures_counted += 1
                else:
                    # Unexpected size: fall back to the top-left 16x16 corner.
                    self.block_image[block_name] = possible_texture.crop((0, 0, 16, 16))
        if self.big_textures_counted >= self.big_textures_max:
            self.handle_too_big_packs()
        else:
            try:
                self.parse_terrain_png()
            except Exception:
                traceback.print_exc()
                log.warning('Encountered an Exception while parsing terrain texture')
                self._too_big = True
class DefaultResourcePack(IResourcePack):
    '''
    The built-in Resource Pack that ships with MCEdit. It is always
    available, never empty, and never too high-resolution.
    '''

    def __init__(self):
        self._isEmpty = False
        self._too_big = False
        self._pack_name = "Default"
        # The stock terrain atlas bundled with MCEdit's data files.
        self._terrain_path = directories.getDataFile('terrain.png')

    def terrain_path(self):
        '''Path to this pack's terrain atlas (a method, for API parity).'''
        return self._terrain_path

    @property
    def isEmpty(self):
        '''Always False: the default pack provides every texture.'''
        return self._isEmpty

    @property
    def tooBig(self):
        '''Always False: the default textures are the expected resolution.'''
        return self._too_big
@Singleton
class ResourcePackHandler:
    '''
    A single point to manage which Resource Pack is being used and to
    provide access to each parsed pack.
    '''
    Instance = None

    def setup_resource_packs(self):
        '''
        Parses all available Resource Packs, discarding the ones that either
        have too high of a resolution or don't replace any textures.
        '''
        log.debug("Setting up the resource packs.")
        self._resource_packs = {}
        try:
            os.mkdir("terrain-textures")
        except OSError:
            # Directory already exists.
            pass
        self._resource_packs["Default Resource Pack"] = DefaultResourcePack()
        packs_dir = os.path.join(directories.getMinecraftProfileDirectory(directories.getSelectedProfile()),
                                 "resourcepacks")
        if os.path.exists(packs_dir):
            log.debug("Gathering zipped packs...")
            zipResourcePacks = directories.getAllOfAFile(unicode(packs_dir), ".zip")
            log.debug("Gathering folder packs...")
            folderResourcePacks = os.listdir(unicode(packs_dir))
            log.debug("Processing zipped packs...")
            for zip_tex_pack in zipResourcePacks:
                zrp = ZipResourcePack(zip_tex_pack)
                if not zrp.isEmpty and not zrp.tooBig:
                    self._resource_packs[zrp.pack_name] = zrp
            log.debug("Processing folder packs...")
            for folder_tex_pack in folderResourcePacks:
                if os.path.isdir(os.path.join(packs_dir, folder_tex_pack)):
                    frp = FolderResourcePack(folder_tex_pack)
                    if not frp.isEmpty and not frp.tooBig:
                        self._resource_packs[frp.pack_name] = frp
        # Drop packs whose parsed terrain PNG never materialized. Iterate a
        # snapshot of the keys since we delete entries while looping (the
        # old code iterated .keys() directly, which breaks on Python 3).
        for tex in list(self._resource_packs.keys()):
            pack = self._resource_packs[tex]
            if not os.path.exists(pack.terrain_path()):
                del self._resource_packs[tex]
                try:
                    shutil.rmtree(os.path.join(directories.parentDir, "textures"))
                except Exception:
                    log.warning("Could not remove \"textures\" directory")

    # Backwards-compatible alias for the historical (misspelled) name.
    setup_reource_packs = setup_resource_packs

    def __init__(self):
        try:
            os.mkdir(os.path.join(directories.parentDir, "textures"))
        except OSError:
            pass
        self.setup_resource_packs()
        self._selected_resource_pack = config.settings.resourcePack.get()
        # Fall back to the default pack if the configured one is gone.
        if self._selected_resource_pack not in self._resource_packs:
            self.set_selected_resource_pack_name("Default Resource Pack")

    @property
    def resource_packs(self):
        '''
        A dictionary of Resource Packs, where the key is the pack's
        file/folder name and the value is the pack instance itself.
        '''
        return self._resource_packs

    def get_available_resource_packs(self):
        '''
        Returns the names of all the Resource Packs that can be used.
        '''
        return self._resource_packs.keys()

    def reload_resource_packs(self):
        '''
        Reparses all Resource Packs.

        Bug fix: this used to call self.setup_resource_packs() while the
        method only existed under the misspelled name 'setup_reource_packs',
        so every reload raised AttributeError.
        '''
        self.setup_resource_packs()

    def reparse_resource_pack(self, packName):
        # Re-reads a single pack from disk, if it is one we know about.
        if packName in self._resource_packs:
            pack = self._resource_packs[packName]
            if isinstance(pack, FolderResourcePack):
                pack.add_textures()
            elif isinstance(pack, ZipResourcePack):
                pack.open_pack()

    def get_selected_resource_pack_name(self):
        '''
        Returns the currently selected Resource Pack's name.
        '''
        return self._selected_resource_pack

    def set_selected_resource_pack_name(self, name):
        '''
        Sets the currently selected Resource Pack.

        :param name: Name of the Resource Pack
        '''
        config.settings.resourcePack.set(name)
        self._selected_resource_pack = name

    def get_selected_resource_pack(self):
        '''
        Returns the selected Resource Pack instance. Can be an instance of
        DefaultResourcePack, ZipResourcePack or FolderResourcePack.
        '''
        return self._resource_packs[self._selected_resource_pack]
MCEdit-Unified | MCEdit-Unified-master/pymclevel/indev.py | """
Created on Jul 22, 2011
@author: Rio
Indev levels:
TAG_Compound "MinecraftLevel"
{
TAG_Compound "Environment"
{
TAG_Short "SurroundingGroundHeight"// Height of surrounding ground (in blocks)
TAG_Byte "SurroundingGroundType" // Block ID of surrounding ground
TAG_Short "SurroundingWaterHeight" // Height of surrounding water (in blocks)
TAG_Byte "SurroundingWaterType" // Block ID of surrounding water
TAG_Short "CloudHeight" // Height of the cloud layer (in blocks)
TAG_Int "CloudColor" // Hexadecimal value for the color of the clouds
TAG_Int "SkyColor" // Hexadecimal value for the color of the sky
TAG_Int "FogColor" // Hexadecimal value for the color of the fog
TAG_Byte "SkyBrightness" // The brightness of the sky, from 0 to 100
}
TAG_List "Entities"
{
TAG_Compound
{
// One of these per entity on the map.
// These can change a lot, and are undocumented.
// Feel free to play around with them, though.
// The most interesting one might be the one with ID "LocalPlayer", which contains the player inventory
}
}
TAG_Compound "Map"
{
// To access a specific block from either byte array, use the following algorithm:
// Index = x + (y * Depth + z) * Width
TAG_Short "Width" // Width of the level (along X)
TAG_Short "Height" // Height of the level (along Y)
TAG_Short "Length" // Length of the level (along Z)
TAG_Byte_Array "Blocks" // An array of Length*Height*Width bytes specifying the block types
TAG_Byte_Array "Data" // An array of Length*Height*Width bytes with data for each blocks
TAG_List "Spawn" // Default spawn position
{
TAG_Short x // These values are multiplied by 32 before being saved
TAG_Short y // That means that the actual values are x/32.0, y/32.0, z/32.0
TAG_Short z
}
}
TAG_Compound "About"
{
TAG_String "Name" // Level name
TAG_String "Author" // Name of the player who made the level
TAG_Long "CreatedOn" // Timestamp when the level was first created
}
}
"""
from entity import TileEntity
from level import MCLevel
from logging import getLogger
from materials import indevMaterials
from numpy import array, swapaxes
import nbt
import os
log = getLogger(__name__)
MinecraftLevel = "MinecraftLevel"
Environment = "Environment"
SurroundingGroundHeight = "SurroundingGroundHeight"
SurroundingGroundType = "SurroundingGroundType"
SurroundingWaterHeight = "SurroundingWaterHeight"
SurroundingWaterType = "SurroundingWaterType"
CloudHeight = "CloudHeight"
CloudColor = "CloudColor"
SkyColor = "SkyColor"
FogColor = "FogColor"
SkyBrightness = "SkyBrightness"
About = "About"
Name = "Name"
Author = "Author"
CreatedOn = "CreatedOn"
Spawn = "Spawn"
__all__ = ["MCIndevLevel"]
from level import EntityLevel
class MCIndevLevel(EntityLevel):
    """ IMPORTANT: self.Blocks and self.Data are indexed with [x,z,y] via axis
    swapping to be consistent with infinite levels."""

    materials = indevMaterials
    _gamePlatform = 'indev'

    def setPlayerSpawnPosition(self, pos, player=None):
        # Indev worlds store a single shared spawn point; 'player' is ignored.
        assert len(pos) == 3
        self.Spawn = array(pos)

    def playerSpawnPosition(self, player=None):
        # Returns the shared spawn point ('player' is ignored).
        return self.Spawn

    def setPlayerPosition(self, pos, player="Ignored"):
        self.LocalPlayer["Pos"] = nbt.TAG_List([nbt.TAG_Float(p) for p in pos])

    def getPlayerPosition(self, player="Ignored"):
        return array(map(lambda x: x.value, self.LocalPlayer["Pos"]))

    def setPlayerOrientation(self, yp, player="Ignored"):
        self.LocalPlayer["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])

    def getPlayerOrientation(self, player="Ignored"):
        """ returns (yaw, pitch) """
        return array(map(lambda x: x.value, self.LocalPlayer["Rotation"]))

    def setBlockDataAt(self, x, y, z, newdata):
        # Out-of-bounds writes are silently ignored; data values are 4-bit.
        if x < 0 or y < 0 or z < 0:
            return 0
        if x >= self.Width or y >= self.Height or z >= self.Length:
            return 0
        self.Data[x, z, y] = (newdata & 0xf)

    def blockDataAt(self, x, y, z):
        # Out-of-bounds reads return 0.
        if x < 0 or y < 0 or z < 0:
            return 0
        if x >= self.Width or y >= self.Height or z >= self.Length:
            return 0
        return self.Data[x, z, y]

    def blockLightAt(self, x, y, z):
        # Out-of-bounds reads return 0.
        if x < 0 or y < 0 or z < 0:
            return 0
        if x >= self.Width or y >= self.Height or z >= self.Length:
            return 0
        return self.BlockLight[x, z, y]

    def __repr__(self):
        return u"MCIndevLevel({0}): {1}W {2}L {3}H".format(self.filename, self.Width, self.Length, self.Height)

    @classmethod
    def _isTagLevel(cls, root_tag):
        # An Indev level is an NBT file whose root tag is named "MinecraftLevel".
        return "MinecraftLevel" == root_tag.name

    def __init__(self, root_tag=None, filename=""):
        self.Width = 0
        self.Height = 0
        self.Length = 0
        self.Blocks = array([], "uint8")
        self.Data = array([], "uint8")
        self.Spawn = (0, 0, 0)
        self.filename = filename
        if root_tag:
            self.root_tag = root_tag
            mapTag = root_tag["Map"]
            self.Width = mapTag["Width"].value
            self.Length = mapTag["Length"].value
            self.Height = mapTag["Height"].value
            # On disk the arrays are flat and (y, z, x) ordered; reshape and
            # swap axes 0 and 2 to get the in-memory (x, z, y) layout.
            mapTag["Blocks"].value.shape = (self.Height, self.Length, self.Width)
            self.Blocks = swapaxes(mapTag["Blocks"].value, 0, 2)
            mapTag["Data"].value.shape = (self.Height, self.Length, self.Width)
            self.Data = swapaxes(mapTag["Data"].value, 0, 2)
            # Each Data byte packs two nibbles: low = block light, high = data.
            self.BlockLight = self.Data & 0xf
            self.Data >>= 4
            self.Spawn = [mapTag[Spawn][i].value for i in xrange(3)]
            if "Entities" not in root_tag:
                root_tag["Entities"] = nbt.TAG_List()
            self.Entities = root_tag["Entities"]

            # xxx fixup Motion and Pos to match infdev format
            def numbersToDoubles(ent):
                for attr in "Motion", "Pos":
                    if attr in ent:
                        ent[attr] = nbt.TAG_List([nbt.TAG_Double(t.value) for t in ent[attr]])

            for ent in self.Entities:
                numbersToDoubles(ent)
            if "TileEntities" not in root_tag:
                root_tag["TileEntities"] = nbt.TAG_List()
            self.TileEntities = root_tag["TileEntities"]
            # xxx fixup TileEntities positions to match infdev format:
            # Indev packs the coordinates into a single int (see decodePos).
            for te in self.TileEntities:
                pos = te["Pos"].value
                (x, y, z) = self.decodePos(pos)
                TileEntity.setpos(te, (x, y, z))

            localPlayerList = [tag for tag in root_tag["Entities"] if tag['id'].value == 'LocalPlayer']
            if len(localPlayerList) == 0:  # omen doesn't make a player entity
                # Synthesize a default player at (0, 64, 0) looking down 45 degrees.
                playerTag = nbt.TAG_Compound()
                playerTag['id'] = nbt.TAG_String('LocalPlayer')
                playerTag['Pos'] = nbt.TAG_List([nbt.TAG_Float(0.), nbt.TAG_Float(64.), nbt.TAG_Float(0.)])
                playerTag['Rotation'] = nbt.TAG_List([nbt.TAG_Float(0.), nbt.TAG_Float(45.)])
                self.LocalPlayer = playerTag
            else:
                self.LocalPlayer = localPlayerList[0]
        else:
            log.info(u"Creating new Indev levels is not yet implemented.!")
            raise ValueError("Can't do that yet")

        # self.SurroundingGroundHeight = root_tag[Environment][SurroundingGroundHeight].value
        # self.SurroundingGroundType = root_tag[Environment][SurroundingGroundType].value
        # self.SurroundingWaterHeight = root_tag[Environment][SurroundingGroundHeight].value
        # self.SurroundingWaterType = root_tag[Environment][SurroundingWaterType].value
        # self.CloudHeight = root_tag[Environment][CloudHeight].value
        # self.CloudColor = root_tag[Environment][CloudColor].value
        # self.SkyColor = root_tag[Environment][SkyColor].value
        # self.FogColor = root_tag[Environment][FogColor].value
        # self.SkyBrightness = root_tag[Environment][SkyBrightness].value
        # self.TimeOfDay = root_tag[Environment]["TimeOfDay"].value
        #
        #
        # self.Name = self.root_tag[About][Name].value
        # self.Author = self.root_tag[About][Author].value
        # self.CreatedOn = self.root_tag[About][CreatedOn].value

    def rotateLeft(self):
        # Rotate the whole level, then fix up the per-block orientation data.
        MCLevel.rotateLeft(self)
        self.Data = swapaxes(self.Data, 1, 0)[:, ::-1, :]  # x=y; y=-x
        # Remap torch data values to their rotated orientations.
        torchRotation = array([0, 4, 3, 1, 2, 5,
                               6, 7,
                               8, 9, 10, 11, 12, 13, 14, 15])
        torchIndexes = (self.Blocks == self.materials.Torch.ID)
        log.info(u"Rotating torches: {0}".format(len(torchIndexes.nonzero()[0])))
        self.Data[torchIndexes] = torchRotation[self.Data[torchIndexes]]

    @staticmethod
    def decodePos(v):
        # Unpack a tile entity position: three 10-bit fields (x, y, z).
        b = 10
        m = (1 << b) - 1
        return v & m, (v >> b) & m, (v >> (2 * b))

    @staticmethod
    def encodePos(x, y, z):
        # Pack (x, y, z) into one int as three 10-bit fields.
        b = 10
        return x + (y << b) + (z << (2 * b))

    def saveToFile(self, filename=None):
        if filename is None:
            filename = self.filename
        if filename is None:
            log.warn(u"Attempted to save an unnamed file in place")
            return  # you fool!
        # Repack each Data byte: high nibble = block data, low nibble = light.
        self.Data <<= 4
        self.Data |= (self.BlockLight & 0xf)
        # Swap to the on-disk (y, z, x) order for serialization.
        self.Blocks = swapaxes(self.Blocks, 0, 2)
        self.Data = swapaxes(self.Data, 0, 2)
        mapTag = nbt.TAG_Compound()
        mapTag["Width"] = nbt.TAG_Short(self.Width)
        mapTag["Height"] = nbt.TAG_Short(self.Height)
        mapTag["Length"] = nbt.TAG_Short(self.Length)
        mapTag["Blocks"] = nbt.TAG_Byte_Array(self.Blocks)
        mapTag["Data"] = nbt.TAG_Byte_Array(self.Data)
        # Swap back to the in-memory (x, z, y) order; the tags keep the
        # on-disk-ordered views created above.
        self.Blocks = swapaxes(self.Blocks, 0, 2)
        self.Data = swapaxes(self.Data, 0, 2)
        mapTag[Spawn] = nbt.TAG_List([nbt.TAG_Short(i) for i in self.Spawn])
        self.root_tag["Map"] = mapTag
        # The player is stored as an ordinary entity while on disk.
        self.Entities.append(self.LocalPlayer)

        # fix up Entities imported from Alpha worlds
        def numbersToFloats(ent):
            for attr in "Motion", "Pos":
                if attr in ent:
                    ent[attr] = nbt.TAG_List([nbt.TAG_Double(t.value) for t in ent[attr]])

        for ent in self.Entities:
            numbersToFloats(ent)
        # fix up TileEntities imported from Alpha worlds: pack separate
        # x/y/z tags into Indev's single encoded "Pos" int.
        for ent in self.TileEntities:
            if "Pos" not in ent and all(c in ent for c in 'xyz'):
                ent["Pos"] = nbt.TAG_Int(self.encodePos(ent['x'].value, ent['y'].value, ent['z'].value))
        # Keep a ".old" backup of the previous file while writing the new one.
        try:
            os.rename(filename, filename + ".old")
        except Exception:
            pass
        try:
            self.root_tag.save(filename)
        except:
            # Save failed: restore the backup.
            os.rename(filename + ".old", filename)
        try:
            os.remove(filename + ".old")
        except Exception:
            pass
        # Undo the in-memory repacking done at the top of this method.
        self.Entities.remove(self.LocalPlayer)
        self.BlockLight = self.Data & 0xf
        self.Data >>= 4
| 11,565 | 34.697531 | 111 | py |
MCEdit-Unified | MCEdit-Unified-master/pymclevel/mclevel.py | # -*- coding: utf-8 -*-
"""
MCLevel interfaces
Sample usage:
import mclevel
# Call mclevel.fromFile to identify and open any of these four file formats:
#
# Classic levels - gzipped serialized java objects. Returns an instance of MCJavalevel
# Indev levels - gzipped NBT data in a single file. Returns an MCIndevLevel
# Schematics - gzipped NBT data in a single file. Returns an MCSchematic.
# MCSchematics have the special method rotateLeft which will reorient torches, stairs, and other tiles appropriately.
# Alpha levels - world folder structure containing level.dat and chunk folders. Single or Multiplayer.
# Can accept a path to the world folder or a path to the level.dat. Returns an MCInfdevOldLevel
# Load a Classic level.
level = mclevel.fromFile("server_level.dat");
# fromFile identified the file type and returned a MCJavaLevel. MCJavaLevel doesn't actually know any java. It guessed the
# location of the Blocks array by starting at the end of the file and moving backwards until it only finds valid blocks.
# It also doesn't know the dimensions of the level. This is why you have to tell them to MCEdit via the filename.
# This works here too: If the file were 512 wide, 512 long, and 128 high, I'd have to name it "server_level_512_512_128.dat"
#
# This is one area for improvement.
# Classic and Indev levels have all of their blocks in one place.
blocks = level.Blocks
# Sand to glass.
blocks[blocks == level.materials.Sand.ID] = level.materials.Glass.ID
# Save the file with another name. This only works for non-Alpha levels.
level.saveToFile("server_level_glassy.dat");
# Load an Alpha world
# Loading an Alpha world immediately scans the folder for chunk files. This takes longer for large worlds.
ourworld = mclevel.fromFile("C:\\Minecraft\\OurWorld");
# Convenience method to load a numbered world from the saves folder.
world1 = mclevel.loadWorldNumber(1);
# Find out which chunks are present. Doing this will scan the chunk folders the
# first time it is used. If you already know where you want to be, skip to
# world1.getChunk(xPos, zPos)
chunkPositions = list(world1.allChunks)
# allChunks returns an iterator that yields a (xPos, zPos) tuple for each chunk
xPos, zPos = chunkPositions[0];
# retrieve an AnvilChunk object. this object will load and decompress
# the chunk as needed, and remember whether it needs to be saved or relighted
chunk = world1.getChunk(xPos, zPos)
### Access the data arrays of the chunk like so:
# Take note that the array is indexed x, z, y. The last index corresponds to
# height or altitude.
blockType = chunk.Blocks[0,0,64]
chunk.Blocks[0,0,64] = 1
# Access the chunk's Entities and TileEntities as arrays of TAG_Compound as
# they appear in the save format.
# Entities usually have Pos, Health, and id
# TileEntities usually have tileX, tileY, tileZ, and id
# For more information, google "Chunk File Format"
for entity in chunk.Entities:
if entity["id"].value == "Spider":
entity["Health"].value = 50
# Accessing one byte at a time from the Blocks array is very slow in Python.
# To get around this, we have methods to access multiple bytes at once.
# The first technique is slicing. You can use slicing to restrict your access
# to certain depth levels, or to extract a column or a larger section from the
# array. Standard python slice notation is used.
# Set the top half of the array to 0. The : says to use the entire array along
# that dimension. The syntax []= indicates we are overwriting part of the array
chunk.Blocks[:,:,64:] = 0
# Using [] without = creates a 'view' on part of the array. This is not a
# copy, it is a reference to a portion of the original array.
midBlocks = chunk.Blocks[:,:,32:64]
# Here's a gotcha: You can't just write 'midBlocks = 0' since it will replace
# the 'midBlocks' reference itself instead of accessing the array. Instead, do
# this to access and overwrite the array using []= syntax.
midBlocks[:] = 0
# The second is masking. Using a comparison operator ( <, >, ==, etc )
# against the Blocks array will return a 'mask' that we can use to specify
# positions in the array.
# Create the mask from the result of the equality test.
fireBlocks = ( chunk.Blocks==world.materials.Fire.ID )
# Access Blocks using the mask to set elements. The syntax is the same as
# using []= with slices
chunk.Blocks[fireBlocks] = world.materials.Leaves.ID
# You can also combine mask arrays using logical operations (&, |, ^) and use
# the mask to access any other array of the same shape.
# Here we turn all trees into birch trees.
# Extract a mask from the Blocks array to find the locations of tree trunks.
# Or | it with another mask to find the locations of leaves.
# Use the combined mask to access the Data array and set those locations to birch
# Note that the Data, BlockLight, and SkyLight arrays have been
# unpacked from 4-bit arrays to numpy uint8 arrays. This makes them much easier
# to work with.
treeBlocks = ( chunk.Blocks == world.materials.Wood.ID )
treeBlocks |= ( chunk.Blocks == world.materials.Leaves.ID )
chunk.Data[treeBlocks] = 2 # birch
# The chunk doesn't know you've changed any of that data. Call chunkChanged()
# to let it know. This will mark the chunk for lighting calculation,
# recompression, and writing to disk. It will also immediately recalculate the
# chunk's HeightMap and fill the SkyLight only with light falling straight down.
# These are relatively fast and were added here to aid MCEdit.
chunk.chunkChanged();
# To recalculate all of the dirty lights in the world, call generateLights
world.generateLights();
# Move the player and his spawn
world.setPlayerPosition( (0, 67, 0) ) # add 3 to make sure his head isn't in the ground.
world.setPlayerSpawnPosition( (0, 64, 0) )
# Save the level.dat and any chunks that have been marked for writing to disk
# This also compresses any chunks marked for recompression.
world.saveInPlace();
# Advanced use:
# The getChunkSlices method returns an iterator that returns slices of chunks within the specified range.
# the slices are returned as tuples of (chunk, slices, point)
# chunk: The AnvilChunk object we're interested in.
# slices: A 3-tuple of slice objects that can be used to index chunk's data arrays
# point: A 3-tuple of floats representing the relative position of this subslice within the larger slice.
#
# Take caution:
# the point tuple is ordered (x,y,z) in accordance with the tuples used to initialize a bounding box
# however, the slices tuple is ordered (x,z,y) for easy indexing into the arrays.
# Here is an old version of MCInfdevOldLevel.fillBlocks in its entirety:
def fillBlocks(self, box, blockType, blockData = 0):
chunkIterator = self.getChunkSlices(box)
for (chunk, slices, point) in chunkIterator:
chunk.Blocks[slices] = blockType
chunk.Data[slices] = blockData
chunk.chunkChanged();
Copyright 2010 David Rio Vierra
"""
from indev import MCIndevLevel
from infiniteworld import MCInfdevOldLevel
from javalevel import MCJavaLevel
from logging import getLogger
import logging
from directories import minecraftSaveFileDir
import nbt
from numpy import fromstring
import os
from pocket import PocketWorld
from leveldbpocket import PocketLeveldbWorld
from pymclevel import leveldbpocket
from schematic import INVEditChest, MCSchematic, ZipSchematic
import sys
import traceback
log = getLogger(__name__)
class LoadingError(RuntimeError):
    """Raised by fromFile when a file fails to load under every known level format."""
    pass
def fromFile(filename, loadInfinite=True, readonly=False):
    ''' The preferred method for loading Minecraft levels of any type.
    Pass False to loadInfinite if you'd rather not load infdev levels.

    Tries each known format in turn (zipped schematic, Pocket, Infdev,
    LevelDB Pocket, raw Java, Indev, schematic, INVEdit chest) and returns
    the matching level object. Raises IOError when the file is missing or
    of an unknown type, ValueError for damaged files, LoadingError when
    several loaders fail in sequence.
    '''
    # Bug fix: check for a falsy filename *before* logging/concatenating it;
    # with filename=None the old code raised TypeError instead of IOError.
    if not filename:
        raise IOError("File not found: {0!r}".format(filename))
    log.info(u"Identifying " + filename)
    if not os.path.exists(filename):
        raise IOError("File not found: " + filename)
    if ZipSchematic._isLevel(filename):
        log.info("Zipfile found, attempting zipped infinite level")
        lev = ZipSchematic(filename)
        log.info("Detected zipped Infdev level")
        return lev
    if PocketWorld._isLevel(filename):
        return PocketWorld(filename)
    if MCInfdevOldLevel._isLevel(filename):
        log.info(u"Detected Infdev level.dat")
        if loadInfinite:
            return MCInfdevOldLevel(filename=filename, readonly=readonly)
        else:
            raise ValueError("Asked to load {0} which is an infinite level, loadInfinite was False".format(
                os.path.basename(filename)))
    if PocketLeveldbWorld._isLevel(filename):
        if leveldbpocket.leveldb_available:
            return PocketLeveldbWorld(filename)
        else:
            logging.exception("Pocket support has failed")
    if os.path.isdir(filename):
        logging.exception("World load failed, trying to open a directory instead of a file")
    # Use a context manager so the handle is closed even if read() raises
    # (the old code used file()/close() with no try/finally).
    with open(filename, 'rb') as f:
        rawdata = f.read()
    if len(rawdata) < 4:
        raise ValueError("{0} is too small! ({1}) ".format(filename, len(rawdata)))
    data = fromstring(rawdata, dtype='uint8')
    if not data.any():
        # Bug fix: the message was never formatted, so it literally said "{0}".
        raise ValueError("{0} contains only zeroes. This file is damaged beyond repair.".format(filename))
    if MCJavaLevel._isDataLevel(data):
        log.info(u"Detected Java-style level")
        lev = MCJavaLevel(filename, data)
        lev.compressed = False
        return lev
    # Try to gunzip; fall back to treating the raw bytes as uncompressed.
    compressed = True
    unzippedData = None
    try:
        unzippedData = nbt.gunzip(rawdata)
    except Exception as e:
        log.info(u"Exception during Gzip operation, assuming {0} uncompressed: {1!r}".format(filename, e))
    if unzippedData is None:
        compressed = False
        unzippedData = rawdata
    data = unzippedData
    if MCJavaLevel._isDataLevel(data):
        log.info(u"Detected compressed Java-style level")
        lev = MCJavaLevel(filename, data)
        lev.compressed = compressed
        return lev
    try:
        root_tag = nbt.load(buf=data)
    except Exception as e:
        log.info(u"Error during NBT load: {0!r}".format(e))
        log.info(traceback.format_exc())
        log.info(u"Fallback: Detected compressed flat block array, yzx ordered ")
        try:
            lev = MCJavaLevel(filename, data)
            lev.compressed = compressed
            return lev
        except Exception as e2:
            # NOTE(review): the traceback is merely stored as a constructor
            # argument here; it is not re-raised with the original traceback.
            raise LoadingError(("Multiple errors encountered", e, e2), sys.exc_info()[2])
    else:
        if MCIndevLevel._isTagLevel(root_tag):
            log.info(u"Detected Indev .mclevel")
            return MCIndevLevel(root_tag, filename)
        if MCSchematic._isTagLevel(root_tag):
            log.info(u"Detected Schematic.")
            return MCSchematic(filename=filename)
        if INVEditChest._isTagLevel(root_tag):
            log.info(u"Detected INVEdit inventory file")
            return INVEditChest(root_tag=root_tag, filename=filename)
    raise IOError("Cannot detect file type.")
def loadWorld(name):
    '''Load the world called *name* from the Minecraft saves directory.'''
    return fromFile(os.path.join(minecraftSaveFileDir, name))
def loadWorldNumber(i):
    '''Deprecated: load the numbered save folder "World<i>" from the saves directory.'''
    # The trailing path separator is preserved from the original format string.
    world_path = u"{0}{1}World{2}{1}".format(minecraftSaveFileDir, os.sep, i)
    return fromFile(world_path)
| 11,260 | 35.800654 | 125 | py |
MCEdit-Unified | MCEdit-Unified-master/stock-filters/Find.py | # written by texelelf
#-# Adding a result pages, and NBT edit stuff
from pymclevel import TAG_Byte, TAG_Short, TAG_Int, TAG_Compound, TAG_List, TAG_String, TAG_Double, TAG_Float, TAG_Long, \
TAG_Byte_Array, TAG_Int_Array
from pymclevel.box import BoundingBox
from albow import alert, ask
import ast
# Let import the stuff to save files.
from mcplatform import askSaveFile
from directories import getDocumentsFolder
# The RECODR_UNDO is not yet usable...
# RECORD_UNDO = False
displayName = "Find"
tagtypes = {"TAG_Byte": 0, "TAG_Short": 1, "TAG_Int": 2, "TAG_Compound": 3, "TAG_List": 4, "TAG_String": 5,
"TAG_Double": 6, "TAG_Float": 7, "TAG_Long": 8, "TAG_Byte_Array": 9, "TAG_Int_Array": 10}
tagses = {0: TAG_Byte, 1: TAG_Short, 2: TAG_Int, 3: TAG_Compound, 4: TAG_List, 5: TAG_String,
6: TAG_Double, 7: TAG_Float, 8: TAG_Long, 9: TAG_Byte_Array, 10: TAG_Int_Array, "Any": 11}
inputs = [(("Match by:", ("TileEntity", "Entity", "Block")),
("Match block type (for TileEntity searches):", False),
("Match block:", "blocktype"),
("Match block data:", True),
("Match tile entities (for Block searches):", False),
("\"None\" for Name or Value will match any tag's Name or Value respectively.", "label"),
("Match Tag Name:", ("string", "value=None")),
("Match Tag Value:", ("string", "value=None")),
("Case insensitive:", True),
("Match Tag Type:", tuple(("Any",)) + tuple(tagtypes.keys())),
("Operation:", ("Start New Search", "Dump Found Coordinates")),
("Options", "title")),
(("Results", "title"), ("", ["NBTTree", {}, 0, False])), # [str name_of_widget_type, dict default_data, int page_to_goback, bool show_load_button]
(("Documentation", "title"),
("This filter is designed to search for NBT in either Entities or TileEntities.\n"
"It can also be used to search for blocks.\n\"Match by\" determines which type of object "
"is prioritized during the search.\nEntites and TileEntities will search relatively quickly, "
"while the speed of searching by Block will be directly proportional to the selection size (since every "
"single block within the selection will be examined).\n"
"All Entity searches will ignore the block settings; TileEntity searches will try to "
"\"Match block type\" if checked, and Block searches will try to \"Match tile entity\" tags if "
"checked.\nIt is faster to match TileEntity searches with a Block Type than vice versa.\nBlock "
"matching can also optionally match block data, e.g. matching all torches, or only torches facing "
"a specific direction.\n\"Start New Search\" will re-search through the selected volume, while \"Find Next\" "
"will iterate through the search results of the previous search.", "label"))
]
tree = None  # the tree widget
chunks = None  # the chunks used to perform the search
bbox = None  # the bounding box to search in
by = None  # what is searched: Entities, TileEntities or blocks
def set_tree(t):
    '''Remember the NBTTree widget so the other hooks in this filter can reach it.'''
    global tree
    tree = t
# Use this method to overwrite the NBT tree default behaviour on mouse clicks
def nbttree_mouse_down(e):
    '''On double-click of a "(x, y, z)" result item, move the camera to the
    coordinates and select that single block; then delegate to the default
    tree-row handler.'''
    if e.num_clicks > 1:
        item = tree.selected_item
        if item and item[3].startswith('(') and item[3].endswith(')'):
            coords = ast.literal_eval(item[3])
            # Hover slightly above and behind the block, looking down at it.
            editor.mainViewport.cameraPosition = (coords[0] + 0.5, coords[1] + 2, coords[2] - 1)
            editor.mainViewport.yaw = 0.0
            editor.mainViewport.pitch = 45.0
            editor.selectionTool.setSelection(BoundingBox(coords, (1, 1, 1)))
    tree.treeRow.__class__.mouse_down(tree.treeRow, e)
def get_chunks():
    '''Accessor for the chunk iterator captured by the last search.'''
    return chunks


def get_box():
    '''Accessor for the bounding box captured by the last search.'''
    return bbox


def get_by():
    '''Accessor for the last search mode: "TileEntity", "Entity" or "Block".'''
    return by
# Use this method to overwrite the NBT tree 'OK' button default behaviour
def nbt_ok_action():
    # After the NBT editor's OK button is pressed, mark every chunk whose
    # matched entity lies inside the search box as dirty so the edits get
    # written back to disk.
    by = get_by()
    chunks = get_chunks()
    box = get_box()
    if by not in (trn._('TileEntity'), trn._('Entity')):
        return
    if chunks:
        for chunk, slices, point in chunks:
            if by == trn._('TileEntity'):
                # NOTE(review): this branch extracts coordinates but never
                # marks the chunk dirty -- the (x, y, z) containment check
                # below sits only in the Entity branch. Looks like the check
                # was meant for both branches; confirm before relying on it.
                for e in chunk.TileEntities:
                    x = e["x"].value
                    y = e["y"].value
                    z = e["z"].value
            elif by == trn._('Entity'):
                for e in chunk.Entities:
                    x = e["Pos"][0].value
                    y = e["Pos"][1].value
                    z = e["Pos"][2].value
                    if (x, y, z) in box:
                        chunk.dirty = True
try:
search
except NameError:
search = None
def FindTagS(nbtData, name, value, tagtype):
    '''
    Case-sensitive recursive NBT search.

    Returns True if nbtData (or a tag nested inside it) has a name
    containing 'name', a value containing 'value' and the type 'tagtype'
    (11 means "any type"). Empty 'name'/'value' match anything.
    '''
    if type(nbtData) is TAG_List or type(nbtData) is TAG_Compound:
        # A container can itself match when no value filter is given.
        if name in nbtData.name or name == "":
            if value == "":
                if type(nbtData) is tagtype or tagtype == 11:
                    print("found in pre-area")
                    return True
        # TAG_List is indexed by position, TAG_Compound by key.
        # (renamed from 'list', which shadowed the builtin)
        is_list = type(nbtData) is TAG_List
        for tag in range(0, len(nbtData)) if is_list else nbtData.keys():
            if type(nbtData[tag]) is TAG_Compound:
                if FindTagS(nbtData[tag], name, value, tagtype):
                    return True
            elif type(nbtData[tag]) is TAG_List:
                if FindTagS(nbtData[tag], name, value, tagtype):
                    return True
            else:
                if name in nbtData[tag].name or name == "":
                    if value in unicode(nbtData[tag].value):
                        if type(nbtData[tag]) is tagtype or tagtype == 11:
                            print("found in list/compound")
                            return True
                else:
                    # NOTE(review): aborting the whole scan on the first child
                    # whose name does not match looks suspicious, but the
                    # behaviour is kept as-is (FindTagI does the same).
                    return False
    else:
        # Leaf tag: compare the tag itself.
        if name in nbtData.name or name == "":
            if value in unicode(nbtData.value):
                # Bug fix: this branch used 'nbtData[tag]', but 'tag' is never
                # bound on this code path (the loop above belongs to the other
                # branch), so any direct leaf match raised NameError. The
                # leaf's own type is what must be checked.
                if type(nbtData) is tagtype or tagtype == 11:
                    print("found outside")
                    return True
    return False
def FindTagI(nbtData, name, value, tagtype):
    '''Case-insensitive recursive search of an NBT tree.

    Same contract as FindTagS, but `name` and `value` must already be
    uppercased by the caller; tag names/values are uppercased here.
    '''
    if type(nbtData) is TAG_List or type(nbtData) is TAG_Compound:
        if name in (u"%s"%nbtData.name).upper() or name == "":
            if value == "":
                if type(nbtData) is tagtype or tagtype == 11:
                    return True
        # Iterate by index for lists, by key for compounds.
        # (Renamed from 'list', which shadowed the builtin.)
        is_list = type(nbtData) is TAG_List
        for tag in range(0, len(nbtData)) if is_list else nbtData.keys():
            if type(nbtData[tag]) is TAG_Compound:
                if FindTagI(nbtData[tag], name, value, tagtype):
                    return True
            elif type(nbtData[tag]) is TAG_List:
                if FindTagI(nbtData[tag], name, value, tagtype):
                    return True
            else:
                if name in (u"%s"%nbtData[tag].name).upper() or name == "":
                    if value in unicode(nbtData[tag].value).upper():
                        if type(nbtData[tag]) is tagtype or tagtype == 11:
                            return True
                        else:
                            # NOTE(review): preserved from the original --
                            # aborts the whole search on a type mismatch
                            # instead of trying the remaining tags.
                            return False
    else:
        if name in (u"%s"%nbtData.name).upper() or name == "":
            if value in unicode(nbtData.value).upper():
                # Bug fix: was type(nbtData[tag]) -- 'tag' is never bound on
                # this leaf branch, so it raised NameError. Compare the leaf
                # tag itself instead.
                if type(nbtData) is tagtype or tagtype == 11:
                    return True
    return False
def FindTag(nbtData, name, value, tagtype, caseSensitive):
    '''Dispatch to the case-sensitive or case-insensitive NBT tree search.'''
    searcher = FindTagS if caseSensitive else FindTagI
    return searcher(nbtData, name, value, tagtype)
def perform(level, box, options):
    '''Filter entry point: search the selection for matching blocks, tile
    entities, or entities and display (or dump to file) the results.

    level   -- the world being edited
    box     -- the selected BoundingBox
    options -- dict of the user's filter inputs

    Results are kept in the module-level globals search/chunks/bbox/by so
    the NBT-tree callbacks can reach them after this function returns.
    '''
    global search
    # Don't forget to 'globalize' these...
    global chunks
    global bbox
    global by
    bbox = box
    by = options["Match by:"]
    matchtype = options["Match block type (for TileEntity searches):"]
    matchblock = options["Match block:"]
    matchdata = options["Match block data:"]
    matchtile = options["Match tile entities (for Block searches):"]
    matchname = u"" if options["Match Tag Name:"] == "None" else unicode(options["Match Tag Name:"])
    matchval = u"" if options["Match Tag Value:"] == "None" else unicode(options["Match Tag Value:"])
    caseSensitive = not options["Case insensitive:"]
    # tagtypes / tagses are presumably module-level lookup tables defined
    # elsewhere in this file (UI label -> NBT tag class) -- not visible here.
    matchtagtype = tagtypes.get(options["Match Tag Type:"], "Any")
    op = options["Operation:"]
    datas = []
    if not caseSensitive:
        # Uppercase the needles once here; FindTagI uppercases the haystack.
        matchname = matchname.upper()
        matchval = matchval.upper()
    if matchtile and matchname == "" and matchval == "":
        alert("\nInvalid Tag Name and Value; the present values will match every tag of the specified type.")
    if search is None or op == trn._("Start New Search") or op == trn._("Dump Found Coordinates"):
        search = []
    if not search:
        if by == trn._("Block"):
            # Brute-force scan of every block in the selection.
            for x in xrange(box.minx, box.maxx):
                for z in xrange(box.minz, box.maxz):
                    for y in xrange(box.miny, box.maxy):
                        block = level.blockAt(x, y, z)
                        data = level.blockDataAt(x, y, z)
                        if block == matchblock.ID and (not matchdata or data == matchblock.blockData):
                            pass
                        else:
                            continue
                        if matchtile:
                            tile = level.tileEntityAt(x, y, z)
                            if tile is not None:
                                if not FindTag(tile, matchname, matchval, tagses[matchtagtype], caseSensitive):
                                    continue
                            else:
                                continue
                        search.append((x, y, z))
                        datas.append(data)
        elif by == trn._("TileEntity"):
            chunks = []
            for (chunk, slices, point) in level.getChunkSlices(box):
                for e in chunk.TileEntities:
                    x = e["x"].value
                    y = e["y"].value
                    z = e["z"].value
                    if (x, y, z) in box:
                        if matchtype:
                            block = level.blockAt(x, y, z)
                            data = level.blockDataAt(x, y, z)
                            if block == matchblock.ID and (not matchdata or data == matchblock.blockData):
                                pass
                            else:
                                continue
                        if not FindTag(e, matchname, matchval, tagses[matchtagtype], caseSensitive):
                            continue
                        search.append((x, y, z))
                        datas.append(e)
                        chunks.append([chunk, slices, point])
        else:
            # Entity search.
            chunks = []
            for (chunk, slices, point) in level.getChunkSlices(box):
                for e in chunk.Entities:
                    x = e["Pos"][0].value
                    y = e["Pos"][1].value
                    z = e["Pos"][2].value
                    if (x, y, z) in box:
                        if FindTag(e, matchname, matchval, tagses[matchtagtype], caseSensitive):
                            search.append((x, y, z))
                            datas.append(e)
                            chunks.append([chunk, slices, point])
    if not search:
        alert("\nNo matching blocks/tile entities found")
    else:
        search.sort()
    if op == trn._("Dump Found Coordinates"):
        result = "\n".join("%d, %d, %d" % pos for pos in search)
        answer = ask(result, height=editor.height, colLabel="Matching Coordinates", responses=["Save", "OK"])
        if answer == "Save":
            fName = askSaveFile(getDocumentsFolder(), "Save to file...", "find.txt", 'TXT\0*.txt\0\0', 'txt')
            if fName:
                fData = "# MCEdit find output\n# Search options:\n# Match by: %s\n# Match block type: %s\n# Match block: %s\n# Match block data: %s\n# Match tile entities: %s\n# Match Tag Name:%s\n# Match Tag Value: %s\n# Case insensitive: %s\n# Match Tag Type: %s\n\n%s"%(by, matchtype, matchblock, matchdata, matchtile, matchname, matchval, caseSensitive, matchtagtype, result)
                open(fName, 'w').write(fData)
    else:
        # Build the result tree shown in the filter UI.
        treeData = {}
        # To set tooltip text to the items the need it, use a dict: {"value": <item to be added to the tree>, "tooltipText": "Some text"}
        for i in range(len(search)):
            if by == trn._('Block'):
                treeData[u"%s"%(search[i],)] = {"value": datas[i], "tooltipText": "Double-click to go to this item."}
            elif by == trn._('Entity'):
                treeData[u"%s"%((datas[i]['Pos'][0].value, datas[i]['Pos'][1].value, datas[i]['Pos'][2].value),)] = {"value": datas[i], "tooltipText": "Double-click to go to this item."}
            else:
                treeData[u"%s"%((datas[i]['x'].value, datas[i]['y'].value, datas[i]['z'].value),)] = {"value": datas[i], "tooltipText": "Double-click to go to this item."}
        # 'inputs' is presumably the filter's UI widget definition declared
        # elsewhere in this file -- verify before relying on its shape.
        inputs[1][1][1][1] = {'Data': treeData}
        options[""](inputs[1])
| 13,288 | 43.89527 | 383 | py |
MCEdit-Unified | MCEdit-Unified-master/stock-filters/Forester.py | # Version 5
'''This takes a base MineCraft level and adds or edits trees.
Place it in the folder where the save files are (usually .../.minecraft/saves)
Requires mcInterface.py in the same folder.'''
# Here are the variables you can edit.
# This is the name of the map to edit.
# Make a backup if you are experimenting!
LOADNAME = "LevelSave"
# How many trees do you want to add?
TREECOUNT = 12
# Where do you want the new trees?
# X, and Z are the map coordinates
X = 66
Z = -315
# How large an area do you want the trees to be in?
# for example, RADIUS = 10 will make place trees randomly in
# a circular area 20 blocks wide.
RADIUS = 80
# NOTE: tree density will be higher in the center than at the edges.
# Which shapes would you like the trees to be?
# these first three are best suited for small heights, from 5 - 10
# "normal" is the normal minecraft shape, it only gets taller and shorter
# "bamboo" a trunk with foliage, it only gets taller and shorter
# "palm" a trunk with a fan at the top, only gets taller and shorter
# "stickly" selects randomly from "normal", "bamboo" and "palm"
# these last five are best suited for very large trees, heights greater than 8
# "round" procedural spherical shaped tree, can scale up to immense size
# "cone" procedural, like a pine tree, also can scale up to immense size
# "procedural" selects randomly from "round" and "conical"
# "rainforest" many slender trees, most at the lower range of the height,
# with a few at the upper end.
# "mangrove" makes mangrove trees (see PLANTON below).
SHAPE = "procedural"
# What height should the trees be?
# Specifies the average height of the tree
# Examples:
# 5 is normal minecraft tree
# 3 is minecraft tree with foliage flush with the ground
# 10 is very tall trees, they will be hard to chop down
# NOTE: for round and conical, this affects the foliage size as well.
# CENTERHEIGHT is the height of the trees at the center of the area
# ie, when radius = 0
CENTERHEIGHT = 55
# EDGEHEIGHT is the height at the trees at the edge of the area.
# ie, when radius = RADIUS
EDGEHEIGHT = 25
# What should the variation in HEIGHT be?
# actual value +- variation
# default is 1
# Example:
# HEIGHT = 8 and HEIGHTVARIATION = 3 will result in
# trunk heights from 5 to 11
# value is clipped to a max of HEIGHT
# for a good rainforest, set this value not more than 1/2 of HEIGHT
HEIGHTVARIATION = 12
# Do you want branches, trunk, and roots?
# True makes all of that
# False does not create the trunk and branches, or the roots (even if they are
# enabled further down)
WOOD = True
# Trunk thickness multiplier
# from zero (super thin trunk) to whatever huge number you can think of.
# Only works if SHAPE is not a "stickly" subtype
# Example:
# 1.0 is the default, it makes decently normal sized trunks
# 0.3 makes very thin trunks
# 4.0 makes a thick trunk (good for HOLLOWTRUNK).
# 10.5 will make a huge thick trunk. Not even kidding. Makes spacious
# hollow trunks though!
TRUNKTHICKNESS = 1.0
# Trunk height, as a fraction of the tree
# Only works on "round" shaped trees
# Sets the height of the crown, where the trunk ends and splits
# Examples:
# 0.7 the default value, a bit more than half of the height
# 0.3 good for a fan-like tree
# 1.0 the trunk will extend to the top of the tree, and there will be no crown
# 2.0 the trunk will extend out the top of the foliage, making the tree appear
# like a cluster of green grapes impaled on a spike.
TRUNKHEIGHT = 0.7
# Do you want the trunk and tree broken off at the top?
# removes about half of the top of the trunk, and any foliage
# and branches that would attach above it.
# Only works if SHAPE is not a "stickly" subtype
# This results in trees that are shorter than the height settings
# True does that stuff
# False makes a normal tree (default)
BROKENTRUNK = False
# Note, this works well with HOLLOWTRUNK (below) turned on as well.
# Do you want the trunk to be hollow (or filled) inside?
# Only works with larger sized trunks.
# Only works if SHAPE is not a "stickly" subtype
# True makes the trunk hollow (or filled with other stuff)
# False makes a solid trunk (default)
HOLLOWTRUNK = False
# Note, this works well with BROKENTRUNK set to true (above)
# Further note, you may want to use a large value for TRUNKTHICKNESS
# How many branches should there be?
# General multiplier for the number of branches
# However, it will not make more branches than foliage clusters
# so to guarantee a branch to every foliage cluster, set it very high, like 10000
# this also affects the number of roots, if they are enabled.
# Examples:
# 1.0 is normal
# 0.5 will make half as many branches
# 2.0 will make twice as mnay branches
# 10000 will make a branch to every foliage cluster (I'm pretty sure)
BRANCHDENSITY = 1.0
# do you want roots from the bottom of the tree?
# Only works if SHAPE is "round" or "cone" or "procedural"
# "yes" roots will penetrate anything, and may enter underground caves.
# "tostone" roots will be stopped by stone (default see STOPSROOTS below).
# There may be some penetration.
# "hanging" will hang downward in air. Good for "floating" type maps
# (I really miss "floating" terrain as a default option)
# "no" roots will not be generated
ROOTS = "tostone"
# Do you want root buttresses?
# These make the trunk not-round at the base, seen in tropical or old trees.
# This option generally makes the trunk larger.
# Only works if SHAPE is "round" or "cone" or "procedural"
# Options:
# True makes root butresses
# False leaves them out
ROOTBUTTRESSES = True
# Do you want leaves on the trees?
# True there will be leaves
# False there will be no leaves
FOLIAGE = True
# How thick should the foliage be
# General multiplier for the number of foliage clusters
# Examples:
# 1.0 is normal
# 0.3 will make very sparse spotty trees, half as many foliage clusters
# 2.0 will make dense foliage, better for the "rainforests" SHAPE
FOLIAGEDENSITY = 1.0
# Limit the tree height to the top of the map?
# True the trees will not grow any higher than the top of the map
# False the trees may be cut off by the top of the map
MAPHEIGHTLIMIT = True
# add lights in the middle of foliage clusters
# for those huge trees that get so dark underneath
# or for enchanted forests that should glow and stuff
# Only works if SHAPE is "round" or "cone" or "procedural"
# 0 makes just normal trees
# 1 adds one light inside the foliage clusters for a bit of light
# 2 adds two lights around the base of each cluster, for more light
# 4 adds lights all around the base of each cluster for lots of light
LIGHTTREE = 0
# Do you want to only place trees near existing trees?
# True will only plant new trees near existing trees.
# False will not check for existing trees before planting.
# NOTE: the taller the tree, the larger the forest needs to be to qualify
# OTHER NOTE: this feature has not been extensively tested.
# IF YOU HAVE PROBLEMS: SET TO False
ONLYINFORESTS = False
#####################
# Advanced options! #
#####################
# What kind of material should the "wood" be made of?
# defaults to 17
WOODMAT = 17
# What data value should the wood blocks have?
# Some blocks, like wood, leaves, and cloth change
# appearance with different data values
# defaults to 0
WOODDATA = 0
# What kind of material should the "leaves" be made of?
# defaults to 18
LEAFMAT = 18
# What data value should the leaf blocks have?
# Some blocks, like wood, leaves, and cloth change
# appearance with different data values
# defaults to 0
LEAFDATA = 0
# What kind of material should the "lights" be made of?
# defaults to 89 (glowstone)
LIGHTMAT = 89
# What data value should the light blocks have?
# defaults to 0
LIGHTDATA = 0
# What kind of material would you like the "hollow" trunk filled with?
# defaults to 0 (air)
TRUNKFILLMAT = 0
# What data value would you like the "hollow" trunk filled with?
# defaults to 0
TRUNKFILLDATA = 0
# What kind of blocks should the trees be planted on?
# Use the Minecraft index.
# Examples
# 2 is grass (the default)
# 3 is dirt
# 1 is stone (an odd choice)
# 12 is sand (for beach or desert)
# 9 is water (if you want an aquatic forest)
# this is a list, and comma separated.
# example: [2, 3]
# will plant trees on grass or dirt
PLANTON = [2]
# What kind of blocks should stop the roots?
# a list of block id numbers like PLANTON
# Only works if ROOTS = "tostone"
# default, [1] (stone)
# if you want it to be stopped by other block types, add it to the list
STOPSROOTS = [1]
# What kind of blocks should stop branches?
# same as STOPSROOTS above, but is always turned on
# defaults to stone, cobblestone, and glass
# set it to [] if you want branches to go through everything
STOPSBRANCHES = [1, 4, 20]
# How do you want to interpolate from center to edge?
# "linear" makes a cone-shaped forest
# This is the only option at present
INTERPOLATION = "linear"
# Do a rough recalculation of the lighting?
# Slows it down to do a very rough and incomplete re-light.
# If you want to really fix the lighting, use a seperate re-lighting tool.
# True do the rough fix
# False don't bother
LIGHTINGFIX = True
# How many times do you want to try to find a location?
# it will stop planing after MAXTRIES has been exceeded.
# Set to smaller numbers to abort quicker, or larger numbers
# if you want to keep trying for a while.
# NOTE: the number of trees will not exceed this number
# Default: 1000
MAXTRIES = 1000
# Do you want lots of text telling you what is going on?
# True lots of text (default). Good for debugging.
# False no text
VERBOSE = True
##############################################################
# Don't edit below here unless you know what you are doing #
##############################################################
# input filtering
TREECOUNT = int(TREECOUNT)
if TREECOUNT < 0:
TREECOUNT = 0
if SHAPE not in ["normal", "bamboo", "palm", "stickly",
"round", "cone", "procedural",
"rainforest", "mangrove"]:
if VERBOSE:
print("SHAPE not set correctly, using 'procedural'.")
SHAPE = "procedural"
if CENTERHEIGHT < 1:
CENTERHEIGHT = 1
if EDGEHEIGHT < 1:
EDGEHEIGHT = 1
minheight = min(CENTERHEIGHT, EDGEHEIGHT)
if HEIGHTVARIATION > minheight:
HEIGHTVARIATION = minheight
if INTERPOLATION not in ["linear"]:
if VERBOSE:
print("INTERPOLATION not set correctly, using 'linear'.")
INTERPOLATION = "linear"
if WOOD not in [True, False]:
if VERBOSE:
print("WOOD not set correctly, using True")
WOOD = True
if TRUNKTHICKNESS < 0.0:
TRUNKTHICKNESS = 0.0
if TRUNKHEIGHT < 0.0:
TRUNKHEIGHT = 0.0
if ROOTS not in ["yes", "tostone", "hanging", "no"]:
if VERBOSE:
print("ROOTS not set correctly, using 'no' and creating no roots")
ROOTS = "no"
if ROOTBUTTRESSES not in [True, False]:
if VERBOSE:
print("ROOTBUTTRESSES not set correctly, using False")
ROOTBUTTRESSES = False
if FOLIAGE not in [True, False]:
    if VERBOSE:
        print("FOLIAGE not set correctly, using True")
    # Bug fix: this previously assigned ROOTBUTTRESSES = True (copy/paste
    # from the block above), leaving the invalid FOLIAGE value in place.
    FOLIAGE = True
if FOLIAGEDENSITY < 0.0:
FOLIAGEDENSITY = 0.0
if BRANCHDENSITY < 0.0:
BRANCHDENSITY = 0.0
if MAPHEIGHTLIMIT not in [True, False]:
if VERBOSE:
print("MAPHEIGHTLIMIT not set correctly, using False")
MAPHEIGHTLIMIT = False
if LIGHTTREE not in [0, 1, 2, 4]:
if VERBOSE:
print("LIGHTTREE not set correctly, using 0 for no torches")
LIGHTTREE = 0
# assemble the material dictionaries
WOODINFO = {'B': WOODMAT, 'D': WOODDATA}
LEAFINFO = {'B': LEAFMAT, 'D': LEAFDATA}
LIGHTINFO = {'B': LIGHTMAT, 'D': LIGHTDATA}
TRUNKFILLINFO = {'B': TRUNKFILLMAT, 'D': TRUNKFILLDATA}
# The following is an interface class for .mclevel data for minecraft savefiles.
# The following also includes a useful coordinate to index convertor and several
# other useful functions.
import mcInterface
#some handy functions
def dist_to_mat(cord, vec, matidxlist, mcmap, invert=False, limit=False):
    '''Step from cord along vec until a block in matidxlist is hit.

    Returns the number of steps taken. Stepping also stops (returning the
    step count) at the edge of the map or once `limit` steps are exceeded.
    If invert is truthy, search for any block NOT in matidxlist instead.
    '''
    assert isinstance(mcmap, mcInterface.SaveFile)
    get_block = mcmap.block
    # Start at the center of the given block.
    pos = [c + .5 for c in cord]
    steps = 0
    while True:
        info = get_block(int(pos[0]), int(pos[1]), int(pos[2]))
        if info is None:
            # Walked off the edge of the map.
            break
        found = info['B'] in matidxlist
        if found and invert is False:
            break
        if (not found) and invert:
            break
        # No match yet: advance one step along the direction vector.
        pos = [pos[axis] + vec[axis] for axis in range(3)]
        steps += 1
        if limit and steps > limit:
            break
    return steps
# This is the end of the MCLevel interface.
# Now, on to the actual code.
from random import random, choice, sample
from math import sqrt, sin, cos, pi
def calc_column_lighting(x, z, mclevel):
    '''Recalculate the sky lighting of the column.

    x, z    -- world column coordinates
    mclevel -- save-file interface providing block/heightmap accessors
    Returns None early if the column does not exist.
    '''
    # Begin at the top with sky light level 15.
    cur_light = 15
    # traverse the column until cur_light == 0
    # and the existing light values are also zero.
    y = 255
    get_block = mclevel.block
    set_block = mclevel.set_block
    get_height = mclevel.retrieve_heightmap
    set_height = mclevel.set_heightmap
    #get the current heightmap
    cur_height = get_height(x, z)
    # set a flag that the highest point has been updated
    height_updated = False
    # if this doesn't exist, the block doesn't exist either, abort.
    if cur_height is None:
        return None
    # block id -> per-block sky-light reduction. Presumably 0/20 are
    # air/glass (no reduction), 18 leaves, 8 water, 79 ice -- TODO confirm.
    light_reduction_lookup = {0: 0, 20: 0, 18: 1, 8: 2, 79: 2}
    while True:
        #get the block sky light and type
        block_info = get_block(x, y, z, 'BS')
        block_light = block_info['S']
        block_type = block_info['B']
        # update the height map if it hasn't been updated yet,
        # and the current block reduces light
        if (not height_updated) and (block_type not in (0, 20)):
            new_height = y + 1
            if new_height == 256:
                new_height = 255
            set_height(x, new_height, z)
            height_updated = True
        #compare block with cur_light, escape if both 0
        if block_light == 0 and cur_light == 0:
            break
        #set the block light if necessary
        if block_light != cur_light:
            set_block(x, y, z, {'S': cur_light})
        #set the new cur_light
        if block_type in light_reduction_lookup:
            # partial light reduction
            light_reduction = light_reduction_lookup[block_type]
        else:
            # full light reduction
            light_reduction = 16
        cur_light += -light_reduction
        if cur_light < 0:
            cur_light = 0
        #increment and check y
        y += -1
        if y < 0:
            break
class ReLight(object):
    '''Track which (x, z) columns need relighting, then relight them all.'''

    def __init__(self):
        # Set of (x, z) column coordinates awaiting a lighting pass.
        self.all_columns = set()
        # Save-file interface; assigned by the caller before calc_lighting.
        self.save_file = None

    def add(self, x, z):
        '''Mark column (x, z) as needing a lighting recalculation.'''
        self.all_columns.add((x, z))

    def calc_lighting(self):
        '''Recalculate sky lighting for every marked column.'''
        level = self.save_file
        for col_x, col_z in self.all_columns:
            calc_column_lighting(col_x, col_z, level)
relight_master = ReLight()
def assign_value(x, y, z, values, save_file):
    '''Write block values at (x, y, z) through save_file.

    Returns None when y is above the world ceiling (255); otherwise returns
    whatever save_file.set_block returns. When LIGHTINGFIX is enabled, the
    touched column is queued on relight_master for a later lighting pass.
    '''
    if y > 255:
        return None
    outcome = save_file.set_block(x, y, z, values)
    if LIGHTINGFIX:
        relight_master.add(x, z)
    return outcome
class Tree(object):
    '''Interface for tree objects. Designed for subclassing.

    Subclasses override prepare/maketrunk/makefoliage to build a tree.
    '''
    def prepare(self, mcmap):
        '''initialize the internal values for the Tree object.
        '''
        return None

    def maketrunk(self, mcmap):
        '''Generate the trunk and enter it in mcmap.
        '''
        return None

    def makefoliage(self, mcmap):
        """Generate the foliage and enter it in mcmap.
        Note, foliage will disintegrate if there is no log nearby"""
        return None

    def copy(self, other):
        '''Copy the essential values of the other tree object into self.
        '''
        self.pos = other.pos
        self.height = other.height

    def __init__(self, pos=None, height=1):
        '''Accept values for the position and height of a tree.
        Store them in self.

        pos defaults to [0, 0, 0]. Bug fix: the original used a mutable
        default argument, so every default-constructed tree shared (and
        could corrupt) one position list; now each gets a fresh list.
        '''
        if pos is None:
            pos = [0, 0, 0]
        self.pos = pos
        self.height = height
class StickTree(Tree):
    '''Base for trees with a one-block-wide trunk and simple geometry.
    Designed for subclassing; only builds the trunk.
    '''
    def maketrunk(self, mcmap):
        # Stack wood blocks straight up from the base position.
        base_x = self.pos[0]
        base_y = self.pos[1]
        base_z = self.pos[2]
        for level in range(self.height):
            assign_value(base_x, base_y + level, base_z, WOODINFO, mcmap)
class NormalTree(StickTree):
    '''A single bulb of foliage above a one-block trunk, much like the
    default Minecraft tree.
    '''
    def makefoliage(self, mcmap):
        # Foliage disintegrates without foliage below or a "log" block
        # within a square of range 2 at the same level or one level below.
        crown_y = self.pos[1] + self.height - 1
        bottom = crown_y - 2
        for y in range(bottom, crown_y + 2):
            # Narrow canopy (radius 1) on the top two layers, wide (2) below.
            rad = 1 if y > bottom + 1 else 2
            for dx in range(-rad, rad + 1):
                for dz in range(-rad, rad + 1):
                    # Randomly clip the square's corner blocks.
                    # (random() is drawn first to keep RNG order intact.)
                    if (random() > 0.618
                            and abs(dx) == abs(dz)
                            and abs(dx) == rad):
                        continue
                    assign_value(self.pos[0] + dx, y, self.pos[2] + dz,
                                 LEAFINFO, mcmap)
class BambooTree(StickTree):
    '''Bamboo: sparse foliage placed diagonally adjacent to the trunk.'''
    def makefoliage(self, mcmap):
        base_y = self.pos[1]
        for y in range(base_y, base_y + self.height + 1):
            # Two random diagonal leaf blocks per trunk level.
            for _ in range(2):
                leaf_x = self.pos[0] + choice([-1, 1])
                leaf_z = self.pos[2] + choice([-1, 1])
                assign_value(leaf_x, y, leaf_z, LEAFINFO, mcmap)
class PalmTree(StickTree):
    '''Palm: a fan of fronds sticking out diagonally from the trunk top.'''
    def makefoliage(self, mcmap):
        top_y = self.pos[1] + self.height
        for dx in range(-2, 3):
            for dz in range(-2, 3):
                # Only the two diagonals (|dx| == |dz|) get leaves.
                if abs(dx) != abs(dz):
                    continue
                assign_value(self.pos[0] + dx, top_y, self.pos[2] + dz,
                             LEAFINFO, mcmap)
class ProceduralTree(Tree):
'''Set up the methods for a larger more complicated tree.
This tree type has roots, a trunk, and branches all of varying width,
and many foliage clusters.
MUST BE SUBCLASSED. Specifically, self.foliage_shape must be set.
Subclass 'prepare' and 'shapefunc' to make different shaped trees.
'''
    @staticmethod
    def crossection(center, radius, diraxis, matidx, mcmap):
        '''Create a round section of material matidx in mcmap.
        Passed values:
        center = [x, y, z] for the coordinates of the center block
        radius = <number> as the radius of the section. May be a float or int.
        diraxis: The list index for the axis to make the section
        perpendicular to. 0 indicates the x axis, 1 the y, 2 the z. The
        section will extend along the other two axies.
        matidx = the material dict ({'B': ..., 'D': ...}, e.g. WOODINFO)
        passed through to assign_value for every block in the section.
        mcmap = the save-file object written via assign_value
        '''
        # The .618 fudge factor rounds the radius so sections look round.
        rad = int(radius + .618)
        if rad <= 0:
            return None
        # The two axes perpendicular to diraxis.
        secidx1 = (diraxis - 1) % 3
        secidx2 = (1 + diraxis) % 3
        coord = [0, 0, 0]
        for off1 in range(-rad, rad + 1):
            for off2 in range(-rad, rad + 1):
                # Distance from the section center to this block's center.
                thisdist = sqrt((abs(off1) + .5) ** 2 + (abs(off2) + .5) ** 2)
                if thisdist > radius:
                    continue
                pri = center[diraxis]
                sec1 = center[secidx1] + off1
                sec2 = center[secidx2] + off2
                coord[diraxis] = pri
                coord[secidx1] = sec1
                coord[secidx2] = sec2
                assign_value(coord[0], coord[1], coord[2], matidx, mcmap)
def shapefunc(self, y):
'''Take y and return a radius for the location of the foliage cluster.
If no foliage cluster is to be created, return None
Designed for sublcassing. Only makes clusters close to the trunk.
'''
if random() < 100. / (self.height ** 2) and y < self.trunkheight:
return self.height * .12
return None
def foliagecluster(self, center, mcmap):
'''generate a round cluster of foliage at the location center.
The shape of the cluster is defined by the list self.foliage_shape.
This list must be set in a subclass of ProceduralTree.
'''
level_radius = self.foliage_shape
x = center[0]
y = center[1]
z = center[2]
for i in level_radius:
self.crossection([x, y, z], i, 1, LEAFINFO, mcmap)
y += 1
    def taperedcylinder(self, start, end, startsize, endsize, mcmap, blockdata):
        '''Create a tapered cylinder in mcmap.
        start and end are the beginning and ending coordinates of form [x, y, z].
        startsize and endsize are the beginning and ending radius.
        blockdata is the material dict used for every crossection.
        '''
        # delta is the coordinate vector for the difference between
        # start and end.
        delta = [int(end[i] - start[i]) for i in range(3)]
        # primidx is the index (0, 1, or 2 for x, y, z) for the coordinate
        # which has the largest overall delta.
        maxdist = max(delta, key=abs)
        if maxdist == 0:
            # All three deltas are zero: degenerate cylinder, nothing to do.
            return None
        primidx = delta.index(maxdist)
        # secidx1 and secidx2 are the remaining indicies out of [0, 1, 2].
        secidx1 = (primidx - 1) % 3
        secidx2 = (1 + primidx) % 3
        # primsign is the digit 1 or -1 depending on whether the limb is headed
        # along the positive or negative primidx axis.
        primsign = int(delta[primidx] / abs(delta[primidx]))
        # secdelta1 and ...2 are the amount the associated values change
        # for every step along the prime axis.
        secdelta1 = delta[secidx1]
        secfac1 = float(secdelta1) / delta[primidx]
        secdelta2 = delta[secidx2]
        secfac2 = float(secdelta2) / delta[primidx]
        # Initialize coord. These values could be anything, since
        # they are overwritten.
        coord = [0, 0, 0]
        # Loop through each crossection along the primary axis,
        # from start to end.
        endoffset = delta[primidx] + primsign
        for primoffset in range(0, endoffset, primsign):
            primloc = start[primidx] + primoffset
            secloc1 = int(start[secidx1] + primoffset * secfac1)
            secloc2 = int(start[secidx2] + primoffset * secfac2)
            coord[primidx] = primloc
            coord[secidx1] = secloc1
            coord[secidx2] = secloc2
            primdist = abs(delta[primidx])
            # Linear interpolation of the radius from startsize to endsize.
            radius = endsize + (startsize - endsize) * abs(delta[primidx]
                                                           - primoffset) / primdist
            self.crossection(coord, radius, primidx, blockdata, mcmap)
    def makefoliage(self, mcmap):
        '''Generate the foliage for the tree in mcmap.
        '''
        """note, foliage will disintegrate if there is no foliage below, or
        if there is no "log" block within range 2 (square) at the same level or
        one level below"""
        foliage_coords = self.foliage_cords
        for coord in foliage_coords:
            self.foliagecluster(coord, mcmap)
        # Place a log block at each cluster center so the leaves persist,
        # plus optional light blocks around it per the LIGHTTREE setting.
        for cord in foliage_coords:
            assign_value(cord[0], cord[1], cord[2], WOODINFO, mcmap)
            if LIGHTTREE == 1:
                assign_value(cord[0], cord[1] + 1, cord[2], LIGHTINFO, mcmap)
            elif LIGHTTREE in [2, 4]:
                assign_value(cord[0] + 1, cord[1], cord[2], LIGHTINFO, mcmap)
                assign_value(cord[0] - 1, cord[1], cord[2], LIGHTINFO, mcmap)
                if LIGHTTREE == 4:
                    assign_value(cord[0], cord[1], cord[2] + 1, LIGHTINFO, mcmap)
                    assign_value(cord[0], cord[1], cord[2] - 1, LIGHTINFO, mcmap)
    def makebranches(self, mcmap):
        '''Generate the branches and enter them in mcmap.

        For each foliage cluster, randomly decide whether to grow a branch
        from the trunk out to it; nearer/lower clusters are more likely.
        '''
        treeposition = self.pos
        height = self.height
        topy = treeposition[1] + int(self.trunkheight + 0.5)
        # endrad is the base radius of the branches at the trunk
        endrad = self.trunkradius * (1 - self.trunkheight / height)
        if endrad < 1.0:
            endrad = 1.0
        for coord in self.foliage_cords:
            # Horizontal distance from the trunk to this cluster.
            dist = (sqrt(float(coord[0] - treeposition[0]) ** 2 +
                         float(coord[2] - treeposition[2]) ** 2))
            ydist = coord[1] - treeposition[1]
            # value is a magic number that weights the probability
            # of generating branches properly so that
            # you get enough on small trees, but not too many
            # on larger trees.
            # Very difficult to get right... do not touch!
            value = (self.branchdensity * 220 * height) / ((ydist + dist) ** 3)
            if value < random():
                continue
            posy = coord[1]
            # Jitter the branch slope slightly per branch.
            slope = self.branchslope + (0.5 - random()) * .16
            if coord[1] - dist * slope > topy:
                # Another random rejection, for branches between
                # the top of the trunk and the crown of the tree
                threshhold = 1 / float(height)
                if random() < threshhold:
                    continue
                branchy = topy
                basesize = endrad
            else:
                branchy = posy - dist * slope
                basesize = (endrad + (self.trunkradius - endrad) *
                            (topy - branchy) / self.trunkheight)
            startsize = (basesize * (1 + random()) * .618 *
                         (dist / height) ** 0.618)
            # Randomly offset the branch's trunk-side anchor point.
            rndr = sqrt(random()) * basesize * 0.618
            rndang = random() * 2 * pi
            rndx = int(rndr * sin(rndang) + 0.5)
            rndz = int(rndr * cos(rndang) + 0.5)
            startcoord = [treeposition[0] + rndx,
                          int(branchy),
                          treeposition[2] + rndz]
            if startsize < 1.0:
                startsize = 1.0
            endsize = 1.0
            self.taperedcylinder(startcoord, coord, startsize, endsize,
                                 mcmap, WOODINFO)
    def makeroots(self, rootbases, mcmap):
        '''generate the roots and enter them in mcmap.
        rootbases = [[x, z, base_radius], ...] and is the list of locations
        the roots can originate from, and the size of that location.

        Roots mirror the branch layout: each foliage cluster may spawn one
        root heading the opposite way, subject to the same random rejection.
        '''
        treeposition = self.pos
        height = self.height
        for coord in self.foliage_cords:
            # First, set the threshhold for randomly selecting this
            # coordinate for root creation.
            dist = (sqrt(float(coord[0] - treeposition[0]) ** 2 +
                         float(coord[2] - treeposition[2]) ** 2))
            ydist = coord[1] - treeposition[1]
            value = (self.branchdensity * 220 * height) / ((ydist + dist) ** 3)
            # Randomly skip roots, based on the above threshold
            if value < random():
                continue
            # initialize the internal variables from a selection of
            # starting locations.
            rootbase = choice(rootbases)
            rootx = rootbase[0]
            rootz = rootbase[1]
            rootbaseradius = rootbase[2]
            # Offset the root origin location by a random amount
            # (radialy) from the starting location.
            rndr = (sqrt(random()) * rootbaseradius * .618)
            rndang = random() * 2 * pi
            rndx = int(rndr * sin(rndang) + 0.5)
            rndz = int(rndr * cos(rndang) + 0.5)
            rndy = int(random() * rootbaseradius * 0.5)
            startcoord = [rootx + rndx, treeposition[1] + rndy, rootz + rndz]
            # offset is the distance from the root base to the root tip.
            offset = [startcoord[i] - coord[i] for i in range(3)]
            # If this is a mangrove tree, make the roots longer.
            if SHAPE == "mangrove":
                offset = [int(val * 1.618 - 1.5) for val in offset]
            endcoord = [startcoord[i] + offset[i] for i in range(3)]
            rootstartsize = (rootbaseradius * 0.618 * abs(offset[1]) /
                             (height * 0.618))
            if rootstartsize < 1.0:
                rootstartsize = 1.0
            endsize = 1.0
            # If ROOTS is set to "tostone" or "hanging" we need to check
            # along the distance for collision with existing materials.
            if ROOTS in ["tostone", "hanging"]:
                offlength = sqrt(float(offset[0]) ** 2 +
                                 float(offset[1]) ** 2 +
                                 float(offset[2]) ** 2)
                if offlength < 1:
                    continue
                rootmid = endsize
                # vec is a unit vector along the direction of the root.
                vec = [offset[i] / offlength for i in range(3)]
                if ROOTS == "tostone":
                    searchindex = STOPSROOTS
                elif ROOTS == "hanging":
                    searchindex = [0]
                # startdist is how many steps to travel before starting to
                # search for the material. It is used to ensure that large
                # roots will go some distance before changing directions
                # or stopping.
                startdist = int(random() * 6 * sqrt(rootstartsize) + 2.8)
                # searchstart is the coordinate where the search should begin
                searchstart = [startcoord[i] + startdist * vec[i]
                               for i in range(3)]
                # dist stores how far the search went (including searchstart)
                # before encountering the expected marterial.
                dist = startdist + dist_to_mat(searchstart, vec,
                                               searchindex, mcmap, limit=offlength)
                # If the distance to the material is less than the length
                # of the root, change the end point of the root to where
                # the search found the material.
                if dist < offlength:
                    # rootmid is the size of the crossection at endcoord.
                    rootmid += (rootstartsize -
                                endsize) * (1 - dist / offlength)
                    # endcoord is the midpoint for hanging roots,
                    # and the endpoint for roots stopped by stone.
                    endcoord = [startcoord[i] + int(vec[i] * dist)
                                for i in range(3)]
                    if ROOTS == "hanging":
                        # remaining_dist is how far the root had left
                        # to go when it was stopped.
                        remaining_dist = offlength - dist
                        # Initialize bottomcord to the stopping point of
                        # the root, and then hang straight down
                        # a distance of remaining_dist.
                        bottomcord = endcoord[:]
                        bottomcord[1] += -int(remaining_dist)
                        # Make the hanging part of the hanging root.
                        self.taperedcylinder(endcoord, bottomcord,
                                             rootmid, endsize, mcmap, WOODINFO)
                # make the beginning part of hanging or "tostone" roots
                self.taperedcylinder(startcoord, endcoord,
                                     rootstartsize, rootmid, mcmap, WOODINFO)
            # If you aren't searching for stone or air, just make the root.
            else:
                self.taperedcylinder(startcoord, endcoord,
                                     rootstartsize, endsize, mcmap, WOODINFO)
    def maketrunk(self, mcmap):
        '''Generate the trunk, roots, and branches in mcmap.

        Builds, bottom to top: optional root buttresses, a two-piece
        tapered trunk, the branches, the roots, and (optionally) a
        hollowed-out trunk interior.  Shape values come from attributes
        set up by prepare() and from module-level settings defined
        elsewhere in the file (ROOTBUTTRESSES, SHAPE, ROOTS,
        HOLLOWTRUNK, WOODINFO, TRUNKFILLINFO).
        '''
        height = self.height
        trunkheight = self.trunkheight
        trunkradius = self.trunkradius
        treeposition = self.pos
        starty = treeposition[1]
        # midy is where the trunk taper changes; topy is the trunk top.
        midy = treeposition[1] + int(trunkheight * .382)
        topy = treeposition[1] + int(trunkheight + 0.5)
        # In this method, x and z are the position of the trunk.
        x = treeposition[0]
        z = treeposition[2]
        end_size_factor = trunkheight / height
        midrad = trunkradius * (1 - end_size_factor * .5)
        endrad = trunkradius * (1 - end_size_factor)
        # Clamp: never taper below one block, and the middle section is
        # never thinner than the top.
        if endrad < 1.0:
            endrad = 1.0
        if midrad < endrad:
            midrad = endrad
        # Make the root buttresses, if indicated
        if ROOTBUTTRESSES or SHAPE == "mangrove":
            # The start radius of the trunk should be a little smaller if we
            # are using root buttresses.
            startrad = trunkradius * .8
            # rootbases is used later in self.makeroots(...) as
            # starting locations for the roots.
            rootbases = [[x, z, startrad]]
            buttress_radius = trunkradius * 0.382
            # posradius is how far the root buttresses should be offset
            # from the trunk.
            posradius = trunkradius
            # In mangroves, the root buttresses are much more extended.
            if SHAPE == "mangrove":
                posradius *= 2.618
            num_of_buttresses = int(sqrt(trunkradius) + 3.5)
            for i in range(num_of_buttresses):
                # Each buttress gets a random angle and a slightly
                # jittered distance from the trunk center.
                rndang = random() * 2 * pi
                thisposradius = posradius * (0.9 + random() * .2)
                # thisx and thisz are the x and z position for the base of
                # the root buttress.
                thisx = x + int(thisposradius * sin(rndang))
                thisz = z + int(thisposradius * cos(rndang))
                # thisbuttressradius is the radius of the buttress.
                # Currently, root buttresses do not taper.
                thisbuttressradius = buttress_radius * (0.618 + random())
                if thisbuttressradius < 1.0:
                    thisbuttressradius = 1.0
                # Make the root buttress.
                self.taperedcylinder([thisx, starty, thisz], [x, midy, z],
                                     thisbuttressradius, thisbuttressradius,
                                     mcmap, WOODINFO)
                # Add this root buttress as a possible location at
                # which roots can spawn.
                rootbases += [[thisx, thisz, thisbuttressradius]]
        else:
            # If root buttresses are turned off, set the trunk radius
            # to normal size.
            startrad = trunkradius
            rootbases = [[x, z, startrad]]
        # Make the lower and upper sections of the trunk.
        self.taperedcylinder([x, starty, z], [x, midy, z], startrad, midrad,
                             mcmap, WOODINFO)
        self.taperedcylinder([x, midy, z], [x, topy, z], midrad, endrad,
                             mcmap, WOODINFO)
        # Make the branches
        self.makebranches(mcmap)
        # Make the roots, if indicated.
        if ROOTS in ["yes", "tostone", "hanging"]:
            self.makeroots(rootbases, mcmap)
        # Hollow the trunk, if specified.
        # Check to make sure that the trunk is large enough to be hollow.
        if trunkradius > 2 and HOLLOWTRUNK:
            # wall_thickness is actually double the wall thickness:
            # it is a diameter difference, not a radius difference.
            wall_thickness = (1 + trunkradius * 0.1 * random())
            if wall_thickness < 1.3:
                wall_thickness = 1.3
            base_radius = trunkradius - wall_thickness
            if base_radius < 1:
                base_radius = 1.0
            mid_radius = midrad - wall_thickness
            top_radius = endrad - wall_thickness
            # The starting x and z can be offset by up to the wall thickness.
            base_offset = int(wall_thickness)
            x_choices = [i for i in range(x - base_offset,
                                          x + base_offset + 1)]
            start_x = choice(x_choices)
            z_choices = [i for i in range(z - base_offset,
                                          z + base_offset + 1)]
            start_z = choice(z_choices)
            self.taperedcylinder([start_x, starty, start_z], [x, midy, z],
                                 base_radius, mid_radius,
                                 mcmap, TRUNKFILLINFO)
            # Carve slightly past the trunk top so the hollow opens out.
            hollow_top_y = int(topy + trunkradius + 1.5)
            self.taperedcylinder([x, midy, z], [x, hollow_top_y, z],
                                 mid_radius, top_radius,
                                 mcmap, TRUNKFILLINFO)
    def prepare(self, mcmap):
        '''Initialize the internal values for the Tree object.

        Primarily, sets up the foliage cluster locations
        (self.foliage_cords), plus the trunk radius/height and branch
        density, reading the module-level settings TRUNKTHICKNESS,
        BROKENTRUNK, BRANCHDENSITY, FOLIAGEDENSITY, and STOPSBRANCHES.
        '''
        treeposition = self.pos
        self.trunkradius = .618 * sqrt(self.height * TRUNKTHICKNESS)
        if self.trunkradius < 1:
            self.trunkradius = 1
        if BROKENTRUNK:
            # A broken trunk stops somewhere between 30% and 70% of the
            # full tree height.
            self.trunkheight = self.height * (.3 + random() * .4)
            yend = int(treeposition[1] + self.trunkheight + .5)
        else:
            self.trunkheight = self.height
            yend = int(treeposition[1] + self.height)
        self.branchdensity = BRANCHDENSITY / FOLIAGEDENSITY
        topy = treeposition[1] + int(self.trunkheight + 0.5)
        foliage_coords = []
        ystart = treeposition[1]
        num_of_clusters_per_y = int(1.5 + (FOLIAGEDENSITY *
                                           self.height / 19.) ** 2)
        if num_of_clusters_per_y < 1:
            num_of_clusters_per_y = 1
        # make sure we don't spend too much time off the top of the map
        if yend > 255:
            yend = 255
        if ystart > 255:
            ystart = 255
        for y in range(yend, ystart, -1):
            for i in range(num_of_clusters_per_y):
                # shapefunc returns the cluster radius at this level, or
                # None when no foliage belongs at this height.
                shapefac = self.shapefunc(y - ystart)
                if shapefac is None:
                    continue
                # Scatter the cluster at a random angle and a random
                # radius scaled by the shape function.
                r = (sqrt(random()) + .328) * shapefac
                theta = random() * 2 * pi
                x = int(r * sin(theta)) + treeposition[0]
                z = int(r * cos(theta)) + treeposition[2]
                # if there are values to search in STOPSBRANCHES
                # then check to see if this cluster is blocked
                # by stuff, like dirt or rock, or whatever
                if len(STOPSBRANCHES):
                    dist = (sqrt(float(x - treeposition[0]) ** 2 +
                                 float(z - treeposition[2]) ** 2))
                    slope = self.branchslope
                    if y - dist * slope > topy:
                        # the top of the tree
                        starty = topy
                    else:
                        starty = y - dist * slope
                    # the start position of the search
                    start = [treeposition[0], starty, treeposition[2]]
                    offset = [x - treeposition[0],
                              y - starty,
                              z - treeposition[2]]
                    offlength = sqrt(offset[0] ** 2 + offset[1] ** 2 + offset[2] ** 2)
                    # if the branch is as short as... nothing, don't bother.
                    if offlength < 1:
                        continue
                    # unit vector for the search
                    vec = [offset[i] / offlength for i in range(3)]
                    mat_dist = dist_to_mat(start, vec, STOPSBRANCHES,
                                           mcmap, limit=offlength + 3)
                    # after all that, if you find something, don't add
                    # this coordinate to the list
                    if mat_dist < offlength + 2:
                        continue
                foliage_coords += [[x, y, z]]
        # NOTE: attribute name keeps the original (misspelled) spelling
        # "foliage_cords" because other methods read it by this name.
        self.foliage_cords = foliage_coords
class RoundTree(ProceduralTree):
    """A deciduous-looking tree with a roughly spherical foliage cloud."""

    def prepare(self, mcmap):
        # Shallow branch slope, then adjust the generic trunk dimensions.
        self.branchslope = 0.382
        ProceduralTree.prepare(self, mcmap)
        self.foliage_shape = [2, 3, 3, 2.5, 1.6]
        self.trunkradius *= 0.8
        self.trunkheight *= TRUNKHEIGHT

    def shapefunc(self, y):
        # Random twigs outside the canopy come from the base class.
        twig_radius = ProceduralTree.shapefunc(self, y)
        if twig_radius is not None:
            return twig_radius
        # No foliage below a slightly randomized fraction of the height.
        if y < self.height * (.282 + .1 * sqrt(random())):
            return None
        # Model the canopy as a sphere of radius height/2 centered at
        # half-height; the cluster radius is the half-chord at level y.
        sphere_radius = self.height / 2.
        vertical_offset = sphere_radius - y
        if abs(vertical_offset) >= sphere_radius:
            chord = 0
        else:
            chord = sqrt(sphere_radius ** 2 - vertical_offset ** 2)
        return chord * .618
class ConeTree(ProceduralTree):
    """A conifer-like tree: foliage radius shrinks linearly to the tip."""

    # woodType is the kind of wood the tree has, a data value
    woodType = 1

    def prepare(self, mcmap):
        # Conifer branches droop only slightly; thin the trunk after the
        # generic preparation runs.
        self.branchslope = 0.15
        ProceduralTree.prepare(self, mcmap)
        self.foliage_shape = [3, 2.6, 2, 1]
        self.trunkradius *= 0.5

    def shapefunc(self, y):
        # Random twigs outside the canopy come from the base class.
        twig_radius = ProceduralTree.shapefunc(self, y)
        if twig_radius is not None:
            return twig_radius
        # No foliage on roughly the bottom quarter of the tree.
        if y < self.height * (.25 + .05 * sqrt(random())):
            return None
        # Radius tapers linearly toward zero at the top, never negative.
        return max((self.height - y) * 0.382, 0)
class RainforestTree(ProceduralTree):
    """A rainforest-style tree: bare trunk with a wide cap of foliage
    concentrated in the top fifth, plus rare twigs on tall trees."""

    def prepare(self, mcmap):
        self.foliage_shape = [3.4, 2.6]
        self.branchslope = 1.0
        ProceduralTree.prepare(self, mcmap)
        self.trunkradius *= 0.382
        self.trunkheight *= .9

    def shapefunc(self, y):
        if y >= self.height * 0.8:
            # Dense canopy: radius widens toward the very top.
            half_width = self.height * .382
            top_fraction = (self.height - y) / (self.height * 0.2)
            return half_width * (0.618 + top_fraction) * (0.618 + random()) * 0.382
        # Below the canopy: occasional twigs, and only on tall trees.
        if EDGEHEIGHT < self.height:
            twig_radius = ProceduralTree.shapefunc(self, y)
            if twig_radius is not None and random() < 0.07:
                return twig_radius
        return None
class MangroveTree(RoundTree):
    """A mangrove: a round tree with steep branches, a thinner trunk,
    and a foliage cloud scaled up by the golden ratio."""

    def prepare(self, mcmap):
        self.branchslope = 1.0
        RoundTree.prepare(self, mcmap)
        self.trunkradius *= 0.618

    def shapefunc(self, y):
        base_radius = RoundTree.shapefunc(self, y)
        if base_radius is None:
            return None
        return base_radius * 1.618
def planttrees(mcmap, treelist):
    '''Take mcmap and add trees to random locations on the surface to treelist.

    Picks random spots inside the circle (X, Z, RADIUS), keeps those whose
    surface block is in PLANTON, interpolates tree height between
    CENTERHEIGHT and EDGEHEIGHT (plus HEIGHTVARIATION jitter), and appends
    Tree objects to treelist until TREECOUNT is reached or MAXTRIES random
    probes have been spent.
    '''
    assert isinstance(mcmap, mcInterface.SaveFile)
    # keep looping until all the trees are placed
    # calc the radius difference, for interpolation
    in_out_dif = EDGEHEIGHT - CENTERHEIGHT
    if VERBOSE:
        print('Tree Locations: x, y, z, tree height')
    tries = 0
    max_tries = MAXTRIES
    while len(treelist) < TREECOUNT:
        if tries > max_tries:
            if VERBOSE:
                print("Stopping search for tree locations after {0} tries".format(tries))
                print("If you don't have enough trees, check X, Y, RADIUS, and PLANTON")
            break
        tries += 1
        # choose a location
        rad_fraction = random()
        # this is some kind of square interpolation:
        # 1 - (1 - u)**2 biases the radius toward the outer edge.
        rad_fraction = 1.0 - rad_fraction
        rad_fraction **= 2
        rad_fraction = 1.0 - rad_fraction
        rad = rad_fraction * RADIUS
        ang = random() * pi * 2
        x = X + int(rad * sin(ang) + .5)
        z = Z + int(rad * cos(ang) + .5)
        # check to see if this location is suitable
        y_top = mcmap.surface_block(x, z)
        if y_top is None:
            # this location is off the map!
            continue
        if y_top['B'] in PLANTON:
            # plant the tree on the block above the ground
            # hence the " + 1"
            y = y_top['y'] + 1
        else:
            continue
        # this is linear interpolation also.
        base_height = CENTERHEIGHT + (in_out_dif * rad_fraction)
        height_rand = (random() - .5) * 2 * HEIGHTVARIATION
        height = int(base_height + height_rand)
        # if the option is set, check the surrounding area for trees
        if ONLYINFORESTS:
            '''we are looking for foliage
            it should show up in the "surface_block" search
            check every fifth block in a square pattern,
            offset around the trunk
            and equal to the trees height
            if the area is not at least one third foliage,
            don't build the tree'''
            # spacing is how far apart each sample should be
            spacing = 5
            # search_size is how many blocks to check
            # along each axis
            search_size = 2 + (height // spacing)
            # check at least 3 x 3
            search_size = max([search_size, 3])
            # set up the offset values to offset the starting corner
            offset = ((search_size - 1) * spacing) // 2
            # foliage_count is the total number of foliage blocks found
            foliage_count = 0
            # check each sample location for foliage
            for step_x in range(search_size):
                # search_x is the x location to search this sample
                search_x = x - offset + (step_x * spacing)
                for step_z in range(search_size):
                    # same as for search_x
                    search_z = z - offset + (step_z * spacing)
                    search_block = mcmap.surface_block(search_x, search_z)
                    if search_block is None:
                        continue
                    if search_block['B'] == 18:
                        # this sample contains foliage!
                        # add it to the total
                        foliage_count += 1
            # now that we have the total count, find the ratio
            total_searched = search_size ** 2
            foliage_ratio = foliage_count / total_searched
            # the acceptable amount is about a third
            acceptable_ratio = .3
            if foliage_ratio < acceptable_ratio:
                # after all that work, there wasn't enough foliage around!
                # try again!
                continue
        # generate the new tree
        newtree = Tree([x, y, z], height)
        if VERBOSE:
            print(x, y, z, height)
        treelist += [newtree]
def processtrees(mcmap, treelist):
    '''Initialize all of the trees in treelist.

    Set all of the trees to the right type, and run prepare. If indicated,
    limit the height of the trees to the top of the map.

    NOTE(fix): the original if/elif chain left ``newtree`` unbound for an
    unrecognized shape name (and silently reused the previous iteration's
    tree after the first pass); unknown shapes now raise ValueError.
    '''
    assert isinstance(mcmap, mcInterface.SaveFile)
    if SHAPE == "stickly":
        shape_choices = ["normal", "bamboo", "palm"]
    elif SHAPE == "procedural":
        shape_choices = ["round", "cone"]
    else:
        shape_choices = [SHAPE]
    # Map each shape name to its tree class; replaces a long if/elif chain
    # and makes unrecognized shapes an explicit error.
    tree_classes = {
        "normal": NormalTree,
        "bamboo": BambooTree,
        "palm": PalmTree,
        "round": RoundTree,
        "cone": ConeTree,
        "rainforest": RainforestTree,
        "mangrove": MangroveTree,
    }
    # initialize mapheight, just in case
    # TODO: read the real height limit from the map instead of assuming 255.
    mapheight = 255
    for i, oldtree in enumerate(treelist):
        newshape = choice(shape_choices)
        try:
            newtree = tree_classes[newshape]()
        except KeyError:
            raise ValueError(
                "unknown tree shape: {0!r}; check SHAPE".format(newshape)
            ) from None
        # Get the height and position of the existing trees in the list.
        newtree.copy(oldtree)
        # Now check each tree to ensure that it doesn't stick out the top
        # of the map. If it does, shorten it until the top of the foliage
        # just touches the top of the map.
        if MAPHEIGHTLIMIT:
            # Rainforest foliage hugs the trunk top more tightly.
            foliageheight = 2 if SHAPE == "rainforest" else 4
            ybase = newtree.pos[1]
            if ybase + newtree.height + foliageheight > mapheight:
                newtree.height = mapheight - ybase - foliageheight
        # Even if it sticks out the top of the map, every tree should be
        # at least one unit tall.
        if newtree.height < 1:
            newtree.height = 1
        newtree.prepare(mcmap)
        treelist[i] = newtree
def main(the_map):
    '''Create the trees: plant them, convert them, then render foliage
    and wood into the_map.
    '''
    def announce(message):
        # Console chatter is gated on the module-level VERBOSE flag.
        if VERBOSE:
            print(message)

    treelist = []
    announce("Planting new trees")
    planttrees(the_map, treelist)
    announce("Processing tree changes")
    processtrees(the_map, treelist)
    if FOLIAGE:
        announce("Generating foliage ")
        for tree in treelist:
            tree.makefoliage(the_map)
        announce(' completed')
    if WOOD:
        announce("Generating trunks, roots, and branches ")
        for tree in treelist:
            tree.maketrunk(the_map)
        announce(' completed')
    return None
def standalone():
    '''Load the save file named by LOADNAME, run main(), optionally
    re-light the map, and write the result back to disk.
    '''
    def announce(message):
        # Console chatter is gated on the module-level VERBOSE flag.
        if VERBOSE:
            print(message)

    announce("Importing the map")
    try:
        the_map = mcInterface.SaveFile(LOADNAME)
    except IOError:
        announce('File name invalid or save file otherwise corrupted. Aborting')
        return None
    main(the_map)
    if LIGHTINGFIX:
        announce("Rough re-lighting the map")
        relight_master.save_file = the_map
        relight_master.calc_lighting()
    announce("Saving the map, this could be a while")
    the_map.write()
    announce("finished")
# Run the full plant/generate/save pipeline when executed as a script.
if __name__ == '__main__':
    standalone()
# to do:
# get height limits from map
# set "limit height" or somesuch to respect level height limits
# (dataset metadata, not source code) 51,634 chars | avg line 37.16 | max line 89 | py
# File: lale-master/setup.py
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime

from setuptools import find_packages, setup

logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())

try:
    import builtins

    # This trick is borrowed from scikit-learn
    # This is a bit (!) hackish: we are setting a global variable so that the
    # main lale __init__ can detect if it is being loaded by the setup
    # routine, to avoid attempting to import components before installation.
    builtins.__LALE_SETUP__ = True  # type: ignore
except ImportError:
    pass

# The long description shown on PyPI is the project README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# On Read the Docs builds, skip the runtime requirements: only package
# metadata is needed to build the documentation.
on_rtd = os.environ.get("READTHEDOCS") == "True"
if on_rtd:
    install_requires = []
else:
    install_requires = [
        "numpy<1.24",
        "black>=22.1.0",
        "click==8.0.4",
        "graphviz",
        "hyperopt>=0.2,<=0.2.5",
        "jsonschema",
        "jsonsubschema>=0.0.6",
        "scikit-learn>=1.0.0,<=1.2.0",
        "scipy<1.11.0",
        "pandas<2.0.0",
        "packaging",
        "decorator",
        "astunparse",
        "typing-extensions",
    ]

# Imported only after __LALE_SETUP__ is set (see the builtins trick above),
# hence the suppressed import-position warnings.
import lale  # noqa: E402 # pylint:disable=wrong-import-position

# On Travis CI, append a build timestamp so successive CI uploads get
# distinct version strings.
if "TRAVIS" in os.environ:
    now = datetime.now().strftime("%y%m%d%H%M")
    VERSION = f"{lale.__version__}-{now}"
else:
    VERSION = lale.__version__

# Optional dependency groups, installable as e.g. ``pip install lale[full]``.
extras_require = {
    "full": [
        "mystic",
        "xgboost<=1.5.1",
        "lightgbm<4.0.0",
        "snapml>=1.7.0rc3,<1.12.0",
        "liac-arff>=2.4.0",
        "tensorflow>=2.4.0",
        "smac<=0.10.0",
        "numba",
        "aif360>=0.4.0",
        "protobuf<=3.20.1",
        "torch>=1.0",
        "BlackBoxAuditing",
        "imbalanced-learn",
        "cvxpy>=1.0",
        "fairlearn",
        "h5py",
    ],
    "dev": ["pre-commit"],
    "test": [
        "mystic",
        "joblib",
        "ipython<8.8.0",
        "jupyter",
        "sphinx>=5.0.0",
        "sphinx_rtd_theme>=0.5.2",
        "docutils<0.17",
        "m2r2",
        "sphinxcontrib.apidoc",
        "sphinxcontrib-svg2pdfconverter",
        "pytest",
        "pyspark",
        "func_timeout",
        "category-encoders",
        "pynisher==0.6.4",
    ],
    "fairness": [
        "mystic",
        "liac-arff>=2.4.0",
        "aif360<0.6.0",
        "imbalanced-learn",
        "protobuf<=3.20.1",
        "BlackBoxAuditing",
    ],
    "tutorial": [
        "ipython<8.8.0",
        "jupyter",
        "xgboost<=1.5.1",
        "imbalanced-learn",
        "liac-arff>=2.4.0",
        "aif360==0.5.0",
        "protobuf<=3.20.1",
        "BlackBoxAuditing",
        "typing-extensions",
    ],
}

# PyPI trove classifiers for supported platforms and Python versions.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: MacOS",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: POSIX",
    "Operating System :: Unix",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Topic :: Software Development",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]

setup(
    name="lale",
    version=VERSION,
    author="Guillaume Baudart, Martin Hirzel, Kiran Kate, Parikshit Ram, Avraham Shinnar",
    description="Library for Semi-Automated Data Science",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/IBM/lale",
    python_requires=">=3.6",
    package_data={"lale": ["py.typed"]},
    packages=find_packages(),
    license="Apache License 2.0",
    classifiers=classifiers,
    install_requires=install_requires,
    extras_require=extras_require,
)
# (dataset metadata, not source code) 4,556 chars | avg line 27.30 | max line 90 | py
# File: lale-master/test/test_custom_schemas.py
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import EnableSchemaValidation
from test.mock_module import CustomOrigOperator, UnknownOp
import numpy as np
from lightgbm import LGBMClassifier as baz
from sklearn.decomposition import PCA as sk_PCA
from sklearn.linear_model import Lars as sk_lars
from xgboost import XGBClassifier as alt_xgb
import lale
import lale.type_checking
from lale import schemas
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
class TestCustomSchema(unittest.TestCase):
    '''Tests for Operator.customize_schema.

    Each test checks three things: the customized copy has the new
    schema fragment, the result still validates as a schema, and the
    original operator object is left unmodified.
    '''

    def setUp(self):
        import sklearn.decomposition

        from lale.lib.sklearn import PCA as lale_PCA
        from lale.operators import make_operator

        # sk_pca: a bare wrap of sklearn's PCA with empty schemas;
        # ll_pca: lale's fully-documented PCA operator.
        self.sk_pca = make_operator(sklearn.decomposition.PCA, schemas={})
        self.ll_pca = lale_PCA
        self.maxDiff = None

    def test_override_schemas(self):
        # Replace the entire schema dictionary at once.
        with EnableSchemaValidation():
            init_schemas = self.sk_pca._schemas
            pca_schemas = self.ll_pca._schemas
            custom = self.sk_pca.customize_schema(schemas=schemas.JSON(pca_schemas))
            self.assertEqual(custom._schemas, pca_schemas)
            self.assertEqual(self.sk_pca._schemas, init_schemas)
            self.assertRaises(Exception, self.sk_pca.customize_schema, schemas={})

    def test_override_input(self):
        # Replace just the input_fit schema.
        with EnableSchemaValidation():
            init_input_schema = self.sk_pca.get_schema("input_fit")
            pca_input = self.ll_pca.get_schema("input_fit")
            custom = self.sk_pca.customize_schema(input_fit=schemas.JSON(pca_input))
            self.assertEqual(custom.get_schema("input_fit"), pca_input)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(self.sk_pca.get_schema("input_fit"), init_input_schema)
            self.assertRaises(Exception, self.sk_pca.customize_schema, input_fit=42)
            _ = self.sk_pca.customize_schema(input_foo=pca_input)

    def test_override_output(self):
        # Replace just the output_transform schema.
        with EnableSchemaValidation():
            init_output_schema = self.sk_pca.get_schema("output_transform")
            pca_output = self.ll_pca.get_schema("output_transform")
            custom = self.sk_pca.customize_schema(
                output_transform=schemas.JSON(pca_output)
            )
            self.assertEqual(custom.get_schema("output_transform"), pca_output)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(
                self.sk_pca.get_schema("output_transform"), init_output_schema
            )
            self.assertRaises(Exception, self.sk_pca.customize_schema, output=42)
            _ = self.sk_pca.customize_schema(output_foo=pca_output)

    def test_override_output2(self):
        # Build the output schema from the schemas combinator classes
        # rather than from raw JSON.
        init_output_schema = self.sk_pca.get_schema("output_transform")
        pca_output = schemas.AnyOf(
            [
                schemas.Array(schemas.Array(schemas.Float())),
                schemas.Array(schemas.Float()),
            ]
        )
        expected = {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
                {"type": "array", "items": {"type": "number"}},
            ]
        }
        custom = self.sk_pca.customize_schema(output_transform=pca_output)
        self.assertEqual(custom.get_schema("output_transform"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.sk_pca.get_schema("output_transform"), init_output_schema)

    def test_override_bool_param_sk(self):
        # Override one boolean hyperparameter on the bare sklearn wrap.
        with EnableSchemaValidation():
            init = self.sk_pca.hyperparam_schema("whiten")
            expected = {"default": True, "type": "boolean", "description": "override"}
            custom = self.sk_pca.customize_schema(
                whiten=schemas.Bool(default=True, desc="override")
            )
            self.assertEqual(custom.hyperparam_schema("whiten"), expected)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(self.sk_pca.hyperparam_schema("whiten"), init)
            self.assertRaises(Exception, self.sk_pca.customize_schema, whitenX=42)

    def test_override_bool_param_ll(self):
        # Same override, but on the documented lale PCA operator.
        with EnableSchemaValidation():
            init = self.ll_pca.hyperparam_schema("whiten")
            expected = {"default": True, "type": "boolean"}
            custom = self.ll_pca.customize_schema(whiten=schemas.Bool(default=True))
            self.assertEqual(custom.hyperparam_schema("whiten"), expected)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(self.ll_pca.hyperparam_schema("whiten"), init)
            self.assertRaises(Exception, self.ll_pca.customize_schema, whitenX=42)

    def test_override_enum_param(self):
        with EnableSchemaValidation():
            init = self.ll_pca.hyperparam_schema("svd_solver")
            expected = {"default": "full", "enum": ["auto", "full"]}
            custom = self.ll_pca.customize_schema(
                svd_solver=schemas.Enum(default="full", values=["auto", "full"])
            )
            self.assertEqual(custom.hyperparam_schema("svd_solver"), expected)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(self.ll_pca.hyperparam_schema("svd_solver"), init)

    def test_override_float_param(self):
        init = self.ll_pca.hyperparam_schema("tol")
        expected = {
            "default": 0.1,
            "type": "number",
            "minimum": -10,
            "maximum": 10,
            "exclusiveMaximum": True,
            "exclusiveMinimum": False,
        }
        custom = self.ll_pca.customize_schema(
            tol=schemas.Float(
                default=0.1,
                minimum=-10,
                maximum=10,
                exclusiveMaximum=True,
                exclusiveMinimum=False,
            )
        )
        self.assertEqual(custom.hyperparam_schema("tol"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.ll_pca.hyperparam_schema("tol"), init)

    def test_override_int_param(self):
        init = self.ll_pca.hyperparam_schema("iterated_power")
        expected = {
            "default": 1,
            "type": "integer",
            "minimum": -10,
            "maximum": 10,
            "exclusiveMaximum": True,
            "exclusiveMinimum": False,
        }
        custom = self.ll_pca.customize_schema(
            iterated_power=schemas.Int(
                default=1,
                minimum=-10,
                maximum=10,
                exclusiveMaximum=True,
                exclusiveMinimum=False,
            )
        )
        self.assertEqual(custom.hyperparam_schema("iterated_power"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.ll_pca.hyperparam_schema("iterated_power"), init)

    def test_override_null_param(self):
        init = self.ll_pca.hyperparam_schema("n_components")
        expected = {"enum": [None]}
        custom = self.ll_pca.customize_schema(n_components=schemas.Null())
        self.assertEqual(custom.hyperparam_schema("n_components"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.ll_pca.hyperparam_schema("n_components"), init)

    def test_override_json_param(self):
        # Raw JSON fragments can be used in place of the schemas classes.
        init = self.ll_pca.hyperparam_schema("tol")
        expected = {
            "description": "Tol",
            "type": "number",
            "minimum": 0.2,
            "default": 1.0,
        }
        custom = self.ll_pca.customize_schema(tol=schemas.JSON(expected))
        self.assertEqual(custom.hyperparam_schema("tol"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.ll_pca.hyperparam_schema("tol"), init)

    def test_override_any_param(self):
        init = self.ll_pca.hyperparam_schema("iterated_power")
        expected = {
            "anyOf": [{"type": "integer"}, {"enum": ["auto", "full"]}],
            "default": "auto",
        }
        custom = self.ll_pca.customize_schema(
            iterated_power=schemas.AnyOf(
                [schemas.Int(), schemas.Enum(["auto", "full"])], default="auto"
            )
        )
        self.assertEqual(custom.hyperparam_schema("iterated_power"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.ll_pca.hyperparam_schema("iterated_power"), init)

    def test_override_array_param(self):
        init = self.sk_pca.hyperparam_schema("copy")
        expected = {
            "type": "array",
            "minItems": 1,
            "maxItems": 20,
            "items": {"type": "integer"},
        }
        custom = self.sk_pca.customize_schema(
            copy=schemas.Array(minItems=1, maxItems=20, items=schemas.Int())
        )
        self.assertEqual(custom.hyperparam_schema("copy"), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.sk_pca.hyperparam_schema("copy"), init)

    def test_override_object_param(self):
        init = self.sk_pca.get_schema("input_fit")
        expected = {
            "type": "object",
            "required": ["X"],
            "additionalProperties": False,
            "properties": {"X": {"type": "array", "items": {"type": "number"}}},
        }
        custom = self.sk_pca.customize_schema(
            input_fit=schemas.Object(
                required=["X"],
                additionalProperties=False,
                X=schemas.Array(schemas.Float()),
            )
        )
        self.assertEqual(custom.get_schema("input_fit"), expected)
        lale.type_checking.validate_is_schema(custom.get_schema("input_fit"))
        self.assertEqual(self.sk_pca.get_schema("input_fit"), init)

    def test_add_constraint(self):
        # A cross-hyperparameter constraint is appended as a new allOf
        # entry after the main hyperparameter schema.
        init_expected = self.sk_pca.hyperparam_schema()
        expected = {
            "allOf": [
                init_expected["allOf"][0],
                {
                    "anyOf": [
                        {
                            "type": "object",
                            "properties": {
                                "n_components": {
                                    "not": {"enum": ["mle"]},
                                }
                            },
                        },
                        {
                            "type": "object",
                            "properties": {
                                "svd_solver": {"enum": ["full", "auto"]},
                            },
                        },
                    ]
                },
            ]
        }
        custom = self.sk_pca.customize_schema(
            constraint=schemas.AnyOf(
                [
                    schemas.Object(n_components=schemas.Not(schemas.Enum(["mle"]))),
                    schemas.Object(svd_solver=schemas.Enum(["full", "auto"])),
                ]
            )
        )
        self.assertEqual(custom.hyperparam_schema(), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.sk_pca.hyperparam_schema(), init_expected)

    def test_add_multiple_constraints(self):
        # Constraints passed as a list become successive allOf entries.
        init_expected = self.sk_pca.hyperparam_schema()
        expected = {
            "allOf": [
                init_expected["allOf"][0],
                {
                    "anyOf": [
                        {
                            "type": "object",
                            "properties": {
                                "n_components": {
                                    "not": {"enum": ["mle"]},
                                }
                            },
                        },
                        {
                            "type": "object",
                            "properties": {
                                "svd_solver": {"enum": ["full", "auto"]},
                            },
                        },
                    ]
                },
                {
                    "anyOf": [
                        {
                            "type": "object",
                            "properties": {"copy": {"enum": [False]}},
                        },
                        {
                            "type": "object",
                            "properties": {
                                "whiten": {"enum": [False]},
                            },
                        },
                    ]
                },
            ]
        }
        custom = self.sk_pca.customize_schema(
            constraint=[
                schemas.AnyOf(
                    [
                        schemas.Object(n_components=schemas.Not(schemas.Enum(["mle"]))),
                        schemas.Object(svd_solver=schemas.Enum(["full", "auto"])),
                    ]
                ),
                schemas.AnyOf(
                    [
                        schemas.Object(copy=schemas.Enum([False])),
                        schemas.Object(whiten=schemas.Enum([False])),
                    ]
                ),
            ]
        )
        self.assertEqual(custom.hyperparam_schema(), expected)
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(self.sk_pca.hyperparam_schema(), init_expected)

    def test_override_relevant(self):
        init = self.ll_pca.hyperparam_schema()["allOf"][0]["relevantToOptimizer"]
        expected = ["svd_solver"]
        custom = self.ll_pca.customize_schema(relevantToOptimizer=["svd_solver"])
        self.assertEqual(
            custom.hyperparam_schema()["allOf"][0]["relevantToOptimizer"], expected
        )
        lale.type_checking.validate_is_schema(custom._schemas)
        self.assertEqual(
            self.ll_pca.hyperparam_schema()["allOf"][0]["relevantToOptimizer"], init
        )
        self.assertRaises(
            Exception, self.sk_pca.customize_schema, relevantToOptimizer={}
        )

    def test_override_tags(self):
        with EnableSchemaValidation():
            init = self.ll_pca._schemas["tags"]
            tags = {
                "pre": ["~categoricals"],
                "op": ["estimator", "classifier", "interpretable"],
                "post": ["probabilities"],
            }
            custom = self.ll_pca.customize_schema(tags=tags)
            self.assertEqual(custom._schemas["tags"], tags)
            lale.type_checking.validate_is_schema(custom._schemas)
            self.assertEqual(self.ll_pca._schemas["tags"], init)
            self.assertRaises(Exception, self.sk_pca.customize_schema, tags=42)

    def test_wrap_imported_operators(self):
        # wrap_imported_operators mutates this module's globals, so save
        # and restore them around the call.
        old_globals = {**globals()}
        try:
            from lale.lib.autogen import Lars
            from lale.lib.lightgbm import LGBMClassifier
            from lale.lib.xgboost import XGBClassifier

            lale.wrap_imported_operators(
                exclude_classes=["sk_PCA"],
                wrapper_modules=["test.mock_custom_operators"],
            )
            from lale.operators import PlannedIndividualOp

            # sk_PCA was excluded, so it must still be the raw class.
            op_obj = sk_PCA()
            self.assertIsInstance(op_obj, sk_PCA)
            self.assertEqual(alt_xgb._schemas, XGBClassifier._schemas)  # type: ignore
            self.assertEqual(baz._schemas, LGBMClassifier._schemas)  # type: ignore
            self.assertEqual(sk_lars._schemas, Lars._schemas)
            self.assertIsInstance(CustomOrigOperator, PlannedIndividualOp)
        finally:
            for sym, obj in old_globals.items():
                globals()[sym] = obj
class TestConstraintDropping(unittest.TestCase):
    '''Marking constraints forOptimizer=False should shrink the space.'''

    def test_constraint_dropping(self):
        from lale.lib.sklearn import LogisticRegression
        from lale.operators import make_operator
        from lale.search.schema2search_space import op_to_search_space

        base_schemas = LogisticRegression._schemas
        # Flag every hyperparameter constraint (every allOf entry after
        # the first) as irrelevant to the optimizer.
        relaxed_all_of = []
        for index, subschema in enumerate(
            base_schemas["properties"]["hyperparams"]["allOf"]
        ):
            if index == 0:
                relaxed_all_of.append(subschema)
            else:
                relaxed_all_of.append({**subschema, "forOptimizer": False})
        relaxed_schemas = {
            **base_schemas,
            "properties": {
                **base_schemas["properties"],
                "hyperparams": {"allOf": relaxed_all_of},
            },
        }
        full_space = op_to_search_space(LogisticRegression)
        relaxed_op = make_operator(LogisticRegression._impl_class(), relaxed_schemas)
        relaxed_space = op_to_search_space(relaxed_op)
        # dropping constraints makes the search space smaller
        self.assertGreater(len(str(full_space)), len(str(relaxed_space)))
class TestConstraintMerging(unittest.TestCase):
    '''Schema bounds and *ForOptimizer bounds must merge into the
    tightest interval when the search space is built.'''

    @staticmethod
    def _single_number_dimension(op):
        # Build op's search space and return its sole numeric dimension.
        from lale.search.schema2search_space import op_to_search_space
        from lale.search.search_space import SearchSpaceNumber, SearchSpaceObject

        space = op_to_search_space(op)
        assert isinstance(space, SearchSpaceObject)
        dimension = list(space.choices)[0][0]
        assert isinstance(dimension, SearchSpaceNumber)
        return dimension

    def test_override_float_param1(self):
        from lale.lib.sklearn import PCA

        customized = PCA.customize_schema(
            relevantToOptimizer=["tol"],
            tol=schemas.Float(
                minimum=0.25,
                minimumForOptimizer=0,
                maximum=0.5,
                maximumForOptimizer=1.0,
                exclusiveMaximum=True,
                exclusiveMinimum=False,
            ),
        )
        dimension = self._single_number_dimension(customized)
        # Schema bounds are tighter, so they win; exclusivity carries over.
        self.assertEqual(dimension.minimum, 0.25)
        self.assertEqual(dimension.maximum, 0.5)
        self.assertTrue(dimension.exclusiveMaximum)
        self.assertFalse(dimension.exclusiveMinimum)

    def test_override_float_param2(self):
        from lale.lib.sklearn import PCA

        customized = PCA.customize_schema(
            relevantToOptimizer=["tol"],
            tol=schemas.Float(
                minimum=0,
                minimumForOptimizer=0.25,
                maximum=1.5,
                maximumForOptimizer=1.0,
                exclusiveMaximumForOptimizer=False,
                exclusiveMinimumForOptimizer=True,
            ),
        )
        dimension = self._single_number_dimension(customized)
        # Optimizer bounds are tighter here, so they win instead.
        self.assertEqual(dimension.minimum, 0.25)
        self.assertEqual(dimension.maximum, 1.0)
        self.assertFalse(dimension.exclusiveMaximum)
        self.assertTrue(dimension.exclusiveMinimum)

    def test_override_int_param1(self):
        from lale.lib.sklearn import PCA

        customized = PCA.customize_schema(
            relevantToOptimizer=["iterated_power"],
            iterated_power=schemas.Float(
                minimum=1,
                minimumForOptimizer=0,
                maximum=5,
                maximumForOptimizer=6,
                exclusiveMaximum=True,
                exclusiveMinimum=False,
            ),
        )
        dimension = self._single_number_dimension(customized)
        self.assertEqual(dimension.minimum, 1)
        self.assertEqual(dimension.maximum, 5)
        self.assertTrue(dimension.exclusiveMaximum)
        self.assertFalse(dimension.exclusiveMinimum)

    def test_override_int_param2(self):
        from lale.lib.sklearn import PCA

        customized = PCA.customize_schema(
            relevantToOptimizer=["iterated_power"],
            iterated_power=schemas.Float(
                minimum=0,
                minimumForOptimizer=1,
                maximum=6,
                maximumForOptimizer=5,
                exclusiveMaximumForOptimizer=False,
                exclusiveMinimumForOptimizer=True,
            ),
        )
        dimension = self._single_number_dimension(customized)
        self.assertEqual(dimension.minimum, 1)
        self.assertEqual(dimension.maximum, 5)
        self.assertFalse(dimension.exclusiveMaximum)
        self.assertTrue(dimension.exclusiveMinimum)
class TestWrapUnknownOps(unittest.TestCase):
    """Wrapping a plain (non-lale) estimator class or instance as a lale operator."""
    # Hyperparameter schema that make_operator is expected to infer for
    # UnknownOp: only constructor defaults, nothing marked relevantToOptimizer.
    expected_schema = {
        "allOf": [
            {
                "type": "object",
                "relevantToOptimizer": [],
                "additionalProperties": False,
                "properties": {
                    "n_neighbors": {"default": 5},
                    "algorithm": {"default": "auto"},
                },
            }
        ]
    }
    def test_wrap_from_class(self):
        """make_operator on a raw class yields a PlannedIndividualOp with the inferred schema."""
        from lale.operators import PlannedIndividualOp, make_operator
        self.assertFalse(isinstance(UnknownOp, PlannedIndividualOp))
        Wrapped = make_operator(UnknownOp)
        self.assertTrue(isinstance(Wrapped, PlannedIndividualOp))
        self.assertEqual(Wrapped.hyperparam_schema(), self.expected_schema)
        # Configuring the wrapped operator records the explicit hyperparams only.
        instance = Wrapped(n_neighbors=3)
        self.assertEqual(instance.hyperparams(), {"n_neighbors": 3})
    def test_wrapped_from_import(self):
        """wrap_imported_operators leaves UnknownOp in this module's globals unwrapped."""
        # Snapshot globals so any replacement done by wrap_imported_operators
        # can be undone, keeping other tests isolated.
        old_globals = {**globals()}
        try:
            from lale.operators import PlannedIndividualOp
            self.assertFalse(isinstance(UnknownOp, PlannedIndividualOp))
            lale.wrap_imported_operators()
            # UnknownOp is asserted to remain a plain class after wrapping.
            self.assertFalse(isinstance(UnknownOp, PlannedIndividualOp))
        finally:
            # Restore every global symbol to its pre-test binding.
            for sym, obj in old_globals.items():
                globals()[sym] = obj
    def test_wrap_from_instance(self):
        """make_operator on an instance yields a TrainableIndividualOp that survives sklearn clone."""
        from sklearn.base import clone
        from lale.operators import TrainableIndividualOp, make_operator
        self.assertFalse(isinstance(UnknownOp, TrainableIndividualOp))
        instance = UnknownOp(n_neighbors=3)
        self.assertFalse(isinstance(instance, TrainableIndividualOp))
        wrapped = make_operator(instance)
        self.assertTrue(isinstance(wrapped, TrainableIndividualOp))
        assert isinstance(
            wrapped, TrainableIndividualOp
        )  # help type checkers that don't know about assertTrue
        self.assertEqual(wrapped.reduced_hyperparams(), {"n_neighbors": 3})
        # Cloning must preserve both the wrapper type and the hyperparams.
        cloned = clone(wrapped)
        self.assertTrue(isinstance(cloned, TrainableIndividualOp))
        self.assertEqual(cloned.reduced_hyperparams(), {"n_neighbors": 3})
class TestFreeze(unittest.TestCase):
    """freeze_trainable / freeze_trained semantics for individual ops and pipelines."""
    def test_individual_op_freeze_trainable(self):
        """Freezing trainable pins all free hyperparams, collapsing the search grid to one point."""
        from lale.lib.sklearn import LogisticRegression
        liquid = LogisticRegression(C=0.1, solver="liblinear")
        self.assertIn("dual", liquid.free_hyperparams())
        self.assertFalse(liquid.is_frozen_trainable())
        liquid_grid = get_grid_search_parameter_grids(liquid)
        self.assertTrue(len(liquid_grid) > 1, f"grid size {len(liquid_grid)}")
        frozen = liquid.freeze_trainable()
        # After freezing, no hyperparameter is left for an optimizer to tune.
        self.assertEqual(len(frozen.free_hyperparams()), 0)
        self.assertTrue(frozen.is_frozen_trainable())
        frozen_grid = get_grid_search_parameter_grids(frozen)
        self.assertEqual(len(frozen_grid), 1)
    def test_pipeline_freeze_trainable(self):
        """Freezing a pipeline freezes all steps, collapsing its grid to one point."""
        from lale.lib.sklearn import PCA, LogisticRegression
        liquid = PCA() >> LogisticRegression()
        self.assertFalse(liquid.is_frozen_trainable())
        liquid_grid = get_grid_search_parameter_grids(liquid)
        self.assertTrue(len(liquid_grid) > 1, f"grid size {len(liquid_grid)}")
        frozen = liquid.freeze_trainable()
        self.assertTrue(frozen.is_frozen_trainable())
        frozen_grid = get_grid_search_parameter_grids(frozen)
        self.assertEqual(len(frozen_grid), 1)
    def test_individual_op_freeze_trained(self):
        """A trained-frozen op ignores refits and keeps its learned coefficients."""
        from lale.lib.sklearn import KNeighborsClassifier
        with EnableSchemaValidation():
            # n_neighbors=1 memorizes training labels, making predictions exact.
            trainable = KNeighborsClassifier(n_neighbors=1)
            X = np.array([[0.0], [1.0], [2.0]])
            y_old = np.array([0.0, 0.0, 1.0])
            y_new = np.array([1.0, 0.0, 0.0])
            liquid_old = trainable.fit(X, y_old)
            self.assertEqual(list(liquid_old.predict(X)), list(y_old))
            # An unfrozen trained op can be refit on new labels.
            liquid_new = liquid_old.fit(X, y_new)
            self.assertEqual(list(liquid_new.predict(X)), list(y_new))
            frozen_old = trainable.fit(X, y_old).freeze_trained()
            # freeze_trained returns a new frozen copy; the original stays liquid.
            self.assertFalse(liquid_old.is_frozen_trained())
            self.assertTrue(frozen_old.is_frozen_trained())
            self.assertEqual(list(frozen_old.predict(X)), list(y_old))
            # Refitting a frozen-trained op is a no-op: old predictions persist.
            frozen_new = frozen_old.fit(X, y_new)
            self.assertEqual(list(frozen_new.predict(X)), list(y_old))
    def test_pipeline_freeze_trained(self):
        """freeze_trained on a trained pipeline returns a frozen copy, leaving the original liquid."""
        from lale.lib.sklearn import LogisticRegression, MinMaxScaler
        trainable = MinMaxScaler() >> LogisticRegression()
        X = [[0.0], [1.0], [2.0]]
        y = [0.0, 0.0, 1.0]
        liquid = trainable.fit(X, y)
        frozen = liquid.freeze_trained()
        self.assertFalse(liquid.is_frozen_trained())
        self.assertTrue(frozen.is_frozen_trained())
    def test_trained_individual_op_freeze_trainable(self):
        """freeze_trainable on a trained op keeps it trained while pinning hyperparams."""
        from lale.lib.sklearn import KNeighborsClassifier
        from lale.operators import TrainedIndividualOp
        with EnableSchemaValidation():
            trainable = KNeighborsClassifier(n_neighbors=1)
            X = np.array([[0.0], [1.0], [2.0]])
            y_old = np.array([0.0, 0.0, 1.0])
            liquid = trainable.fit(X, y_old)
            self.assertIsInstance(liquid, TrainedIndividualOp)
            self.assertFalse(liquid.is_frozen_trainable())
            self.assertIn("algorithm", liquid.free_hyperparams())
            frozen = liquid.freeze_trainable()
            # The result is still a TrainedIndividualOp: hyperparams are frozen,
            # but the learned model itself is not (is_frozen_trained is False).
            self.assertIsInstance(frozen, TrainedIndividualOp)
            self.assertTrue(frozen.is_frozen_trainable())
            self.assertFalse(frozen.is_frozen_trained())
            self.assertEqual(len(frozen.free_hyperparams()), 0)
    def test_trained_pipeline_freeze_trainable(self):
        """freeze_trainable on a trained pipeline preserves the TrainedPipeline type."""
        from lale.lib.sklearn import LogisticRegression, MinMaxScaler
        from lale.operators import TrainedPipeline
        trainable = MinMaxScaler() >> LogisticRegression()
        X = [[0.0], [1.0], [2.0]]
        y = [0.0, 0.0, 1.0]
        liquid = trainable.fit(X, y)
        self.assertIsInstance(liquid, TrainedPipeline)
        self.assertFalse(liquid.is_frozen_trainable())
        frozen = liquid.freeze_trainable()
        # Freezing returns a new pipeline; the original remains unfrozen.
        self.assertFalse(liquid.is_frozen_trainable())
        self.assertTrue(frozen.is_frozen_trainable())
        self.assertIsInstance(frozen, TrainedPipeline)
| 27,885 | 40.435364 | 88 | py |
lale | lale-master/test/test_lale_lib_versions.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import EnableSchemaValidation
import jsonschema
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
import xgboost
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import (
SVC,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
FeatureAgglomeration,
FunctionTransformer,
GradientBoostingClassifier,
GradientBoostingRegressor,
LinearRegression,
LogisticRegression,
MLPClassifier,
PolynomialFeatures,
RandomForestClassifier,
RandomForestRegressor,
Ridge,
VotingClassifier,
)
from lale.lib.xgboost import XGBClassifier, XGBRegressor
assert sklearn.__version__ == "0.20.3", "This test is for scikit-learn 0.20.3."
assert xgboost.__version__ == "0.90", "This test is for XGBoost 0.90."
class TestDecisionTreeClassifier(unittest.TestCase):
    """Pin the DecisionTreeClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = DecisionTreeClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version,
        # so schema validation must reject it at construction time.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            DecisionTreeClassifier(ccp_alpha=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = DecisionTreeClassifier.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestDecisionTreeRegressor(unittest.TestCase):
    """Pin the DecisionTreeRegressor wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = DecisionTreeRegressor().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version,
        # so schema validation must reject it at construction time.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            DecisionTreeRegressor(ccp_alpha=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = DecisionTreeRegressor.auto_configure(
            self.train_X,
            self.train_y,
            optimizer=Hyperopt,
            scoring="r2",
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
class TestExtraTreesClassifier(unittest.TestCase):
    """Pin the ExtraTreesClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = ExtraTreesClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_n_estimators(self):
        # The default pinned by the 0.20.x schema is 10 trees.
        self.assertEqual(ExtraTreesClassifier.get_defaults()["n_estimators"], 10)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            ExtraTreesClassifier(ccp_alpha=0.01)

    def test_max_samples(self):
        # max_samples does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'max_samples' was unexpected"
        ):
            ExtraTreesClassifier(max_samples=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = ExtraTreesClassifier.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestExtraTreesRegressor(unittest.TestCase):
    """Pin the ExtraTreesRegressor wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = ExtraTreesRegressor().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_n_estimators(self):
        # The default pinned by the 0.20.x schema is 10 trees.
        self.assertEqual(ExtraTreesRegressor.get_defaults()["n_estimators"], 10)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            ExtraTreesRegressor(ccp_alpha=0.01)

    def test_max_samples(self):
        # max_samples does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'max_samples' was unexpected"
        ):
            ExtraTreesRegressor(max_samples=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = ExtraTreesRegressor.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
class TestFeatureAgglomeration(unittest.TestCase):
    """Pin the FeatureAgglomeration wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict inside a pipeline.
        model = (FeatureAgglomeration() >> LogisticRegression()).fit(
            self.train_X, self.train_y
        )
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # The pipeline's combined search space must be consumable by Hyperopt.
        planned = FeatureAgglomeration >> LogisticRegression
        best = planned.auto_configure(
            self.train_X,
            self.train_y,
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
            verbose=True,
        )
        best.predict(self.test_X)
class TestFunctionTransformer(unittest.TestCase):
    """Pin the FunctionTransformer wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # A log1p transform followed by logistic regression must train cleanly.
        model = (FunctionTransformer(func=np.log1p) >> LogisticRegression()).fit(
            self.train_X, self.train_y
        )
        model.predict(self.test_X)

    def test_pass_y(self):
        # pass_y still exists in 0.20.x, so setting it must be accepted.
        pipeline = (
            FunctionTransformer(func=np.log1p, pass_y=False) >> LogisticRegression()
        )
        model = pipeline.fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_validate(self):
        # In 0.20.x the validate flag defaults to True.
        self.assertEqual(FunctionTransformer.get_defaults()["validate"], True)

    def test_with_hyperopt(self):
        # The pipeline's combined search space must be consumable by Hyperopt.
        planned = FunctionTransformer(func=np.log1p) >> LogisticRegression
        best = planned.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestGradientBoostingClassifier(unittest.TestCase):
    """Pin the GradientBoostingClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = GradientBoostingClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            GradientBoostingClassifier(ccp_alpha=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = GradientBoostingClassifier.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestGradientBoostingRegressor(unittest.TestCase):
    """Pin the GradientBoostingRegressor wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = GradientBoostingRegressor().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            GradientBoostingRegressor(ccp_alpha=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = GradientBoostingRegressor.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
class TestLinearRegression(unittest.TestCase):
    """Pin the LinearRegression wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = LinearRegression().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = LinearRegression.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
class TestLogisticRegression(unittest.TestCase):
    """Pin the LogisticRegression wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = LogisticRegression().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_multi_class(self):
        # The 0.20.x schema pins the multi_class default to "ovr".
        self.assertEqual(LogisticRegression.get_defaults()["multi_class"], "ovr")

    def test_solver(self):
        # The 0.20.x schema pins the solver default to "liblinear".
        self.assertEqual(LogisticRegression.get_defaults()["solver"], "liblinear")

    def test_l1_ratio(self):
        # l1_ratio does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'l1_ratio' was unexpected"
        ):
            LogisticRegression(l1_ratio=0.2)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = LogisticRegression.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestMLPClassifier(unittest.TestCase):
    """Pin the MLPClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = MLPClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_max_fun(self):
        # max_fun does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'max_fun' was unexpected"
        ):
            MLPClassifier(max_fun=1000)

    def test_with_hyperopt(self):
        # Cap max_iter so the Hyperopt trials finish quickly.
        best = MLPClassifier(max_iter=20).auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestPolynomialFeatures(unittest.TestCase):
    """Pin the PolynomialFeatures wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict inside a pipeline.
        model = (PolynomialFeatures() >> LogisticRegression()).fit(
            self.train_X, self.train_y
        )
        model.predict(self.test_X)

    def test_order(self):
        # The order parameter does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'order' was unexpected"
        ):
            _ = PolynomialFeatures(order="F") >> LogisticRegression()

    def test_with_hyperopt(self):
        # The pipeline's combined search space must be consumable by Hyperopt.
        planned = PolynomialFeatures >> LogisticRegression
        best = planned.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestRandomForestClassifier(unittest.TestCase):
    """Pin the RandomForestClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = RandomForestClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_n_estimators(self):
        # The default pinned by the 0.20.x schema is 10 trees.
        self.assertEqual(RandomForestClassifier.get_defaults()["n_estimators"], 10)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
        ):
            RandomForestClassifier(ccp_alpha=0.01)

    def test_max_samples(self):
        # max_samples does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'max_samples' was unexpected"
        ):
            RandomForestClassifier(max_samples=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = RandomForestClassifier.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestRandomForestRegressor(unittest.TestCase):
    """Pin the RandomForestRegressor wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        X, y = sklearn.datasets.load_diabetes(return_X_y=True)
        (
            self.train_X,
            self.test_X,
            self.train_y,
            self.test_y,
        ) = sklearn.model_selection.train_test_split(X, y)

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        trainable = RandomForestRegressor()
        trained = trainable.fit(self.train_X, self.train_y)
        _ = trained.predict(self.test_X)

    def test_n_estimators(self):
        # The default pinned by the 0.20.x schema is 10 trees.
        default = RandomForestRegressor.get_defaults()["n_estimators"]
        self.assertEqual(default, 10)

    def test_ccp_alpha(self):
        # ccp_alpha does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation():
            with self.assertRaisesRegex(
                jsonschema.ValidationError, "argument 'ccp_alpha' was unexpected"
            ):
                _ = RandomForestRegressor(ccp_alpha=0.01)

    def test_max_samples(self):
        # Fix: wrap in EnableSchemaValidation, matching every sibling
        # validation test in this file (e.g. TestRandomForestClassifier
        # .test_max_samples); without it the schema check that raises the
        # expected ValidationError is not guaranteed to run.
        with EnableSchemaValidation():
            with self.assertRaisesRegex(
                jsonschema.ValidationError, "argument 'max_samples' was unexpected"
            ):
                _ = RandomForestRegressor(max_samples=0.01)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        planned = RandomForestRegressor
        trained = planned.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        _ = trained.predict(self.test_X)
class TestRidge(unittest.TestCase):
    """Pin the Ridge wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = Ridge().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = Ridge.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
class TestSVC(unittest.TestCase):
    """Pin the SVC wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = SVC().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_gamma(self):
        # The 0.20.x schema pins the gamma default to "auto_deprecated".
        self.assertEqual(SVC.get_defaults()["gamma"], "auto_deprecated")

    def test_break_ties(self):
        # break_ties does not exist in the pinned scikit-learn version.
        with EnableSchemaValidation(), self.assertRaisesRegex(
            jsonschema.ValidationError, "argument 'break_ties' was unexpected"
        ):
            SVC(break_ties=True)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = SVC.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestVotingClassifier(unittest.TestCase):
    """Pin the VotingClassifier wrapper to scikit-learn 0.20.3 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # A two-member ensemble must fit and predict without error.
        voter = VotingClassifier(
            estimators=[("lr", LogisticRegression()), ("dt", DecisionTreeClassifier())]
        )
        model = voter.fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_estimators(self):
        # In 0.20.x a None entry in estimators is accepted (dropped member).
        voter = VotingClassifier(
            estimators=[
                ("lr", LogisticRegression()),
                ("dt", DecisionTreeClassifier()),
                ("na", None),
            ]
        )
        model = voter.fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # Planned (unconfigured) members let Hyperopt search their spaces too.
        planned = VotingClassifier(
            estimators=[("lr", LogisticRegression), ("dt", DecisionTreeClassifier)]
        )
        best = planned.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestXGBClassifier(unittest.TestCase):
    """Pin the XGBClassifier wrapper to XGBoost 0.90 behavior."""

    def setUp(self):
        # Fresh random train/test split of iris for every test method.
        features, labels = sklearn.datasets.load_iris(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = XGBClassifier().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = XGBClassifier.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        best.predict(self.test_X)
class TestXGBRegressor(unittest.TestCase):
    """Pin the XGBRegressor wrapper to XGBoost 0.90 behavior."""

    def setUp(self):
        # Fresh random train/test split of diabetes for every test method.
        features, labels = sklearn.datasets.load_diabetes(return_X_y=True)
        split = sklearn.model_selection.train_test_split(features, labels)
        self.train_X, self.test_X, self.train_y, self.test_y = split

    def test_with_defaults(self):
        # Default hyperparameters must fit and predict without error.
        model = XGBRegressor().fit(self.train_X, self.train_y)
        model.predict(self.test_X)

    def test_with_hyperopt(self):
        # The operator's search space must be consumable by Hyperopt.
        best = XGBRegressor.auto_configure(
            self.train_X,
            self.train_y,
            scoring="r2",
            optimizer=Hyperopt,
            cv=3,
            max_evals=3,
        )
        best.predict(self.test_X)
| 22,044 | 30.856936 | 87 | py |
lale | lale-master/test/test_pipeline.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import warnings
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier as SkMLPClassifier
from sklearn.pipeline import Pipeline as SkPipeline
from sklearn.preprocessing import MinMaxScaler as SkMinMaxScaler
from lale.lib.lale import Batching, Hyperopt, NoOp
from lale.lib.sklearn import PCA, LogisticRegression, Nystroem
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
class TestBatching(unittest.TestCase):
    """Batching operator: batched fit must match manual partial_fit over the same slices."""
    def setUp(self):
        # Random train/test split of iris shared by all test methods.
        data = load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_fit(self):
        """Batching(scaler >> MLP) equals manual partial_fit on the same two batches."""
        import lale.lib.sklearn as lale_sklearn
        warnings.filterwarnings(action="ignore")
        # batch_size=56 splits the ~112-row training set into two batches.
        pipeline = NoOp() >> Batching(
            operator=lale_sklearn.MinMaxScaler()
            >> lale_sklearn.MLPClassifier(random_state=42),
            batch_size=56,
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        lale_accuracy = accuracy_score(self.y_test, predictions)
        # Reference computation: replay the exact same batch slices by hand
        # with plain scikit-learn partial_fit calls.
        prep = SkMinMaxScaler()
        trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
        trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
        X_transformed = trained_prep.transform(self.X_train)
        clf = SkMLPClassifier(random_state=42)
        import numpy as np
        trained_clf = clf.partial_fit(
            X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
        )
        trained_clf.partial_fit(
            X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
        )
        predictions = trained_clf.predict(trained_prep.transform(self.X_test))
        sklearn_accuracy = accuracy_score(self.y_test, predictions)
        self.assertEqual(lale_accuracy, sklearn_accuracy)
    def test_fit1(self):
        """Same as test_fit but Batching is the whole pipeline (no NoOp prefix)."""
        warnings.filterwarnings(action="ignore")
        from lale.lib.sklearn import MinMaxScaler, MLPClassifier
        pipeline = Batching(
            operator=MinMaxScaler() >> MLPClassifier(random_state=42), batch_size=56
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        lale_accuracy = accuracy_score(self.y_test, predictions)
        # Reference computation with explicit two-batch partial_fit.
        prep = MinMaxScaler()
        trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
        trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
        X_transformed = trained_prep.transform(self.X_train)
        clf = SkMLPClassifier(random_state=42)
        import numpy as np
        trained_clf = clf.partial_fit(
            X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
        )
        trained_clf.partial_fit(
            X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
        )
        predictions = trained_clf.predict(trained_prep.transform(self.X_test))
        sklearn_accuracy = accuracy_score(self.y_test, predictions)
        self.assertEqual(lale_accuracy, sklearn_accuracy)
    def test_fit2(self):
        """Transformer-only batched pipeline; shuffle=False keeps slices deterministic."""
        warnings.filterwarnings(action="ignore")
        from lale.lib.sklearn import MinMaxScaler
        # batch_size=112 covers the whole training set in a single batch.
        pipeline = Batching(
            operator=MinMaxScaler() >> MinMaxScaler(), batch_size=112, shuffle=False
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        lale_transforms = trained.transform(self.X_test)
        # Reference computation: single partial_fit for each scaler stage.
        prep = SkMinMaxScaler()
        trained_prep = prep.partial_fit(self.X_train, self.y_train)
        X_transformed = trained_prep.transform(self.X_train)
        clf = MinMaxScaler()
        trained_clf = clf.partial_fit(X_transformed, self.y_train)
        sklearn_transforms = trained_clf.transform(trained_prep.transform(self.X_test))
        # Element-wise comparison of the first few rows/columns.
        for i in range(5):
            for j in range(2):
                self.assertAlmostEqual(lale_transforms[i, j], sklearn_transforms[i, j])
    def test_fit3(self):
        """Batching nested after another operator (PCA) still fits and predicts."""
        from lale.lib.sklearn import MinMaxScaler, MLPClassifier
        pipeline = PCA() >> Batching(
            operator=MinMaxScaler() >> MLPClassifier(random_state=42), batch_size=10
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)
    def test_no_partial_fit(self):
        """Batching must still fit an operator that lacks partial_fit (LogisticRegression)."""
        pipeline = Batching(operator=NoOp() >> LogisticRegression())
        _ = pipeline.fit(self.X_train, self.y_train)
    def test_fit4(self):
        """Same as test_fit1 but with inmemory=True (no intermediate on-disk storage)."""
        warnings.filterwarnings(action="ignore")
        from lale.lib.sklearn import MinMaxScaler, MLPClassifier
        pipeline = Batching(
            operator=MinMaxScaler() >> MLPClassifier(random_state=42),
            batch_size=56,
            inmemory=True,
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        lale_accuracy = accuracy_score(self.y_test, predictions)
        # Reference computation with explicit two-batch partial_fit.
        prep = SkMinMaxScaler()
        trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
        trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
        X_transformed = trained_prep.transform(self.X_train)
        clf = SkMLPClassifier(random_state=42)
        import numpy as np
        trained_clf = clf.partial_fit(
            X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
        )
        trained_clf.partial_fit(
            X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
        )
        predictions = trained_clf.predict(trained_prep.transform(self.X_test))
        sklearn_accuracy = accuracy_score(self.y_test, predictions)
        self.assertEqual(lale_accuracy, sklearn_accuracy)
    # TODO: Nesting doesn't work yet
    # def test_nested_pipeline(self):
    #     from lale.lib.sklearn import MinMaxScaler, MLPClassifier
    #     pipeline = Batching(operator = MinMaxScaler() >> Batching(operator = NoOp() >> MLPClassifier(random_state=42)), batch_size = 112)
    #     trained = pipeline.fit(self.X_train, self.y_train)
    #     predictions = trained.predict(self.X_test)
    #     lale_accuracy = accuracy_score(self.y_test, predictions)
class TestPipeline(unittest.TestCase):
    """Lale pipelines and generated parameter grids inside sklearn GridSearchCV."""
    def dont_test_with_gridsearchcv2_auto(self):
        """Disabled (dont_ prefix): lale-generated grids give the same accuracy on lale and raw sklearn steps."""
        from sklearn.model_selection import GridSearchCV
        lr = LogisticRegression(random_state=42)
        pca = PCA(random_state=42, svd_solver="arpack")
        trainable = pca >> lr
        # sklearn Pipeline built from the lale-wrapped steps.
        scikit_pipeline = SkPipeline(
            [
                (pca.name(), PCA(random_state=42, svd_solver="arpack")),
                (lr.name(), LogisticRegression(random_state=42)),
            ]
        )
        all_parameters = get_grid_search_parameter_grids(trainable, num_samples=1)
        # otherwise the test takes too long
        parameters = random.sample(all_parameters, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf = GridSearchCV(
                scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
            )
            iris = load_iris()
            clf.fit(iris.data, iris.target)
            predicted = clf.predict(iris.data)
            accuracy_with_lale_operators = accuracy_score(iris.target, predicted)
        # Repeat the identical grid search with raw scikit-learn estimators.
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.linear_model import LogisticRegression as SklearnLR
        scikit_pipeline = SkPipeline(
            [
                (pca.name(), SklearnPCA(random_state=42, svd_solver="arpack")),
                (lr.name(), SklearnLR(random_state=42)),
            ]
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf = GridSearchCV(
                scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
            )
            iris = load_iris()
            clf.fit(iris.data, iris.target)
            predicted = clf.predict(iris.data)
            accuracy_with_scikit_operators = accuracy_score(iris.target, predicted)
        # Lale wrappers must be behaviorally transparent under grid search.
        self.assertEqual(accuracy_with_lale_operators, accuracy_with_scikit_operators)
    def test_with_gridsearchcv3(self):
        """Hand-written parameter grid over lale operators inside sklearn GridSearchCV."""
        from sklearn.model_selection import GridSearchCV
        _ = LogisticRegression()
        scikit_pipeline = SkPipeline(
            [("nystroem", Nystroem()), ("lr", LogisticRegression())]
        )
        parameters = {"lr__solver": ("liblinear", "lbfgs"), "lr__penalty": ["l2"]}
        clf = GridSearchCV(
            scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
        )
        iris = load_iris()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf.fit(iris.data, iris.target)
            _ = clf.predict(iris.data)
    def test_with_gridsearchcv3_auto(self):
        """Auto-generated grids for Nystroem >> LogisticRegression inside sklearn GridSearchCV."""
        from sklearn.model_selection import GridSearchCV
        lr = LogisticRegression()
        scikit_pipeline = SkPipeline(
            [(Nystroem().name(), Nystroem()), (lr.name(), LogisticRegression())]
        )
        all_parameters = get_grid_search_parameter_grids(
            Nystroem() >> lr, num_samples=1
        )
        # otherwise the test takes too long
        parameters = random.sample(all_parameters, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf = GridSearchCV(
                scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
            )
            iris = load_iris()
            clf.fit(iris.data, iris.target)
            _ = clf.predict(iris.data)
    def test_with_gridsearchcv3_auto_wrapped(self):
        """lale.lib.lale.GridSearchCV drives the whole grid generation itself."""
        pipeline = Nystroem() >> LogisticRegression()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            from lale.lib.lale import GridSearchCV
            # lale_num_samples/lale_num_grids keep the search small and fast.
            clf = GridSearchCV(
                estimator=pipeline,
                lale_num_samples=1,
                lale_num_grids=1,
                cv=2,
                scoring=make_scorer(accuracy_score),
            )
            iris = load_iris()
            clf.fit(iris.data, iris.target)
            _ = clf.predict(iris.data)
class TestBatching2(unittest.TestCase):
    """Batching wrapper combined with Hyperopt-based auto-configuration."""
    def setUp(self):
        iris = load_iris()
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            iris.data, iris.target
        )
    def test_batching_with_hyperopt(self):
        from lale.lib.sklearn import MinMaxScaler, SGDClassifier
        batched = Batching(operator=MinMaxScaler() >> SGDClassifier())
        trained = batched.auto_configure(
            self.X_train, self.y_train, optimizer=Hyperopt, max_evals=1
        )
        _ = trained.predict(self.X_test)
class TestImportFromSklearnWithCognito(unittest.TestCase):
    """Round-trip a Cognito-style (autoai_libs) pipeline: exec generated lale
    source, export it to scikit-learn, and re-import it via lale helpers."""
    def test_import_from_sklearn(self):
        # The pipeline text below is auto-generated source; it must stay
        # verbatim because it is exec'd further down.
        pipeline_str = """from lale.lib.autoai_libs import NumpyColumnSelector
from lale.lib.autoai_libs import CompressStrings
from lale.lib.autoai_libs import NumpyReplaceMissingValues
from lale.lib.autoai_libs import NumpyReplaceUnknownValues
from lale.lib.autoai_libs import boolean2float
from lale.lib.autoai_libs import CatImputer
from lale.lib.autoai_libs import CatEncoder
import numpy as np
from lale.lib.autoai_libs import float32_transform
from lale.operators import make_pipeline
from lale.lib.autoai_libs import FloatStr2Float
from lale.lib.autoai_libs import NumImputer
from lale.lib.autoai_libs import OptStandardScaler
from lale.operators import make_union
from lale.lib.autoai_libs import NumpyPermuteArray
from lale.lib.autoai_libs import TA1
import autoai_libs.utils.fc_methods
from lale.lib.autoai_libs import FS1
from xgboost import XGBRegressor
numpy_column_selector_0 = NumpyColumnSelector(columns=[1])
compress_strings = CompressStrings(compress_type='hash', dtypes_list=['int_num'], missing_values_reference_list=['', '-', '?', float('nan')], misslist_list=[[]])
numpy_replace_missing_values_0 = NumpyReplaceMissingValues(filling_values=float('nan'), missing_values=[])
numpy_replace_unknown_values = NumpyReplaceUnknownValues(filling_values=float('nan'), filling_values_list=[float('nan')], known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]], missing_values_reference_list=['', '-', '?', float('nan')])
cat_imputer = CatImputer(missing_values=float('nan'), sklearn_version_family='20', strategy='most_frequent')
cat_encoder = CatEncoder(dtype=np.float64, handle_unknown='error', sklearn_version_family='20')
pipeline_0 = make_pipeline(numpy_column_selector_0, compress_strings, numpy_replace_missing_values_0, numpy_replace_unknown_values, boolean2float(), cat_imputer, cat_encoder, float32_transform())
numpy_column_selector_1 = NumpyColumnSelector(columns=[0])
float_str2_float = FloatStr2Float(dtypes_list=['int_num'], missing_values_reference_list=[])
numpy_replace_missing_values_1 = NumpyReplaceMissingValues(filling_values=float('nan'), missing_values=[])
num_imputer = NumImputer(missing_values=float('nan'), strategy='median')
opt_standard_scaler = OptStandardScaler(num_scaler_copy=None, num_scaler_with_mean=None, num_scaler_with_std=None, use_scaler_flag=False)
pipeline_1 = make_pipeline(numpy_column_selector_1, float_str2_float, numpy_replace_missing_values_1, num_imputer, opt_standard_scaler, float32_transform())
union = make_union(pipeline_0, pipeline_1)
numpy_permute_array = NumpyPermuteArray(axis=0, permutation_indices=[1, 0])
ta1_0 = TA1(fun=np.tan, name='tan', datatypes=['float'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight'], col_dtypes=[np.dtype('float32'), np.dtype('float32')])
fs1_0 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
ta1_1 = TA1(fun=np.square, name='square', datatypes=['numeric'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight', 'tan(age)'], col_dtypes=[np.dtype('float32'), np.dtype('float32'), np.dtype('float32')])
fs1_1 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
ta1_2 = TA1(fun=np.sin, name='sin', datatypes=['float'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight', 'tan(age)', 'square(age)', 'square(tan(age))'], col_dtypes=[np.dtype('float32'), np.dtype('float32'), np.dtype('float32'), np.dtype('float32'), np.dtype('float32')])
fs1_2 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
xgb_regressor = XGBRegressor(missing=float('nan'), n_jobs=4, random_state=33, silent=True, verbosity=0)
pipeline = make_pipeline(union, numpy_permute_array, ta1_0, fs1_0, ta1_1, fs1_1, ta1_2, fs1_2, xgb_regressor)
"""
        globals2 = {}
        # This call to exec should be safe since we are using a fixed (completely specified) string
        exec(pipeline_str, globals2) # nosec
        pipeline2 = globals2["pipeline"]
        sklearn_pipeline = pipeline2.export_to_sklearn_pipeline()
        from lale import helpers
        # Smoke-test: the exported sklearn pipeline must import back into lale.
        _ = helpers.import_from_sklearn_pipeline(sklearn_pipeline)
class TestExportToSklearnForEstimator(unittest.TestCase):
    """Import/export round-trips between scikit-learn and lale pipelines,
    checking predictions and fitted/unfitted state are preserved."""
    def setUp(self):
        data = load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def create_pipeline(self):
        """Build a plain scikit-learn make_pipeline of PCA then LR."""
        from sklearn.decomposition import PCA as SkPCA
        from sklearn.pipeline import make_pipeline
        pipeline = make_pipeline(SkPCA(), LogisticRegression())
        return pipeline
    def test_import_export_trained(self):
        """Importing a fitted sklearn pipeline (and exporting it back) must
        leave its predictions unchanged."""
        import numpy as np
        from lale.helpers import import_from_sklearn_pipeline
        pipeline = self.create_pipeline()
        # was: assertEqual(isinstance(...), True) -- assertIsInstance gives
        # better failure messages for the same check
        self.assertIsInstance(pipeline, SkPipeline)
        pipeline.fit(self.X_train, self.y_train)
        predictions_before = pipeline.predict(self.X_test)
        lale_pipeline = import_from_sklearn_pipeline(pipeline)
        predictions_after = lale_pipeline.predict(self.X_test)
        sklearn_pipeline = lale_pipeline.export_to_sklearn_pipeline()
        predictions_after_1 = sklearn_pipeline.predict(self.X_test)
        # was: assertEqual(np.all(...), True)
        self.assertTrue(np.all(predictions_before == predictions_after))
        self.assertTrue(np.all(predictions_before == predictions_after_1))
    def test_import_export_trainable(self):
        """Importing with fitted=False must yield a trainable that refuses
        to predict before fit, both in lale and after export to sklearn."""
        from sklearn.exceptions import NotFittedError
        from lale.helpers import import_from_sklearn_pipeline
        pipeline = self.create_pipeline()
        self.assertIsInstance(pipeline, SkPipeline)
        pipeline.fit(self.X_train, self.y_train)
        lale_pipeline = import_from_sklearn_pipeline(pipeline, fitted=False)
        with self.assertRaises(ValueError):
            lale_pipeline.predict(self.X_test)
        sklearn_pipeline = lale_pipeline.export_to_sklearn_pipeline()
        with self.assertRaises(NotFittedError):
            sklearn_pipeline.predict(self.X_test)
| 17,752 | 43.717884 | 318 | py |
lale | lale-master/test/test_core_regressors.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from jsonschema.exceptions import ValidationError
import lale.lib.lale
import lale.type_checking
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
ExtraTreesRegressor,
GradientBoostingRegressor,
RandomForestRegressor,
Ridge,
SGDRegressor,
)
class TestRegression(unittest.TestCase):
    """Shared fixture for the per-regressor tests that are attached to this
    class programmatically below (see create_function_test_regressor)."""
    def setUp(self):
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        features, targets = make_regression(
            n_samples=200, n_features=4, n_informative=2, random_state=0, shuffle=False
        )
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            features, targets
        )
def create_function_test_regressor(clf_name):
    """Return a test method exercising the regressor named by the
    fully-qualified ``clf_name`` (e.g. ``"lale.lib.sklearn.Ridge"``).

    The generated test validates the operator's JSON schemas, fits, predicts
    and scores it standalone and inside a NoOp pipeline, round-trips it to
    JSON, and (for Ridge only) runs a one-evaluation Hyperopt search.
    """
    def test_regressor(self):
        X_train, y_train = self.X_train, self.y_train
        import importlib
        # "pkg.mod.Class" -> ("pkg.mod", "Class"); was computed twice via
        # join/split, and a dead `regr = None` initializer preceded the branch
        module_name, class_name = clf_name.rsplit(".", 1)
        module = importlib.import_module(module_name)
        class_ = getattr(module, class_name)
        # Ensemble meta-estimators need at least one base estimator.
        if class_name in ["StackingRegressor", "VotingRegressor"]:
            regr = class_(estimators=[("base", SGDRegressor())])
        else:
            regr = class_()
        # test_schemas_are_schemas
        lale.type_checking.validate_is_schema(regr.input_schema_fit())
        lale.type_checking.validate_is_schema(regr.input_schema_predict())
        lale.type_checking.validate_is_schema(regr.output_schema_predict())
        lale.type_checking.validate_is_schema(regr.hyperparam_schema())
        # test_init_fit_predict
        trained = regr.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)
        # test score
        _ = trained.score(self.X_test, self.y_test)
        # test_predict_on_trainable
        trained = regr.fit(X_train, y_train)
        regr.predict(X_train)
        # test_to_json
        regr.to_json()
        # test_in_a_pipeline
        pipeline = NoOp() >> regr
        trained = pipeline.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)
        # test_with_hyperopt
        if isinstance(regr, Ridge):  # type: ignore
            from lale.lib.lale import Hyperopt
            hyperopt = Hyperopt(estimator=pipeline, max_evals=1)
            trained = hyperopt.fit(self.X_train, self.y_train)
            _ = trained.predict(self.X_test)
    test_regressor.__name__ = f"test_{clf_name.split('.')[-1]}"
    return test_regressor
# Fully-qualified names of the lale regressor wrappers to exercise; one test
# method per entry is attached to TestRegression below.
regressors = [
    "lale.lib.sklearn.BaggingRegressor",
    "lale.lib.sklearn.DummyRegressor",
    "lale.lib.sklearn.RandomForestRegressor",
    "lale.lib.sklearn.DecisionTreeRegressor",
    "lale.lib.sklearn.ExtraTreesRegressor",
    "lale.lib.sklearn.GradientBoostingRegressor",
    "lale.lib.sklearn.LinearRegression",
    "lale.lib.sklearn.Ridge",
    "lale.lib.lightgbm.LGBMRegressor",
    "lale.lib.xgboost.XGBRegressor",
    "lale.lib.sklearn.AdaBoostRegressor",
    "lale.lib.sklearn.SGDRegressor",
    "lale.lib.sklearn.SVR",
    "lale.lib.sklearn.KNeighborsRegressor",
    "lale.lib.sklearn.LinearSVR",
    "lale.lib.sklearn.StackingRegressor",
    "lale.lib.sklearn.VotingRegressor",
]
for regressor_name in regressors:
    test_name = f"test_{regressor_name.split('.')[-1]}"
    setattr(TestRegression, test_name, create_function_test_regressor(regressor_name))
class TestSpuriousSideConstraintsRegression(unittest.TestCase):
    """These hyperparameter combinations are valid and must not be rejected
    by spurious schema side constraints. Originally prompted by a bug; kept
    to guard support for other sklearn versions."""
    def setUp(self):
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        features, labels = make_regression(
            n_features=4, n_informative=2, random_state=0, shuffle=False
        )
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            features, labels
        )
    def test_gradient_boost_regressor(self):
        model = GradientBoostingRegressor(
            alpha=0.9789984970831765,
            criterion="friedman_mse",
            init=None,
            learning_rate=0.1,
            loss="squared_error",
        )
        model.fit(self.X_train, self.y_train)
    def test_sgd_regressor(self):
        model = SGDRegressor(loss="squared_error", epsilon=0.2)
        model.fit(self.X_train, self.y_train)
    def test_sgd_regressor_1(self):
        model = SGDRegressor(learning_rate="optimal", eta0=0.2)
        model.fit(self.X_train, self.y_train)
    def test_sgd_regressor_2(self):
        model = SGDRegressor(early_stopping=False, validation_fraction=0.2)
        model.fit(self.X_train, self.y_train)
    def test_sgd_regressor_3(self):
        model = SGDRegressor(l1_ratio=0.2, penalty="l1")
        model.fit(self.X_train, self.y_train)
class TestFriedmanMSE(unittest.TestCase):
    # This was prompted by a bug, keeping it as it may help with support for other sklearn versions
    def setUp(self):
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        X, y = make_regression(
            n_features=4, n_informative=2, random_state=0, shuffle=False
        )
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_rfr(self):
        """RandomForestRegressor with criterion="friedman_mse" must fit (pre-1.0 sklearn only)."""
        import sklearn
        # NOTE(review): lexicographic comparison of version strings is fragile
        # (e.g. "1.10" vs "1.2"); adequate for the versions guarded here.
        # The body passes min_impurity_split, which newer sklearn removed --
        # presumably why it is skipped on >= 1.0; confirm.
        if sklearn.__version__ < "1.0":
            reg = RandomForestRegressor(
                bootstrap=True,
                criterion="friedman_mse",
                max_depth=4,
                max_features=0.9832410473940374,
                max_leaf_nodes=None,
                min_impurity_decrease=0.0,
                min_impurity_split=None,
                min_samples_leaf=3,
                min_samples_split=2,
                min_weight_fraction_leaf=0.0,
                n_estimators=29,
                n_jobs=4,
                oob_score=False,
                random_state=33,
                verbose=0,
                warm_start=False,
            )
            reg.fit(self.X_train, self.y_train)
    def test_etr(self):
        """ExtraTreesRegressor with criterion="friedman_mse" must fit (pre-1.0 sklearn only)."""
        import sklearn
        if sklearn.__version__ < "1.0":
            reg = ExtraTreesRegressor(
                bootstrap=True,
                criterion="friedman_mse",
                max_depth=4,
                max_features=0.9832410473940374,
                max_leaf_nodes=None,
                min_impurity_decrease=0.0,
                min_impurity_split=None,
                min_samples_leaf=3,
                min_samples_split=2,
                min_weight_fraction_leaf=0.0,
                n_estimators=29,
                n_jobs=4,
                oob_score=False,
                random_state=33,
                verbose=0,
                warm_start=False,
            )
            reg.fit(self.X_train, self.y_train)
class TestRidge(unittest.TestCase):
    # This was prompted by a bug, keeping it as it may help with support for other sklearn versions
    def setUp(self):
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        X, y = make_regression(
            n_features=4, n_informative=2, random_state=0, shuffle=False
        )
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_positive(self):
        """Ridge's positive=True side constraint: valid with lbfgs/auto solvers,
        rejected by schema validation for other solver/positive combinations."""
        import sklearn
        from lale.settings import set_disable_data_schema_validation
        # Re-enable schema validation so the invalid combinations below raise.
        # NOTE(review): this global setting is never restored afterwards --
        # confirm later tests are unaffected.
        set_disable_data_schema_validation(False)
        if sklearn.__version__ > "1.0":
            reg = Ridge(solver="lbfgs", positive=True)
            reg.fit(self.X_train, self.y_train)
            with self.assertRaises(ValidationError):
                reg = Ridge(solver="saga", positive=True)
            reg = Ridge(solver="auto", positive=True)
            reg.fit(self.X_train, self.y_train)
            with self.assertRaises(ValidationError):
                reg = Ridge(solver="lbfgs", positive=False)
            reg = Ridge(solver="auto", positive=False)
            reg.fit(self.X_train, self.y_train)
| 8,557 | 32.826087 | 100 | py |
lale | lale-master/test/test_core_pipeline.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import traceback
import typing
import unittest
import numpy as np
import sklearn.datasets
import sklearn.pipeline
from sklearn.feature_selection import SelectKBest as SkSelectKBest
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
import lale.datasets.openml
import lale.helpers
from lale.helpers import import_from_sklearn_pipeline
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import (
PCA,
AdaBoostClassifier,
GaussianNB,
IsolationForest,
KNeighborsClassifier,
LinearRegression,
LinearSVC,
LogisticRegression,
Nystroem,
OneHotEncoder,
PassiveAggressiveClassifier,
SelectKBest,
SGDClassifier,
StandardScaler,
)
from lale.lib.xgboost import XGBClassifier
from lale.operators import (
TrainableIndividualOp,
TrainablePipeline,
TrainedIndividualOp,
TrainedPipeline,
make_choice,
make_pipeline,
make_union,
)
class TestCreation(unittest.TestCase):
    """Ways of constructing lale pipelines -- the Pipeline class,
    make_pipeline/make_union, the >>, &, | combinators, cloning, and
    accuracy parity with plain scikit-learn."""
    def setUp(self):
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_pipeline_create(self):
        """Construct via lale.operators.Pipeline and fit/predict."""
        from lale.operators import Pipeline
        pipeline = Pipeline(([("pca1", PCA()), ("lr1", LogisticRegression())]))
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)
    def test_pipeline_create_trainable(self):
        """The lale sklearn Pipeline wrapper yields a trainable op whose
        fitted steps are TrainedIndividualOp instances."""
        from lale.lib.sklearn import Pipeline as SkPipeline
        pipeline = SkPipeline(steps=[("pca1", PCA()), ("lr1", LogisticRegression())])
        self.assertIsInstance(pipeline, TrainableIndividualOp)
        trained = pipeline.fit(self.X_train, self.y_train)
        pca_trained, lr_trained = [op for _, op in trained.hyperparams()["steps"]]
        self.assertIsInstance(pca_trained, TrainedIndividualOp)
        self.assertIsInstance(lr_trained, TrainedIndividualOp)
        predictions = trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)
    def test_pipeline_create_trained(self):
        """Wrapping already-trained steps yields a TrainedIndividualOp."""
        from lale.lib.sklearn import Pipeline as SkPipeline
        orig_trainable = PCA() >> LogisticRegression()
        orig_trained = orig_trainable.fit(self.X_train, self.y_train)
        self.assertIsInstance(orig_trained, TrainedPipeline)
        pca_trained, lr_trained = orig_trained.steps_list()
        pre_trained = SkPipeline(steps=[("pca1", pca_trained), ("lr1", lr_trained)])
        self.assertIsInstance(pre_trained, TrainedIndividualOp)
        predictions = pre_trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)
    def test_pipeline_clone(self):
        """sklearn.base.clone of a lale pipeline must reproduce its accuracy."""
        from sklearn.base import clone
        from lale.operators import Pipeline
        pipeline = Pipeline(([("pca1", PCA()), ("lr1", LogisticRegression())]))
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        orig_acc = accuracy_score(self.y_test, predictions)
        cloned_pipeline = clone(pipeline)
        trained = cloned_pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        cloned_acc = accuracy_score(self.y_test, predictions)
        self.assertEqual(orig_acc, cloned_acc)
    def test_make_pipeline(self):
        tfm = PCA(n_components=10)
        clf = LogisticRegression(random_state=42)
        trainable = make_pipeline(tfm, clf)
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)
    def test_compose2(self):
        """The >> combinator is equivalent to make_pipeline of two steps."""
        tfm = PCA(n_components=10)
        clf = LogisticRegression(random_state=42)
        trainable = tfm >> clf
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)
    def test_compose3(self):
        """Chained >> over three steps."""
        nys = Nystroem(n_components=15)
        pca = PCA(n_components=10)
        lr = LogisticRegression(random_state=42)
        trainable = nys >> pca >> lr
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)
    def test_pca_nys_lr(self):
        """make_union of two preprocessors feeding a classifier."""
        nys = Nystroem(n_components=15)
        pca = PCA(n_components=10)
        lr = LogisticRegression(random_state=42)
        trainable = make_union(nys, pca) >> lr
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)
    def test_compose4(self):
        """Build a planned pipeline with | choices; construction only."""
        digits = sklearn.datasets.load_digits()
        _ = digits  # loaded but unused; plan is not fit here
        ohe = OneHotEncoder(handle_unknown=OneHotEncoder.enum.handle_unknown.ignore)
        ohe.get_params()  # smoke-check that get_params works on the wrapper
        no_op = NoOp()
        pca = PCA()
        nys = Nystroem()
        lr = LogisticRegression()
        knn = KNeighborsClassifier()
        step1 = ohe | no_op
        step2 = pca | nys
        step3 = lr | knn
        model_plan = step1 >> step2 >> step3
        _ = model_plan
        # TODO: optimize on this plan and then fit and predict
    def test_compose5(self):
        ohe = OneHotEncoder(handle_unknown=OneHotEncoder.enum.handle_unknown.ignore)
        digits = sklearn.datasets.load_digits()
        lr = LogisticRegression()
        lr_trained = lr.fit(digits.data, digits.target)
        lr_trained.predict(digits.data)
        pipeline1 = ohe >> lr
        pipeline1_trained = pipeline1.fit(digits.data, digits.target)
        pipeline1_trained.predict(digits.data)
    def test_compare_with_sklearn(self):
        """A lale PCA>>LR pipeline must score identically to the equivalent
        scikit-learn pipeline on digits."""
        tfm = PCA()
        clf = LogisticRegression(
            LogisticRegression.enum.solver.saga,
            LogisticRegression.enum.multi_class.auto,
        )
        trainable = make_pipeline(tfm, clf)
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        predicted = trained.predict(digits.data)
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.linear_model import LogisticRegression as SklearnLR
        sklearn_pipeline = sklearn.pipeline.make_pipeline(
            SklearnPCA(), SklearnLR(solver="saga", multi_class="auto")
        )
        sklearn_pipeline.fit(digits.data, digits.target)
        predicted_sklearn = sklearn_pipeline.predict(digits.data)
        lale_score = accuracy_score(digits.target, predicted)
        scikit_score = accuracy_score(digits.target, predicted_sklearn)
        self.assertEqual(lale_score, scikit_score)
class TestImportExport(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
@classmethod
def get_sklearn_params(cls, op):
lale_sklearn_impl = op._impl_instance()
wrapped_model = getattr(lale_sklearn_impl, "_wrapped_model", None)
if wrapped_model is not None:
lale_sklearn_impl = wrapped_model
return lale_sklearn_impl.get_params()
def assert_equal_predictions(self, pipeline1, pipeline2):
trained = pipeline1.fit(self.X_train, self.y_train)
predictions1 = trained.predict(self.X_test)
trained = pipeline2.fit(self.X_train, self.y_train)
predictions2 = trained.predict(self.X_test)
for i, p1 in enumerate(predictions1):
self.assertEqual(p1, predictions2[i])
    def test_import_from_sklearn_pipeline(self):
        """Importing a two-step sklearn Pipeline must preserve each step's
        hyperparameters and the pipeline's predictions."""
        from sklearn.feature_selection import f_regression
        from sklearn.pipeline import Pipeline
        from sklearn.svm import SVC as SklearnSVC
        anova_filter = SkSelectKBest(f_regression, k=3)
        clf = SklearnSVC(kernel="linear")
        sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
            sklearn_step_params = sklearn_pipeline.named_steps[
                pipeline_step
            ].get_params()
            lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
            self.assertEqual(sklearn_step_params, lale_sklearn_params)
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_import_from_sklearn_pipeline1(self):
        """Same as above, but the pipeline is built via sklearn make_pipeline
        (auto-generated step names) with PCA and KNN."""
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
        sklearn_pipeline = sklearn.pipeline.make_pipeline(
            SklearnPCA(n_components=3), SklearnKNN()
        )
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
            sklearn_step_params = sklearn_pipeline.named_steps[
                pipeline_step
            ].get_params()
            lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
            self.assertEqual(sklearn_step_params, lale_sklearn_params)
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_import_from_sklearn_pipeline_feature_union(self):
        """A FeatureUnion must import as parallel branches joined by
        ConcatFeatures; check the resulting dataflow edges and predictions."""
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.kernel_approximation import Nystroem as SklearnNystroem
        from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
        from sklearn.pipeline import FeatureUnion
        union = FeatureUnion(
            [
                ("pca", SklearnPCA(n_components=1)),
                ("nys", SklearnNystroem(n_components=2, random_state=42)),
            ]
        )
        sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        self.assertEqual(len(lale_pipeline.edges()), 3)
        self.assertIsInstance(lale_pipeline.edges()[0][0], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[0][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][0], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][0], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][1], KNeighborsClassifier) # type: ignore
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_import_from_sklearn_pipeline_nested_pipeline(self):
        """A FeatureUnion containing a nested sklearn pipeline must flatten
        into the expected lale dataflow graph."""
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.kernel_approximation import Nystroem as SklearnNystroem
        from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
        from sklearn.pipeline import FeatureUnion
        union = FeatureUnion(
            [
                (
                    "selectkbest_pca",
                    sklearn.pipeline.make_pipeline(
                        SkSelectKBest(k=3), SklearnPCA(n_components=1)
                    ),
                ),
                ("nys", SklearnNystroem(n_components=2, random_state=42)),
            ]
        )
        sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        self.assertEqual(len(lale_pipeline.edges()), 4)
        # These assertions assume topological sort
        self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[0][1], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][0], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][0], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][0], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][1], KNeighborsClassifier) # type: ignore
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_import_from_sklearn_pipeline_nested_pipeline1(self):
        """A FeatureUnion nested inside another FeatureUnion (two levels of
        nesting) must flatten into an 8-edge lale dataflow graph."""
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.kernel_approximation import Nystroem as SklearnNystroem
        from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
        from sklearn.pipeline import FeatureUnion
        union = FeatureUnion(
            [
                (
                    "selectkbest_pca",
                    sklearn.pipeline.make_pipeline(
                        SkSelectKBest(k=3),
                        FeatureUnion(
                            [
                                ("pca", SklearnPCA(n_components=1)),
                                (
                                    "nested_pipeline",
                                    sklearn.pipeline.make_pipeline(
                                        SkSelectKBest(k=2), SklearnNystroem()
                                    ),
                                ),
                            ]
                        ),
                    ),
                ),
                ("nys", SklearnNystroem(n_components=2, random_state=42)),
            ]
        )
        sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        self.assertEqual(len(lale_pipeline.edges()), 8)
        # These assertions assume topological sort, which may not be unique. So the assertions are brittle.
        self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[0][1], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][1], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][1], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][0], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[4][0], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[4][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[5][0], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[5][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[6][0], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[6][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[7][0], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[7][1], KNeighborsClassifier) # type: ignore
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_import_from_sklearn_pipeline_nested_pipeline2(self):
        """A pipeline nested inside a pipeline inside a FeatureUnion must
        flatten into a 5-edge lale dataflow graph."""
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.kernel_approximation import Nystroem as SklearnNystroem
        from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
        from sklearn.pipeline import FeatureUnion
        union = FeatureUnion(
            [
                (
                    "selectkbest_pca",
                    sklearn.pipeline.make_pipeline(
                        SkSelectKBest(k=3),
                        sklearn.pipeline.make_pipeline(
                            SkSelectKBest(k=2), SklearnPCA()
                        ),
                    ),
                ),
                ("nys", SklearnNystroem(n_components=2, random_state=42)),
            ]
        )
        sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
        lale_pipeline = typing.cast(
            TrainablePipeline,
            import_from_sklearn_pipeline(sklearn_pipeline),
        )
        self.assertEqual(len(lale_pipeline.edges()), 5)
        # These assertions assume topological sort
        self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[0][1], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][0], SelectKBest) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[1][1], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][0], PCA) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[2][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][0], Nystroem) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[3][1], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[4][0], ConcatFeatures) # type: ignore
        self.assertIsInstance(lale_pipeline.edges()[4][1], KNeighborsClassifier) # type: ignore
        self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_noop(self):
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
pipe = Pipeline([("noop", None), ("gbc", GradientBoostingClassifier())])
_ = import_from_sklearn_pipeline(pipe)
def test_import_from_sklearn_pipeline_noop1(self):
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
pipe = Pipeline([("noop", NoOp()), ("gbc", GradientBoostingClassifier())])
_ = import_from_sklearn_pipeline(pipe)
def test_import_from_sklearn_pipeline_no_wrapper(self):
from sklearn.neighbors import LocalOutlierFactor
from sklearn.pipeline import make_pipeline as sk_make_pipeline
sklearn_pipeline = sk_make_pipeline(PCA(), LocalOutlierFactor())
_ = import_from_sklearn_pipeline(sklearn_pipeline, fitted=False)
def test_import_from_sklearn_pipeline_higherorder(self):
from sklearn.ensemble import VotingClassifier as VC
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline(
[("anova", anova_filter), ("vc_svc", VC(estimators=[("clf", clf)]))]
)
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
# for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
# sklearn_step_params = sklearn_pipeline.named_steps[
# pipeline_step
# ].get_params()
# lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
# self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
    def test_export_to_sklearn_pipeline(self):
        """Exporting a trained lale pipeline must preserve each step's
        hyperparameters and the pipeline's predictions."""
        lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier()
        trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
        sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
        for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
            sklearn_step_params = sklearn_pipeline.named_steps[
                pipeline_step
            ].get_params()
            lale_sklearn_params = self.get_sklearn_params(
                trained_lale_pipeline.steps_list()[i]
            )
            self.assertEqual(sklearn_step_params, lale_sklearn_params)
        self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline1(self):
lale_pipeline = SkSelectKBest(k=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
sklearn_step_params = type(sklearn_pipeline.named_steps[pipeline_step])
lale_sklearn_params = (
type(trained_lale_pipeline.steps_list()[i]._impl._wrapped_model)
if hasattr(
trained_lale_pipeline.steps_list()[i]._impl, "_wrapped_model"
)
else type(trained_lale_pipeline.steps_list()[i]._impl)
)
self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline2(self):
from sklearn.pipeline import FeatureUnion
lale_pipeline = (
(
(
(PCA(svd_solver="randomized", random_state=42) & SkSelectKBest(k=3))
>> ConcatFeatures()
)
& Nystroem(random_state=42)
)
>> ConcatFeatures()
>> KNeighborsClassifier()
)
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assertIsInstance(
sklearn_pipeline.named_steps["featureunion"], FeatureUnion
)
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
self.assertIsInstance(
sklearn_pipeline.named_steps["kneighborsclassifier"], SklearnKNN
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline3(self):
from sklearn.pipeline import FeatureUnion
lale_pipeline = (
(
(PCA() >> SkSelectKBest(k=2))
& (Nystroem(random_state=42) >> SkSelectKBest(k=3))
& (SkSelectKBest(k=3))
)
>> ConcatFeatures()
>> SkSelectKBest(k=2)
>> LogisticRegression()
)
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assertIsInstance(
sklearn_pipeline.named_steps["featureunion"], FeatureUnion
)
self.assertIsInstance(
sklearn_pipeline.named_steps["selectkbest"], SkSelectKBest
)
from sklearn.linear_model import LogisticRegression as SklearnLR
self.assertIsInstance(
sklearn_pipeline.named_steps["logisticregression"], SklearnLR
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline4(self):
lale_pipeline = make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
from sklearn.linear_model import LogisticRegression as SklearnLR
self.assertIsInstance(
sklearn_pipeline.named_steps["logisticregression"], SklearnLR
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline5(self):
lale_pipeline = PCA() >> (XGBClassifier() | SGDClassifier())
with self.assertRaises(ValueError):
_ = lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_pickle(self):
lale_pipeline = make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
pickle.dumps(lale_pipeline)
pickle.dumps(trained_lale_pipeline)
def test_import_from_sklearn_pipeline2(self):
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
sklearn_pipeline.fit(self.X_train, self.y_train)
lale_pipeline = typing.cast(
TrainedPipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
lale_pipeline.predict(self.X_test)
def test_import_from_sklearn_pipeline3(self):
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline, fitted=False),
)
with self.assertRaises(
ValueError
): # fitted=False returns a Trainable, so calling predict is invalid.
lale_pipeline.predict(self.X_test)
def test_export_to_sklearn_pipeline_with_noop_1(self):
lale_pipeline = NoOp() >> PCA(n_components=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_2(self):
lale_pipeline = PCA(n_components=3) >> NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_3(self):
# This test is probably unnecessary, but doesn't harm at this point
lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier() >> NoOp()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
_ = trained_lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_sklearn_pipeline_with_noop_4(self):
lale_pipeline = NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
class TestComposition(unittest.TestCase):
    """Tests for composing operators with >> (pipe), & (union), and remove_last."""
    def setUp(self):
        # Fresh random train/test split of Iris before every test method.
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_two_estimators_predict(self):
        # LogisticRegression appears both as a union branch (its output is
        # concatenated as features) and as the final predictor.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & LogisticRegression())
            >> ConcatFeatures()
            >> NoOp()
            >> LogisticRegression()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)
    def test_two_estimators_predict1(self):
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)
    def test_two_estimators_predict_proba(self):
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & LogisticRegression())
            >> ConcatFeatures()
            >> NoOp()
            >> LogisticRegression()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict_proba(self.X_test)
    def test_two_estimators_predict_proba1(self):
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & GaussianNB())
            >> ConcatFeatures()
            >> NoOp()
            >> GaussianNB()
        )
        # NOTE(review): unlike the sibling tests, this one calls fit and then
        # predict_proba on the trainable itself rather than on the fit result —
        # presumably exercising the deprecated call-through path; confirm.
        pipeline.fit(self.X_train, self.y_train)
        pipeline.predict_proba(self.X_test)
    def test_multiple_estimators_predict_predict_proba(self):
        pipeline = (
            StandardScaler()
            >> (LogisticRegression() & PCA())
            >> ConcatFeatures()
            >> (NoOp() & LinearSVC())
            >> ConcatFeatures()
            >> KNeighborsClassifier()
        )
        pipeline.fit(self.X_train, self.y_train)
        _ = pipeline.predict_proba(self.X_test)
        _ = pipeline.predict(self.X_test)
    def test_two_transformers(self):
        # A pipeline of two transformers supports transform, not predict.
        tfm1 = PCA()
        tfm2 = Nystroem()
        trainable = tfm1 >> tfm2
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.transform(digits.data)
    def test_duplicate_instances(self):
        # The same operator instance may not occur twice in one pipeline.
        tfm = PCA()
        clf = LogisticRegression(
            LogisticRegression.enum.solver.lbfgs,
            LogisticRegression.enum.multi_class.auto,
        )
        with self.assertRaises(ValueError):
            _ = make_pipeline(tfm, tfm, clf)
    def test_increase_num_rows_predict(self):
        # A transformer may change the number of rows; predict then yields
        # one prediction per (augmented) row.
        from test.mock_custom_operators import IncreaseRows
        increase_rows = IncreaseRows()
        trainable = increase_rows >> LogisticRegression()
        iris = sklearn.datasets.load_iris()
        X, y = iris.data, iris.target
        trained = trainable.fit(X, y)
        y_pred = trained.predict(X)
        self.assertEqual(len(y_pred), len(y) + increase_rows.impl.n_rows)
    def test_increase_num_rows_transform_X_y(self):
        # transform_X_y must propagate row-count changes through both X and y.
        from test.mock_custom_operators import IncreaseRows
        increase_rows_4 = IncreaseRows(n_rows=4)
        increase_rows_2 = IncreaseRows(n_rows=2)
        trainable = increase_rows_4 >> increase_rows_2
        iris = sklearn.datasets.load_iris()
        X, y = iris.data, iris.target
        trained = trainable.fit(X, y)
        output_X, output_y = trained.transform_X_y(X, y)
        self.assertEqual(output_X.shape[0], X.shape[0] + 4 + 2)
        self.assertEqual(output_X.shape[1], X.shape[1])
        self.assertEqual(output_y.shape[0], y.shape[0] + 4 + 2)
    def test_remove_last1(self):
        # remove_last returns a copy by default; the original keeps 7 steps.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        new_pipeline = pipeline.remove_last()
        self.assertEqual(len(new_pipeline._steps), 6)
        self.assertEqual(len(pipeline._steps), 7)
    def test_remove_last2(self):
        # remove_last is ambiguous when the pipeline ends in a union of
        # multiple sink operators, so it must raise.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> (PassiveAggressiveClassifier() & LogisticRegression())
        )
        with self.assertRaises(ValueError):
            pipeline.remove_last()
    def test_remove_last3(self):
        # The result of remove_last supports further operations.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        pipeline.remove_last().freeze_trainable()
    def test_remove_last4(self):
        # With inplace=True the original pipeline itself loses the last step.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        new_pipeline = pipeline.remove_last(inplace=True)
        self.assertEqual(len(new_pipeline._steps), 6)
        self.assertEqual(len(pipeline._steps), 6)
    def test_remove_last5(self):
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        pipeline.remove_last(inplace=True).freeze_trainable()
class TestAutoPipeline(unittest.TestCase):
    """Smoke tests running lale.lib.lale.AutoPipeline on assorted datasets."""

    def _fit_predict(self, prediction_type, all_X, all_y, verbose=True):
        """Split the data, fit AutoPipeline, print score and best pipeline."""
        if verbose:
            _file_name, _line, fn_name, _text = traceback.extract_stack()[-2]
            print(f"--- TestAutoPipeline.{fn_name}() ---")
        from lale.lib.lale import AutoPipeline

        train_X, test_X, train_y, test_y = train_test_split(all_X, all_y)
        auto = AutoPipeline(
            prediction_type=prediction_type, max_evals=10, verbose=verbose
        )
        trained = auto.fit(train_X, train_y)
        predicted = trained.predict(test_X)
        if prediction_type == "regression":
            score = f"r2 score {r2_score(test_y, predicted):.2f}"
        else:
            score = f"accuracy {accuracy_score(test_y, predicted):.1%}"
        if verbose:
            print(score)
        best_found = trained.get_pipeline()
        assert best_found is not None
        print(best_found.pretty_print(show_imports=False))

    def test_sklearn_iris(self):
        # classification, only numbers, no missing values
        X, y = sklearn.datasets.load_iris(return_X_y=True)
        self._fit_predict("classification", X, y)

    def test_sklearn_digits(self):
        # classification, numbers but some appear categorical, no missing values
        X, y = sklearn.datasets.load_digits(return_X_y=True)
        self._fit_predict("classification", X, y)

    def test_sklearn_boston(self):
        # regression, categoricals+numbers, no missing values
        from lale.datasets.util import load_boston

        X, y = load_boston(return_X_y=True)
        self._fit_predict("regression", X, y)

    def test_sklearn_diabetes(self):
        # regression, categoricals+numbers, no missing values
        X, y = sklearn.datasets.load_diabetes(return_X_y=True)
        self._fit_predict("regression", X, y)

    def test_openml_creditg(self):
        # classification, categoricals+numbers incl. string, no missing values
        (orig_X, orig_y), _ = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        small_X, _, small_y, _ = train_test_split(orig_X, orig_y, train_size=0.05)
        self._fit_predict("classification", small_X, small_y)

    def test_missing_iris(self):
        # classification, only numbers, synthetically added missing values
        X, y = sklearn.datasets.load_iris(return_X_y=True)
        X_missing = lale.helpers.add_missing_values(X)
        # Plain LogisticRegression chokes on NaNs; AutoPipeline should not.
        with self.assertRaisesRegex(ValueError, "Input.*contains NaN"):
            _ = LogisticRegression().fit(X_missing, y)
        self._fit_predict("classification", X_missing, y)

    def test_missing_boston(self):
        # regression, categoricals+numbers, synthetically added missing values
        from lale.datasets.util import load_boston

        X, y = load_boston(return_X_y=True)
        X_missing = lale.helpers.add_missing_values(X)
        # Plain LinearRegression chokes on NaNs; AutoPipeline should not.
        with self.assertRaisesRegex(ValueError, "Input.*contains NaN"):
            _ = LinearRegression().fit(X_missing, y)
        self._fit_predict("regression", X_missing, y)

    def test_missing_creditg(self):
        # classification, categoricals+numbers incl. string, synth. missing
        (orig_X, orig_y), _ = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        small_X, _, small_y, _ = train_test_split(orig_X, orig_y, train_size=0.05)
        X_missing = lale.helpers.add_missing_values(small_X)
        self._fit_predict("classification", X_missing, small_y)
class TestOperatorChoice(unittest.TestCase):
    """Tests for building operator choices with | and make_choice."""

    def test_make_choice_with_instance(self):
        from sklearn.datasets import load_iris

        iris = load_iris()
        X, y = iris.data, iris.target
        choice = PCA() | Nystroem() | NoOp()
        # A choice is planned, not trainable, so fit must fail at runtime;
        # we ignore the static warning since that error is exactly the point.
        with self.assertRaises(AttributeError):
            _ = choice.fit(X, y)  # type: ignore
        _ = (OneHotEncoder | NoOp) >> choice >> (LogisticRegression | KNeighborsClassifier)
        _ = (
            (OneHotEncoder | NoOp)
            >> (PCA | Nystroem)
            >> (LogisticRegression | KNeighborsClassifier)
        )
        _ = (
            make_choice(OneHotEncoder, NoOp)
            >> make_choice(PCA, Nystroem)
            >> make_choice(LogisticRegression, KNeighborsClassifier)
        )
class TestScore(unittest.TestCase):
    """score() should equal the accuracy of predict() for classifiers."""

    def setUp(self):
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)

    def test_trained_pipeline(self):
        trainable = StandardScaler() >> LogisticRegression()
        trained = trainable.fit(self.X_train, self.y_train)
        score = trained.score(self.X_test, self.y_test)
        accuracy = accuracy_score(self.y_test, trained.predict(self.X_test))
        self.assertEqual(accuracy, score)

    def test_trainable_pipeline(self):
        # Calling score/predict on the trainable itself also works.
        trainable = StandardScaler() >> LogisticRegression()
        trainable.fit(self.X_train, self.y_train)
        score = trainable.score(self.X_test, self.y_test)
        accuracy = accuracy_score(self.y_test, trainable.predict(self.X_test))
        self.assertEqual(accuracy, score)

    def test_planned_pipeline(self):
        planned = StandardScaler >> LogisticRegression
        with self.assertRaises(AttributeError):
            planned.score(self.X_test, self.y_test)  # type: ignore
class TestScoreSamples(unittest.TestCase):
    """score_samples() support across pipeline lifecycle states."""

    def setUp(self):
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        import warnings

        warnings.filterwarnings("ignore")

    def test_trained_pipeline(self):
        trainable = StandardScaler() >> IsolationForest()
        trained = trainable.fit(self.X_train, self.y_train)
        _ = trained.score_samples(self.X_test)

    def test_trainable_pipeline(self):
        # score_samples on the trainable (rather than trained) is deprecated.
        trainable = StandardScaler() >> IsolationForest()
        trainable.fit(self.X_train, self.y_train)
        with self.assertWarns(DeprecationWarning):
            _ = trainable.score_samples(self.X_test)

    def test_planned_pipeline(self):
        planned = StandardScaler >> IsolationForest
        with self.assertRaises(AttributeError):
            planned.score_samples(self.X_test)  # type: ignore

    def test_with_incompatible_estimator(self):
        # LogisticRegression does not offer score_samples.
        trainable = StandardScaler() >> LogisticRegression()
        trained = trainable.fit(self.X_train, self.y_train)
        with self.assertRaises(AttributeError):
            _ = trained.score_samples(self.X_test)
class TestPredictLogProba(unittest.TestCase):
    """predict_log_proba() support across pipeline lifecycle states."""

    def setUp(self):
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        import warnings

        warnings.filterwarnings("ignore")

    def test_trained_pipeline(self):
        trainable = StandardScaler() >> AdaBoostClassifier()
        trained = trainable.fit(self.X_train, self.y_train)
        _ = trained.predict_log_proba(self.X_test)

    def test_trainable_pipeline(self):
        # predict_log_proba on the trainable (rather than trained) is deprecated.
        trainable = StandardScaler() >> AdaBoostClassifier()
        trainable.fit(self.X_train, self.y_train)
        with self.assertWarns(DeprecationWarning):
            _ = trainable.predict_log_proba(self.X_test)

    def test_planned_pipeline(self):
        planned = StandardScaler >> AdaBoostClassifier
        with self.assertRaises(AttributeError):
            planned.predict_log_proba(self.X_test)  # type: ignore

    def test_with_incompatible_estimator(self):
        # IsolationForest does not offer predict_log_proba.
        trainable = StandardScaler() >> IsolationForest()
        trained = trainable.fit(self.X_train, self.y_train)
        with self.assertRaises(AttributeError):
            _ = trained.predict_log_proba(self.X_test)

    def test_with_incompatible_estimator_1(self):
        trainable = IsolationForest()
        trained = trainable.fit(self.X_train, self.y_train)
        with self.assertRaises(AttributeError):
            _ = trained.predict_log_proba(self.X_test)
class TestPartialFit(unittest.TestCase):
    """Tests for incremental training of pipelines via partial_fit."""
    def setUp(self):
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        import warnings
        warnings.filterwarnings("ignore")
    def test_first_call(self):
        # The scaler prefix is frozen-trained; only SGDClassifier trains
        # incrementally.  The first partial_fit call must supply classes.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_train, self.y_train, classes=[0, 1, 2]
        )
        _ = new_trained_pipeline.predict(self.X_test)
    def test_multiple_calls_with_classes(self):
        # Repeating the same classes on a later call is allowed.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_train, self.y_train, classes=[0, 1, 2]
        )
        new_trained_pipeline = new_trained_pipeline.partial_fit(
            self.X_test, self.y_test, classes=[0, 1, 2]
        )
        _ = new_trained_pipeline.predict(self.X_test)
    def _last_impl_has(self, op, attr):
        # True iff the implementation of the pipeline's last step has `attr`.
        last = op.get_last()
        assert last is not None
        return hasattr(last._impl, attr)
    def test_second_call_without_classes(self):
        # classes may be omitted on calls after the first.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_train, self.y_train, classes=[0, 1, 2]
        )
        # Once SGDClassifier is trained, it has a classes_ attribute.
        self.assertTrue(self._last_impl_has(new_trained_pipeline, "classes_"))
        new_trained_pipeline = new_trained_pipeline.partial_fit(
            self.X_test, self.y_test
        )
        _ = new_trained_pipeline.predict(self.X_test)
    def test_second_call_with_different_classes(self):
        # A later batch may contain only a subset of the declared classes.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_train, self.y_train, classes=[0, 1, 2]
        )
        # Once SGDClassifier is trained, it has a classes_ attribute.
        self.assertTrue(self._last_impl_has(new_trained_pipeline, "classes_"))
        subset_labels = self.y_test[np.where(self.y_test != 0)]
        subset_X = self.X_test[0 : len(subset_labels)]
        new_trained_pipeline = new_trained_pipeline.partial_fit(subset_X, subset_labels)
        _ = new_trained_pipeline.predict(self.X_test)
    def test_second_call_with_different_classes_trainable(self):
        # Same as above, but keeps calling partial_fit on the trainable; the
        # trained result is cached on its _trained attribute.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
        # Once SGDClassifier is trained, it has a classes_ attribute.
        self.assertTrue(self._last_impl_has(new_pipeline._trained, "classes_"))
        subset_labels = self.y_test[np.where(self.y_test != 0)]
        subset_X = self.X_test[0 : len(subset_labels)]
        new_trained_pipeline = new_pipeline.partial_fit(subset_X, subset_labels)
        _ = new_trained_pipeline.predict(self.X_test)
    def test_call_on_trainable(self):
        # partial_fit on a trainable returns the same object as its _trained.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
        new_pipeline.pretty_print()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_test, self.y_test, classes=[0, 1, 2]
        )
        self.assertEqual(new_trained_pipeline, new_pipeline._trained)
        _ = new_trained_pipeline.predict(self.X_test)
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
    def test_call_on_trainable_with_freeze_trained_prefix(self):
        # Without an explicit freeze_trained(), the trained prefix is frozen
        # implicitly (freeze_trained_prefix defaults to allowing this).
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline >> SGDClassifier()
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
        new_pipeline.pretty_print()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_test, self.y_test, classes=[0, 1, 2]
        )
        self.assertEqual(new_trained_pipeline, new_pipeline._trained)
        _ = new_trained_pipeline.predict(self.X_test)
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
    def test_call_on_trainable_with_freeze_trained_prefix_false(self):
        # With freeze_trained_prefix=False the un-frozen prefix makes
        # partial_fit invalid.
        trainable_pipeline = StandardScaler()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline >> SGDClassifier()
        with self.assertRaises(ValueError):
            new_pipeline.partial_fit(
                self.X_train,
                self.y_train,
                freeze_trained_prefix=False,
                classes=[0, 1, 2],
            )
    def test_call_on_trained_with_freeze_trained_prefix(self):
        # partial_fit also works on a fully trained pipeline.
        trainable_pipeline = StandardScaler() >> SGDClassifier()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
        new_pipeline.pretty_print()
        new_trained_pipeline = new_pipeline.partial_fit(
            self.X_test, self.y_test, classes=[0, 1, 2]
        )
        _ = new_trained_pipeline.predict(self.X_test)
        new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
    def test_call_on_trained_with_freeze_trained_prefix_false(self):
        trainable_pipeline = StandardScaler() >> SGDClassifier()
        trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
        new_pipeline = trained_pipeline
        with self.assertRaises(ValueError):
            new_pipeline.partial_fit(
                self.X_train,
                self.y_train,
                freeze_trained_prefix=False,
                classes=[0, 1, 2],
            )
| 48,983 | 42.348673 | 107 | py |
lale | lale-master/test/test_autoai_libs.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.model_selection
import lale.lib.autoai_libs
# from lale.datasets.uci import fetch_household_power_consumption
from lale.lib.autoai_libs import float32_transform
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.xgboost.xgb_classifier import XGBClassifier
class TestAutoaiLibs(unittest.TestCase):
    """Smoke tests for lale wrappers of autoai_libs transformers on Iris."""
    @classmethod
    def setUpClass(cls):
        # One shared Iris train/test split for the whole class.
        iris = sklearn.datasets.load_iris()
        iris_X, iris_y = iris.data, iris.target
        (
            iris_train_X,
            iris_test_X,
            iris_train_y,
            iris_test_y,
        ) = sklearn.model_selection.train_test_split(iris_X, iris_y)
        cls._iris = {
            "train_X": iris_train_X,
            "train_y": iris_train_y,
            "test_X": iris_test_X,
            "test_y": iris_test_y,
        }
    def doTest(self, trainable, train_X, train_y, test_X, test_y):
        # Shared driver: fit/transform the transformer alone, check the
        # deprecated transform-on-trainable path, serialize to JSON, then use
        # the transformer inside a pipeline, directly and under Hyperopt.
        trained = trainable.fit(train_X, train_y)
        _ = trained.transform(test_X)
        with self.assertWarns(DeprecationWarning):
            trainable.transform(train_X)
        trainable.to_json()
        trainable_pipeline = trainable >> float32_transform() >> LR()
        trained_pipeline = trainable_pipeline.fit(train_X, train_y)
        trained_pipeline.predict(test_X)
        hyperopt = Hyperopt(estimator=trainable_pipeline, max_evals=1, verbose=True)
        trained_hyperopt = hyperopt.fit(train_X, train_y)
        trained_hyperopt.predict(test_X)
    def test_NumpyColumnSelector(self):
        trainable = lale.lib.autoai_libs.NumpyColumnSelector()
        self.doTest(trainable, **self._iris)
    def test_NumpyColumnSelector_pandas(self):
        # Same operator, but fed pandas DataFrames instead of numpy arrays.
        iris_X, iris_y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
        keys = ["train_X", "test_X", "train_y", "test_y"]
        splits = sklearn.model_selection.train_test_split(iris_X, iris_y)
        iris = dict(zip(keys, splits))
        self.assertIsInstance(iris["train_X"], pd.DataFrame)
        trainable = lale.lib.autoai_libs.NumpyColumnSelector(columns=[0, 2, 3])
        self.doTest(trainable, **iris)
    def test_CompressStrings(self):
        n_columns = self._iris["train_X"].shape[1]
        trainable = lale.lib.autoai_libs.CompressStrings(
            dtypes_list=["int_num" for i in range(n_columns)],
            misslist_list=[[] for i in range(n_columns)],
        )
        self.doTest(trainable, **self._iris)
    def test_NumpyReplaceMissingValues(self):
        trainable = lale.lib.autoai_libs.NumpyReplaceMissingValues()
        self.doTest(trainable, **self._iris)
    def test_NumpyReplaceUnknownValues(self):
        trainable = lale.lib.autoai_libs.NumpyReplaceUnknownValues(filling_values=42.0)
        self.doTest(trainable, **self._iris)
    def test_boolean2float(self):
        trainable = lale.lib.autoai_libs.boolean2float()
        self.doTest(trainable, **self._iris)
    def test_CatImputer(self):
        trainable = lale.lib.autoai_libs.CatImputer()
        self.doTest(trainable, **self._iris)
    def test_CatEncoder(self):
        trainable = lale.lib.autoai_libs.CatEncoder(
            encoding="ordinal",
            categories="auto",
            dtype="float64",
            handle_unknown="ignore",
        )
        self.doTest(trainable, **self._iris)
    def test_float32_transform(self):
        trainable = lale.lib.autoai_libs.float32_transform()
        self.doTest(trainable, **self._iris)
    def test_FloatStr2Float(self):
        n_columns = self._iris["train_X"].shape[1]
        trainable = lale.lib.autoai_libs.FloatStr2Float(
            dtypes_list=["int_num" for i in range(n_columns)]
        )
        self.doTest(trainable, **self._iris)
    def test_OptStandardScaler(self):
        trainable = lale.lib.autoai_libs.OptStandardScaler()
        self.doTest(trainable, **self._iris)
    def test_NumImputer(self):
        trainable = lale.lib.autoai_libs.NumImputer()
        self.doTest(trainable, **self._iris)
    def test_NumpyPermuteArray(self):
        trainable = lale.lib.autoai_libs.NumpyPermuteArray(
            axis=0, permutation_indices=[2, 0, 1, 3]
        )
        self.doTest(trainable, **self._iris)
    def test_TNoOp(self):
        from autoai_libs.utils.fc_methods import is_not_categorical
        trainable = lale.lib.autoai_libs.TNoOp(
            fun=np.rint,
            name="do nothing",
            datatypes=["numeric"],
            feat_constraints=[is_not_categorical],
        )
        self.doTest(trainable, **self._iris)
    def test_TA1(self):
        # Unary feature transform (element-wise rounding).
        from autoai_libs.utils.fc_methods import is_not_categorical
        float32 = np.dtype("float32")
        trainable = lale.lib.autoai_libs.TA1(
            fun=np.rint,
            name="round",
            datatypes=["numeric"],
            feat_constraints=[is_not_categorical],
            col_names=["a", "b", "c", "d"],
            col_dtypes=[float32, float32, float32, float32],
        )
        self.doTest(trainable, **self._iris)
    def test_TA2(self):
        # Binary feature transform (pairwise sums of columns).
        from autoai_libs.utils.fc_methods import is_not_categorical
        float32 = np.dtype("float32")
        trainable = lale.lib.autoai_libs.TA2(
            fun=np.add,
            name="sum",
            datatypes1=["numeric"],
            feat_constraints1=[is_not_categorical],
            datatypes2=["numeric"],
            feat_constraints2=[is_not_categorical],
            col_names=["a", "b", "c", "d"],
            col_dtypes=[float32, float32, float32, float32],
        )
        self.doTest(trainable, **self._iris)
    def test_TB1(self):
        # Transform that wraps a stateful sklearn transformer class.
        from autoai_libs.utils.fc_methods import is_not_categorical
        from sklearn.preprocessing import StandardScaler
        float32 = np.dtype("float32")
        trainable = lale.lib.autoai_libs.TB1(
            tans_class=StandardScaler,
            name="stdscaler",
            datatypes=["numeric"],
            feat_constraints=[is_not_categorical],
            col_names=["a", "b", "c", "d"],
            col_dtypes=[float32, float32, float32, float32],
        )
        self.doTest(trainable, **self._iris)
    def test_TB2(self):
        pass  # TODO: not sure how to instantiate, what to pass for tans_class
    def test_TAM(self):
        from autoai_libs.cognito.transforms.transform_extras import (
            IsolationForestAnomaly,
        )
        float32 = np.dtype("float32")
        trainable = lale.lib.autoai_libs.TAM(
            tans_class=IsolationForestAnomaly,
            name="isoforestanomaly",
            col_names=["a", "b", "c", "d"],
            col_dtypes=[float32, float32, float32, float32],
        )
        self.doTest(trainable, **self._iris)
    def test_TGen(self):
        # General n-ary feature transform (here arity 2, NXOR).
        from autoai_libs.cognito.transforms.transform_extras import NXOR
        from autoai_libs.utils.fc_methods import is_not_categorical
        float32 = np.dtype("float32")
        trainable = lale.lib.autoai_libs.TGen(
            fun=NXOR,
            name="nxor",
            arg_count=2,
            datatypes_list=[["numeric"], ["numeric"]],
            feat_constraints_list=[[is_not_categorical], [is_not_categorical]],
            col_names=["a", "b", "c", "d"],
            col_dtypes=[float32, float32, float32, float32],
        )
        self.doTest(trainable, **self._iris)
    def test_FS1(self):
        trainable = lale.lib.autoai_libs.FS1(
            cols_ids_must_keep=[1],
            additional_col_count_to_keep=3,
            ptype="classification",
        )
        self.doTest(trainable, **self._iris)
    def test_FS2(self):
        # Feature selection driven by an evaluation estimator.
        from sklearn.ensemble import ExtraTreesClassifier
        trainable = lale.lib.autoai_libs.FS2(
            cols_ids_must_keep=[1],
            additional_col_count_to_keep=3,
            ptype="classification",
            eval_algo=ExtraTreesClassifier,
        )
        self.doTest(trainable, **self._iris)
    def test_ColumnSelector(self):
        trainable = lale.lib.autoai_libs.ColumnSelector()
        self.doTest(trainable, **self._iris)
    def test_ColumnSelector_pandas(self):
        # Same operator, but fed pandas DataFrames instead of numpy arrays.
        iris_X, iris_y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
        keys = ["train_X", "test_X", "train_y", "test_y"]
        splits = sklearn.model_selection.train_test_split(iris_X, iris_y)
        iris = dict(zip(keys, splits))
        self.assertIsInstance(iris["train_X"], pd.DataFrame)
        trainable = lale.lib.autoai_libs.ColumnSelector(columns_indices_list=[0, 2, 3])
        self.doTest(trainable, **iris)
class TestAutoaiLibsText(unittest.TestCase):
    """Smoke tests for autoai_libs text transformers on 20newsgroups."""

    def setUp(self):
        from sklearn.datasets import fetch_20newsgroups

        cats = ["alt.atheism", "sci.space"]
        train_set = fetch_20newsgroups(subset="train", categories=cats)
        self.train_X = np.array(train_set.data)
        self.train_y = train_set.target
        # One text document per row, as a single-column 2-D array.
        self.train_X = np.reshape(self.train_X, (self.train_X.shape[0], 1))
        test_set = fetch_20newsgroups(subset="test", categories=cats)
        self.test_X = np.array(test_set.data)
        self.test_y = test_set.target
        self.test_X = np.reshape(self.test_X, (self.test_X.shape[0], 1))

    def doTest(self, trainable, train_X, train_y, test_X, test_y):
        """Fit/transform the transformer, then use it in a pipeline and Hyperopt."""
        trained = trainable.fit(train_X, train_y)
        _ = trained.transform(test_X)
        # transform on the trainable itself is deprecated but still works.
        with self.assertWarns(DeprecationWarning):
            trainable.transform(train_X)
        trainable.to_json()
        pipeline = trainable >> float32_transform() >> XGBClassifier()
        trained_pipeline = pipeline.fit(train_X, train_y)
        trained_pipeline.predict(test_X)
        tuner = Hyperopt(estimator=pipeline, max_evals=1, verbose=True)
        trained_tuner = tuner.fit(train_X, train_y)
        trained_tuner.predict(test_X)

    @unittest.skip(
        "skipping for now because this does not work with the latest xgboost."
    )
    def test_TextTransformer(self):
        trainable = lale.lib.autoai_libs.TextTransformer(
            drop_columns=True,
            columns_to_be_deleted=[0, 1],
            text_processing_options={"word2vec": {"output_dim": 5}},
        )
        self.doTest(trainable, self.train_X, self.train_y, self.test_X, self.test_y)

    @unittest.skip(
        "skipping for now because this does not work with the latest xgboost."
    )
    def test_Word2VecTransformer(self):
        trainable = lale.lib.autoai_libs.Word2VecTransformer(
            drop_columns=True, output_dim=5
        )
        self.doTest(trainable, self.train_X, self.train_y, self.test_X, self.test_y)
# class TestDateTransformer(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# data = fetch_household_power_consumption()
# data = data.iloc[:5000, [0, 2, 3, 4, 5]]
# cls.X_train = data.iloc[-1000:]
# cls.X_test = data.iloc[:-1000]
# def test_01_all_mini_options_with_headers(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["all"], column_headers_list=self.X_train.columns.values.tolist()
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_test_transformed = fitted_transformer.transform(self.X_test.values)
# X_train_transformed = fitted_transformer.transform(self.X_train.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}, new shape: {X_train_transformed.shape}")
# self.assertEqual(
# X_train_transformed.shape[1],
# X_test_transformed.shape[1],
# f"Number of columns after transform is different.:{X_train_transformed.shape[1]}, {X_test_transformed.shape[1]}",
# )
# def test_02_all_options_without_headers(self):
# transformer = lale.lib.autoai_libs.DateTransformer(options=["all"])
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
# def test_03_specific_options_and_delete_source_columns(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["FloatTimestamp", "DayOfWeek", "Hour", "Minute"],
# delete_source_columns=True,
# column_headers_list=self.X_train.columns.values.tolist(),
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
# def test_04_option_Datetime_and_delete_source_columns(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["Datetime"],
# delete_source_columns=True,
# column_headers_list=self.X_train.columns.values.tolist(),
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
# ===== end of previous file =====
# ===== lale-master/test/test_core_classifiers.py =====
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import warnings
from test import EnableSchemaValidation
import jsonschema
from sklearn.datasets import load_iris
import lale.lib.lale
import lale.type_checking
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
SVC,
IsolationForest,
KMeans,
KNeighborsClassifier,
LogisticRegression,
MLPClassifier,
Nystroem,
PassiveAggressiveClassifier,
RidgeClassifier,
SGDClassifier,
SimpleImputer,
VotingClassifier,
)
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
from lale.settings import set_disable_data_schema_validation
# Turn data-schema validation ON for every test in this module (lale's default
# may disable it for performance); several tests below rely on validation errors.
set_disable_data_schema_validation(False)
class TestClassification(unittest.TestCase):
    """Holds a fresh Iris train/test split for the generated classifier tests."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        features, labels = load_iris(return_X_y=True)
        (
            self.X_train,
            self.X_test,
            self.y_train,
            self.y_test,
        ) = train_test_split(features, labels)
def create_function_test_classifier(clf_name):
    """Build a test method exercising the classifier operator named ``clf_name``.

    ``clf_name`` is a fully qualified class name such as
    ``"lale.lib.sklearn.LogisticRegression"``.  The returned function is
    attached to ``TestClassification`` via ``setattr`` below, so each
    classifier appears as an individual test case.
    """

    def test_classifier(self):
        X_train, y_train = self.X_train, self.y_train
        import importlib

        module_name = ".".join(clf_name.split(".")[0:-1])
        class_name = clf_name.split(".")[-1]
        module = importlib.import_module(module_name)
        class_ = getattr(module, class_name)
        clf = class_()
        # test_schemas_are_schemas
        lale.type_checking.validate_is_schema(clf.input_schema_fit())
        lale.type_checking.validate_is_schema(clf.input_schema_predict())
        lale.type_checking.validate_is_schema(clf.output_schema_predict())
        lale.type_checking.validate_is_schema(clf.hyperparam_schema())
        # test_init_fit_predict
        trained = clf.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)
        # test score
        if not isinstance(
            clf, IsolationForest  # type: ignore
        ):  # IsolationForest does not define score
            _ = trained.score(self.X_test, self.y_test)
        from lale.lib.sklearn.gradient_boosting_classifier import (
            GradientBoostingClassifier,
        )

        if isinstance(clf, GradientBoostingClassifier):  # type: ignore
            # because exponential loss does not work with iris dataset as it is not binary classification
            from lale import schemas

            clf = clf.customize_schema(
                loss=schemas.Enum(default="deviance", values=["deviance"])
            )
        # test_with_hyperopt
        from lale.lib.lale import Hyperopt

        hyperopt = Hyperopt(estimator=clf, max_evals=1, verbose=True)
        trained = hyperopt.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)
        # test_cross_validation
        from lale.helpers import cross_val_score

        cv_results = cross_val_score(clf, X_train, y_train, cv=2)
        self.assertEqual(len(cv_results), 2)
        # test_with_gridsearchcv_auto_wrapped
        from sklearn.metrics import accuracy_score, make_scorer

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            grid_search = lale.lib.lale.GridSearchCV(
                estimator=clf,
                lale_num_samples=1,
                lale_num_grids=1,
                cv=2,
                scoring=make_scorer(accuracy_score),
            )
            grid_search.fit(X_train, y_train)
        # test_predict_on_trainable
        trained = clf.fit(X_train, y_train)
        clf.predict(X_train)
        # test_to_json
        clf.to_json()
        # test_in_a_pipeline
        pipeline = NoOp() >> clf
        trained = pipeline.fit(self.X_train, self.y_train)
        _ = trained.predict(self.X_test)

    # Fix: derive the test name from the parameter ``clf_name``.  The original
    # read the module-level loop variable ``clf_to_test``, which only worked by
    # coincidence (the factory happened to be called while that global held the
    # same value) and raises NameError if the factory is used anywhere else.
    test_classifier.__name__ = f"test_{clf_name.rsplit('.', maxsplit=1)[-1]}"
    return test_classifier
# Fully qualified names of the classifier operators exercised by the generated
# tests below.
classifiers = [
    "lale.lib.sklearn.DummyClassifier",
    "lale.lib.sklearn.RandomForestClassifier",
    "lale.lib.sklearn.DecisionTreeClassifier",
    "lale.lib.sklearn.ExtraTreesClassifier",
    "lale.lib.sklearn.GradientBoostingClassifier",
    "lale.lib.sklearn.GaussianNB",
    "lale.lib.sklearn.QuadraticDiscriminantAnalysis",
    "lale.lib.lightgbm.LGBMClassifier",
    "lale.lib.xgboost.XGBClassifier",
    "lale.lib.sklearn.KNeighborsClassifier",
    "lale.lib.sklearn.LinearSVC",
    "lale.lib.sklearn.LogisticRegression",
    "lale.lib.sklearn.MLPClassifier",
    "lale.lib.sklearn.SVC",
    "lale.lib.sklearn.Perceptron",
    "lale.lib.sklearn.PassiveAggressiveClassifier",
    "lale.lib.sklearn.MultinomialNB",
    "lale.lib.sklearn.AdaBoostClassifier",
    "lale.lib.sklearn.SGDClassifier",
    "lale.lib.sklearn.RidgeClassifier",
    "lale.lib.sklearn.IsolationForest",
    "lale.lib.sklearn.KMeans",
]

# Generate one test method per classifier and attach it to TestClassification,
# so every operator is reported as its own test case.
for clf_to_test in classifiers:
    setattr(
        TestClassification,
        f"test_{clf_to_test.rsplit('.', maxsplit=1)[-1]}",
        create_function_test_classifier(clf_to_test),
    )
class TestMLPClassifier(unittest.TestCase):
    """Targeted tests for the MLPClassifier operator."""

    def test_with_multioutput_targets(self):
        # NOTE(review): this test fits KNeighborsClassifier, not MLPClassifier,
        # despite living in TestMLPClassifier — it duplicates the test in
        # TestKNeighborsClassifier below.  Confirm whether MLPClassifier was
        # intended (it may not support multiclass-multioutput targets).
        import numpy as np

        from sklearn.datasets import make_classification
        from sklearn.utils import shuffle

        X, y1 = make_classification(
            n_samples=10, n_features=100, n_informative=30, n_classes=3, random_state=1
        )
        y2 = shuffle(y1, random_state=1)
        y3 = shuffle(y1, random_state=2)
        # Three shuffled copies of the labels stacked column-wise: a
        # multioutput target matrix of shape (n_samples, 3).
        Y = np.vstack((y1, y2, y3)).T
        trainable = KNeighborsClassifier()
        trained = trainable.fit(X, Y)
        _ = trained.predict(X)

    def test_predict_proba(self):
        trainable = MLPClassifier()
        iris = load_iris()
        trained = trainable.fit(iris.data, iris.target)
        # predict_proba on the trainable (first call) as well as the trained
        # operator; the deprecation-warning check is currently disabled.
        # with self.assertWarns(DeprecationWarning):
        _ = trainable.predict_proba(iris.data)
        _ = trained.predict_proba(iris.data)
class TestVotingClassifier(unittest.TestCase):
    """VotingClassifier with lale sub-estimators, pipelines, and search tools."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        data = load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        warnings.filterwarnings("ignore")

    def test_with_lale_classifiers(self):
        clf = VotingClassifier(
            estimators=[("knn", KNeighborsClassifier()), ("lr", LogisticRegression())]
        )
        trained = clf.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)

    def test_with_lale_pipeline(self):
        # A lale pipeline (PCA >> LogisticRegression) can serve as a sub-estimator.
        clf = VotingClassifier(
            estimators=[
                ("knn", KNeighborsClassifier()),
                ("pca_lr", PCA() >> LogisticRegression()),
            ]
        )
        trained = clf.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)

    def test_with_hyperopt(self):
        from lale.lib.lale import Hyperopt

        clf = VotingClassifier(
            estimators=[("knn", KNeighborsClassifier()), ("lr", LogisticRegression())]
        )
        _ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)

    def test_with_gridsearch(self):
        from sklearn.metrics import accuracy_score, make_scorer

        from lale.lib.lale import GridSearchCV

        clf = VotingClassifier(
            estimators=[("knn", KNeighborsClassifier()), ("rc", RidgeClassifier())],
            voting="hard",
        )
        _ = clf.auto_configure(
            self.X_train,
            self.y_train,
            GridSearchCV,
            lale_num_samples=1,
            lale_num_grids=1,
            cv=2,
            scoring=make_scorer(accuracy_score),
        )

    @unittest.skip("TODO: get this working with sklearn 0.23")
    def test_with_observed_gridsearch(self):
        from sklearn.metrics import accuracy_score, make_scorer

        from lale.lib.lale import GridSearchCV
        from lale.lib.lale.observing import LoggingObserver

        clf = VotingClassifier(
            estimators=[("knn", KNeighborsClassifier()), ("rc", RidgeClassifier())],
            voting="hard",
        )
        _ = clf.auto_configure(
            self.X_train,
            self.y_train,
            GridSearchCV,
            lale_num_samples=1,
            lale_num_grids=1,
            cv=2,
            scoring=make_scorer(accuracy_score),
            observer=LoggingObserver,
        )
class TestBaggingClassifier(unittest.TestCase):
    """BaggingClassifier around lale estimators, pipelines, and choices."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        data = load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        warnings.filterwarnings("ignore")

    def test_with_lale_classifiers(self):
        from lale.lib.sklearn import BaggingClassifier

        clf = BaggingClassifier(base_estimator=LogisticRegression())
        trained = clf.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)

    def test_with_lale_pipeline(self):
        from lale.lib.sklearn import BaggingClassifier

        clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
        trained = clf.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)

    def test_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import BaggingClassifier

        clf = BaggingClassifier(base_estimator=LogisticRegression())
        trained = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
        print(trained.to_json())

    def test_pipeline_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import BaggingClassifier

        clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
        _ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)

    def test_pipeline_choice_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import BaggingClassifier

        # The base estimator contains an operator choice (|) for Hyperopt to resolve.
        clf = BaggingClassifier(
            base_estimator=PCA() >> (LogisticRegression() | KNeighborsClassifier())
        )
        _ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)

    def test_predict_log_proba(self):
        from lale.lib.sklearn import BaggingClassifier

        clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
        trained = clf.fit(self.X_train, self.y_train)
        trained.predict_log_proba(self.X_test)

    def test_predict_log_proba_trained_trainable(self):
        from lale.lib.sklearn import BaggingClassifier

        # Calling predict_log_proba on the trainable after fit is deprecated.
        clf = BaggingClassifier()
        clf.fit(self.X_train, self.y_train)
        with self.assertWarns(DeprecationWarning):
            clf.predict_log_proba(self.X_test)

    def test_predict_log_proba_trainable(self):
        from lale.lib.sklearn import BaggingClassifier

        # predict_log_proba on a never-fitted trainable must raise.
        clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
        with self.assertRaises(ValueError):
            clf.predict_log_proba(self.X_test)
class TestStackingClassifier(unittest.TestCase):
    """StackingClassifier wrapping lale estimators, pipelines, and choices."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        features, labels = load_iris(return_X_y=True)
        (
            self.X_train,
            self.X_test,
            self.y_train,
            self.y_test,
        ) = train_test_split(features, labels)

    def test_with_lale_classifiers(self):
        from lale.lib.sklearn import StackingClassifier

        stacker = StackingClassifier(estimators=[("base", LogisticRegression())])
        fitted = stacker.fit(self.X_train, self.y_train)
        fitted.predict(self.X_test)

    def test_with_lale_pipeline(self):
        from lale.lib.sklearn import StackingClassifier

        # A lale pipeline can serve as a stacked base estimator.
        stacker = StackingClassifier(
            estimators=[("base", PCA() >> LogisticRegression())]
        )
        fitted = stacker.fit(self.X_train, self.y_train)
        fitted.predict(self.X_test)

    def test_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import StackingClassifier

        stacker = StackingClassifier(
            estimators=[("base", LogisticRegression())],
            final_estimator=LogisticRegression(),
        )
        fitted = stacker.auto_configure(
            self.X_train, self.y_train, Hyperopt, max_evals=1
        )
        print(fitted.to_json())

    def test_pipeline_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import StackingClassifier

        stacker = StackingClassifier(
            estimators=[("base", PCA() >> LogisticRegression())],
            final_estimator=LogisticRegression(),
        )
        _ = stacker.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)

    def test_pipeline_choice_with_hyperopt(self):
        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import StackingClassifier

        # Operator choice (|) inside the base estimator, resolved by Hyperopt.
        stacker = StackingClassifier(
            estimators=[
                ("base", PCA() >> (LogisticRegression() | KNeighborsClassifier()))
            ]
        )
        _ = stacker.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
class TestSpuriousSideConstraintsClassification(unittest.TestCase):
    # This was prompted by a bug, keeping it as it may help with support for other sklearn versions.
    # Each test fits with a hyperparameter combination that a too-strict schema
    # side-constraint might spuriously reject; fit succeeding is the assertion.

    def setUp(self):
        from sklearn.model_selection import train_test_split

        data = load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)

    def test_sgd_classifier(self):
        reg = SGDClassifier(loss="squared_error", epsilon=0.2)
        reg.fit(self.X_train, self.y_train)

    def test_sgd_classifier_1(self):
        reg = SGDClassifier(learning_rate="optimal", eta0=0.2)
        reg.fit(self.X_train, self.y_train)

    def test_sgd_classifier_2(self):
        reg = SGDClassifier(early_stopping=False, validation_fraction=0.2)
        reg.fit(self.X_train, self.y_train)

    def test_sgd_classifier_3(self):
        reg = SGDClassifier(l1_ratio=0.2, penalty="l1")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier(self):
        reg = MLPClassifier(early_stopping=False, validation_fraction=0.2)
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_1(self):
        reg = MLPClassifier(beta_1=0.8, solver="sgd")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_2b(self):
        reg = MLPClassifier(beta_2=0.8, solver="sgd")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_2e(self):
        reg = MLPClassifier(epsilon=0.8, solver="sgd")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_3(self):
        reg = MLPClassifier(n_iter_no_change=100, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_4(self):
        reg = MLPClassifier(early_stopping=True, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_5(self):
        reg = MLPClassifier(nesterovs_momentum=False, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_6(self):
        reg = MLPClassifier(momentum=0.8, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_7(self):
        reg = MLPClassifier(shuffle=False, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_8(self):
        reg = MLPClassifier(learning_rate="invscaling", solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_9(self):
        reg = MLPClassifier(learning_rate_init=0.002, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_mlp_classifier_10(self):
        reg = MLPClassifier(learning_rate="invscaling", power_t=0.4, solver="lbfgs")
        reg.fit(self.X_train, self.y_train)

    def test_passive_aggressive_classifier(self):
        reg = PassiveAggressiveClassifier(validation_fraction=0.4, early_stopping=False)
        reg.fit(self.X_train, self.y_train)

    def test_svc(self):
        reg = SVC(kernel="linear", gamma=1)
        reg.fit(self.X_train, self.y_train)

    def test_simple_imputer(self):
        reg = SimpleImputer(strategy="mean", fill_value=10)
        reg.fit(self.X_train, self.y_train)

    def test_nystroem(self):
        reg = Nystroem(kernel="cosine", gamma=0.1)
        reg.fit(self.X_train, self.y_train)

    def test_nystroem_1(self):
        reg = Nystroem(kernel="cosine", coef0=0.1)
        reg.fit(self.X_train, self.y_train)

    def test_nystroem_2(self):
        reg = Nystroem(kernel="cosine", degree=2)
        reg.fit(self.X_train, self.y_train)

    def test_ridge_classifier(self):
        reg = RidgeClassifier(fit_intercept=False)
        reg.fit(self.X_train, self.y_train)

    def test_ridge_classifier_1(self):
        reg = RidgeClassifier(solver="svd", max_iter=10)
        reg.fit(self.X_train, self.y_train)
class TestKNeighborsClassifier(unittest.TestCase):
    """Targeted tests for the KNeighborsClassifier operator."""

    def test_with_multioutput_targets(self):
        import numpy as np

        from sklearn.datasets import make_classification
        from sklearn.utils import shuffle

        X, first_labels = make_classification(
            n_samples=10, n_features=100, n_informative=30, n_classes=3, random_state=1
        )
        second_labels = shuffle(first_labels, random_state=1)
        third_labels = shuffle(first_labels, random_state=2)
        # Stack the three label vectors as columns: shape (n_samples, 3).
        multi_targets = np.column_stack((first_labels, second_labels, third_labels))
        model = KNeighborsClassifier().fit(X, multi_targets)
        _ = model.predict(X)

    def test_predict_proba(self):
        knn = KNeighborsClassifier()
        iris = load_iris()
        fitted = knn.fit(iris.data, iris.target)
        # predict_proba on the trainable as well as on the trained operator;
        # the deprecation-warning check is currently disabled.
        # with self.assertWarns(DeprecationWarning):
        _ = knn.predict_proba(iris.data)
        _ = fitted.predict_proba(iris.data)
class TestLogisticRegression(unittest.TestCase):
    """Hyperparameter schema checks and sklearn-tool interop for LogisticRegression."""

    def test_hyperparam_keyword_enum(self):
        # Hyperparameters can be given as lale enum values, positionally or by keyword.
        _ = LogisticRegression(
            LogisticRegression.enum.penalty.l1,
            C=0.1,
            solver=LogisticRegression.enum.solver.saga,
        )

    def test_hyperparam_exclusive_min(self):
        # C has an exclusive minimum of 0, so C=0.0 must be rejected.
        with EnableSchemaValidation():
            with self.assertRaises(jsonschema.ValidationError):
                _ = LogisticRegression(LogisticRegression.enum.penalty.l1, C=0.0)

    def test_hyperparam_penalty_solver_dependence(self):
        # penalty="l1" is incompatible with the newton-cg solver.
        with EnableSchemaValidation():
            with self.assertRaises(jsonschema.ValidationError):
                _ = LogisticRegression(
                    LogisticRegression.enum.penalty.l1,
                    LogisticRegression.enum.solver.newton_cg,
                )

    def test_hyperparam_dual_penalty_solver_dependence(self):
        # dual=True is only supported for the liblinear solver with l2 penalty.
        with EnableSchemaValidation():
            with self.assertRaises(jsonschema.ValidationError):
                _ = LogisticRegression(
                    LogisticRegression.enum.penalty.l2,
                    LogisticRegression.enum.solver.sag,
                    dual=True,
                )

    def test_sample_weight(self):
        import numpy as np

        trainable_lr = LogisticRegression(n_jobs=1)
        iris = load_iris()
        trained_lr = trainable_lr.fit(
            iris.data, iris.target, sample_weight=np.arange(len(iris.target))
        )
        _ = trained_lr.predict(iris.data)

    def test_predict_proba(self):
        import numpy as np

        trainable_lr = LogisticRegression(n_jobs=1)
        iris = load_iris()
        trained_lr = trainable_lr.fit(
            iris.data, iris.target, sample_weight=np.arange(len(iris.target))
        )
        # predict_proba on the trainable and the trained operator; the
        # deprecation-warning check is currently disabled.
        # with self.assertWarns(DeprecationWarning):
        _ = trainable_lr.predict_proba(iris.data)
        _ = trained_lr.predict_proba(iris.data)

    def test_decision_function(self):
        import numpy as np

        trainable_lr = LogisticRegression(n_jobs=1)
        iris = load_iris()
        trained_lr = trainable_lr.fit(
            iris.data, iris.target, sample_weight=np.arange(len(iris.target))
        )
        _ = trained_lr.decision_function(iris.data)

    def test_with_sklearn_gridsearchcv(self):
        # A lale operator can be passed directly to sklearn's GridSearchCV.
        from sklearn.metrics import accuracy_score, make_scorer
        from sklearn.model_selection import GridSearchCV

        lr = LogisticRegression()
        parameters = {"solver": ("liblinear", "lbfgs"), "penalty": ["l2"]}
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf = GridSearchCV(
                lr, parameters, cv=5, scoring=make_scorer(accuracy_score)
            )
            iris = load_iris()
            clf.fit(iris.data, iris.target)

    def test_with_randomizedsearchcv(self):
        import numpy as np

        from scipy.stats.distributions import uniform
        from sklearn.metrics import accuracy_score, make_scorer
        from sklearn.model_selection import RandomizedSearchCV

        lr = LogisticRegression()
        ranges, _cat_idx = lr.get_param_ranges()
        # specify parameters and distributions to sample from
        # the loguniform distribution needs to be taken care of properly
        param_dist = {"solver": ranges["solver"], "C": uniform(0.03125, np.log(32768))}
        # run randomized search
        n_iter_search = 5
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            random_search = RandomizedSearchCV(
                lr,
                param_distributions=param_dist,
                n_iter=n_iter_search,
                cv=5,
                scoring=make_scorer(accuracy_score),
            )
            iris = load_iris()
            random_search.fit(iris.data, iris.target)

    def test_grid_search_on_trained(self):
        # GridSearchCV construction must also accept an already-trained operator.
        from sklearn.metrics import accuracy_score, make_scorer
        from sklearn.model_selection import GridSearchCV

        iris = load_iris()
        X, y = iris.data, iris.target
        lr = LogisticRegression()
        trained = lr.fit(X, y)
        parameters = {"solver": ("liblinear", "lbfgs"), "penalty": ["l2"]}
        _ = GridSearchCV(trained, parameters, cv=5, scoring=make_scorer(accuracy_score))

    def test_grid_search_on_trained_auto(self):
        # Same as above, but with the parameter grid generated from the schema.
        from sklearn.metrics import accuracy_score, make_scorer
        from sklearn.model_selection import GridSearchCV

        iris = load_iris()
        X, y = iris.data, iris.target
        lr = LogisticRegression()
        trained = lr.fit(X, y)
        parameters = get_grid_search_parameter_grids(lr, num_samples=2)
        _ = GridSearchCV(trained, parameters, cv=5, scoring=make_scorer(accuracy_score))

    def test_doc(self):
        # Mirrors the documentation example using a custom mock operator.
        from test.mock_custom_operators import MyLR

        import sklearn.datasets
        import sklearn.utils

        iris = load_iris()
        X_all, y_all = sklearn.utils.shuffle(iris.data, iris.target, random_state=42)
        X_train, y_train = X_all[10:], y_all[10:]
        X_test, y_test = X_all[:10], y_all[:10]
        print(f"expected {y_test}")
        warnings.filterwarnings("ignore", category=FutureWarning)
        trainable = MyLR(solver="lbfgs", C=0.1)
        trained = trainable.fit(X_train, y_train)
        predictions = trained.predict(X_test)
        print(f"actual {predictions}")
class TestIsolationForest(unittest.TestCase):
    """IsolationForest is unsupervised: fit takes no y, scoring needs a custom scorer."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        from lale.datasets.util import load_boston

        data = load_boston()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
        warnings.filterwarnings("ignore")

    def test_with_no_y(self):
        clf = IsolationForest()
        trained = clf.fit(self.X_train)
        trained.predict(self.X_test)

    def test_with_hyperopt(self):
        # Constant scorer: Hyperopt has no meaningful metric for an
        # unsupervised estimator, we only check that the search runs.
        def my_scorer(estimator, X, y=None):
            return 1

        from lale.lib.lale import Hyperopt

        hyperopt = Hyperopt(
            estimator=IsolationForest(max_features=1.0, max_samples=1.0),
            max_evals=5,
            verbose=True,
            scoring=my_scorer,
        )
        trained = hyperopt.fit(self.X_train)
        _ = trained.predict(self.X_test)

    def test_decision_function_1(self):
        # decision_function must be available on the pipeline Hyperopt returns.
        def my_scorer(estimator, X, y=None):
            return 1

        from lale.lib.lale import Hyperopt

        hyperopt = Hyperopt(
            estimator=IsolationForest(max_features=1.0, max_samples=1.0),
            max_evals=5,
            verbose=True,
            scoring=my_scorer,
        )
        trained = hyperopt.fit(self.X_train)
        pipeline = trained.get_pipeline()
        assert pipeline is not None
        _ = pipeline.decision_function(self.X_test)

    def test_decision_function_2(self):
        # Same, but with a preprocessing step in front of the estimator.
        def my_scorer(estimator, X, y=None):
            return 1

        from lale.lib.lale import Hyperopt
        from lale.lib.sklearn import MinMaxScaler

        hyperopt = Hyperopt(
            estimator=MinMaxScaler()
            >> IsolationForest(max_features=1.0, max_samples=1.0),
            max_evals=5,
            verbose=True,
            scoring=my_scorer,
        )
        trained = hyperopt.fit(self.X_train)
        pipeline = trained.get_pipeline()
        assert pipeline is not None
        _ = pipeline.decision_function(self.X_test)

    def test_score_samples(self):
        clf = IsolationForest()
        trained = clf.fit(self.X_train)
        trained.score_samples(self.X_test)

    def test_score_samples_trainable(self):
        # score_samples on a never-fitted trainable must raise.
        clf = IsolationForest()
        with self.assertRaises(ValueError):
            clf.score_samples(self.X_test)

    def test_score_samples_trained_trainable(self):
        # score_samples on the trainable after fit is deprecated.
        clf = IsolationForest()
        clf.fit(self.X_train)
        with self.assertWarns(DeprecationWarning):
            clf.score_samples(self.X_test)
class TestKMeans(unittest.TestCase):
    """KMeans is unsupervised: fit takes no y, Hyperopt needs a custom scorer."""

    def setUp(self):
        from sklearn.model_selection import train_test_split

        from lale.datasets.util import load_boston

        boston = load_boston()
        splits = train_test_split(boston.data, boston.target)
        self.X_train, self.X_test, self.y_train, self.y_test = splits
        warnings.filterwarnings("ignore")

    def test_with_no_y(self):
        fitted = KMeans().fit(self.X_train)
        fitted.predict(self.X_test)

    def test_with_hyperopt(self):
        from lale.lib.lale import Hyperopt

        # Constant scorer: there is no meaningful supervised metric here,
        # we only check that the search runs end to end.
        def my_scorer(estimator, X, y=None):
            return 1

        hyperopt = Hyperopt(
            estimator=KMeans(n_clusters=3), max_evals=5, verbose=True, scoring=my_scorer
        )
        trained = hyperopt.fit(self.X_train)
        _ = trained.predict(self.X_test)
# ===== lale-master/test/test_relational_sklearn.py =====
# Copyright 2021-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import numbers
import os.path
import re
import tempfile
import unittest
import urllib.request
from typing import Any, Dict, List, Tuple, cast
import jsonschema
import numpy as np
import pandas as pd
import sklearn
import sklearn.datasets
from category_encoders import HashingEncoder as SkHashingEncoder
from sklearn.feature_selection import SelectKBest as SkSelectKBest
from sklearn.impute import SimpleImputer as SkSimpleImputer
from sklearn.metrics import accuracy_score as sk_accuracy_score
from sklearn.metrics import balanced_accuracy_score as sk_balanced_accuracy_score
from sklearn.metrics import f1_score as sk_f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score as sk_r2_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score as sk_cross_val_score
from sklearn.model_selection import cross_validate as sk_cross_validate
from sklearn.pipeline import make_pipeline as sk_make_pipeline
from sklearn.preprocessing import MinMaxScaler as SkMinMaxScaler
from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder
from sklearn.preprocessing import OrdinalEncoder as SkOrdinalEncoder
from sklearn.preprocessing import StandardScaler as SkStandardScaler
from sklearn.preprocessing import scale as sk_scale
import lale.datasets
import lale.datasets.openml
import lale.datasets.openml.openml_datasets
import lale.lib.aif360
import lale.type_checking
from lale.datasets import pandas2spark
from lale.datasets.data_schemas import (
SparkDataFrameWithIndex,
add_table_name,
forward_metadata,
get_index_name,
)
from lale.datasets.multitable.fetch_datasets import fetch_go_sales_dataset
from lale.expressions import it
from lale.helpers import _ensure_pandas, create_data_loader, datatype_param_type
from lale.lib.category_encoders import TargetEncoder as SkTargetEncoder
from lale.lib.lightgbm import LGBMClassifier, LGBMRegressor
from lale.lib.rasl import BatchedBaggingClassifier, ConcatFeatures, Convert
from lale.lib.rasl import HashingEncoder as RaslHashingEncoder
from lale.lib.rasl import Map
from lale.lib.rasl import MinMaxScaler as RaslMinMaxScaler
from lale.lib.rasl import OneHotEncoder as RaslOneHotEncoder
from lale.lib.rasl import OrdinalEncoder as RaslOrdinalEncoder
from lale.lib.rasl import PrioBatch, PrioStep, Project, Scan
from lale.lib.rasl import SelectKBest as RaslSelectKBest
from lale.lib.rasl import SimpleImputer as RaslSimpleImputer
from lale.lib.rasl import StandardScaler as RaslStandardScaler
from lale.lib.rasl import TargetEncoder as RaslTargetEncoder
from lale.lib.rasl import accuracy_score as rasl_accuracy_score
from lale.lib.rasl import balanced_accuracy_score as rasl_balanced_accuracy_score
from lale.lib.rasl import categorical
from lale.lib.rasl import cross_val_score as rasl_cross_val_score
from lale.lib.rasl import cross_validate as rasl_cross_validate
from lale.lib.rasl import csv_data_loader
from lale.lib.rasl import f1_score as rasl_f1_score
from lale.lib.rasl import fit_with_batches
from lale.lib.rasl import get_scorer as rasl_get_scorer
from lale.lib.rasl import mockup_data_loader, openml_data_loader
from lale.lib.rasl import r2_score as rasl_r2_score
from lale.lib.rasl.standard_scaler import scale as rasl_scale
from lale.lib.sklearn import (
DecisionTreeClassifier,
LinearRegression,
LogisticRegression,
RandomForestClassifier,
SGDClassifier,
)
from lale.lib.xgboost import XGBClassifier, XGBRegressor
from lale.operators import TrainedPipeline
# Guard: these tests assume sklearn >= 1.0.  Note this is a lexicographic
# string comparison; it happens to be correct for the "1.0" threshold with
# current sklearn version strings, but is not a general version comparison.
assert sklearn.__version__ >= "1.0", sklearn.__version__
class TestDatasets(unittest.TestCase):
    """Batched data loaders: each yields (X, y) batches that cover all rows."""

    def test_openml_creditg_arff(self):
        # credit-g has 1000 rows and 20 features; with batch size 340 the
        # loader must yield 3 batches (340 + 340 + 320).
        batches = openml_data_loader("credit-g", 340)
        n_rows_found = 0
        n_batches_found = 0
        for bX, by in batches:
            n_batches_found += 1
            n_rows_batch, n_columns_batch = bX.shape
            n_rows_found += n_rows_batch
            self.assertEqual(n_rows_batch, len(by))
            self.assertEqual(n_columns_batch, 20)
        self.assertEqual(n_batches_found, 3)
        self.assertEqual(n_rows_found, 1000)

    def test_autoai_creditg_csv(self):
        # Downloads the CSV into a temp dir; requires network access.
        with tempfile.TemporaryDirectory() as tmpdir_name:
            url = "https://raw.githubusercontent.com/pmservice/wml-sample-models/master/autoai/credit-risk-prediction/data/german_credit_data_biased_training.csv"
            file_name = os.path.join(tmpdir_name, "credit-g.csv")
            # this request is to a hardcoded https url, so does not risk leaking local data
            urllib.request.urlretrieve(url, file_name)  # nosec
            assert os.path.exists(file_name)
            n_rows = 5000
            n_batches = 3
            # Ceiling division so the batches cover all rows.
            rows_per_batch = (n_rows + n_batches - 1) // n_batches
            batches = csv_data_loader(file_name, "Risk", rows_per_batch)
            n_rows_found = 0
            n_batches_found = 0
            for bX, by in batches:
                n_batches_found += 1
                n_rows_batch, n_columns_batch = bX.shape
                n_rows_found += n_rows_batch
                self.assertEqual(n_rows_batch, len(by))
                self.assertEqual(n_columns_batch, 20)
            self.assertEqual(n_batches_found, n_batches)
            self.assertEqual(n_rows_found, n_rows)
def _check_trained_min_max_scaler(test, op1, op2, msg):
    """Assert that two trained MinMaxScaler-like objects learned the same state.

    ``test`` is the calling TestCase; ``msg`` is forwarded to every assertion.
    """
    # Array-valued learned attributes: compare as plain lists, in the same
    # order the attributes are learned.
    for attr in ("data_min_", "data_max_", "data_range_", "scale_", "min_"):
        test.assertEqual(list(getattr(op1, attr)), list(getattr(op2, attr)), msg)
    # Scalar learned attributes.
    for attr in ("n_features_in_", "n_samples_seen_"):
        test.assertEqual(getattr(op1, attr), getattr(op2, attr), msg)
class TestMinMaxScaler(unittest.TestCase):
    """Compare RaslMinMaxScaler against sklearn's MinMaxScaler on pandas and Spark."""

    @classmethod
    def setUpClass(cls):
        # Fetch the Go Sales dataset once per backend; tests only read it.
        targets: List[datatype_param_type] = ["pandas", "spark"]
        cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}

    def test_get_params(self):
        """The RASL operator must expose at least all of sklearn's hyperparameters."""
        sk_scaler = SkMinMaxScaler()
        rasl_scaler = RaslMinMaxScaler()
        sk_params = sk_scaler.get_params()
        rasl_params = rasl_scaler.get_params()
        # assertDictContainsSubset was deprecated in Python 3.2 and removed in
        # Python 3.12; spell out "sk_params is a subset of rasl_params" instead.
        self.assertEqual(rasl_params, {**rasl_params, **sk_params})

    def test_error(self):
        """Schema validation must reject copy=False but accept clip=True."""
        _ = RaslMinMaxScaler(clip=True)  # should raise no error
        with self.assertRaisesRegex(
            jsonschema.ValidationError,
            re.compile(r"MinMaxScaler\(copy=False\)", re.MULTILINE | re.DOTALL),
        ):
            _ = RaslMinMaxScaler(copy=False)

    def test_fit(self):
        """Fitting on each backend must learn the same statistics as sklearn."""
        columns = ["Product number", "Quantity", "Retailer code"]
        pandas_data = self.tgt2datasets["pandas"][0][columns]
        sk_scaler = SkMinMaxScaler()
        sk_trained = sk_scaler.fit(pandas_data)
        rasl_scaler = RaslMinMaxScaler()
        for tgt, go_sales in self.tgt2datasets.items():
            data = go_sales[0][columns]
            if tgt == "spark":
                data = SparkDataFrameWithIndex(data)
            rasl_trained = rasl_scaler.fit(data)
            # Fixed: the message argument used to be hard-coded to "pandas",
            # which mislabeled failures on the spark backend.
            _check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, tgt)

    def test_transform(self):
        """Transformed values must match sklearn at spot-checked cells."""
        columns = ["Product number", "Quantity", "Retailer code"]
        pandas_data = self.tgt2datasets["pandas"][0][columns]
        sk_scaler = SkMinMaxScaler()
        sk_trained = sk_scaler.fit(pandas_data)
        sk_transformed = sk_trained.transform(pandas_data)
        rasl_scaler = RaslMinMaxScaler()
        for tgt, go_sales in self.tgt2datasets.items():
            data = go_sales[0][columns]
            if tgt == "spark":
                data = SparkDataFrameWithIndex(data)
            rasl_trained = rasl_scaler.fit(data)
            rasl_transformed = rasl_trained.transform(data)
            if tgt == "spark":
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            # Spot-check a few rows instead of comparing the whole frame;
            # same cells as before, folded into loops to avoid repetition.
            for row_idx in [0, 10, 20]:
                for col_idx in [0, 1, 2]:
                    self.assertAlmostEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                    )

    def test_transform_clipped(self):
        """Same as test_transform but with clip=True."""
        columns = ["Product number", "Quantity", "Retailer code"]
        pandas_data = self.tgt2datasets["pandas"][0][columns]
        sk_scaler = SkMinMaxScaler(clip=True)
        sk_trained = sk_scaler.fit(pandas_data)
        sk_transformed = sk_trained.transform(pandas_data)
        rasl_scaler = RaslMinMaxScaler(clip=True)
        for tgt, go_sales in self.tgt2datasets.items():
            data = go_sales[0][columns]
            if tgt == "spark":
                data = SparkDataFrameWithIndex(data)
            rasl_trained = rasl_scaler.fit(data)
            rasl_transformed = rasl_trained.transform(data)
            if tgt == "spark":
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            for row_idx in [0, 10, 20]:
                for col_idx in [0, 1, 2]:
                    self.assertAlmostEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                    )

    def test_zero_scale(self):
        """A single-value column (zero data range) must not divide by zero."""
        pandas_data = pd.DataFrame({"a": [0.5]})
        sk_scaler = SkMinMaxScaler()
        sk_trained = sk_scaler.fit(pandas_data)
        sk_transformed = sk_trained.transform(pandas_data)
        rasl_scaler = RaslMinMaxScaler()
        for tgt, _ in self.tgt2datasets.items():
            data = Convert(astype=tgt).transform(pandas_data)
            rasl_trained = rasl_scaler.fit(data)
            rasl_transformed = rasl_trained.transform(data)
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertAlmostEqual(sk_transformed[0, 0], rasl_transformed.iloc[0, 0])

    def test_fit_range(self):
        """A non-default feature_range must learn the same statistics as sklearn."""
        columns = ["Product number", "Quantity", "Retailer code"]
        pandas_data = self.tgt2datasets["pandas"][0][columns]
        sk_scaler = SkMinMaxScaler(feature_range=(-5, 5))
        sk_trained = sk_scaler.fit(pandas_data)
        for tgt, go_sales in self.tgt2datasets.items():
            data = go_sales[0][columns]
            if tgt == "spark":
                data = SparkDataFrameWithIndex(data)
            rasl_scaler = RaslMinMaxScaler(feature_range=(-5, 5))
            rasl_trained = rasl_scaler.fit(data)
            # Fixed: message argument was hard-coded to "pandas" (see test_fit).
            _check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, tgt)

    def test_transform_range(self):
        """Transform with a non-default feature_range, spot-checked cells."""
        columns = ["Product number", "Quantity", "Retailer code"]
        pandas_data = self.tgt2datasets["pandas"][0][columns]
        sk_scaler = SkMinMaxScaler(feature_range=(-5, 5))
        sk_trained = sk_scaler.fit(pandas_data)
        sk_transformed = sk_trained.transform(pandas_data)
        rasl_scaler = RaslMinMaxScaler(feature_range=(-5, 5))
        for tgt, go_sales in self.tgt2datasets.items():
            data = go_sales[0][columns]
            if tgt == "spark":
                data = SparkDataFrameWithIndex(data)
            rasl_trained = rasl_scaler.fit(data)
            rasl_transformed = rasl_trained.transform(data)
            rasl_transformed = _ensure_pandas(rasl_transformed)
            for row_idx in [0, 10, 20]:
                for col_idx in [0, 1, 2]:
                    self.assertAlmostEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                    )

    def test_partial_fit(self):
        """Incremental fitting on batches must match sklearn fit on the prefix."""
        columns = ["Product number", "Quantity", "Retailer code"]
        data = self.tgt2datasets["pandas"][0][columns]
        for tgt in self.tgt2datasets.keys():
            sk_scaler = SkMinMaxScaler()
            rasl_scaler = RaslMinMaxScaler()
            for lower, upper in [[0, 10], [10, 100], [100, data.shape[0]]]:
                data_so_far = data[0:upper]
                data_delta = data[lower:upper]
                if tgt == "pandas":
                    pass
                elif tgt == "spark":
                    data_delta = pandas2spark(data_delta)
                else:
                    assert False
                # sklearn re-fits on all data so far; RASL only sees the delta.
                sk_trained = sk_scaler.fit(data_so_far)
                rasl_trained = rasl_scaler.partial_fit(data_delta)
                _check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, tgt)
class TestPipeline(unittest.TestCase):
    """End-to-end check that a RASL scaler composes with sklearn estimators."""

    @classmethod
    def setUpClass(cls):
        from sklearn.datasets import load_iris
        from sklearn.model_selection import train_test_split

        cls.tgt2datasets = {"pandas": {}, "spark": {}}

        def register(name, frame):
            # keep a pandas copy and a spark copy under the same key
            cls.tgt2datasets["pandas"][name] = frame
            cls.tgt2datasets["spark"][name] = pandas2spark(frame)

        X, y = load_iris(as_frame=True, return_X_y=True)
        splits = train_test_split(X, y)
        for name, frame in zip(["X_train", "X_test", "y_train", "y_test"], splits):
            register(name, frame)

    def test_pipeline(self):
        for _tgt, datasets in self.tgt2datasets.items():
            pipeline = (
                RaslMinMaxScaler() >> Convert(astype="pandas") >> LogisticRegression()
            )
            # labels always come from the pandas copy
            y_train = self.tgt2datasets["pandas"]["y_train"]
            trained = pipeline.fit(datasets["X_train"], y_train)
            # predict twice on purpose: the second call must also succeed
            _ = trained.predict(datasets["X_test"])
            _ = trained.predict(datasets["X_test"])
def _check_trained_select_k_best(self, sk_trained, rasl_trained, msg=""):
self.assertEqual(len(sk_trained.scores_), len(rasl_trained.impl.scores_))
self.assertEqual(len(sk_trained.scores_), len(sk_trained.pvalues_))
self.assertEqual(len(sk_trained.scores_), len(rasl_trained.impl.pvalues_))
for i, (sk_score, rasl_score, sk_pvalue, rasl_pvalue) in enumerate(
zip(
sk_trained.scores_,
rasl_trained.impl.scores_,
sk_trained.pvalues_,
rasl_trained.impl.pvalues_,
)
):
if not (np.isnan(sk_score) and np.isnan(rasl_score)):
self.assertAlmostEqual(
sk_score,
rasl_score,
msg=f"{msg}: {i}",
)
if not (np.isnan(sk_pvalue) and np.isnan(rasl_pvalue)):
self.assertAlmostEqual(
sk_pvalue,
rasl_pvalue,
msg=f"{msg}: {i}",
)
self.assertEqual(sk_trained.n_features_in_, rasl_trained.impl.n_features_in_, msg)
class TestSelectKBest(unittest.TestCase):
    """Compare RaslSelectKBest against sklearn's SelectKBest on the digits data."""

    @classmethod
    def setUpClass(cls):
        from sklearn.datasets import load_digits

        cls.tgt2datasets = {"pandas": {}, "spark": {}}

        def add_df(name, df):
            cls.tgt2datasets["pandas"][name] = df
            cls.tgt2datasets["spark"][name] = pandas2spark(df)

        X, y = load_digits(return_X_y=True, as_frame=True)
        add_df("X", add_table_name(X, "X"))
        add_df("y", add_table_name(y, "y"))

    def test_fit(self):
        """Fitting on each backend must learn the same scores as sklearn."""
        pandas_X = self.tgt2datasets["pandas"]["X"]
        pandas_y = self.tgt2datasets["pandas"]["y"]
        sk_trained = SkSelectKBest(k=20).fit(pandas_X, pandas_y)
        rasl_trainable = RaslSelectKBest(k=20)
        for tgt, datasets in self.tgt2datasets.items():
            rasl_trained = rasl_trainable.fit(datasets["X"], datasets["y"])
            _check_trained_select_k_best(self, sk_trained, rasl_trained, tgt)

    def test_transform(self):
        """Transformed values must match sklearn cell-for-cell on every backend."""
        pandas_X = self.tgt2datasets["pandas"]["X"]
        pandas_y = self.tgt2datasets["pandas"]["y"]
        sk_trained = SkSelectKBest(k=20).fit(pandas_X, pandas_y)
        sk_transformed = sk_trained.transform(pandas_X)
        rasl_trainable = RaslSelectKBest(k=20)
        for tgt, datasets in self.tgt2datasets.items():
            rasl_trained = rasl_trainable.fit(datasets["X"], datasets["y"])
            rasl_transformed = rasl_trained.transform(datasets["X"])
            _check_trained_select_k_best(self, sk_trained, rasl_trained, tgt)
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            n_rows, n_cols = sk_transformed.shape
            for r in range(n_rows):
                for c in range(n_cols):
                    self.assertAlmostEqual(
                        sk_transformed[r, c],
                        rasl_transformed.iloc[r, c],
                        msg=(r, c),
                    )

    def test_partial_fit(self):
        """Incremental fitting on batches must match sklearn fit on the prefix."""
        rasl_trainable = RaslSelectKBest(k=20)
        X = self.tgt2datasets["pandas"]["X"]
        y = self.tgt2datasets["pandas"]["y"]
        for lower, upper in [(0, 100), (100, 200), (200, X.shape[0])]:
            # sklearn re-fits on everything seen so far ...
            sk_trained = SkSelectKBest(k=20).fit(X[0:upper], y[0:upper])
            # ... while the RASL operator only sees the new delta.
            rasl_trained = rasl_trainable.partial_fit(X[lower:upper], y[lower:upper])
            _check_trained_select_k_best(
                self, sk_trained, rasl_trained, f"lower: {lower}, upper: {upper}"
            )
def _check_trained_ordinal_encoder(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
test.assertEqual(len(op1.categories_), len(op2.categories_), msg)
for cat1, cat2 in zip(op1.categories_, op2.categories_):
test.assertEqual(len(cat1), len(cat2), msg)
for num1, num2 in zip(cat1, cat2):
if isinstance(num1, numbers.Number) and math.isnan(num2):
test.assertTrue(math.isnan(num2), msg)
else:
test.assertEqual(num1, num2, msg)
class TestOrdinalEncoder(unittest.TestCase):
    """Compare RaslOrdinalEncoder to sklearn's OrdinalEncoder on pandas and Spark data."""

    @classmethod
    def setUpClass(cls):
        # Fetch both datasets once per backend; the tests only read them.
        targets: List[datatype_param_type] = ["pandas", "spark"]
        cls.tgt2gosales = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
        cls.tgt2creditg = {
            tgt: lale.datasets.openml.fetch(
                "credit-g",
                "classification",
                preprocess=False,
                astype=tgt,
            )
            for tgt in targets
        }

    def _check_last_trained(self, op1, op2, msg):
        # Compare the trained encoders sitting at the end of two pipelines.
        _check_trained_ordinal_encoder(
            self, op1.get_last().impl, op2.get_last().impl, msg
        )

    def test_fit(self):
        """Fitting on each backend must learn the same categories as sklearn."""
        prefix = Scan(table=it.go_daily_sales) >> Map(
            columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
        )
        encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
        rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args)
        sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args)
        sk_trained = sk_trainable.fit(self.tgt2gosales["pandas"])
        for tgt, datasets in self.tgt2gosales.items():
            rasl_trained = rasl_trainable.fit(datasets)
            self._check_last_trained(sk_trained, rasl_trained, tgt)

    def test_partial_fit(self):
        """Incremental fitting on growing batches must match sklearn on the prefix."""
        prefix = Scan(table=it.go_daily_sales) >> Map(
            columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
        )
        pandas_data = prefix.transform(self.tgt2gosales["pandas"])
        encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
        for tgt in self.tgt2gosales.keys():
            rasl_op = RaslOrdinalEncoder(**encoder_args)
            for lower, upper in [[0, 10], [10, 100], [100, pandas_data.shape[0]]]:
                # sklearn re-fits from scratch on all data seen so far ...
                data_so_far = pandas_data[0:upper]
                sk_op = SkOrdinalEncoder(**encoder_args).fit(data_so_far)
                # ... while the RASL encoder only sees the new delta batch.
                data_delta = pandas_data[lower:upper]
                if tgt == "pandas":
                    pass
                elif tgt == "spark":
                    data_delta = pandas2spark(data_delta)
                else:
                    assert False
                rasl_op = rasl_op.partial_fit(data_delta)
                _check_trained_ordinal_encoder(
                    self,
                    sk_op,
                    rasl_op.impl,
                    f"tgt {tgt}, lower {lower}, upper {upper}",
                )

    def test_transform(self):
        """Transformed values must match sklearn cell-for-cell on every backend."""
        prefix = Scan(table=it.go_daily_sales) >> Map(
            columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
        )
        encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
        rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args)
        sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args)
        sk_trained = sk_trainable.fit(self.tgt2gosales["pandas"])
        sk_transformed = sk_trained.transform(self.tgt2gosales["pandas"])
        for tgt, datasets in self.tgt2gosales.items():
            rasl_trained = rasl_trainable.fit(datasets)
            self._check_last_trained(sk_trained, rasl_trained, tgt)
            rasl_transformed = rasl_trained.transform(datasets)
            if tgt == "spark":
                # Spark results must carry the synthetic "index" column.
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        (row_idx, col_idx, tgt),
                    )

    def test_predict(self):
        """End-to-end pipeline predictions must match the sklearn pipeline."""
        (train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        to_pd = Convert(astype="pandas")
        lr = LogisticRegression()
        encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": -1}
        sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args) >> lr
        sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
        sk_predicted = sk_trained.predict(test_X_pd)
        rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args) >> to_pd >> lr
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_predicted = rasl_trained.predict(test_X)
            self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
            self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def _check_trained_one_hot_encoder(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
test.assertEqual(len(op1.categories_), len(op2.categories_), msg)
for cat1, cat2 in zip(op1.categories_, op2.categories_):
test.assertEqual(list(cat1), list(cat2), msg)
class TestOneHotEncoder(unittest.TestCase):
    """Compare RaslOneHotEncoder to sklearn's OneHotEncoder on pandas and Spark data."""

    @classmethod
    def setUpClass(cls):
        targets = ["pandas", "spark"]
        cls.tgt2creditg = cast(
            Dict[str, Any],
            {
                tgt: lale.datasets.openml.fetch(
                    "credit-g",
                    "classification",
                    preprocess=False,
                    astype=tgt,
                )
                for tgt in targets
            },
        )

    def _check_last_trained(self, op1, op2, msg):
        # Compare the trained encoders sitting at the end of two pipelines.
        _check_trained_one_hot_encoder(
            self, op1.get_last().impl, op2.get_last().impl, msg
        )

    def test_fit(self):
        """Fitting on each backend must learn the same categories as sklearn."""
        (train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        rasl_trainable = prefix >> RaslOneHotEncoder()
        sk_trainable = prefix >> SkOneHotEncoder()
        sk_trained = sk_trainable.fit(train_X_pd)
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _train_y), (_test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            self._check_last_trained(sk_trained, rasl_trained, tgt)

    def test_partial_fit(self):
        """Incremental fitting on growing batches must match sklearn on the prefix."""
        (train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        for tgt in self.tgt2creditg.keys():
            rasl_pipe = prefix >> RaslOneHotEncoder()
            for lower, upper in [[0, 10], [10, 100], [100, train_X_pd.shape[0]]]:
                data_so_far = train_X_pd[0:upper]
                # Fixed: this used to compare against SkOrdinalEncoder, which
                # only worked because OrdinalEncoder happens to expose the
                # same categories_ attribute; the reference for a one-hot
                # test should be SkOneHotEncoder.
                sk_pipe = prefix >> SkOneHotEncoder()
                sk_pipe = sk_pipe.fit(data_so_far)
                data_delta = train_X_pd[lower:upper]
                if tgt == "pandas":
                    pass
                elif tgt == "spark":
                    data_delta = pandas2spark(data_delta)
                else:
                    assert False
                rasl_pipe = rasl_pipe.partial_fit(data_delta)
                self._check_last_trained(
                    sk_pipe,
                    rasl_pipe,
                    (tgt, lower, upper),
                )

    def test_transform(self):
        """Dense one-hot output must match sklearn cell-for-cell on every backend."""
        (train_X_pd, _train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        # NOTE(review): `sparse` was renamed `sparse_output` in sklearn 1.2 --
        # this spelling pins the test to the older API; confirm against the
        # sklearn version range the project supports.
        rasl_trainable = prefix >> RaslOneHotEncoder(sparse=False)
        sk_trainable = prefix >> SkOneHotEncoder(sparse=False)
        sk_trained = sk_trainable.fit(train_X_pd)
        sk_transformed = sk_trained.transform(test_X_pd)
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            self._check_last_trained(sk_trained, rasl_trained, tgt)
            rasl_transformed = rasl_trained.transform(test_X)
            if tgt == "spark":
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        (row_idx, col_idx, tgt),
                    )

    def test_predict(self):
        """End-to-end pipeline predictions must match the sklearn pipeline."""
        (train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        to_pd = Convert(astype="pandas")
        lr = LogisticRegression()
        sk_trainable = prefix >> SkOneHotEncoder(sparse=False) >> lr
        sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
        sk_predicted = sk_trained.predict(test_X_pd)
        rasl_trainable = prefix >> RaslOneHotEncoder(sparse=False) >> to_pd >> lr
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_predicted = rasl_trained.predict(test_X)
            self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
            self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def _get_feature_names_out(op):
"""later version of category_encoder's HashingEncoder changed the attribute name"""
fnames = getattr(op, "feature_names", None)
if fnames is not None:
return fnames
fnames = getattr(op, "get_feature_names_out", None)
assert fnames is not None
return fnames()
def _check_trained_hashing_encoder(test, op1, op2, msg):
    """Assert that two trained hashing encoders produce the same feature names."""
    names1 = list(_get_feature_names_out(op1))
    names2 = list(_get_feature_names_out(op2))
    test.assertEqual(names1, names2, msg)
class TestHashingEncoder(unittest.TestCase):
    """Compare RaslHashingEncoder to category_encoders' HashingEncoder."""

    @classmethod
    def setUpClass(cls):
        # Fetch the credit-g dataset once per backend; tests only read it.
        targets = ["pandas", "spark"]
        cls.tgt2creditg = cast(
            Dict[str, Any],
            {
                tgt: lale.datasets.openml.fetch(
                    "credit-g",
                    "classification",
                    preprocess=False,
                    astype=tgt,
                )
                for tgt in targets
            },
        )

    def _check_last_trained(self, op1, op2, msg):
        # Compare the trained encoders sitting at the end of two pipelines.
        _check_trained_hashing_encoder(
            self, op1.get_last().impl, op2.get_last().impl, msg
        )

    def test_fit(self):
        """Fitting on each backend must produce the same output feature names."""
        (train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        rasl_trainable = prefix >> RaslHashingEncoder()
        sk_trainable = prefix >> SkHashingEncoder()
        sk_trained = sk_trainable.fit(train_X_pd)
        # TODO: test with multiple batches
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _train_y), (_test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            self._check_last_trained(sk_trained, rasl_trained, tgt)

    def test_transform(self):
        """Transformed values must match category_encoders cell-for-cell."""
        (train_X_pd, _train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        rasl_trainable = prefix >> RaslHashingEncoder()
        sk_trainable = prefix >> SkHashingEncoder()
        sk_trained = sk_trainable.fit(train_X_pd)
        sk_transformed = sk_trained.transform(test_X_pd)
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            self._check_last_trained(sk_trained, rasl_trained, tgt)
            rasl_transformed = rasl_trained.transform(test_X)
            if tgt == "spark":
                # Spark results must carry the synthetic "index" column.
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertEqual(
                        sk_transformed.iloc[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        (row_idx, col_idx, tgt),
                    )

    def test_predict(self):
        """End-to-end pipeline predictions must match the reference pipeline."""
        (train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        cat_columns = categorical()(train_X_pd)
        prefix = Map(columns={c: it[c] for c in cat_columns})
        to_pd = Convert(astype="pandas")
        lr = LogisticRegression()
        sk_trainable = prefix >> SkHashingEncoder() >> lr
        sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
        sk_predicted = sk_trained.predict(test_X_pd)
        rasl_trainable = prefix >> RaslHashingEncoder() >> to_pd >> lr
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_predicted = rasl_trained.predict(test_X)
            self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
            self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def _check_trained_target_encoder(test, op1, op2, msg):
    # op1 is a trained category_encoders TargetEncoder impl; op2 is the
    # trained RASL TargetEncoder impl with _col2cat2value/_prior internals.
    names1, names2 = _get_feature_names_out(op1), _get_feature_names_out(op2)
    test.assertListEqual(list(names1), list(names2), msg)
    test.assertSequenceEqual(op1.mapping.keys(), op2._col2cat2value.keys(), msg)
    for col in op1.mapping.keys():
        op1_cat2val = op1.mapping[col].to_dict()
        op2_cat2val = op2._col2cat2value[col]
        # NOTE(review): op1's per-column mapping appears to be keyed by
        # ordinal codes 1..n plus the special keys -1 and -2, with -1
        # holding the prior -- TODO confirm against category_encoders'
        # TargetEncoder internals.
        expected_keys = list(range(1, 1 + len(op2_cat2val))) + [-1, -2]
        test.assertListEqual(list(op1_cat2val.keys()), expected_keys, (col, msg))
        test.assertAlmostEqual(op1_cat2val[-1], op2._prior, msg=(col, msg))
        for i, cat in enumerate(op2_cat2val.keys()):
            # category at insertion position i corresponds to ordinal code i + 1
            test.assertAlmostEqual(
                op1_cat2val[i + 1], op2_cat2val[cat], msg=(col, i, cat, msg)
            )
class TestTargetEncoder(unittest.TestCase):
    """Compare RaslTargetEncoder to category_encoders' TargetEncoder.

    Runs over three openml datasets covering regression, binary, and
    multi-class classification, each fetched for both backends.
    """

    @classmethod
    def setUpClass(cls):
        targets = ["pandas", "spark"]
        dataset_names = [
            "tae",  # 3-class classification
            "cloud",  # regression
            "credit-g",  # binary classification
        ]
        experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
        cls.datasets = cast(
            Dict[Tuple[str, str], Any],
            {
                (tgt, dataset_name): lale.datasets.openml.fetch(
                    dataset_name,
                    experiments_dict[dataset_name]["task_type"],
                    preprocess=False,
                    astype=tgt,
                )
                for tgt, dataset_name in itertools.product(targets, dataset_names)
            },
        )

    def test_fit(self):
        """Fitting on each (backend, dataset) pair must match category_encoders."""
        for (tgt, dataset_name), dataset in self.datasets.items():
            (train_X, train_y), (_, _) = dataset
            (train_X_pd, train_y_pd), (_, _) = self.datasets["pandas", dataset_name]
            sk_trainable = SkTargetEncoder()
            sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
            experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
            if experiments_dict[dataset_name]["task_type"] == "regression":
                rasl_trainable = RaslTargetEncoder()
            else:
                # classification needs the class list passed up front
                classes = sorted(list(train_y_pd.unique()))
                rasl_trainable = RaslTargetEncoder(classes=classes)
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            _check_trained_target_encoder(
                self, sk_trained.impl, rasl_trained.impl, (tgt, dataset_name)
            )

    def test_partial_fit(self):
        """Batched partial_fit must converge to the same state as a single fit."""
        for (tgt, dataset_name), dataset in self.datasets.items():
            # only exercised on pandas here
            if tgt != "pandas":
                continue
            (train_X, train_y), (_, _) = dataset
            experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
            if experiments_dict[dataset_name]["task_type"] == "regression":
                classes = None
            else:
                classes = sorted(list(_ensure_pandas(train_y).unique()))
            rasl_trainable = RaslTargetEncoder(classes=classes)
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_batched = RaslTargetEncoder(classes=classes)
            # feed the same data in 3 batches and compare the internals
            for batch_X, batch_y in mockup_data_loader(train_X, train_y, 3, tgt):
                rasl_batched = rasl_batched.partial_fit(batch_X, batch_y)
            op1, op2 = rasl_trained.impl, rasl_batched.impl
            self.assertAlmostEqual(op1._prior, op2._prior, msg=(tgt, dataset_name))
            self.assertSequenceEqual(
                op1._col2cat2value.keys(),
                op2._col2cat2value.keys(),
                msg=(tgt, dataset_name),
            )
            for col in op1._col2cat2value:
                self.assertSequenceEqual(
                    op1._col2cat2value[col].keys(),
                    op2._col2cat2value[col].keys(),
                    msg=(tgt, dataset_name, col),
                )
                for cat in op1._col2cat2value[col]:
                    self.assertAlmostEqual(
                        op1._col2cat2value[col][cat],
                        op2._col2cat2value[col][cat],
                        msg=(tgt, dataset_name, col, cat),
                    )

    def test_transform(self):
        """Transformed values must match category_encoders cell-for-cell."""
        for (tgt, dataset_name), dataset in self.datasets.items():
            (train_X, train_y), (test_X, _) = dataset
            (train_X_pd, train_y_pd), (test_X_pd, _) = self.datasets[
                "pandas", dataset_name
            ]
            sk_trainable = SkTargetEncoder()
            sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
            sk_transformed = sk_trained.transform(test_X_pd)
            experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
            if experiments_dict[dataset_name]["task_type"] == "regression":
                rasl_trainable = RaslTargetEncoder(classes=None)
            else:
                classes = sorted(list(train_y_pd.unique()))
                rasl_trainable = RaslTargetEncoder(classes=classes)
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            _check_trained_target_encoder(
                self, sk_trained.impl, rasl_trained.impl, (tgt, dataset_name)
            )
            rasl_transformed = rasl_trained.transform(test_X)
            if tgt == "spark":
                # Spark results must carry the synthetic "index" column.
                self.assertEqual(
                    get_index_name(rasl_transformed), "index", (tgt, dataset_name)
                )
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(
                sk_transformed.shape, rasl_transformed.shape, (tgt, dataset_name)
            )
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertEqual(
                        sk_transformed.iloc[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        (tgt, dataset_name, row_idx, col_idx),
                    )

    def test_predict(self):
        """End-to-end pipeline predictions must match the reference pipeline."""
        for (tgt, dataset_name), dataset in self.datasets.items():
            (train_X, train_y), (test_X, _) = dataset
            (train_X_pd, train_y_pd), (test_X_pd, _) = self.datasets[
                "pandas", dataset_name
            ]
            experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
            if experiments_dict[dataset_name]["task_type"] == "regression":
                classes = None
                est = LinearRegression()
            else:
                classes = sorted(list(train_y_pd.unique()))
                est = LogisticRegression()
            sk_trainable = SkTargetEncoder() >> est
            sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
            sk_predicted = sk_trained.predict(test_X_pd)
            rasl_trainable = (
                RaslTargetEncoder(classes=classes) >> Convert(astype="pandas") >> est
            )
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_predicted = rasl_trained.predict(test_X)
            self.assertEqual(
                sk_predicted.shape, rasl_predicted.shape, (tgt, dataset_name)
            )
            self.assertListEqual(
                sk_predicted.tolist(), rasl_predicted.tolist(), (tgt, dataset_name)
            )
def _check_trained_simple_imputer(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
if hasattr(op1, "statistics_"):
test.assertEqual(len(op1.statistics_), len(op2.statistics_), msg)
for stats1, stats2 in zip(op1.statistics_, op2.statistics_):
test.assertEqual(stats1, stats2, msg)
if hasattr(op1, "n_features_in_"):
test.assertEqual(op1.n_features_in_, op2.n_features_in_, msg)
if hasattr(op1, "indicator_"):
test.assertEqual(op1.indicator_, op2.indicator_, msg)
class TestSimpleImputer(unittest.TestCase):
def setUp(self):
targets = ["pandas", "spark"]
self.tgt2adult = {
tgt: lale.datasets.openml.fetch(
"adult",
"classification",
preprocess=False,
astype=tgt,
)
for tgt in targets
}
    def _fill_missing_value(self, col_name, value, missing_value):
        """Replace every `value` in column `col_name` with `missing_value`.

        Applied to both the train and test splits of every backend, and the
        updated splits are stored back into self.tgt2adult.
        """
        for tgt, datasets in self.tgt2adult.items():
            (train_X, train_y), (test_X, test_y) = datasets
            if tgt == "pandas":
                # pandas: in-place boolean-mask assignment
                train_X.loc[
                    train_X[col_name] == value, col_name
                ] = missing_value  # type:ignore
                test_X.loc[
                    test_X[col_name] == value, col_name
                ] = missing_value  # type:ignore
            elif tgt == "spark":
                from pyspark.sql.functions import col, when
                # spark: DataFrames are immutable, so build new frames ...
                train_X_new = train_X.withColumn(
                    col_name,
                    when(col(col_name) == value, missing_value).otherwise(
                        col(col_name)
                    ),
                )
                test_X_new = test_X.withColumn(
                    col_name,
                    when(col(col_name) == value, missing_value).otherwise(
                        col(col_name)
                    ),
                )
                # ... and carry over the wrapper metadata (e.g. index info)
                train_X = forward_metadata(train_X, train_X_new)
                test_X = forward_metadata(test_X, test_X_new)
            else:
                assert False
            self.tgt2adult[tgt] = (train_X, train_y), (test_X, test_y)
    def test_fit_transform_numeric_nan_missing(self):
        """Impute NaN-marked missing values; statistics and output must match sklearn."""
        # mark one common age value as missing across all backends
        self._fill_missing_value("age", 36.0, np.nan)
        num_columns = ["age", "fnlwgt", "education-num"]
        prefix = Map(columns={c: it[c] for c in num_columns})
        hyperparams = [
            {"strategy": "mean"},
            {"strategy": "median"},
            {"strategy": "most_frequent"},
            {"strategy": "constant", "fill_value": 99},
        ]
        for hyperparam in hyperparams:
            rasl_imputer = RaslSimpleImputer(**hyperparam)
            sk_imputer = SkSimpleImputer(**hyperparam)
            rasl_trainable = prefix >> rasl_imputer
            sk_trainable = prefix >> sk_imputer
            sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
            sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
            sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
            for tgt, dataset in self.tgt2adult.items():
                (train_X, _), (test_X, _) = dataset
                rasl_trained = rasl_trainable.fit(train_X)
                # test the fit succeeded.
                rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
                self.assertEqual(
                    len(sk_statistics_), len(rasl_statistics_), (hyperparam, tgt)
                )
                for i in range(sk_statistics_.shape[0]):
                    self.assertAlmostEqual(
                        sk_statistics_[i], rasl_statistics_[i], msg=(i, hyperparam, tgt)
                    )
                rasl_transformed = rasl_trained.transform(test_X)
                if tgt == "spark":
                    # Spark results must carry the synthetic "index" column.
                    self.assertEqual(get_index_name(rasl_transformed), "index")
                rasl_transformed = _ensure_pandas(rasl_transformed)
                self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
                for row_idx in range(sk_transformed.shape[0]):
                    for col_idx in range(sk_transformed.shape[1]):
                        self.assertAlmostEqual(
                            sk_transformed[row_idx, col_idx],
                            rasl_transformed.iloc[row_idx, col_idx],
                            msg=(row_idx, col_idx, tgt),
                        )
                _check_trained_simple_imputer(self, sk_imputer, rasl_imputer, tgt)
    def test_fit_transform_numeric_nonan_missing(self):
        """Impute values marked missing by a sentinel (-1) rather than NaN."""
        self._fill_missing_value("age", 36.0, -1)
        num_columns = ["age", "fnlwgt", "education-num"]
        prefix = Map(columns={c: it[c] for c in num_columns})
        hyperparams = [
            {"strategy": "mean"},
            {"strategy": "median"},
            {"strategy": "most_frequent"},
            {"strategy": "constant", "fill_value": 99},
        ]
        for hyperparam in hyperparams:
            rasl_trainable = prefix >> RaslSimpleImputer(
                missing_values=-1, **hyperparam
            )
            sk_trainable = prefix >> SkSimpleImputer(missing_values=-1, **hyperparam)
            sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
            sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
            sk_statistics_ = sk_trained.get_last().impl.statistics_
            for tgt, dataset in self.tgt2adult.items():
                (train_X, _), (test_X, _) = dataset
                rasl_trained = rasl_trainable.fit(train_X)
                # test the fit succeeded.
                rasl_statistics_ = rasl_trained.get_last().impl.statistics_  # type: ignore
                self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
                self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
                rasl_transformed = rasl_trained.transform(test_X)
                if tgt == "spark":
                    # Spark results must carry the synthetic "index" column.
                    self.assertEqual(get_index_name(rasl_transformed), "index")
                rasl_transformed = _ensure_pandas(rasl_transformed)
                self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
                for row_idx in range(sk_transformed.shape[0]):
                    for col_idx in range(sk_transformed.shape[1]):
                        self.assertEqual(
                            sk_transformed[row_idx, col_idx],
                            rasl_transformed.iloc[row_idx, col_idx],
                            (row_idx, col_idx, tgt),
                        )
def test_predict(self):
self._fill_missing_value("age", 36.0, np.nan)
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2adult["pandas"]
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
imputer_args = {"strategy": "mean"}
sk_trainable = prefix >> SkSimpleImputer(**imputer_args) >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = prefix >> RaslSimpleImputer(**imputer_args) >> to_pd >> lr
for tgt, dataset in self.tgt2adult.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def test_invalid_datatype_strategy(self):
sk_trainable = SkSimpleImputer()
with self.assertRaises(ValueError):
sk_trainable.fit(self.tgt2adult["pandas"][0][0])
rasl_trainable = RaslSimpleImputer()
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (_, _) = dataset
if tgt == "spark":
# Skip test because of timeout!
continue
with self.assertRaises(ValueError):
_ = rasl_trainable.fit(train_X)
def test_default_numeric_fill_value(self):
self._fill_missing_value("age", 36.0, np.nan)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [{"strategy": "constant"}]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(**hyperparam)
sk_trainable = prefix >> SkSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_default_string_fill_value(self):
self._fill_missing_value("education", "Prof-school", np.nan)
str_columns = ["workclass", "education", "capital-gain"]
prefix = Map(columns={c: it[c] for c in str_columns})
hyperparams = [{"strategy": "constant"}]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(**hyperparam)
sk_trainable = prefix >> SkSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
# Note that for this test case, the output of sklearn transform does not
# match rasl transform. There is at least one row which has a None
# value and pandas replace treats it as nan and replaces it.
# Sklearn which uses numpy does not replace a None.
# So we just test that `missing_value` is the default assigned.
self.assertEqual(rasl_transformed.iloc[1, 1], "missing_value")
def test_multiple_modes_numeric(self):
# Sklearn SimpleImputer says: for strategy `most_frequent`,
# if there is more than one such value, only the smallest is returned.
data = [[1, 10], [2, 14], [3, 15], [4, 15], [5, 14], [6, np.nan]]
df = pd.DataFrame(data, columns=["Id", "Age"])
hyperparam = {"strategy": "most_frequent"}
sk_trainable = SkSimpleImputer(**hyperparam)
rasl_trainable = RaslSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(df)
rasl_trained = rasl_trainable.fit(df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "pandas"
)
self.assertEqual([6, 15], list(rasl_trained.impl.statistics_), "pandas")
spark_df = pandas2spark(df)
rasl_trained = rasl_trainable.fit(spark_df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "spark"
)
self.assertIn(rasl_trained.impl.statistics_[1], [14, 15])
def test_multiple_modes_string(self):
# Sklearn SimpleImputer says: for strategy `most_frequent`,
# if there is more than one such value, only the smallest is returned.
data = [
["a", "t"],
["b", "f"],
["b", "m"],
["c", "f"],
["c", "m"],
["f", "missing"],
]
df = pd.DataFrame(data, columns=["Id", "Gender"])
hyperparam = {"strategy": "most_frequent", "missing_values": "missing"}
sk_trainable = SkSimpleImputer(**hyperparam)
rasl_trainable = RaslSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(df)
rasl_trained = rasl_trainable.fit(df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "pandas"
)
self.assertEqual(
list(["c", "m"]), list(rasl_trained.impl.statistics_), "pandas"
)
spark_df = pandas2spark(df)
rasl_trained = rasl_trainable.fit(spark_df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "spark"
)
self.assertIn(rasl_trained.impl.statistics_[1], ["f", "m"])
def test_valid_partial_fit(self):
self._fill_missing_value("age", 36.0, -1)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "mean"},
{"strategy": "constant", "fill_value": 99},
]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
sk_trainable = prefix >> SkSimpleImputer(missing_values=-1, **hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.get_last().impl.statistics_
(train_X, _), (test_X, _) = self.tgt2adult["pandas"]
data1_pandas = train_X.iloc[:10]
data2_pandas = train_X.iloc[10:100]
data3_pandas = train_X.iloc[100:]
test_X_pandas = test_X
for tgt in self.tgt2adult.keys():
if tgt == "pandas":
data1 = data1_pandas
data2 = data2_pandas
data3 = data3_pandas
test_X = test_X_pandas
elif tgt == "spark":
data1 = pandas2spark(data1_pandas) # type:ignore
data2 = pandas2spark(data2_pandas) # type:ignore
data3 = pandas2spark(data3_pandas) # type:ignore
test_X = pandas2spark(test_X_pandas) # type:ignore
else:
assert False
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
rasl_trained = rasl_trainable.partial_fit(data1)
rasl_trained = rasl_trained.partial_fit(data2)
rasl_trained = rasl_trained.partial_fit(data3)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.get_last().impl.statistics_ # type: ignore
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
for sk_stats, rasl_stats in zip(sk_statistics_, rasl_statistics_):
self.assertEqual(sk_stats, rasl_stats)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_invalid_partial_fit(self):
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "median"},
{"strategy": "most_frequent"},
]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
(train_X, _), (_, _) = self.tgt2adult["pandas"]
with self.assertRaises(ValueError):
_ = rasl_trainable.partial_fit(train_X)
def _check_trained_standard_scaler(test, op1, op2, msg):
    """Assert that two trained StandardScaler-like operators agree.

    Compares feature names (when present), feature/sample counts, and the
    mean_/var_/scale_ arrays element-wise with float tolerance; None-ness of
    each array must match between the two operators.
    """
    if hasattr(op1, "feature_names_in_"):
        test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
    test.assertEqual(op1.n_features_in_, op2.n_features_in_, msg)
    test.assertEqual(op1.n_samples_seen_, op2.n_samples_seen_, msg)
    # mean_, var_, and scale_ all follow the same comparison pattern
    for attr in ("mean_", "var_", "scale_"):
        values1 = getattr(op1, attr)
        values2 = getattr(op2, attr)
        if values1 is None:
            test.assertIsNone(values2, msg)
        else:
            test.assertIsNotNone(values2, msg)
            test.assertEqual(len(values1), len(values2), msg)
            for v1, v2 in zip(values1, values2):
                test.assertAlmostEqual(v1, v2, msg=msg)
class TestStandardScaler(unittest.TestCase):
    """Compare rasl StandardScaler against scikit-learn's on pandas and spark."""
    @classmethod
    def setUpClass(cls):
        """Fetch credit-g once per backend as ((train_X, train_y), (test_X, test_y))."""
        targets = ["pandas", "spark"]
        cls.tgt2creditg = cast(
            Dict[str, Any],
            {
                tgt: lale.datasets.openml.fetch(
                    "credit-g",
                    "classification",
                    preprocess=True,
                    astype=tgt,
                )
                for tgt in targets
            },
        )
    def test_fit(self):
        """Fitting on each backend must learn the same statistics as sklearn."""
        (train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
        sk_trainable = SkStandardScaler()
        sk_trained = sk_trainable.fit(train_X_pd)
        rasl_trainable = RaslStandardScaler()
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _), (_, _) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            _check_trained_standard_scaler(self, sk_trained, rasl_trained.impl, tgt)
    def test_partial_fit(self):
        """partial_fit on growing deltas must track an sklearn fit on the full prefix."""
        (train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
        for tgt in self.tgt2creditg.keys():
            rasl_op = RaslStandardScaler()
            for lower, upper in [[0, 10], [10, 100], [100, train_X_pd.shape[0]]]:
                # sklearn reference sees all rows up to `upper`, while
                # rasl_op only receives the new delta rows [lower:upper]
                data_so_far = train_X_pd[0:upper]
                sk_op = SkStandardScaler()
                sk_op = sk_op.fit(data_so_far)
                data_delta = train_X_pd[lower:upper]
                if tgt == "pandas":
                    pass
                elif tgt == "spark":
                    data_delta = pandas2spark(data_delta)
                else:
                    assert False
                rasl_op = rasl_op.partial_fit(data_delta)
                _check_trained_standard_scaler(
                    self, sk_op, rasl_op.impl, (tgt, lower, upper)
                )
    def test_transform(self):
        """Transformed test data must match sklearn cell by cell (float tolerance)."""
        (train_X_pd, _), (test_X_pd, _) = self.tgt2creditg["pandas"]
        sk_trainable = SkStandardScaler()
        sk_trained = sk_trainable.fit(train_X_pd)
        sk_transformed = sk_trained.transform(test_X_pd)
        rasl_trainable = RaslStandardScaler()
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, _), (test_X, _) = dataset
            rasl_trained = rasl_trainable.fit(train_X)
            _check_trained_standard_scaler(self, sk_trained, rasl_trained.impl, tgt)
            rasl_transformed = rasl_trained.transform(test_X)
            if tgt == "spark":
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertAlmostEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        msg=(row_idx, col_idx, tgt),
                    )
    def test_scale(self):
        """The rasl scale() convenience function must agree with sklearn's scale()."""
        (X_pd, _), _ = self.tgt2creditg["pandas"]
        sk_transformed = sk_scale(X_pd)
        for tgt, dataset in self.tgt2creditg.items():
            (X, _), _ = dataset
            rasl_transformed = rasl_scale(X)
            if tgt == "spark":
                self.assertEqual(get_index_name(rasl_transformed), "index")
            rasl_transformed = _ensure_pandas(rasl_transformed)
            self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
            for row_idx in range(sk_transformed.shape[0]):
                for col_idx in range(sk_transformed.shape[1]):
                    self.assertAlmostEqual(
                        sk_transformed[row_idx, col_idx],
                        rasl_transformed.iloc[row_idx, col_idx],
                        msg=(row_idx, col_idx, tgt),
                    )
    def test_predict(self):
        """End-to-end predictions with the rasl scaler must match the sklearn pipeline."""
        (train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
        to_pd = Convert(astype="pandas")
        lr = LogisticRegression()
        sk_trainable = SkStandardScaler() >> lr
        sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
        sk_predicted = sk_trained.predict(test_X_pd)
        rasl_trainable = RaslStandardScaler() >> to_pd >> lr
        for tgt, dataset in self.tgt2creditg.items():
            (train_X, train_y), (test_X, _test_y) = dataset
            rasl_trained = rasl_trainable.fit(train_X, train_y)
            rasl_predicted = rasl_trained.predict(test_X)
            self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
            self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
class _BatchTestingKFold:
    """KFold-compatible splitter that mimics batched cross-validation.

    Splits each of n_batches chunks of the data with KFold separately and
    merges the per-batch folds, so that sklearn's cross-validation sees the
    same train/test partition that rasl's batched cross-validation uses.
    """
    def __init__(self, n_batches, n_splits):
        # n_batches: how many chunks the data is split into before folding
        # n_splits: number of cross-validation folds
        self.n_batches = n_batches
        self.n_splits = n_splits
    def get_n_splits(self, X=None, y=None, groups=None):
        """Return the number of folds (sklearn CV-splitter interface)."""
        return self.n_splits
    def split(self, X, y=None, groups=None):
        """Return a list of (train_indices, test_indices) pairs over all batches."""
        # re-arrange batched data [[d0,e0,f0], [d1,e1,f1], [d2,e2,f2]]
        # into non-batched data [d0+d1+d2, e0+e1+e2, f0+f1+f2]
        result = [([], []) for _ in range(self.n_splits)]
        cv = KFold(self.n_splits)
        batches = mockup_data_loader(X, y, self.n_batches, "pandas")
        for bX, by in batches:
            for fold, (_, test) in enumerate(cv.split(bX, by)):
                # map positional indices within this batch back to labels of X
                remapped = bX.index[test]
                for f in range(self.n_splits):
                    if f != fold:
                        # sanity check: each row lands in the test set of
                        # exactly one fold and the train set of the others
                        assert set(result[f][0]).isdisjoint(set(remapped))
                        result[f][0].extend(remapped)
                assert set(result[fold][1]).isdisjoint(set(remapped))
                result[fold][1].extend(remapped)
        return result
class _BatchTestingCallback:
    """Progress-callback stub: counts invocations and validates the batch counter."""
    def __init__(self):
        self.n_calls = 0
    def __call__(
        self, score_train, score_valid, n_batches_scanned, end_of_scanned_batches
    ):
        self.n_calls = self.n_calls + 1
        # the callback must fire exactly once per scanned batch, in order
        assert self.n_calls == n_batches_scanned, (self.n_calls, n_batches_scanned)
        assert not end_of_scanned_batches
class TestTaskGraphs(unittest.TestCase):
    """Batched fit/score of rasl pipelines vs. plain sklearn pipelines."""
    @classmethod
    def setUpClass(cls):
        """Load credit-g, keep only categorical columns, and trim fairness info."""
        X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
        X = Project(columns=categorical()).fit(X).transform(X)
        fairness_info = {  # remove numeric protected attribute age
            "favorable_labels": fairness_info["favorable_labels"],
            "protected_attributes": fairness_info["protected_attributes"][:1],
        }
        cls.creditg = X, y, fairness_info
    @classmethod
    def _make_sk_trainable(cls, final_est):
        """Reference sklearn pipeline: ordinal-encode, min-max scale, classify."""
        if final_est == "sgd":
            est = SGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = RandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return sk_make_pipeline(
            SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
            SkMinMaxScaler(),
            est,
        )
    @classmethod
    def _make_rasl_trainable(cls, final_est):
        """Rasl pipeline mirroring _make_sk_trainable, step for step."""
        if final_est == "sgd":
            est = SGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = RandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return (
            RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
            >> RaslMinMaxScaler()
            >> est
        )
    def test_fit_no_batching(self):
        """A plain (non-batched) rasl fit learns the same preprocessing as sklearn."""
        train_X, train_y, _ = self.creditg
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        rasl_trainable = self._make_rasl_trainable("sgd")
        rasl_trained = rasl_trainable.fit(train_X, train_y)
        _check_trained_ordinal_encoder(
            self, sk_trained.steps[0][1], rasl_trained.steps[0][1].impl, "pandas"
        )
        _check_trained_min_max_scaler(
            self, sk_trained.steps[1][1], rasl_trained.steps[1][1].impl, "pandas"
        )
    def test_fit_batching(self):
        """fit_with_batches matches sklearn for 1 and 3 batches and both priorities."""
        train_X, train_y, _ = self.creditg
        train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        unique_class_labels = list(train_y.unique())
        for n_batches in [1, 3]:
            for prio in [PrioStep(), PrioBatch()]:
                batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
                rasl_trainable = self._make_rasl_trainable("sgd")
                rasl_trained = fit_with_batches(
                    pipeline=rasl_trainable,
                    batches_train=batches,
                    batches_valid=None,
                    scoring=None,
                    unique_class_labels=unique_class_labels,
                    # allow roughly three batches resident in memory at a time
                    max_resident=3 * math.ceil(train_data_space / n_batches),
                    prio=prio,
                    partial_transform=False,
                    verbose=0,
                    progress_callback=None,
                )
                _check_trained_ordinal_encoder(
                    self,
                    sk_trained.steps[0][1],
                    rasl_trained.steps[0][1].impl,
                    (n_batches, type(prio)),
                )
                _check_trained_min_max_scaler(
                    self,
                    sk_trained.steps[1][1],
                    rasl_trained.steps[1][1].impl,
                    (n_batches, type(prio)),
                )
    def test_partial_transform(self):
        """With partial_transform, the progress callback fires once per batch."""
        train_X, train_y, _ = self.creditg
        unique_class_labels = list(train_y.unique())
        for n_batches in [1, 3]:
            batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
            rasl_trainable = self._make_rasl_trainable("sgd")
            progress_callback = _BatchTestingCallback()
            _ = fit_with_batches(
                pipeline=rasl_trainable,
                batches_train=batches,
                batches_valid=None,
                scoring=rasl_get_scorer("accuracy"),
                unique_class_labels=unique_class_labels,
                max_resident=None,
                prio=PrioBatch(),
                partial_transform=True,
                verbose=0,
                progress_callback=progress_callback,
            )
            self.assertEqual(progress_callback.n_calls, n_batches)
    def test_frozen_prefix(self):
        """A pipeline whose trained prefix is frozen can be refit batch-wise."""
        train_X, train_y, _ = self.creditg
        unique_class_labels = list(train_y.unique())
        n_batches = 3
        batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
        first_batch = next(iter(batches))
        trainable1 = self._make_rasl_trainable("sgd")
        trained1 = fit_with_batches(
            pipeline=trainable1,
            batches_train=[first_batch],
            batches_valid=None,
            scoring=None,
            unique_class_labels=unique_class_labels,
            max_resident=None,
            prio=PrioBatch(),
            partial_transform=False,
            verbose=0,
            progress_callback=None,
        )
        # freeze everything except the final estimator, then refit
        prefix2 = trained1.remove_last().freeze_trained()
        suffix2 = trained1.get_last()
        trainable2 = prefix2 >> suffix2
        assert isinstance(trainable2, TrainedPipeline)
        progress_callback = _BatchTestingCallback()
        _ = fit_with_batches(
            pipeline=trainable2,
            batches_train=batches,
            batches_valid=None,
            scoring=rasl_get_scorer("accuracy"),
            unique_class_labels=unique_class_labels,
            max_resident=None,
            prio=PrioBatch(),
            partial_transform="score",
            verbose=0,
            progress_callback=progress_callback,
        )
        # one batch was already consumed by next(iter(batches)) above,
        # so only n_batches - 1 remain for the second fit
        self.assertEqual(progress_callback.n_calls, n_batches - 1)
    def test_cross_val_score_accuracy(self):
        """Batched rasl cross_val_score matches sklearn's, fold by fold."""
        X, y, _ = self.creditg
        n_splits = 3
        for n_batches in [1, 3]:
            with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
                sk_scores = sk_cross_val_score(
                    estimator=self._make_sk_trainable("rfc"),
                    X=X,
                    y=y,
                    scoring=make_scorer(sk_accuracy_score),
                    # _BatchTestingKFold reproduces the batched fold layout
                    cv=_BatchTestingKFold(n_batches, n_splits),
                )
            rasl_scores = rasl_cross_val_score(
                pipeline=self._make_rasl_trainable("rfc"),
                batches=mockup_data_loader(X, y, n_batches, "pandas"),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(n_splits),
                unique_class_labels=list(y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                verbose=0,
            )
            for sk_s, rasl_s in zip(sk_scores, rasl_scores):
                self.assertAlmostEqual(sk_s, rasl_s, msg=n_batches)
    def test_cross_val_score_disparate_impact(self):
        """Same comparison as above but with a fairness (disparate impact) scorer."""
        X, y, fairness_info = self.creditg
        disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
        n_splits = 3
        for n_batches in [1, 3]:
            with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
                sk_scores = sk_cross_val_score(
                    estimator=self._make_sk_trainable("rfc"),
                    X=X,
                    y=y,
                    scoring=disparate_impact_scorer,
                    cv=_BatchTestingKFold(n_batches, n_splits),
                )
            rasl_scores = rasl_cross_val_score(
                pipeline=self._make_rasl_trainable("rfc"),
                batches=mockup_data_loader(X, y, n_batches, "pandas"),
                scoring=disparate_impact_scorer,
                cv=KFold(n_splits),
                unique_class_labels=list(y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                verbose=0,
            )
            for sk_s, rasl_s in zip(sk_scores, rasl_scores):
                self.assertAlmostEqual(sk_s, rasl_s, msg=n_batches)
    def test_cross_validate(self):
        """cross_validate parity: test scores and the trained estimators themselves."""
        X, y, _ = self.creditg
        n_splits = 3
        for n_batches in [1, 3]:
            with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
                sk_scr = sk_cross_validate(
                    estimator=self._make_sk_trainable("rfc"),
                    X=X,
                    y=y,
                    scoring=make_scorer(sk_accuracy_score),
                    cv=_BatchTestingKFold(n_batches, n_splits),
                    return_estimator=True,
                )
            rasl_scr = rasl_cross_validate(
                pipeline=self._make_rasl_trainable("rfc"),
                batches=mockup_data_loader(X, y, n_batches, "pandas"),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(n_splits),
                unique_class_labels=list(y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                return_estimator=True,
                verbose=0,
            )
            for sk_s, rasl_s in zip(sk_scr["test_score"], rasl_scr["test_score"]):
                self.assertAlmostEqual(cast(float, sk_s), cast(float, rasl_s))
            for sk_e, rasl_e in zip(sk_scr["estimator"], rasl_scr["estimator"]):
                _check_trained_ordinal_encoder(
                    self,
                    sk_e.steps[0][1],
                    cast(TrainedPipeline, rasl_e).steps[0][1].impl,
                    n_batches,
                )
                _check_trained_min_max_scaler(
                    self,
                    sk_e.steps[1][1],
                    cast(TrainedPipeline, rasl_e).steps[1][1].impl,
                    n_batches,
                )
class TestTaskGraphsWithConcat(unittest.TestCase):
    """Batched fit/score for pipelines with parallel branches and ConcatFeatures."""
    @classmethod
    def setUpClass(cls):
        """Load raw credit-g and record which columns are categorical vs. numeric."""
        (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        # string-valued columns, ordinal-encoded by the categorical branch
        cls.cat_columns = [
            "checking_status",
            "credit_history",
            "purpose",
            "savings_status",
            "employment",
            "personal_status",
            "other_parties",
            "property_magnitude",
            "other_payment_plans",
            "housing",
            "job",
            "own_telephone",
            "foreign_worker",
        ]
        # numeric columns, imputed and min-max scaled by the numeric branch
        cls.num_columns = [
            "duration",
            "credit_amount",
            "installment_commitment",
            "residence_since",
            "age",
            "existing_credits",
            "num_dependents",
        ]
        cls.creditg = (train_X, train_y), (test_X, test_y)
    @classmethod
    def _make_sk_trainable(cls, final_est):
        """Reference sklearn pipeline using a ColumnTransformer over both branches."""
        from sklearn.compose import ColumnTransformer as SkColumnTransformer
        from sklearn.ensemble import RandomForestClassifier as SkRandomForestClassifier
        from sklearn.linear_model import SGDClassifier as SkSGDClassifier
        if final_est == "sgd":
            est = SkSGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = SkRandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return sk_make_pipeline(
            SkColumnTransformer(
                [
                    (
                        "prep_cat",
                        SkOrdinalEncoder(
                            handle_unknown="use_encoded_value", unknown_value=-1
                        ),
                        cls.cat_columns,
                    ),
                    (
                        "prep_num",
                        sk_make_pipeline(
                            SkSimpleImputer(strategy="mean"),
                            SkMinMaxScaler(),
                            "passthrough",
                        ),
                        cls.num_columns,
                    ),
                ]
            ),
            est,
        )
    @classmethod
    def _make_rasl_trainable(cls, final_est):
        """Rasl version: two Project branches joined by ConcatFeatures."""
        if final_est == "sgd":
            est = SGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = RandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return (
            (
                (
                    Project(columns=cls.cat_columns)
                    >> RaslOrdinalEncoder(
                        handle_unknown="use_encoded_value", unknown_value=-1
                    )
                )
                & (
                    Project(columns=cls.num_columns)
                    >> RaslSimpleImputer(strategy="mean")
                    >> RaslMinMaxScaler()
                )
            )
            >> ConcatFeatures()
            >> est
        )
    def test_fit_no_batching(self):
        """Non-batched fit: compare trained encoder and scaler against sklearn."""
        (train_X, train_y), _ = self.creditg
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        rasl_trainable = self._make_rasl_trainable("sgd")
        rasl_trained = rasl_trainable.fit(train_X, train_y)
        # in the flattened rasl pipeline, steps[1] is the ordinal encoder
        # and steps[4] is the min-max scaler
        _check_trained_ordinal_encoder(
            self,
            sk_trained.steps[0][1].transformers_[0][1],
            rasl_trained.steps[1][1].impl,
            "pandas",
        )
        _check_trained_min_max_scaler(
            self,
            sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
            rasl_trained.steps[4][1].impl,
            "pandas",
        )
    def test_fit_batching(self):
        """fit_with_batches over 1 and 3 batches matches the sklearn reference."""
        (train_X, train_y), _ = self.creditg
        train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        unique_class_labels = list(train_y.unique())
        for n_batches in [1, 3]:
            for prio in [PrioStep(), PrioBatch()]:
                batches = create_data_loader(
                    train_X, train_y, math.ceil(len(train_y) / n_batches)
                )
                self.assertEqual(n_batches, len(batches))
                rasl_trainable = self._make_rasl_trainable("sgd")
                rasl_trained = fit_with_batches(
                    pipeline=rasl_trainable,
                    batches_train=batches,  # type: ignore
                    batches_valid=None,
                    scoring=None,
                    unique_class_labels=unique_class_labels,
                    # allow roughly three batches resident in memory at a time
                    max_resident=3 * math.ceil(train_data_space / n_batches),
                    prio=prio,
                    partial_transform=False,
                    verbose=0,
                    progress_callback=None,
                )
                _check_trained_ordinal_encoder(
                    self,
                    sk_trained.steps[0][1].transformers_[0][1],
                    rasl_trained.steps[1][1].impl,
                    (n_batches, type(prio)),
                )
                _check_trained_min_max_scaler(
                    self,
                    sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
                    rasl_trained.steps[4][1].impl,
                    (n_batches, type(prio)),
                )
    def test_cross_val_score(self):
        """Batched rasl cross_val_score equals sklearn's in the single-batch case."""
        (X, y), _ = self.creditg
        sk_scores = sk_cross_val_score(
            estimator=self._make_sk_trainable("rfc"),
            X=X,
            y=y,
            scoring=make_scorer(sk_accuracy_score),
            cv=KFold(3),
        )
        for n_batches in [1, 3]:
            rasl_scores = rasl_cross_val_score(
                pipeline=self._make_rasl_trainable("rfc"),
                batches=mockup_data_loader(X, y, n_batches, "pandas"),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(3),
                unique_class_labels=list(y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                verbose=0,
            )
            # scores are only comparable when the folds line up, i.e., one batch
            if n_batches == 1:
                for sk_s, rasl_s in zip(sk_scores, rasl_scores):
                    self.assertAlmostEqual(sk_s, rasl_s)
class TestTaskGraphsWithCategoricalConcat(unittest.TestCase):
    """Like TestTaskGraphsWithConcat, but the rasl branches select columns by
    type (categorical()/number schema) rather than by explicit column lists."""
    @classmethod
    def setUpClass(cls):
        """Load raw credit-g and record which columns are categorical vs. numeric."""
        (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        # string-valued columns, ordinal-encoded by the categorical branch
        cls.cat_columns = [
            "checking_status",
            "credit_history",
            "purpose",
            "savings_status",
            "employment",
            "personal_status",
            "other_parties",
            "property_magnitude",
            "other_payment_plans",
            "housing",
            "job",
            "own_telephone",
            "foreign_worker",
        ]
        # numeric columns, imputed and min-max scaled by the numeric branch
        cls.num_columns = [
            "duration",
            "credit_amount",
            "installment_commitment",
            "residence_since",
            "age",
            "existing_credits",
            "num_dependents",
        ]
        cls.creditg = (train_X, train_y), (test_X, test_y)
    @classmethod
    def _make_sk_trainable(cls, final_est):
        """Reference sklearn pipeline using a ColumnTransformer over both branches."""
        from sklearn.compose import ColumnTransformer as SkColumnTransformer
        from sklearn.ensemble import RandomForestClassifier as SkRandomForestClassifier
        from sklearn.linear_model import SGDClassifier as SkSGDClassifier
        if final_est == "sgd":
            est = SkSGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = SkRandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return sk_make_pipeline(
            SkColumnTransformer(
                [
                    (
                        "prep_cat",
                        SkOrdinalEncoder(
                            handle_unknown="use_encoded_value", unknown_value=-1
                        ),
                        cls.cat_columns,
                    ),
                    (
                        "prep_num",
                        sk_make_pipeline(
                            SkSimpleImputer(strategy="mean"),
                            SkMinMaxScaler(),
                            "passthrough",
                        ),
                        cls.num_columns,
                    ),
                ]
            ),
            est,
        )
    @classmethod
    def _make_rasl_trainable(cls, final_est):
        """Rasl version selecting branch columns by type instead of by name."""
        if final_est == "sgd":
            est = SGDClassifier(random_state=97)
        elif final_est == "rfc":
            est = RandomForestClassifier(random_state=97)
        else:
            assert False, final_est
        return (
            (
                (
                    Project(columns=categorical(11), drop_columns={"type": "number"})
                    >> RaslOrdinalEncoder(
                        handle_unknown="use_encoded_value", unknown_value=-1
                    )
                )
                & (
                    Project(columns={"type": "number"})
                    >> RaslSimpleImputer(strategy="mean")
                    >> RaslMinMaxScaler()
                )
            )
            >> ConcatFeatures()
            >> est
        )
    def test_fit_no_batching(self):
        """Non-batched fit: compare trained encoder and scaler against sklearn."""
        (train_X, train_y), _ = self.creditg
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        rasl_trainable = self._make_rasl_trainable("sgd")
        rasl_trained = rasl_trainable.fit(train_X, train_y)
        # in the flattened rasl pipeline, steps[1] is the ordinal encoder
        # and steps[4] is the min-max scaler
        _check_trained_ordinal_encoder(
            self,
            sk_trained.steps[0][1].transformers_[0][1],
            rasl_trained.steps[1][1].impl,
            "pandas",
        )
        _check_trained_min_max_scaler(
            self,
            sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
            rasl_trained.steps[4][1].impl,
            "pandas",
        )
    def test_fit_batching(self):
        """fit_with_batches over 1 and 3 batches matches the sklearn reference."""
        (train_X, train_y), _ = self.creditg
        train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
        sk_trainable = self._make_sk_trainable("sgd")
        sk_trained = sk_trainable.fit(train_X, train_y)
        unique_class_labels = list(train_y.unique())
        for n_batches in [1, 3]:
            for prio in [PrioStep(), PrioBatch()]:
                batches = create_data_loader(
                    train_X, train_y, math.ceil(len(train_y) / n_batches)
                )
                self.assertEqual(n_batches, len(batches))
                rasl_trainable = self._make_rasl_trainable("sgd")
                rasl_trained = fit_with_batches(
                    pipeline=rasl_trainable,
                    batches_train=batches,  # type: ignore
                    batches_valid=None,
                    scoring=None,
                    unique_class_labels=unique_class_labels,
                    # allow roughly three batches resident in memory at a time
                    max_resident=3 * math.ceil(train_data_space / n_batches),
                    prio=prio,
                    partial_transform=False,
                    verbose=0,
                    progress_callback=None,
                )
                _check_trained_ordinal_encoder(
                    self,
                    sk_trained.steps[0][1].transformers_[0][1],
                    rasl_trained.steps[1][1].impl,
                    (n_batches, type(prio)),
                )
                _check_trained_min_max_scaler(
                    self,
                    sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
                    rasl_trained.steps[4][1].impl,
                    (n_batches, type(prio)),
                )
    def test_cross_val_score(self):
        """Batched rasl cross_val_score equals sklearn's in the single-batch case."""
        (X, y), _ = self.creditg
        sk_scores = sk_cross_val_score(
            estimator=self._make_sk_trainable("rfc"),
            X=X,
            y=y,
            scoring=make_scorer(sk_accuracy_score),
            cv=KFold(3),
        )
        for n_batches in [1, 3]:
            rasl_scores = rasl_cross_val_score(
                pipeline=self._make_rasl_trainable("rfc"),
                batches=mockup_data_loader(X, y, n_batches, "pandas"),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(3),
                unique_class_labels=list(y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                verbose=0,
            )
            # scores are only comparable when the folds line up, i.e., one batch
            if n_batches == 1:
                for sk_s, rasl_s in zip(sk_scores, rasl_scores):
                    self.assertAlmostEqual(sk_s, rasl_s)
class TestTaskGraphsSpark(unittest.TestCase):
@classmethod
def setUpClass(cls):
X, y, _ = lale.lib.aif360.fetch_creditg_df(preprocess=False)
X = Project(columns=categorical()).fit(X).transform(X)
cls.creditg = X, y
@classmethod
def _make_sk_trainable(cls):
return sk_make_pipeline(
SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
SkMinMaxScaler(),
Convert(astype="pandas"),
RandomForestClassifier(random_state=97),
)
@classmethod
def _make_rasl_trainable(cls):
return (
RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
>> RaslMinMaxScaler()
>> Convert(astype="pandas")
>> RandomForestClassifier(random_state=97)
)
def test_fit_with_batches(self):
train_X, train_y = self.creditg
sk_trainable = self._make_sk_trainable()
sk_trained = sk_trainable.fit(train_X, train_y)
unique_class_labels = list(train_y.unique())
datatype_list: List[datatype_param_type] = ["pandas", "spark"]
for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
rasl_trained = fit_with_batches(
pipeline=self._make_rasl_trainable(),
batches_train=mockup_data_loader(train_X, train_y, n_batches, tgt),
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
partial_transform=False,
verbose=0,
progress_callback=None,
)
_check_trained_ordinal_encoder(
self, sk_trained.steps[0][1], rasl_trained.steps[0][1].impl, tgt
)
_check_trained_min_max_scaler(
self, sk_trained.steps[1][1], rasl_trained.steps[1][1].impl, tgt
)
def test_cross_val_score(self):
X, y = self.creditg
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable(),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=KFold(3),
)
unique_class_labels = list(y.unique())
datatype_list: List[datatype_param_type] = ["pandas", "spark"]
for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable(),
batches=mockup_data_loader(X, y, n_batches, tgt),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
if n_batches == 1:
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s, msg=(tgt, n_batches))
    def test_cross_validate(self):
        """rasl_cross_validate must match sklearn cross_validate, comparing
        both per-fold scores and the fitted estimators' state."""
        X, y = self.creditg
        # sklearn warns because it receives a lale trainable operator
        with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
            sk_scores = sk_cross_validate(
                estimator=self._make_sk_trainable(),
                X=X,
                y=y,
                scoring=make_scorer(sk_accuracy_score),
                cv=KFold(3),
                return_estimator=True,
            )
        unique_class_labels = list(y.unique())
        datatype_list: List[datatype_param_type] = ["pandas", "spark"]
        for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
            rasl_scores = rasl_cross_validate(
                pipeline=self._make_rasl_trainable(),
                batches=mockup_data_loader(X, y, n_batches, tgt),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(3),
                unique_class_labels=unique_class_labels,
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                return_estimator=True,
                verbose=0,
            )
            msg = tgt, n_batches
            # only the single-batch case is numerically comparable to sklearn
            if n_batches == 1:
                for sk_s, rasl_s in zip(
                    sk_scores["test_score"], rasl_scores["test_score"]
                ):
                    self.assertAlmostEqual(
                        cast(float, sk_s), cast(float, rasl_s), msg=msg
                    )
                # also compare the fitted state of the preprocessing steps
                for sk_e, rasl_e in zip(
                    sk_scores["estimator"], rasl_scores["estimator"]
                ):
                    rasl_steps = cast(TrainedPipeline, rasl_e).steps
                    _check_trained_ordinal_encoder(
                        self,
                        sk_e.steps[0][1],
                        rasl_steps[0][1].impl,
                        msg=msg,
                    )
                    _check_trained_min_max_scaler(
                        self,
                        sk_e.steps[1][1],
                        rasl_steps[1][1].impl,
                        msg=msg,
                    )
class TestMetrics(unittest.TestCase):
    """Check that rasl metric functions and scorers agree with sklearn.

    The four test methods previously repeated the same seven-line scenario;
    it is factored into ``_check_scorer`` so each metric is declared once.
    """

    @classmethod
    def setUpClass(cls):
        cls.creditg = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=True, astype="pandas"
        )

    def _check_scorer(self, metric_name, sk_metric, rasl_metric, **metric_kwargs):
        """Train a classifier, then assert that the rasl metric function, the
        rasl scorer, and the batched scorer all reproduce the sklearn value.

        metric_kwargs is forwarded to both metric functions and to
        rasl_get_scorer (e.g. pos_label for f1).
        """
        (train_X, train_y), (test_X, test_y) = self.creditg
        est = LogisticRegression().fit(train_X, train_y)
        y_pred = est.predict(test_X)
        sk_score = sk_metric(test_y, y_pred, **metric_kwargs)
        self.assertAlmostEqual(
            sk_score, rasl_metric(test_y, y_pred, **metric_kwargs)
        )
        rasl_scorer = rasl_get_scorer(metric_name, **metric_kwargs)
        self.assertAlmostEqual(sk_score, rasl_scorer(est, test_X, test_y))
        batches = mockup_data_loader(test_X, test_y, 3, "pandas")
        self.assertAlmostEqual(
            sk_score, rasl_scorer.score_estimator_batched(est, batches)
        )

    def test_accuracy(self):
        self._check_scorer("accuracy", sk_accuracy_score, rasl_accuracy_score)

    def test_balanced_accuracy(self):
        self._check_scorer(
            "balanced_accuracy",
            sk_balanced_accuracy_score,
            rasl_balanced_accuracy_score,
        )

    def test_f1(self):
        self._check_scorer("f1", sk_f1_score, rasl_f1_score, pos_label=1)

    def test_r2_score(self):
        # NOTE(review): r2 on hard classifier predictions is unusual, but it
        # mirrors the original test -- it only checks sklearn/rasl agreement.
        self._check_scorer("r2", sk_r2_score, rasl_r2_score)
class TestBatchedBaggingClassifier(unittest.TestCase):
    """Exercise BatchedBaggingClassifier as the final step of rasl pipelines."""

    @classmethod
    def setUpClass(cls):
        # preprocess=False keeps the raw categorical columns so the ordinal
        # encoder in the pipelines below has real work to do
        cls.creditg = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False, astype="pandas"
        )

    @classmethod
    def _make_sk_trainable(cls):
        """Reference sklearn pipeline used for single-batch comparisons."""
        from sklearn.tree import DecisionTreeClassifier as SkDecisionTreeClassifier

        # NOTE(review): max_features="auto" was removed for trees in newer
        # scikit-learn releases -- confirm against the pinned sklearn version.
        return sk_make_pipeline(
            SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
            SkMinMaxScaler(),
            SkDecisionTreeClassifier(random_state=97, max_features="auto"),
        )

    @classmethod
    def _make_rasl_trainable(cls, final_est):
        """Build the rasl pipeline; only "bagging_monoid" is supported."""
        if final_est == "bagging_monoid":
            est = BatchedBaggingClassifier(
                base_estimator=DecisionTreeClassifier(
                    random_state=97, max_features="auto"
                )
            )
        else:
            assert False, final_est
        return (
            RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
            >> RaslMinMaxScaler()
            >> est
        )

    def test_classifier(self):
        """Standard operator checklist: schemas, fit/predict, hyperopt,
        cross-validation, grid search, and JSON serialization."""
        (X_train, y_train), (X_test, _y_test) = self.creditg
        import warnings

        clf = BatchedBaggingClassifier()
        # test_schemas_are_schemas
        lale.type_checking.validate_is_schema(clf.input_schema_fit())
        lale.type_checking.validate_is_schema(clf.input_schema_predict())
        lale.type_checking.validate_is_schema(clf.output_schema_predict())
        lale.type_checking.validate_is_schema(clf.hyperparam_schema())
        # test_init_fit_predict
        pipeline = self._make_rasl_trainable("bagging_monoid")
        trained = pipeline.fit(X_train, y_train)
        _ = trained.predict(X_test)
        # test_with_hyperopt
        from lale.lib.lale import Hyperopt

        # note: from here on, the preprocessed variant of the dataset is used
        (X_train, y_train), (X_test, _) = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=True, astype="pandas"
        )
        hyperopt = Hyperopt(estimator=pipeline, max_evals=1, verbose=True)
        trained = hyperopt.fit(X_train, y_train)
        _ = trained.predict(X_test)
        # test_cross_validation
        from lale.helpers import cross_val_score

        cv_results = cross_val_score(pipeline, X_train, y_train, cv=2)
        self.assertEqual(len(cv_results), 2)
        # test_with_gridsearchcv_auto_wrapped
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            from lale.lib.lale import GridSearchCV

            grid_search = GridSearchCV(
                estimator=pipeline,
                lale_num_samples=1,
                lale_num_grids=1,
                cv=2,
                scoring=make_scorer(sk_accuracy_score),
            )
            grid_search.fit(X_train, y_train)
        # test_predict_on_trainable
        trained = clf.fit(X_train, y_train)
        clf.predict(X_train)
        # test_to_json
        clf.to_json()

    def test_fit_batching(self):
        """Batched fit under a memory cap must match plain sklearn accuracy
        when all the data arrives in a single batch."""
        (train_X, train_y), (test_X, test_y) = self.creditg
        train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
        unique_class_labels = list(train_y.unique())
        for n_batches in [1, 3]:
            for prio in [PrioStep(), PrioBatch()]:
                batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
                rasl_trainable = self._make_rasl_trainable("bagging_monoid")
                rasl_trained = fit_with_batches(
                    pipeline=rasl_trainable,
                    batches_train=batches,
                    batches_valid=None,
                    scoring=None,
                    unique_class_labels=unique_class_labels,
                    # cap resident memory at ~3x the per-batch data size
                    max_resident=3 * math.ceil(train_data_space / n_batches),
                    prio=prio,
                    partial_transform=False,
                    verbose=0,
                    progress_callback=None,
                )
                predictions = rasl_trained.predict(test_X)
                rasl_acc = sk_accuracy_score(test_y, predictions)
                if n_batches == 1:
                    sk_pipeline = self._make_sk_trainable()
                    sk_pipeline.fit(train_X, train_y)
                    predictions = sk_pipeline.predict(test_X)
                    sk_acc = sk_accuracy_score(test_y, predictions)
                    self.assertEqual(rasl_acc, sk_acc)

    def test_cross_val_score(self):
        """Smoke test: rasl_cross_val_score runs for 1 and 3 batches."""
        (train_X, train_y), (_, _) = self.creditg
        for n_batches in [1, 3]:
            _ = rasl_cross_val_score(
                pipeline=self._make_rasl_trainable("bagging_monoid"),
                batches=mockup_data_loader(train_X, train_y, n_batches, "pandas"),
                scoring=rasl_get_scorer("accuracy"),
                cv=KFold(3),
                unique_class_labels=list(train_y.unique()),
                max_resident=None,
                prio=PrioBatch(),
                same_fold=True,
                verbose=0,
            )
class TestXGBoost(unittest.TestCase):
    """Smoke tests: the XGBoost wrappers accept incremental partial_fit calls."""

    def test_partial_fit_xgb_classifier(self):
        features, labels = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
        model = XGBClassifier(verbosity=0)
        for batch_X, batch_y in mockup_data_loader(features, labels, 3, "pandas"):
            model = model.partial_fit(batch_X, batch_y)
        # predict on the last batch, reusing the leaked loop variable
        _ = model.predict(batch_X)

    def test_partial_fit_xgb_regressor(self):
        features, labels = sklearn.datasets.load_diabetes(
            return_X_y=True, as_frame=True
        )
        model = XGBRegressor(verbosity=0)
        for batch_X, batch_y in mockup_data_loader(features, labels, 3, "pandas"):
            model = model.partial_fit(batch_X, batch_y)
        _ = model.predict(batch_X)
class TestLightGBM(unittest.TestCase):
    """Smoke tests: the LightGBM wrappers accept incremental partial_fit calls."""

    def test_partial_fit_lgbm_classifier(self):
        features, labels = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
        model = LGBMClassifier()
        for batch_X, batch_y in mockup_data_loader(features, labels, 3, "pandas"):
            model = model.partial_fit(batch_X, batch_y)
        # predict on the last batch, reusing the leaked loop variable
        _ = model.predict(batch_X)

    def test_partial_fit_lgbm_regressor(self):
        features, labels = sklearn.datasets.load_diabetes(
            return_X_y=True, as_frame=True
        )
        model = LGBMRegressor()
        for batch_X, batch_y in mockup_data_loader(features, labels, 3, "pandas"):
            model = model.partial_fit(batch_X, batch_y)
        _ = model.predict(batch_X)
| 103,433 | 42.29594 | 162 | py |
lale | lale-master/test/test_json_pretty_viz.py | # Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import lale.operators
import lale.pretty_print
class TestToGraphviz(unittest.TestCase):
    """Visualization: lale operators render; raw sklearn objects are rejected."""

    def test_with_operator_choice(self):
        from lale.lib.lale import NoOp
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            Nystroem,
        )
        from lale.operators import make_choice

        maybe_kernel = NoOp | Nystroem
        reducer = PCA
        estimator_choice = make_choice(LogisticRegression, KNeighborsClassifier)
        estimator_choice.visualize(ipython_display=False)
        planned = maybe_kernel >> reducer >> estimator_choice
        planned.visualize(ipython_display=False)

    def test_invalid_input(self):
        from sklearn.linear_model import LogisticRegression as SklearnLR

        scikit_lr = SklearnLR()
        from lale.helpers import to_graphviz

        # a bare sklearn estimator is not a lale operator
        with self.assertRaises(TypeError):
            to_graphviz(scikit_lr)
class TestPrettyPrint(unittest.TestCase):
# pylint:disable=reimported,redefined-outer-name
    def _roundtrip(self, expected, printed):
        """Assert printed == expected (ignoring sklearn_version_family), then
        exec the printed code and check it rebuilds a valid pipeline."""
        self.maxDiff = None
        # sklearn_version_family changes based on the Python as well as sklearn version,
        # so remove that hyperparameter while comparing if present.
        import re

        expected = re.sub(r"""sklearn_version_family=.\d*.,""", "", expected)
        printed = re.sub(r"""sklearn_version_family=.\d*.,""", "", printed)
        self.assertEqual(expected, printed)
        globals2 = {}
        locals2 = {}
        try:
            # the printed code must be self-contained executable Python
            exec(printed, globals2, locals2)
        except Exception as e:
            import pprint

            # dump the full context to make the failure reproducible
            print("error during exec(printed, globals2, locals2) where:")
            print(f'printed = """{printed}"""')
            print(f"globals2 = {pprint.pformat(globals2)}")
            print(f"locals2 = {pprint.pformat(locals2)}")
            raise e
        pipeline2 = locals2["pipeline"]
        import sklearn.pipeline

        # depending on astype, the result is a lale operator or sklearn pipeline
        self.assertIsInstance(
            pipeline2, (lale.operators.PlannedOperator, sklearn.pipeline.Pipeline)
        )
    def test_distance_threshold_validation_error(self):
        """This hyperparameter combination must be rejected by the
        FeatureAgglomeration schema with a validation error."""
        import jsonschema

        from lale.lib.sklearn import FeatureAgglomeration, LogisticRegression

        with self.assertRaises(jsonschema.ValidationError):
            _ = (
                FeatureAgglomeration(
                    distance_threshold=0.5, n_clusters=None, compute_full_tree=True
                )
                >> LogisticRegression()
            )
    def test_indiv_op_1(self):
        """Pretty-print an individual operator; enum hyperparameters print as strings."""
        from lale.lib.sklearn import LogisticRegression

        pipeline = LogisticRegression(solver=LogisticRegression.enum.solver.saga, C=0.9)
        expected = """from sklearn.linear_model import LogisticRegression
import lale

lale.wrap_imported_operators()
pipeline = LogisticRegression(solver="saga", C=0.9)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_indiv_op_2(self):
        """Pretty-print an individual operator with all-default hyperparameters."""
        from lale.lib.sklearn import LogisticRegression

        pipeline = LogisticRegression()
        expected = """from sklearn.linear_model import LogisticRegression
import lale

lale.wrap_imported_operators()
pipeline = LogisticRegression()"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_reducible(self):
        """A reducible pipeline pretty-prints with combinators (|, &, >>)."""
        from lale.lib.lale import NoOp
        from lale.lib.rasl import ConcatFeatures
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            MinMaxScaler,
            Nystroem,
        )
        from lale.lib.xgboost import XGBClassifier as XGB

        pca = PCA(copy=False)
        logistic_regression = LogisticRegression(solver="saga", C=0.9)
        pipeline = (
            (MinMaxScaler | NoOp)
            >> (pca & Nystroem)
            >> ConcatFeatures
            >> (KNeighborsClassifier | logistic_regression | XGB)
        )
        expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.rasl import ConcatFeatures
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier as XGB
import lale

lale.wrap_imported_operators()
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
    (MinMaxScaler | NoOp)
    >> (pca & Nystroem)
    >> ConcatFeatures
    >> (KNeighborsClassifier | logistic_regression | XGB)
)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_no_combinators(self):
        """With combinators=False, the printer uses make_choice/make_union/
        make_pipeline function calls instead of |, &, >> operators."""
        from lale.lib.lale import NoOp
        from lale.lib.rasl import ConcatFeatures
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            MinMaxScaler,
            Nystroem,
        )

        pca = PCA(copy=False)
        logistic_regression = LogisticRegression(solver="saga", C=0.9)
        pipeline = (
            (MinMaxScaler | NoOp)
            >> (pca & Nystroem & NoOp)
            >> ConcatFeatures
            >> (KNeighborsClassifier | logistic_regression)
        )
        expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from lale.operators import make_choice
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.operators import make_union
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from lale.operators import make_pipeline

choice_0 = make_choice(MinMaxScaler, NoOp)
pca = PCA(copy=False)
union = make_union(pca, Nystroem, NoOp)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
choice_1 = make_choice(KNeighborsClassifier, logistic_regression)
pipeline = make_pipeline(choice_0, union, choice_1)"""
        printed = lale.pretty_print.to_string(pipeline, combinators=False)
        self._roundtrip(expected, printed)
    def test_astype_sklearn(self):
        """With astype="sklearn", the printed code uses sklearn.pipeline
        helpers and no lale wrapping at all."""
        from lale.lib.rasl import ConcatFeatures
        from lale.lib.sklearn import PCA, LogisticRegression, MinMaxScaler, Nystroem

        pca = PCA(copy=False)
        logistic_regression = LogisticRegression(solver="saga", C=0.9)
        pipeline = (
            MinMaxScaler()
            >> (pca & Nystroem())
            >> ConcatFeatures
            >> logistic_regression
        )
        expected = """from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_union
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

pca = PCA(copy=False)
union = make_union(pca, Nystroem())
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = make_pipeline(MinMaxScaler(), union, logistic_regression)"""
        printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
        self._roundtrip(expected, printed)
    def test_import_as_1(self):
        """An aliased operator import round-trips as `import ... as` syntax."""
        from lale.lib.sklearn import LogisticRegression as LR

        pipeline = LR(solver="saga", C=0.9)
        expected = """from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
pipeline = LR(solver="saga", C=0.9)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_import_as_2(self):
        """A full combinator pipeline with several aliased imports round-trips."""
        from lale.lib.lale import NoOp
        from lale.lib.rasl import ConcatFeatures as Concat
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import KNeighborsClassifier as KNN
        from lale.lib.sklearn import LogisticRegression as LR
        from lale.lib.sklearn import MinMaxScaler as Scaler
        from lale.lib.sklearn import Nystroem

        pca = PCA(copy=False)
        lr = LR(solver="saga", C=0.9)
        pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)
        expected = """from sklearn.preprocessing import MinMaxScaler as Scaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.rasl import ConcatFeatures as Concat
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
pca = PCA(copy=False)
lr = LR(solver="saga", C=0.9)
pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_operator_choice(self):
        """A bare operator choice (|) pretty-prints as a choice expression."""
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import MinMaxScaler as Scl

        pipeline = PCA | Scl
        expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as Scl
import lale

lale.wrap_imported_operators()
pipeline = PCA | Scl"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_higher_order(self):
        """A higher-order operator with operator-valued hyperparameters
        pretty-prints its nested operators correctly."""
        from lale.lib.lale import Both
        from lale.lib.sklearn import PCA, Nystroem

        pipeline = Both(op1=PCA(n_components=2), op2=Nystroem)
        expected = """from lale.lib.lale import Both
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
import lale

lale.wrap_imported_operators()
pca = PCA(n_components=2)
pipeline = Both(op1=pca, op2=Nystroem)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_higher_order_2(self):
        """VotingClassifier with a nested sub-pipeline inside its estimators
        list pretty-prints the sub-pipeline inline."""
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import KNeighborsClassifier as KNN
        from lale.lib.sklearn import LogisticRegression as LR
        from lale.lib.sklearn import VotingClassifier as Vote

        pipeline = Vote(
            estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
        )
        expected = """from sklearn.ensemble import VotingClassifier as Vote
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
pipeline = Vote(
    estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_multimodal(self):
        """A multimodal pipeline (separate numeric/string projections joined
        by ConcatFeatures) pretty-prints with numbered duplicate names."""
        from lale.lib.rasl import ConcatFeatures as Cat
        from lale.lib.rasl import Project
        from lale.lib.sklearn import LinearSVC
        from lale.lib.sklearn import Normalizer as Norm
        from lale.lib.sklearn import OneHotEncoder as OneHot

        project_0 = Project(columns={"type": "number"})
        project_1 = Project(columns={"type": "string"})
        linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
        pipeline = (
            ((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
        )
        expected = """from lale.lib.rasl import Project
from sklearn.preprocessing import Normalizer as Norm
from sklearn.preprocessing import OneHotEncoder as OneHot
from lale.lib.rasl import ConcatFeatures as Cat
from sklearn.svm import LinearSVC
import lale

lale.wrap_imported_operators()
project_0 = Project(columns={"type": "number"})
project_1 = Project(columns={"type": "string"})
linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
pipeline = (
    ((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_irreducible_1(self):
        """A DAG that cannot be expressed with combinators pretty-prints via
        make_pipeline_graph with explicit steps and edges."""
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            MinMaxScaler,
            Nystroem,
        )
        from lale.operators import make_pipeline_graph

        choice = PCA | Nystroem
        pipeline = make_pipeline_graph(
            steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
            edges=[
                (choice, LogisticRegression),
                (MinMaxScaler, LogisticRegression),
                (MinMaxScaler, KNeighborsClassifier),
            ],
        )
        expected = """from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from lale.operators import make_pipeline_graph
import lale

lale.wrap_imported_operators()
choice = PCA | Nystroem
pipeline = make_pipeline_graph(
    steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
    edges=[
        (choice, LogisticRegression),
        (MinMaxScaler, LogisticRegression),
        (MinMaxScaler, KNeighborsClassifier),
    ],
)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_irreducible_2(self):
        """An irreducible DAG containing a reducible sub-pipeline prints the
        sub-pipeline as a named variable inside make_pipeline_graph."""
        from lale.lib.rasl import ConcatFeatures as HStack
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import KNeighborsClassifier as KNN
        from lale.lib.sklearn import LogisticRegression as LR
        from lale.lib.sklearn import MinMaxScaler as MMS
        from lale.operators import make_pipeline_graph

        pipeline_0 = HStack >> LR
        pipeline = make_pipeline_graph(
            steps=[PCA, MMS, KNN, pipeline_0],
            edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
        )
        expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as MMS
from sklearn.neighbors import KNeighborsClassifier as KNN
from lale.lib.rasl import ConcatFeatures as HStack
from sklearn.linear_model import LogisticRegression as LR
from lale.operators import make_pipeline_graph
import lale

lale.wrap_imported_operators()
pipeline_0 = HStack >> LR
pipeline = make_pipeline_graph(
    steps=[PCA, MMS, KNN, pipeline_0],
    edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_nested(self):
        """A choice nested inside a pipeline step pretty-prints with the two
        same-class operators disambiguated as lr_0 and lr_1."""
        from lale.lib.lale import NoOp
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import LogisticRegression as LR

        lr_0 = LR(C=0.09)
        lr_1 = LR(C=0.19)
        pipeline = PCA >> (lr_0 | NoOp >> lr_1)
        expected = """from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
from lale.lib.lale import NoOp
import lale

lale.wrap_imported_operators()
lr_0 = LR(C=0.09)
lr_1 = LR(C=0.19)
pipeline = PCA >> (lr_0 | NoOp >> lr_1)"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_cat_encoder(self):
        """A raw autoai_libs CatEncoder gets wrapped and pretty-printed; the
        printer adds the sklearn_version_family hyperparameter (which
        _roundtrip strips before comparing)."""
        import numpy as np
        from autoai_libs.transformers.exportable import CatEncoder

        from lale.lib.sklearn import LogisticRegression as LR

        cat_encoder = CatEncoder(
            encoding="ordinal",
            categories="auto",
            dtype=np.float64,
            handle_unknown="error",
        )
        pipeline = cat_encoder >> LR()
        expected = """from autoai_libs.transformers.exportable import CatEncoder
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
cat_encoder = CatEncoder(
    encoding="ordinal",
    categories="auto",
    dtype=np.float64,
    handle_unknown="error",
    sklearn_version_family="23",
)
pipeline = cat_encoder >> LR()"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_cat_encoder_defaults(self):
        """With astype="sklearn", defaulted hyperparameters are printed
        explicitly so the emitted code is self-contained."""
        import numpy as np

        from lale.lib.autoai_libs import CatEncoder
        from lale.lib.sklearn import LogisticRegression as LR

        cat_encoder = CatEncoder(
            dtype=np.float64, handle_unknown="error", sklearn_version_family="1"
        )
        pipeline = cat_encoder >> LR()
        expected = """from autoai_libs.transformers.exportable import CatEncoder
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
from sklearn.pipeline import make_pipeline

cat_encoder = CatEncoder(
    dtype=np.float64,
    handle_unknown="error",
    sklearn_version_family="1",
    encoding="ordinal",
    categories="auto",
)
pipeline = make_pipeline(cat_encoder, LR())"""
        self._roundtrip(
            expected, lale.pretty_print.to_string(pipeline, astype="sklearn")
        )
    def test_autoai_libs_fs1(self):
        """A range() hyperparameter value round-trips through pretty-printing."""
        from autoai_libs.cognito.transforms.transform_utils import FS1

        from lale.lib.sklearn import LogisticRegression as LR

        fs1 = FS1(
            cols_ids_must_keep=range(0, 7),
            additional_col_count_to_keep=8,
            ptype="classification",
        )
        pipeline = fs1 >> LR()
        expected = """from autoai_libs.cognito.transforms.transform_utils import FS1
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
fs1 = FS1(
    cols_ids_must_keep=range(0, 7),
    additional_col_count_to_keep=8,
    ptype="classification",
)
pipeline = fs1 >> LR()"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_numpy_replace_missing_values(self):
        """float("nan") hyperparameter values round-trip; note the printer
        reorders the keyword arguments in the expected output."""
        from autoai_libs.transformers.exportable import NumpyReplaceMissingValues

        from lale.lib.sklearn import LogisticRegression as LR

        numpy_replace_missing_values = NumpyReplaceMissingValues(
            filling_values=float("nan"), missing_values=["?"]
        )
        pipeline = numpy_replace_missing_values >> LR()
        expected = """from autoai_libs.transformers.exportable import NumpyReplaceMissingValues
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
numpy_replace_missing_values = NumpyReplaceMissingValues(
    missing_values=["?"], filling_values=float("nan")
)
pipeline = numpy_replace_missing_values >> LR()"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_numpy_replace_unknown_values1(self):
        """known_values_list is dropped by the printer; the fallback branch
        accepts tuple formatting of the reference list (autoai_libs version
        differences)."""
        from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues

        from lale.lib.sklearn import LogisticRegression as LR

        numpy_replace_unknown_values = NumpyReplaceUnknownValues(
            filling_values=float("nan"),
            filling_values_list=[float("nan")],
            known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
            missing_values_reference_list=["", "-", "?", float("nan")],
        )
        pipeline = numpy_replace_unknown_values >> LR()
        expected = """from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
    filling_values=float("nan"),
    filling_values_list=[float("nan")],
    missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()"""
        try:
            self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
        except BaseException:
            # some autoai_libs versions report the reference list as a tuple
            expected = """from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
    filling_values=float("nan"),
    filling_values_list=[float("nan")],
    missing_values_reference_list=("", "-", "?", float("nan")),
)
pipeline = numpy_replace_unknown_values >> LR()"""
            self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_numpy_replace_unknown_values2(self):
        """A customize_schema'd operator keeps known_values_list in the
        printed output and is imported under its customized alias."""
        from lale.lib.autoai_libs import NumpyReplaceUnknownValues
        from lale.lib.sklearn import LogisticRegression as LR

        # widen the schema so known_values_list survives pretty-printing
        CustomOp = NumpyReplaceUnknownValues.customize_schema(
            known_values_list={
                "anyOf": [
                    {"type": "array", "items": {"laleType": "Any"}},
                    {"enum": [None]},
                ],
                "default": None,
            }
        )
        numpy_replace_unknown_values = CustomOp(
            filling_values=float("nan"),
            filling_values_list=[float("nan")],
            known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
            missing_values_reference_list=["", "-", "?", float("nan")],
        )
        pipeline = numpy_replace_unknown_values >> LR()
        expected = """from autoai_libs.transformers.exportable import (
    NumpyReplaceUnknownValues as CustomOp,
)
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
custom_op = CustomOp(
    filling_values=float("nan"),
    filling_values_list=[float("nan")],
    known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
    missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = custom_op >> LR()"""
        try:
            self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
        except BaseException:
            # some autoai_libs versions report the reference list as a tuple
            expected = """from autoai_libs.transformers.exportable import (
    NumpyReplaceUnknownValues as CustomOp,
)
from sklearn.linear_model import LogisticRegression as LR
import lale

lale.wrap_imported_operators()
custom_op = CustomOp(
    filling_values=float("nan"),
    filling_values_list=[float("nan")],
    known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
    missing_values_reference_list=("", "-", "?", float("nan")),
)
pipeline = custom_op >> LR()"""
            self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_tam_1(self):
        """A class-valued hyperparameter (tans_class) prints as a fully
        qualified dotted name under astype="sklearn"."""
        import autoai_libs.cognito.transforms.transform_extras
        import numpy as np
        from autoai_libs.cognito.transforms.transform_utils import TAM

        from lale.lib.sklearn import LogisticRegression as LR

        tam = TAM(
            tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
            name="isoforestanomaly",
            col_names=["a", "b", "c"],
            col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
        )
        pipeline = tam >> LR()
        expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import autoai_libs.cognito.transforms.transform_extras
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
from sklearn.pipeline import make_pipeline

tam = TAM(
    tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
    name="isoforestanomaly",
    col_names=["a", "b", "c"],
    col_dtypes=[
        np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
    ],
)
pipeline = make_pipeline(tam, LR())"""
        self._roundtrip(
            expected, lale.pretty_print.to_string(pipeline, astype="sklearn")
        )
    def test_autoai_libs_tam_2(self):
        """An estimator-instance hyperparameter (tans_class=PCA instance)
        prints as a constructor call; LGBM defaults are made explicit."""
        import numpy as np
        from lightgbm import LGBMClassifier
        from sklearn.decomposition import PCA

        from lale.lib.autoai_libs import TAM
        from lale.operators import make_pipeline

        pca = PCA(copy=False)
        tam = TAM(
            tans_class=pca,
            name="pca",
            col_names=["a", "b", "c"],
            col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
        )
        lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
        pipeline = make_pipeline(tam, lgbm_classifier)
        expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import sklearn.decomposition
import numpy as np
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline

tam = TAM(
    tans_class=sklearn.decomposition.PCA(copy=False),
    name="pca",
    col_names=["a", "b", "c"],
    col_dtypes=[
        np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
    ],
)
lgbm_classifier = LGBMClassifier(
    class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(tam, lgbm_classifier)"""
        self._roundtrip(
            expected, lale.pretty_print.to_string(pipeline, combinators=False)
        )
    def test_autoai_libs_tam_3(self):
        """Importing a fitted sklearn pipeline containing TAM and then
        pretty-printing it drops defaulted hyperparameters."""
        import autoai_libs.cognito.transforms.transform_utils
        import numpy as np
        import sklearn.cluster
        import sklearn.linear_model
        import sklearn.pipeline

        import lale.helpers
        import lale.operators
        import lale.pretty_print

        sklearn_pipeline = sklearn.pipeline.make_pipeline(
            autoai_libs.cognito.transforms.transform_utils.TAM(
                tans_class=sklearn.cluster.FeatureAgglomeration(
                    compute_full_tree="auto",
                    connectivity=None,
                    linkage="ward",
                    memory=None,
                    n_clusters=2,
                    pooling_func=np.mean,
                ),
                name="featureagglomeration",
                col_names=["a", "b", "c"],
                col_dtypes=[
                    np.dtype("float32"),
                    np.dtype("float32"),
                    np.dtype("float32"),
                ],
            ),
            sklearn.linear_model.LogisticRegression(
                solver="liblinear", multi_class="ovr"
            ),
        )
        pipeline = lale.helpers.import_from_sklearn_pipeline(sklearn_pipeline)
        # all FeatureAgglomeration arguments above are defaults, so the
        # printer emits a bare FeatureAgglomeration()
        expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.cluster import FeatureAgglomeration
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale

lale.wrap_imported_operators()
tam = TAM(
    tans_class=FeatureAgglomeration(),
    name="featureagglomeration",
    col_names=["a", "b", "c"],
    col_dtypes=[
        np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
    ],
)
logistic_regression = LogisticRegression(
    multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
    def test_autoai_libs_tam_4(self):
        """Import with fitted=False yields a trainable operator that both
        pretty-prints correctly and can actually be fit and used to predict."""
        import autoai_libs.cognito.transforms.transform_utils
        import numpy as np
        import sklearn.decomposition
        import sklearn.linear_model
        import sklearn.pipeline

        import lale.helpers
        import lale.operators
        import lale.pretty_print

        sklearn_pipeline = sklearn.pipeline.make_pipeline(
            autoai_libs.cognito.transforms.transform_utils.TAM(
                tans_class=sklearn.decomposition.PCA(),
                name="pca",
                col_names=["a", "b", "c"],
                col_dtypes=[
                    np.dtype("float32"),
                    np.dtype("float32"),
                    np.dtype("float32"),
                ],
            ),
            sklearn.linear_model.LogisticRegression(
                solver="liblinear", multi_class="ovr"
            ),
        )
        pipeline = lale.helpers.import_from_sklearn_pipeline(
            sklearn_pipeline, fitted=False
        )
        assert isinstance(pipeline, lale.operators.TrainableOperator)
        expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.decomposition import PCA
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale

lale.wrap_imported_operators()
tam = TAM(
    tans_class=PCA(),
    name="pca",
    col_names=["a", "b", "c"],
    col_dtypes=[
        np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
    ],
)
logistic_regression = LogisticRegression(
    multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
        self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
        import numpy as np
        import pandas as pd

        # smoke test: the imported trainable pipeline fits and predicts on
        # small random data
        test = pd.DataFrame(
            np.random.randint(0, 100, size=(15, 3)),
            columns=["a", "b", "c"],
            dtype=np.dtype("float32"),
        )
        trained = pipeline.fit(
            test.to_numpy(), [0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]
        )
        trained.predict(test.to_numpy())
def test_autoai_libs_ta1(self):
    """TA1(np.rint) >> LR pretty-prints with wrapped col_names/col_dtypes lists."""
    import autoai_libs.utils.fc_methods
    import numpy as np
    from autoai_libs.cognito.transforms.transform_utils import TA1
    from lale.lib.sklearn import LogisticRegression as LR
    ta1 = TA1(
        fun=np.rint,
        name="round",
        datatypes=["numeric"],
        feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
        col_names=[
            "a____________",
            "b____________",
            "c____________",
            "d____________",
            "e____________",
        ],
        col_dtypes=[
            np.dtype("float32"),
            np.dtype("float32"),
            np.dtype("float32"),
            np.dtype("float32"),
            np.dtype("float32"),
        ],
    )
    pipeline = ta1 >> LR()
    expected = """from autoai_libs.cognito.transforms.transform_utils import TA1
import numpy as np
import autoai_libs.utils.fc_methods
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
ta1 = TA1(
    fun=np.rint,
    name="round",
    datatypes=["numeric"],
    feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
    col_names=[
        "a____________", "b____________", "c____________", "d____________",
        "e____________",
    ],
    col_dtypes=[
        np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
        np.dtype("float32"), np.dtype("float32"),
    ],
)
pipeline = ta1 >> LR()"""
    self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_t_no_op(self):
    """TNoOp >> LGBMClassifier printed without combinators (make_pipeline form)."""
    from lightgbm import LGBMClassifier
    from lale.lib.autoai_libs import TNoOp
    from lale.operators import make_pipeline
    t_no_op = TNoOp(
        fun="fun",
        name="no_action",
        datatypes="x",
        feat_constraints=[],
        tgraph="tgraph",
    )
    lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
    pipeline = make_pipeline(t_no_op, lgbm_classifier)
    # note: printed output also shows the n_estimators=100 default
    expected = """from autoai_libs.cognito.transforms.transform_utils import TNoOp
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline
t_no_op = TNoOp(
    fun="fun",
    name="no_action",
    datatypes="x",
    feat_constraints=[],
    tgraph="tgraph",
)
lgbm_classifier = LGBMClassifier(
    class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(t_no_op, lgbm_classifier)"""
    self._roundtrip(
        expected, lale.pretty_print.to_string(pipeline, combinators=False)
    )
def test_autoai_libs_two_ops_with_combinator(self):
    """NumpyColumnSelector >> CompressStrings pretty-prints with combinators."""
    from autoai_libs.transformers.exportable import (
        CompressStrings,
        NumpyColumnSelector,
    )
    numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
    compress_strings = CompressStrings(
        compress_type="hash",
        dtypes_list=["char_str", "char_str", "char_str", "char_str"],
        misslist_list=[[], [], [], []],
    )
    pipeline = lale.operators.make_pipeline(numpy_column_selector, compress_strings)
    expected = """from autoai_libs.transformers.exportable import NumpyColumnSelector
from autoai_libs.transformers.exportable import CompressStrings
import lale
lale.wrap_imported_operators()
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
    compress_type="hash",
    dtypes_list=["char_str", "char_str", "char_str", "char_str"],
    missing_values_reference_list=["?", "", "-", float("nan")],
    misslist_list=[[], [], [], []],
)
pipeline = numpy_column_selector >> compress_strings"""
    printed = lale.pretty_print.to_string(pipeline, combinators=True)
    try:
        self._roundtrip(expected, printed)
    except BaseException:
        # fallback: the missing_values_reference_list default may print as a
        # tuple instead of a list; accept either spelling
        expected = """from autoai_libs.transformers.exportable import NumpyColumnSelector
from autoai_libs.transformers.exportable import CompressStrings
import lale
lale.wrap_imported_operators()
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
    compress_type="hash",
    dtypes_list=["char_str", "char_str", "char_str", "char_str"],
    missing_values_reference_list=("?", "", "-", float("nan")),
    misslist_list=[[], [], [], []],
)
pipeline = numpy_column_selector >> compress_strings"""
        self._roundtrip(expected, printed)
def test_expression(self):
    """Scan/Join/Aggregate pipeline with lale.expressions round-trips;
    duplicate Scan steps get numbered names (scan_0, scan_1)."""
    from lale.expressions import it, mean
    from lale.lib.rasl import Aggregate, Join, Scan
    scan1 = Scan(table=it["table1.csv"])
    scan2 = Scan(table=it["table2.csv"])
    join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
    aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
    pipeline = (scan1 & scan2) >> join >> aggregate
    expected = """from lale.lib.rasl import Scan
from lale.expressions import it
from lale.lib.rasl import Join
from lale.lib.rasl import Aggregate
from lale.expressions import mean
import lale
lale.wrap_imported_operators()
scan_0 = Scan(table=it["table1.csv"])
scan_1 = Scan(table=it["table2.csv"])
join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
pipeline = (scan_0 & scan_1) >> join >> aggregate"""
    self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline(self):
    """Pipeline wrapper with a planned step (bare PCA class) pretty-prints."""
    from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
    pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
    expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
    self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline_2(self):
    """Same pipeline as above, but astype="sklearn" omits the lale import/wrap lines."""
    from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
    pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
    expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
    printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
    self._roundtrip(expected, printed)
def test_customize_schema_enum_and_number(self):
    """customize_schema survives pretty_print(customize_schema=True);
    0.00001 is printed in scientific notation as 1e-05."""
    from lale.lib.sklearn import LogisticRegression
    pipeline = LogisticRegression.customize_schema(
        solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
        tol={
            "type": "number",
            "minimum": 0.00001,
            "maximum": 0.1,
            "default": 0.0001,
        },
    )(solver="lbfgs")
    expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression.customize_schema(
    solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
    tol={
        "type": "number",
        "minimum": 1e-05,
        "maximum": 0.1,
        "default": 0.0001,
    },
)(solver="lbfgs")"""
    self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_none_and_boolean(self):
    """Schema customization with None enum and boolean types round-trips;
    a hyperparameter equal to the customized default is not printed."""
    from lale.lib.sklearn import RandomForestRegressor
    pipeline = RandomForestRegressor.customize_schema(
        bootstrap={"type": "boolean", "default": True},
        random_state={
            "anyOf": [
                {"laleType": "numpy.random.RandomState"},
                {
                    "description": "RandomState used by np.random",
                    "enum": [None],
                },
                {"description": "Explicit seed.", "type": "integer"},
            ],
            "default": 33,
        },
    )(n_estimators=50)
    expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor.customize_schema(
    bootstrap={"type": "boolean", "default": True},
    random_state={
        "anyOf": [
            {"laleType": "numpy.random.RandomState"},
            {"description": "RandomState used by np.random", "enum": [None]},
            {"description": "Explicit seed.", "type": "integer"},
        ],
        "default": 33,
    },
)(n_estimators=50)"""
    # this should not include "random_state=33" because that would be
    # redundant with the schema, and would prevent automated search
    self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_print_defaults(self):
    """With customize_schema=False, only defaults that changed print as arguments."""
    from lale.lib.sklearn import RandomForestRegressor
    pipeline = RandomForestRegressor.customize_schema(
        bootstrap={"type": "boolean", "default": True},  # default unchanged
        random_state={
            "anyOf": [
                {"laleType": "numpy.random.RandomState"},
                {"enum": [None]},
                {"type": "integer"},
            ],
            "default": 33,  # default changed
        },
    )(n_estimators=50)
    expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor(n_estimators=50, random_state=33)"""
    # print exactly those defaults that changed
    self._roundtrip(expected, pipeline.pretty_print(customize_schema=False))
def test_user_operator_in_toplevel_module(self):
    """An operator defined in a user's own top-level module pretty-prints
    with wrap_imported_operators(["<module>"]) naming that module."""
    import importlib
    import os.path
    import sys
    import tempfile
    with tempfile.NamedTemporaryFile(mode="w", suffix=".py") as tmp_py_file:
        # minimal importable module defining a Lale-wrapped mock classifier
        file_contents = """import numpy as np
import lale.operators
class _MockClassifierImpl:
    def __init__(self, int_hp=0):
        self.int_hp = int_hp
    def fit(self, X, y):
        self.some_y = list(y)[0]
    def predict(self, X):
        return self.some_y
MockClassifier = lale.operators.make_operator(_MockClassifierImpl)
"""
        tmp_py_file.write(file_contents)
        tmp_py_file.flush()
        dir_name = os.path.dirname(tmp_py_file.name)
        old_pythonpath = sys.path
        try:
            sys.path.append(dir_name)
            module_name = os.path.basename(tmp_py_file.name)[: -len(".py")]
            module = importlib.import_module(module_name)
            MockClf = getattr(module, "MockClassifier")
            self.assertIsInstance(MockClf, lale.operators.PlannedIndividualOp)
            self.assertEqual(MockClf.name(), "MockClassifier")
            pipeline = MockClf(int_hp=42)
            expected = f"""from {module_name} import MockClassifier as MockClf
import lale
lale.wrap_imported_operators(["{module_name}"])
pipeline = MockClf(int_hp=42)"""
            self._roundtrip(expected, pipeline.pretty_print())
        finally:
            # restore sys.path even if the import or assertions fail
            sys.path = old_pythonpath
def test_nonlib_operator(self):
    """An operator from a non-lale.lib module prints wrap_imported_operators
    with that module's name."""
    from test.mock_custom_operators import CustomOrigOperator
    from lale.lib.sklearn import LogisticRegression
    pipeline = CustomOrigOperator() >> LogisticRegression()
    expected = """from test.mock_module import CustomOrigOperator
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators(["test.mock_module"])
pipeline = CustomOrigOperator() >> LogisticRegression()"""
    self._roundtrip(expected, pipeline.pretty_print())
@unittest.skip("TODO: avoid spurious 'name' keys in printed dictionaries")
def test_fairness_info(self):
    """DisparateImpactRemover configured via **fairness_info pretty-prints
    after auto_configure (currently skipped, see decorator)."""
    from lale.lib.aif360 import DisparateImpactRemover, fetch_creditg_df
    from lale.lib.lale import Hyperopt, Project
    from lale.lib.sklearn import KNeighborsClassifier
    X, y, fairness_info = fetch_creditg_df()
    disparate_impact_remover = DisparateImpactRemover(
        **fairness_info,
        preparation=Project(columns={"type": "number"}),
    )
    planned = disparate_impact_remover >> KNeighborsClassifier()
    frozen = planned.freeze_trainable()
    pipeline = frozen.auto_configure(X, y, optimizer=Hyperopt, cv=2, max_evals=1)
    expected = """from aif360.algorithms.preprocessing import DisparateImpactRemover
from lale.lib.rasl import Project
from sklearn.neighbors import KNeighborsClassifier
import lale
lale.wrap_imported_operators()
project = Project(columns={"type": "number"})
disparate_impact_remover = DisparateImpactRemover(
    favorable_labels=["good"],
    protected_attributes=[
        {
            "reference_group": [
                "male div/sep", "male mar/wid", "male single",
            ],
            "feature": "personal_status",
        },
        {
            "reference_group": [[26, 1000]],
            "feature": "age",
        },
    ],
    preparation=project,
)
pipeline = disparate_impact_remover >> KNeighborsClassifier()"""
    self._roundtrip(expected, pipeline.pretty_print())
def test_snap_logistic_regression_1(self):
    # force printing arguments via "transient": "alwaysPrint", case True
    from lale.lib.snapml import SnapLogisticRegression
    pipeline = SnapLogisticRegression(normalize=True)
    expected = """from snapml import SnapLogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = SnapLogisticRegression(fit_intercept=True, normalize=True)"""
    self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_snap_logistic_regression_2(self):
    # force printing arguments via "transient": "alwaysPrint", case False
    from lale.lib.snapml import SnapLogisticRegression
    pipeline = SnapLogisticRegression(normalize=False)
    expected = """from snapml import SnapLogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = SnapLogisticRegression(normalize=False, fit_intercept=True)"""
    self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
class TestToAndFromJSON(unittest.TestCase):
    """Round-trip operators through lale.json_operator.to_json/from_json and
    compare against the exact expected JSON dictionaries."""

    def test_trainable_individual_op(self):
        """A configured individual operator serializes hyperparams and state."""
        self.maxDiff = None
        from lale.json_operator import from_json, to_json
        from lale.lib.sklearn import LogisticRegression as LR
        operator = LR(LR.enum.solver.sag, C=0.1)
        json_expected = {
            "class": LR.class_name(),
            "state": "trainable",
            "operator": "LogisticRegression",
            "label": "LR",
            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
            "hyperparams": {"C": 0.1, "solver": "sag"},
            "is_frozen_trainable": False,
        }
        json = to_json(operator)
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json_2, json_expected)

    def test_operator_choice(self):
        """PCA | MinMaxScaler serializes as an OperatorChoice with two steps."""
        self.maxDiff = None
        from lale.json_operator import from_json, to_json
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import MinMaxScaler as Scl
        operator = PCA | Scl
        json_expected = {
            "class": "lale.operators.OperatorChoice",
            "operator": "OperatorChoice",
            "state": "planned",
            "steps": {
                "pca": {
                    "class": PCA.class_name(),
                    "state": "planned",
                    "operator": "PCA",
                    "label": "PCA",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
                },
                "scl": {
                    "class": Scl.class_name(),
                    "state": "planned",
                    "operator": "MinMaxScaler",
                    "label": "Scl",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.min_max_scaler.html",
                },
            },
        }
        json = to_json(operator)
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json_2, json_expected)

    def test_pipeline_1(self):
        """A DAG pipeline (PCA & NoOp) >> ConcatFeatures >> LR serializes edges/steps."""
        self.maxDiff = None
        from lale.json_operator import from_json, to_json
        from lale.lib.lale import NoOp
        from lale.lib.rasl import ConcatFeatures
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import LogisticRegression as LR
        operator = (PCA & NoOp) >> ConcatFeatures >> LR
        json_expected = {
            "class": "lale.operators.PlannedPipeline",
            "state": "planned",
            "edges": [
                ["pca", "concat_features"],
                ["no_op", "concat_features"],
                ["concat_features", "lr"],
            ],
            "steps": {
                "pca": {
                    "class": PCA.class_name(),
                    "state": "planned",
                    "operator": "PCA",
                    "label": "PCA",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
                },
                "no_op": {
                    "class": NoOp.class_name(),
                    "state": "trained",
                    "operator": "NoOp",
                    "label": "NoOp",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
                    "hyperparams": None,
                    "coefs": None,
                    "is_frozen_trainable": True,
                    "is_frozen_trained": True,
                },
                "concat_features": {
                    "class": ConcatFeatures.class_name(),
                    "state": "trained",
                    "operator": "ConcatFeatures",
                    "label": "ConcatFeatures",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.concat_features.html",
                    "hyperparams": None,
                    "coefs": None,
                    "is_frozen_trainable": True,
                    "is_frozen_trained": True,
                },
                "lr": {
                    "class": LR.class_name(),
                    "state": "planned",
                    "operator": "LogisticRegression",
                    "label": "LR",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
                },
            },
        }
        json = to_json(operator)
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json, json_2)

    def test_pipeline_2(self):
        """Pipeline with choices round-trips (compare json -> operator -> json)."""
        from lale.json_operator import from_json, to_json
        from lale.lib.lale import NoOp
        from lale.lib.sklearn import (
            PCA,
            KNeighborsClassifier,
            LogisticRegression,
            Nystroem,
        )
        from lale.operators import make_choice, make_pipeline
        kernel_tfm_or_not = make_choice(NoOp, Nystroem)
        tfm = PCA
        clf = make_choice(LogisticRegression, KNeighborsClassifier)
        operator = make_pipeline(kernel_tfm_or_not, tfm, clf)
        json = to_json(operator)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json, json_2)

    def test_higher_order_1(self):
        """Higher-order operator: nested operators serialize as $ref into steps."""
        from lale.json_operator import from_json
        from lale.lib.lale import Both
        from lale.lib.sklearn import PCA, Nystroem
        operator = Both(op1=PCA(n_components=2), op2=Nystroem)
        json_expected = {
            "class": Both.class_name(),
            "state": "trainable",
            "operator": "Both",
            "label": "Both",
            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.both.html",
            "hyperparams": {
                "op1": {"$ref": "../steps/pca"},
                "op2": {"$ref": "../steps/nystroem"},
            },
            "steps": {
                "pca": {
                    "class": PCA.class_name(),
                    "state": "trainable",
                    "operator": "PCA",
                    "label": "PCA",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
                    "hyperparams": {"n_components": 2},
                    "is_frozen_trainable": False,
                },
                "nystroem": {
                    "class": Nystroem.class_name(),
                    "state": "planned",
                    "operator": "Nystroem",
                    "label": "Nystroem",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.nystroem.html",
                },
            },
            "is_frozen_trainable": False,
        }
        json = operator.to_json()
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = operator_2.to_json()
        self.assertEqual(json, json_2)

    def test_higher_order_2(self):
        """VotingClassifier with (name, operator) estimator tuples serializes
        each estimator as a $ref, including a nested sub-pipeline."""
        self.maxDiff = None
        from lale.json_operator import from_json
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import KNeighborsClassifier as KNN
        from lale.lib.sklearn import LogisticRegression as LR
        from lale.lib.sklearn import VotingClassifier as Vote
        operator = Vote(
            estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
        )
        json_expected = {
            "class": Vote.class_name(),
            "state": "trainable",
            "operator": "VotingClassifier",
            "is_frozen_trainable": True,
            "label": "Vote",
            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.voting_classifier.html",
            "hyperparams": {
                "estimators": [
                    ("knn", {"$ref": "../steps/knn"}),
                    ("pipeline", {"$ref": "../steps/pipeline"}),
                ],
                "voting": "soft",
            },
            "steps": {
                "knn": {
                    "class": KNN.class_name(),
                    "state": "planned",
                    "operator": "KNeighborsClassifier",
                    "label": "KNN",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_neighbors_classifier.html",
                },
                "pipeline": {
                    "class": "lale.operators.PlannedPipeline",
                    "state": "planned",
                    "edges": [["pca", "lr"]],
                    "steps": {
                        "pca": {
                            "class": PCA.class_name(),
                            "state": "trainable",
                            "operator": "PCA",
                            "label": "PCA",
                            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
                            "hyperparams": {},
                            "is_frozen_trainable": False,
                        },
                        "lr": {
                            "class": LR.class_name(),
                            "state": "planned",
                            "operator": "LogisticRegression",
                            "label": "LR",
                            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
                        },
                    },
                },
            },
        }
        json = operator.to_json()
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = operator_2.to_json()
        self.assertEqual(json, json_2)

    def test_nested(self):
        """A choice nested inside a pipeline, itself containing a sub-pipeline,
        serializes with numbered step names (lr_0, pipeline_1, lr_1)."""
        self.maxDiff = None
        from lale.json_operator import from_json, to_json
        from lale.lib.lale import NoOp
        from lale.lib.sklearn import PCA
        from lale.lib.sklearn import LogisticRegression as LR
        operator = PCA >> (LR(C=0.09) | NoOp >> LR(C=0.19))
        json_expected = {
            "class": "lale.operators.PlannedPipeline",
            "state": "planned",
            "edges": [["pca", "choice"]],
            "steps": {
                "pca": {
                    "class": PCA.class_name(),
                    "state": "planned",
                    "operator": "PCA",
                    "label": "PCA",
                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
                },
                "choice": {
                    "class": "lale.operators.OperatorChoice",
                    "state": "planned",
                    "operator": "OperatorChoice",
                    "steps": {
                        "lr_0": {
                            "class": LR.class_name(),
                            "state": "trainable",
                            "operator": "LogisticRegression",
                            "label": "LR",
                            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
                            "hyperparams": {"C": 0.09},
                            "is_frozen_trainable": False,
                        },
                        "pipeline_1": {
                            "class": "lale.operators.TrainablePipeline",
                            "state": "trainable",
                            "edges": [["no_op", "lr_1"]],
                            "steps": {
                                "no_op": {
                                    "class": NoOp.class_name(),
                                    "state": "trained",
                                    "operator": "NoOp",
                                    "label": "NoOp",
                                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
                                    "hyperparams": None,
                                    "coefs": None,
                                    "is_frozen_trainable": True,
                                    "is_frozen_trained": True,
                                },
                                "lr_1": {
                                    "class": LR.class_name(),
                                    "state": "trainable",
                                    "operator": "LogisticRegression",
                                    "label": "LR",
                                    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
                                    "hyperparams": {"C": 0.19},
                                    "is_frozen_trainable": False,
                                },
                            },
                        },
                    },
                },
            },
        }
        json = to_json(operator)
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json, json_2)

    def test_customize_schema(self):
        """customize_schema changes serialize under the customize_schema key."""
        from lale.json_operator import from_json, to_json
        from lale.lib.sklearn import LogisticRegression as LR
        operator = LR.customize_schema(
            solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
            tol={
                "type": "number",
                "minimum": 0.00001,
                "maximum": 0.1,
                "default": 0.0001,
            },
        )
        json_expected = {
            "class": LR.class_name(),
            "state": "planned",
            "operator": "LogisticRegression",
            "label": "LR",
            "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
            "customize_schema": {
                "properties": {
                    "hyperparams": {
                        "allOf": [
                            {
                                "type": "object",
                                "properties": {
                                    "solver": {
                                        "default": "liblinear",
                                        "enum": ["lbfgs", "liblinear"],
                                    },
                                    "tol": {
                                        "type": "number",
                                        "minimum": 0.00001,
                                        "maximum": 0.1,
                                        "default": 0.0001,
                                    },
                                },
                            }
                        ]
                    }
                }
            },
        }
        json = to_json(operator)
        self.maxDiff = None
        self.assertEqual(json, json_expected)
        operator_2 = from_json(json)
        json_2 = to_json(operator_2)
        self.assertEqual(json, json_2)
class TestDiff(unittest.TestCase):
    """Tests for Operator.diff textual (difflib-style) output in both directions."""

    def test_single_op(self):
        """Diff between two configurations of the same individual operator."""
        from lale.lib.sklearn import LogisticRegression
        single_op = LogisticRegression()
        single_op_param = LogisticRegression(solver="saga")
        expected_diff = (
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = LogisticRegression()\n"
            '+ pipeline = LogisticRegression(solver="saga")\n'
            "? +++++++++++++\n"
        )
        diff_str = single_op.diff(single_op_param, ipython_display=False)
        self.assertEqual(diff_str, expected_diff)
        expected_diff_reverse = (
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            '- pipeline = LogisticRegression(solver="saga")\n'
            "? -------------\n\n"
            "+ pipeline = LogisticRegression()"
        )
        diff_str_reverse = single_op_param.diff(single_op, ipython_display=False)
        self.assertEqual(diff_str_reverse, expected_diff_reverse)

    def test_pipeline(self):
        """Diff between a plain pipeline and one with an operator choice."""
        from lale.lib.lale import NoOp
        from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
        pipeline_simple = PCA >> SelectKBest >> LogisticRegression
        pipeline_choice = (PCA | NoOp) >> SelectKBest >> LogisticRegression
        expected_diff = (
            " from sklearn.decomposition import PCA\n"
            "+ from lale.lib.lale import NoOp\n"
            " from sklearn.feature_selection import SelectKBest\n"
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
            "+ pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
            "? + ++++++++\n"
        )
        diff_str = pipeline_simple.diff(pipeline_choice, ipython_display=False)
        self.assertEqual(diff_str, expected_diff)
        expected_diff_reverse = (
            " from sklearn.decomposition import PCA\n"
            "- from lale.lib.lale import NoOp\n"
            " from sklearn.feature_selection import SelectKBest\n"
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
            "? - --------\n\n"
            "+ pipeline = PCA >> SelectKBest >> LogisticRegression"
        )
        diff_str_reverse = pipeline_choice.diff(pipeline_simple, ipython_display=False)
        self.assertEqual(diff_str_reverse, expected_diff_reverse)

    def test_single_op_pipeline(self):
        """Diff between a single operator and a multi-step pipeline."""
        from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
        single_op = LogisticRegression()
        pipeline = PCA >> SelectKBest >> LogisticRegression
        expected_diff = (
            "+ from sklearn.decomposition import PCA\n"
            "+ from sklearn.feature_selection import SelectKBest\n"
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = LogisticRegression()\n"
            "+ pipeline = PCA >> SelectKBest >> LogisticRegression"
        )
        diff_str = single_op.diff(pipeline, ipython_display=False)
        self.assertEqual(expected_diff, diff_str)
        expected_diff_reverse = (
            "- from sklearn.decomposition import PCA\n"
            "- from sklearn.feature_selection import SelectKBest\n"
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
            "+ pipeline = LogisticRegression()"
        )
        diff_str_reverse = pipeline.diff(single_op, ipython_display=False)
        self.assertEqual(expected_diff_reverse, diff_str_reverse)

    def test_options(self):
        """show_imports=False and customize_schema=True options of diff."""
        from lale.lib.sklearn import LogisticRegression
        single_op = LogisticRegression()
        single_op_schema = single_op.customize_schema(solver={"enum": ["saga"]})
        expected_diff_no_imports = " pipeline = LogisticRegression()"
        diff_str_no_imports = single_op.diff(
            single_op_schema, show_imports=False, ipython_display=False
        )
        self.assertEqual(diff_str_no_imports, expected_diff_no_imports)
        expected_diff_no_schema = (
            " from sklearn.linear_model import LogisticRegression\n"
            " import lale\n"
            " \n"
            " lale.wrap_imported_operators()\n"
            "- pipeline = LogisticRegression()\n"
            '+ pipeline = LogisticRegression.customize_schema(solver={"enum": ["saga"]})()'
        )
        diff_str_no_schema = single_op.diff(
            single_op_schema, customize_schema=True, ipython_display=False
        )
        self.assertEqual(diff_str_no_schema, expected_diff_no_schema)
| 64,209 | 37.106825 | 147 | py |
lale | lale-master/lale/operator_wrapper.py | # Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import sys
from typing import List, Optional, Set
from lale.operators import Operator, clone_op, get_op_from_lale_lib
if sys.version_info < (3, 9):
from typing import Container # raises a mypy error for <3.8
else:
from collections.abc import Container
logger = logging.getLogger(__name__)
def _wrap_operators_in_symtab(
    symtab,
    exclude_classes: Optional[Container[str]] = None,
    wrapper_modules: Optional[List[str]] = None,
) -> None:
    """Replace wrappable estimator classes in *symtab* with Lale wrappers.

    A symbol is a candidate when it is a class that is not already a Lale
    ``Operator`` and exposes ``predict`` or ``transform``. Candidates found
    via ``get_op_from_lale_lib`` are cloned into *symtab* under their
    original name; unknown candidates are left untouched and logged.
    """
    for name, impl in symtab.items():
        looks_wrappable = (
            inspect.isclass(impl)
            and not issubclass(impl, Operator)
            and (hasattr(impl, "predict") or hasattr(impl, "transform"))
        )
        if not looks_wrappable:
            continue
        if exclude_classes is not None and name in exclude_classes:
            continue
        operator = get_op_from_lale_lib(impl, wrapper_modules)
        if operator is None:
            # symtab[name] = make_operator(impl=impl, name=name)
            logger.info(f"Lale:Not wrapping unknown operator:{name}")
            continue
        # Only existing values are replaced (no keys added or removed), so
        # mutating the dict while iterating over .items() is safe here.
        symtab[name] = clone_op(operator, name)
        if operator.class_name().startswith("lale.lib.autogen"):
            logger.info(f"Lale:Wrapped autogen operator:{name}")
        else:
            logger.info(f"Lale:Wrapped known operator:{name}")
def wrap_imported_operators(
    exclude_classes: Optional[Container[str]] = None,
    wrapper_modules: Optional[List[str]] = None,
) -> None:
    """Wrap the currently imported operators from the symbol table
    to their lale wrappers.

    Parameters
    ----------
    exclude_classes : string, optional, default None
        List of class names to exclude from wrapping,
        alias names if they are used while importing.

    wrapper_modules : set of string, optional, default None
        Set of Lale modules to use for wrapping operators.
    """
    current_frame = inspect.currentframe()
    assert (
        current_frame is not None
    ), "Try to use inspect.stack()[1][0] to get the calling frame"
    calling_frame = current_frame.f_back
    assert (
        calling_frame is not None
    ), "Try to use inspect.stack()[1][0] to get the calling frame"
    # Bug fix: the old code called wrapper_modules.extend(...), mutating the
    # caller's list argument as a side effect. Build a fresh list instead,
    # with the registered lale wrapper modules appended as before.
    if wrapper_modules is not None:
        all_wrapper_modules = list(wrapper_modules) + list(get_lale_wrapper_modules())
    else:
        all_wrapper_modules = list(get_lale_wrapper_modules())
    _wrap_operators_in_symtab(
        calling_frame.f_globals, exclude_classes, wrapper_modules=all_wrapper_modules
    )
    if calling_frame.f_code.co_name == "<module>":  # for testing with exec()
        _wrap_operators_in_symtab(
            calling_frame.f_locals, exclude_classes, wrapper_modules=all_wrapper_modules
        )
# Process-wide registry of module names that provide Lale operator wrappers.
_lale_wrapper_modules: Set[str] = set()


def register_lale_wrapper_modules(m: str) -> None:
    """Register a module with lale's import system
    so that :meth:`lale.helpers.import_from_sklearn_pipeline` will look for replacement classes in that module.

    Example: (in `__init__.py` file for the module):

    .. code-block:: python

        from lale import register_lale_wrapper_modules

        register_lale_wrapper_modules(__name__)

    Parameters
    ----------
    m : [str]
        The module name
    """
    _lale_wrapper_modules.add(m)
def get_lale_wrapper_modules() -> Set[str]:
    # Returns the registry set itself, not a copy — callers could mutate it.
    # NOTE(review): known in-file callers only read/copy it; confirm external
    # callers before changing this to return a defensive copy.
    return _lale_wrapper_modules
# Pre-register the wrapper modules that ship with (or integrate with) Lale,
# so lookups via get_lale_wrapper_modules() find them without explicit
# registration by the user.
for builtin_lale_modules in [
    "lale.lib.sklearn",
    "lale.lib.autoai_libs",
    "lale.lib.xgboost",
    "lale.lib.lightgbm",
    "lale.lib.snapml",
    "autoai_ts_libs.lale",
]:
    register_lale_wrapper_modules(builtin_lale_modules)
| 4,203 | 31.589147 | 111 | py |
lale | lale-master/lale/helpers.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
import importlib
import logging
import sys
import time
import traceback
from importlib import util
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
Union,
overload,
)
import numpy as np
import pandas as pd
import scipy.sparse
import sklearn.pipeline
from numpy.random import RandomState
from sklearn.metrics import accuracy_score, check_scoring, log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.metaestimators import _safe_split
import lale.datasets.data_schemas
if sys.version_info >= (3, 8):
from typing import Literal # raises a mypy error for <3.8
else:
from typing_extensions import Literal
try:
import torch
torch_installed = True
except ImportError:
torch_installed = False
spark_loader = util.find_spec("pyspark")
spark_installed = spark_loader is not None
if spark_installed:
from pyspark.sql.dataframe import DataFrame as spark_df
logger = logging.getLogger(__name__)
LALE_NESTED_SPACE_KEY = "__lale_nested_space"
astype_type = Literal["lale", "sklearn"]
datatype_param_type = Literal["pandas", "spark"]
randomstate_type = Union[RandomState, int, None]
def make_nested_hyperopt_space(sub_space):
    """Wrap *sub_space* so it can later be recognized as a nested search space."""
    wrapped = {LALE_NESTED_SPACE_KEY: sub_space}
    return wrapped
def assignee_name(level=1) -> Optional[str]:
    """Best-effort name of the variable assigned at the caller's call site.

    Parses the source text of the stack frame `level` frames above the
    caller; if that line is a single assignment to one simple name
    (``x = ...``), returns ``"x"``, otherwise None.
    """
    tb = traceback.extract_stack()
    # -(level + 2) skips this frame plus the extract_stack call itself
    file_name, _line_number, _function_name, text = tb[-(level + 2)]
    try:
        tree = ast.parse(text, file_name)
    except SyntaxError:
        # call-site text was not a complete/valid statement
        return None
    assert tree is not None and isinstance(tree, ast.Module)
    if len(tree.body) == 1:
        stmt = tree.body[0]
        if isinstance(stmt, ast.Assign):
            lhs = stmt.targets
            if len(lhs) == 1:
                res = lhs[0]
                # only a plain name target qualifies (not attributes/subscripts)
                if isinstance(res, ast.Name):
                    return res.id
    return None
def arg_name(pos=0, level=1) -> Optional[str]:
    """Best-effort name of the positional argument at index *pos* at the
    caller's call site.

    Parses the source text of the stack frame `level` frames above the
    caller; if that line is a single call expression whose argument at
    position *pos* is a simple name, returns that name, otherwise None.
    """
    tb = traceback.extract_stack()
    # -(level + 2) skips this frame plus the extract_stack call itself
    file_name, _line_number, _function_name, text = tb[-(level + 2)]
    try:
        tree = ast.parse(text, file_name)
    except SyntaxError:
        # call-site text was not a complete/valid statement
        return None
    assert tree is not None and isinstance(tree, ast.Module)
    if len(tree.body) == 1:
        stmt = tree.body[0]
        if isinstance(stmt, ast.Expr):
            expr = stmt.value
            if isinstance(expr, ast.Call):
                args = expr.args
                if pos < len(args):
                    res = args[pos]
                    # only a plain name argument qualifies
                    if isinstance(res, ast.Name):
                        return res.id
    return None
def data_to_json(data, subsample_array: bool = True) -> Union[list, dict, int, float]:
    """Recursively convert *data* into JSON-serializable Python values.

    Tuples become lists; numpy / scipy-sparse / pandas / torch containers are
    rendered via ndarray_to_json; numpy scalars become native ints/floats.
    Anything unrecognized is returned unchanged.
    """
    if isinstance(data, (tuple, list)):
        # JSON has no tuple, so both render as lists
        return [data_to_json(item, subsample_array) for item in data]
    if isinstance(data, dict):
        return {k: data_to_json(v, subsample_array) for k, v in data.items()}
    if isinstance(data, np.ndarray):
        return ndarray_to_json(data, subsample_array)
    if isinstance(data, scipy.sparse.csr_matrix):
        return ndarray_to_json(data.toarray(), subsample_array)
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return ndarray_to_json(data.values, subsample_array)
    if torch_installed and isinstance(data, torch.Tensor):
        return ndarray_to_json(data.detach().numpy(), subsample_array)
    if isinstance(data, (np.int64, np.int32, np.int16)):  # type: ignore
        return int(data)
    if isinstance(data, (np.float32, np.float64)):  # type: ignore
        return float(data)
    return data
def is_empty_dict(val) -> bool:
    """Return True iff *val* is a dict with no entries."""
    if not isinstance(val, dict):
        return False
    return len(val) == 0
def dict_without(orig_dict: Dict[str, Any], key: str) -> Dict[str, Any]:
    """Return *orig_dict* without *key*.

    When *key* is present, a new dict is built; otherwise the original dict
    object is returned unchanged (no copy).
    """
    if key in orig_dict:
        return {k: v for k, v in orig_dict.items() if k != key}
    return orig_dict
def json_lookup(ptr, jsn, default=None):
    """Follow the slash-separated pointer *ptr* into nested dict *jsn*.

    Returns the value at that path, or *default* when any step is missing.
    """
    current = jsn
    for step in ptr.split("/"):
        if step in current:
            current = current[step]
        else:
            return default
    return current
def ndarray_to_json(arr: np.ndarray, subsample_array: bool = True) -> Union[list, dict]:
    """Render a numpy array as nested Python lists of JSON-friendly scalars.

    When *subsample_array* is True, only the first 10 entries along the
    first axis are emitted (columns and deeper axes are never truncated).
    Raises ValueError for dtypes that cannot be mapped to JSON scalars.
    """
    no_limit = np.iinfo(int).max
    if subsample_array:
        # sample 10 rows and no limit on columns
        limits = [10, no_limit, no_limit]
    else:
        limits = [no_limit, no_limit, no_limit]

    def convert(prefix: Tuple[int, ...]) -> Any:
        depth = len(prefix)
        if depth == len(arr.shape):
            elem = arr[prefix]
            # native Python scalars (and np.str_, a str subclass) pass through
            if isinstance(elem, (bool, int, float, str)):
                return elem
            if np.issubdtype(arr.dtype, np.bool_):
                return bool(elem)
            if np.issubdtype(arr.dtype, np.integer):
                return int(elem)
            if np.issubdtype(arr.dtype, np.number):
                return float(elem)
            if arr.dtype.kind in ["U", "S", "O"]:
                return str(elem)
            raise ValueError(
                f"Unexpected dtype {arr.dtype}, "
                f"kind {arr.dtype.kind}, "
                f"type {type(arr[prefix])}."
            )
        assert depth < len(arr.shape)
        count = min(limits[depth], arr.shape[depth])
        return [convert(prefix + (i,)) for i in range(count)]

    return convert(())
def split_with_schemas(estimator, all_X, all_y, indices, train_indices=None):
    """Take the rows of (all_X, all_y) selected by *indices*, propagating any
    attached ``json_schema`` metadata with row counts resized to the subset."""
    subset_X, subset_y = _safe_split(estimator, all_X, all_y, indices, train_indices)

    def _propagate(full_data, subset_data):
        # carry over the items schema, bounded by the subset's row count
        if hasattr(full_data, "json_schema"):
            rows = subset_data.shape[0]
            lale.datasets.data_schemas.add_schema(
                subset_data,
                {
                    "type": "array",
                    "minItems": rows,
                    "maxItems": rows,
                    "items": full_data.json_schema["items"],
                },
            )

    _propagate(all_X, subset_X)
    _propagate(all_y, subset_y)
    return subset_X, subset_y
def fold_schema(X, y, cv=1, is_classifier=True):
    """Build a JSON schema describing one training fold of (X, y)."""

    def _resized(data, n_rows):
        # same schema as the full data, with fold-sized row bounds
        base = lale.datasets.data_schemas._to_schema(data)
        return {**base, "minItems": n_rows, "maxItems": n_rows}

    n_splits = cv if isinstance(cv, int) else cv.get_n_splits()
    try:
        n_samples = X.shape[0] if hasattr(X, "shape") else len(X)
    except TypeError:  # raised for Spark dataframes.
        n_samples = X.count() if hasattr(X, "count") else 0

    if n_splits == 1:
        n_rows_fold = n_samples
    elif is_classifier:
        n_classes = len(set(y))
        n_rows_unstratified = (n_samples // n_splits) * (n_splits - 1)
        # in stratified case, fold sizes can differ by up to n_classes
        n_rows_fold = max(1, n_rows_unstratified - n_classes)
    else:
        n_rows_fold = (n_samples // n_splits) * (n_splits - 1)

    return {
        "properties": {"X": _resized(X, n_rows_fold), "y": _resized(y, n_rows_fold)}
    }
def cross_val_score_track_trials(
    estimator,
    X,
    y=None,
    scoring: Any = accuracy_score,
    cv: Any = 5,
    args_to_scorer: Optional[Dict[str, Any]] = None,
    args_to_cv: Optional[Dict[str, Any]] = None,
    **fit_params,
):
    """
    Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
    each of the splits.

    Parameters
    ----------

    estimator: A valid sklearn_wrapper estimator
    X: Valid data that works with the estimator
    y: Valid target that works with the estimator
    scoring: string or a scorer object created using
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
        A string from sklearn.metrics.SCORERS.keys() can be used or a scorer created from one of
        sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics).
        A completely custom scorer object can be created from a python function following the example at
        https://scikit-learn.org/stable/modules/model_evaluation.html
        The metric has to return a scalar value.
    cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
        Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
        Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.
    args_to_scorer: A dictionary of additional keyword arguments to pass to the scorer.
        Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.
    args_to_cv: A dictionary of additional keyword arguments to pass to the split method of cv.
        This is only applicable when cv is not an integer.
    fit_params: Additional parameters that should be passed when calling fit on the estimator

    Returns
    -------
    result: a 3-tuple of fold-averaged values:
        (mean score, mean log loss, mean fit-plus-score wall-clock time in seconds)
    """
    if isinstance(cv, int):
        cv = StratifiedKFold(cv)

    if args_to_scorer is None:
        args_to_scorer = {}
    if args_to_cv is None:
        args_to_cv = {}
    scorer = check_scoring(estimator, scoring=scoring)
    cv_results: List[float] = []
    log_loss_results = []
    time_results = []
    for train, test in cv.split(X, y, **args_to_cv):
        # subsets carry any json_schema metadata forward
        X_train, y_train = split_with_schemas(estimator, X, y, train)
        X_test, y_test = split_with_schemas(estimator, X, y, test, train)
        start = time.time()
        # Not calling sklearn.base.clone() here, because:
        # (1) For Lale pipelines, clone() calls the pipeline constructor
        # with edges=None, so the resulting topology is incorrect.
        # (2) For Lale individual operators, the fit() method already
        # clones the impl object, so cloning again is redundant.
        trained = estimator.fit(X_train, y_train, **fit_params)
        score_value = scorer(trained, X_test, y_test, **args_to_scorer)
        execution_time = time.time() - start
        # not all estimators have predict probability
        try:
            y_pred_proba = trained.predict_proba(X_test)
            logloss = log_loss(y_true=y_test, y_pred=y_pred_proba)
            log_loss_results.append(logloss)
        except BaseException:
            # best-effort: log loss simply skipped for estimators without predict_proba
            logger.debug("Warning, log loss cannot be computed")
        cv_results.append(score_value)
        time_results.append(execution_time)
    # NOTE: if no fold yielded a log loss, np.array([]).mean() is NaN (with a warning)
    result = (
        np.array(cv_results).mean(),
        np.array(log_loss_results).mean(),
        np.array(time_results).mean(),
    )
    return result
def cross_val_score(estimator, X, y=None, scoring: Any = accuracy_score, cv: Any = 5):
    """
    Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
    each of the splits.

    Parameters
    ----------
    estimator: A valid sklearn_wrapper estimator
    X: Valid data value that works with the estimator
    y: Valid target value that works with the estimator
    scoring: a scorer object from sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics)
        Default value is accuracy_score.
    cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
        Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
        Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.

    Returns
    -------
    cv_results: a list of scores corresponding to each cross validation fold
    """
    folds = StratifiedKFold(cv) if isinstance(cv, int) else cv
    scores = []
    for train_idx, test_idx in folds.split(X, y):
        X_train, y_train = split_with_schemas(estimator, X, y, train_idx)
        X_test, y_test = split_with_schemas(estimator, X, y, test_idx, train_idx)
        fitted = estimator.fit(X_train, y_train)
        scores.append(scoring(y_test, fitted.predict(X_test)))
    return scores
def create_individual_op_using_reflection(class_name, operator_name, param_dict):
    """Instantiate a class given its fully qualified name.

    *operator_name* is accepted for interface compatibility but unused.
    Returns None when *class_name* is None; otherwise constructs the class
    with **param_dict (or no arguments when param_dict is None).
    """
    instance = None
    if class_name is not None:
        module_path, sep, short_name = class_name.rpartition(".")
        assert (
            sep == "."
        ), "The class name needs to be fully qualified, i.e. module name + class name"
        module = importlib.import_module(module_path)
        cls = getattr(module, short_name)
        instance = cls() if param_dict is None else cls(**param_dict)
    return instance
if TYPE_CHECKING:
import lale.operators
def to_graphviz(
    lale_operator: "lale.operators.Operator",
    ipython_display: bool = True,
    call_depth: int = 1,
    **dot_graph_attr,
):
    """Render a lale operator (or pipeline) as a graphviz dot graph.

    Raises TypeError when the argument is not a lale Operator.
    Extra keyword arguments are forwarded as dot graph attributes.
    """
    import lale.json_operator
    import lale.operators
    import lale.visualize

    if not isinstance(lale_operator, lale.operators.Operator):
        raise TypeError("The input to to_graphviz needs to be a valid LALE operator.")
    # +1 accounts for this wrapper frame — presumably used for call-site
    # introspection inside to_json; TODO confirm against lale.json_operator.
    jsn = lale.json_operator.to_json(lale_operator, call_depth=call_depth + 1)
    dot = lale.visualize.json_to_graphviz(jsn, ipython_display, dot_graph_attr)
    return dot
def instantiate_from_hyperopt_search_space(obj_hyperparams, new_hyperparams):
    """Recursively merge sampled hyperopt values into hyperparameter values.

    Walks ``new_hyperparams`` (the sampled values) in parallel with
    ``obj_hyperparams`` (the operator's current values), instantiating any
    nested sub-space marked with LALE_NESTED_SPACE_KEY.  Returns a
    replacement value when one is needed; returns None when the caller
    should keep ``new_hyperparams`` as-is (note: the dict branch updates
    ``new_hyperparams`` in place and returns None).
    """
    if isinstance(new_hyperparams, dict) and LALE_NESTED_SPACE_KEY in new_hyperparams:
        sub_params = new_hyperparams[LALE_NESTED_SPACE_KEY]

        sub_op = obj_hyperparams
        if isinstance(sub_op, list):
            if len(sub_op) == 1:
                sub_op = sub_op[0]
            else:
                # multiple choices: the sub-space encodes (step_index, params)
                step_index, step_params = list(sub_params)[0]
                if step_index < len(sub_op):
                    sub_op = sub_op[step_index]
                    sub_params = step_params

        return create_instance_from_hyperopt_search_space(sub_op, sub_params)
    elif isinstance(new_hyperparams, (list, tuple)):
        assert isinstance(obj_hyperparams, (list, tuple))
        params_len = len(new_hyperparams)
        assert params_len == len(obj_hyperparams)
        res: Optional[List[Any]] = None

        # only allocate a copy the first time an element actually changes
        for i in range(params_len):
            nhi = new_hyperparams[i]
            ohi = obj_hyperparams[i]
            updated_params = instantiate_from_hyperopt_search_space(ohi, nhi)
            if updated_params is not None:
                if res is None:
                    res = list(new_hyperparams)
                res[i] = updated_params
        if res is not None:
            if isinstance(obj_hyperparams, tuple):
                return tuple(res)
            else:
                return res
        # workaround for what seems to be a hyperopt bug
        # where hyperopt returns a tuple even though the
        # hyperopt search space specifies a list
        is_obj_tuple = isinstance(obj_hyperparams, tuple)
        is_new_tuple = isinstance(new_hyperparams, tuple)
        if is_obj_tuple != is_new_tuple:
            # coerce the sampled container back to the original's kind
            if is_obj_tuple:
                return tuple(new_hyperparams)
            else:
                return list(new_hyperparams)
        return None
    elif isinstance(new_hyperparams, dict):
        assert isinstance(obj_hyperparams, dict)
        for k, sub_params in new_hyperparams.items():
            if k in obj_hyperparams:
                sub_op = obj_hyperparams[k]
                updated_params = instantiate_from_hyperopt_search_space(
                    sub_op, sub_params
                )
                if updated_params is not None:
                    # in-place update; caller keeps using new_hyperparams
                    new_hyperparams[k] = updated_params
        return None
    else:
        return None
def create_instance_from_hyperopt_search_space(
    lale_object, hyperparams
) -> "lale.operators.Operator":
    """
    Hyperparams is a n-tuple of dictionaries of hyper-parameters, each
    dictionary corresponds to an operator in the pipeline.

    Returns a trainable operator/pipeline built from *lale_object* with the
    sampled *hyperparams* applied.
    """
    # lale_object can either be an individual operator, a pipeline or an operatorchoice
    # Validate that the number of elements in the n-tuple is the same
    # as the number of steps in the current pipeline

    from lale.operators import (
        BasePipeline,
        OperatorChoice,
        PlannedIndividualOp,
        TrainableOperator,
        TrainablePipeline,
    )

    if isinstance(lale_object, PlannedIndividualOp):
        new_hyperparams: Dict[str, Any] = dict_without(hyperparams, "name")
        hps = lale_object.hyperparams()
        if hps:
            obj_hyperparams = dict(hps)
        else:
            obj_hyperparams = {}

        # merge nested sub-spaces into the sampled values (in-place updates)
        for k, sub_params in new_hyperparams.items():
            if k in obj_hyperparams:
                sub_op = obj_hyperparams[k]
                updated_params = instantiate_from_hyperopt_search_space(
                    sub_op, sub_params
                )
                if updated_params is not None:
                    new_hyperparams[k] = updated_params

        # sampled values take precedence over the operator's current values
        all_hyperparams = {**obj_hyperparams, **new_hyperparams}
        return lale_object(**all_hyperparams)
    elif isinstance(lale_object, BasePipeline):
        steps = lale_object.steps_list()
        if len(hyperparams) != len(steps):
            raise ValueError(
                "The number of steps in the hyper-parameter space does not match the number of steps in the pipeline."
            )
        op_instances = []
        edges = lale_object.edges()
        # op_map:Dict[PlannedOpType, TrainableOperator] = {}
        op_map = {}
        for op_index, sub_params in enumerate(hyperparams):
            sub_op = steps[op_index]
            op_instance = create_instance_from_hyperopt_search_space(sub_op, sub_params)
            assert isinstance(op_instance, TrainableOperator)
            assert (
                isinstance(sub_op, OperatorChoice)
                or sub_op.class_name() == op_instance.class_name()
            ), f"sub_op {sub_op.class_name()}, op_instance {op_instance.class_name()}"
            op_instances.append(op_instance)
            op_map[sub_op] = op_instance

        # trainable_edges:List[Tuple[TrainableOperator, TrainableOperator]]
        try:
            # rebuild the pipeline topology over the new step instances
            trainable_edges = [(op_map[x], op_map[y]) for (x, y) in edges]
        except KeyError as e:
            raise ValueError(
                "An edge was found with an endpoint that is not a step (" + str(e) + ")"
            ) from e

        return TrainablePipeline(op_instances, trainable_edges, ordered=True)  # type: ignore
    elif isinstance(lale_object, OperatorChoice):
        # Hyperopt search space for an OperatorChoice is generated as a dictionary with a single element
        # corresponding to the choice made, the only key is the index of the step and the value is
        # the params corresponding to that step.
        step_index: int
        choices = lale_object.steps_list()

        if len(choices) == 1:
            step_index = 0
        else:
            step_index_str, hyperparams = list(hyperparams.items())[0]
            step_index = int(step_index_str)
        step_object = choices[step_index]
        return create_instance_from_hyperopt_search_space(step_object, hyperparams)
    else:
        assert False, f"Unknown operator type: {type(lale_object)}"
def find_lale_wrapper(sklearn_obj: Any) -> Optional[Any]:
    """
    :param sklearn_obj: An sklearn compatible object that may have a lale wrapper
    :return: The lale wrapper type, or None if one could not be found
    """
    from .operator_wrapper import get_lale_wrapper_modules

    target = sklearn_obj.__class__.__name__
    # scan the registered wrapper modules for a class of the same name
    for candidate_module in get_lale_wrapper_modules():
        try:
            mod = importlib.import_module(candidate_module)
        except ModuleNotFoundError:
            continue
        try:
            return getattr(mod, target)
        except AttributeError:
            continue
    return None
def _import_from_sklearn_inplace_helper(
    sklearn_obj, fitted: bool = True, is_nested=False
):
    """
    This method take an object and tries to wrap sklearn objects
    (at the top level or contained within hyperparameters of other
    sklearn objects).
    It will modify the object to add in the appropriate lale wrappers.
    It may also return a wrapper or different object than given.

    :param sklearn_obj: the object that we are going to try and wrap
    :param fitted: should we return a TrainedOperator
    :param is_nested: is this a nested invocation (which allows for returning
     a Trainable operator even if fitted is set to True)
    """

    @overload
    def import_nested_params(
        orig_hyperparams: dict, partial_dict: bool
    ) -> Optional[dict]:
        ...

    @overload
    def import_nested_params(orig_hyperparams: Any, partial_dict: bool) -> Any:
        ...

    def import_nested_params(orig_hyperparams: Any, partial_dict: bool = False):
        """
        look through lists/tuples/dictionaries for sklearn compatible objects to import.

        :param orig_hyperparams: the input to recursively look through for sklearn compatible objects
        :param partial_dict: If this is True and the input is a dictionary, the returned dictionary will only have the
        keys with modified values
        :return: Either a modified version of the input or None if nothing was changed
        """
        if isinstance(orig_hyperparams, (tuple, list)):
            new_list: list = []
            list_modified: bool = False
            for e in orig_hyperparams:
                new_e = import_nested_params(e, partial_dict=False)
                if new_e is None:
                    new_list.append(e)
                else:
                    new_list.append(new_e)
                    list_modified = True
            if not list_modified:
                return None
            if isinstance(orig_hyperparams, tuple):
                return tuple(new_list)
            else:
                return new_list

        if isinstance(orig_hyperparams, dict):
            new_dict: dict = {}
            dict_modified: bool = False
            for k, v in orig_hyperparams.items():
                new_v = import_nested_params(v, partial_dict=False)
                if new_v is None:
                    if not partial_dict:
                        new_dict[k] = v
                else:
                    new_dict[k] = new_v
                    dict_modified = True
            if not dict_modified:
                return None
            return new_dict

        # duck-typed sklearn-compatible object: anything with get_params
        if isinstance(orig_hyperparams, object) and hasattr(
            orig_hyperparams, "get_params"
        ):
            newobj = _import_from_sklearn_inplace_helper(
                orig_hyperparams, fitted=fitted, is_nested=True
            )  # allow nested_op to be trainable
            if newobj is orig_hyperparams:
                return None
            return newobj

        return None

    if sklearn_obj is None:
        return None

    if isinstance(sklearn_obj, lale.operators.TrainedIndividualOp):
        # if fitted=False, we may want to return a TrainedIndidivualOp
        return sklearn_obj

    # if the object is a trainable operator, we clean that up
    if isinstance(sklearn_obj, lale.operators.TrainableIndividualOp) and hasattr(
        sklearn_obj, "_trained"
    ):
        if fitted:
            # get rid of the indirection, and just return the trained operator directly
            return sklearn_obj._trained
        else:
            # since we are not supposed to be trained, delete the trained part
            delattr(sklearn_obj, "_trained")  # delete _trained before returning
            return sklearn_obj

    if isinstance(sklearn_obj, lale.operators.Operator):
        if (
            fitted and is_nested or not hasattr(sklearn_obj._impl_instance(), "fit")
        ):  # Operators such as NoOp do not have a fit, so return them as is.
            return sklearn_obj
        if fitted:
            raise ValueError(
                f"""The input pipeline has an operator {sklearn_obj} that is not trained and fitted is set to True,
please pass fitted=False if you want a trainable pipeline as output."""
            )
        # the lale operator is not trained and fitted=False
        return sklearn_obj

    # special case for FeatureUnion.
    # An alternative would be to (like for sklearn pipeline)
    # create a lale wrapper for the sklearn feature union
    # as a higher order operator
    # and then the special case would be just to throw away the outer wrapper
    # Note that lale union does not currently support weights or other features of feature union.
    if isinstance(sklearn_obj, sklearn.pipeline.FeatureUnion):
        transformer_list = sklearn_obj.transformer_list
        concat_predecessors = [
            _import_from_sklearn_inplace_helper(
                transformer[1], fitted=fitted, is_nested=is_nested
            )
            for transformer in transformer_list
        ]
        return lale.operators.make_union(*concat_predecessors)

    if not hasattr(sklearn_obj, "get_params"):
        # if it does not have a get_params method,
        # then we just return it without trying to wrap it
        return sklearn_obj

    class_ = find_lale_wrapper(sklearn_obj)
    if not class_:
        return sklearn_obj  # Return the original object

    # next, we need to figure out what the right hyperparameters are
    orig_hyperparams = sklearn_obj.get_params(deep=False)
    hyperparams = import_nested_params(orig_hyperparams, partial_dict=True)
    if hyperparams:
        # if we have updated any of the hyperparameters then we modify them in the actual sklearn object
        try:
            new_obj = sklearn_obj.set_params(**hyperparams)
            if new_obj is not None:
                sklearn_obj = new_obj
        except NotImplementedError:
            # if the set_params method does not work, then do our best
            pass
        all_new_hyperparams = {**orig_hyperparams, **hyperparams}
    else:
        all_new_hyperparams = orig_hyperparams

    # now, we get the lale operator for the wrapper, with the corresponding hyperparameters
    if not fitted:  # If fitted is False, we do not want to return a Trained operator.
        lale_op_obj_base = class_
    else:
        lale_op_obj_base = lale.operators.TrainedIndividualOp(
            class_._name,
            class_._impl,
            class_._schemas,
            None,
            _lale_trained=True,
        )
    lale_op_obj = lale_op_obj_base(**all_new_hyperparams)

    from lale.lib.sklearn import Pipeline as LaleSKPipelineWrapper

    # If this is a scklearn pipeline, then we want to discard the outer wrapper
    # and just return a lale pipeline
    if isinstance(lale_op_obj, LaleSKPipelineWrapper):  # type: ignore
        return lale_op_obj.shallow_impl._pipeline

    # at this point, the object's hyper-parameters are modified as needed
    # and our wrapper is initialized with the correct hyperparameters.
    # Now we need to replace the wrapper impl with our (possibly modified)
    # sklearn object
    cl_shallow_impl = lale_op_obj.shallow_impl
    if hasattr(cl_shallow_impl, "_wrapped_model"):
        cl_shallow_impl._wrapped_model = sklearn_obj
    else:
        lale_op_obj._impl = sklearn_obj
        lale_op_obj._impl_class_ = sklearn_obj.__class__
    return lale_op_obj
def import_from_sklearn(sklearn_obj: Any, fitted: bool = True, in_place: bool = False):
    """
    This method take an object and tries to wrap sklearn objects
    (at the top level or contained within hyperparameters of other
    sklearn objects).
    It will modify the object to add in the appropriate lale wrappers.
    It may also return a wrapper or different object than given.

    :param sklearn_obj: the object that we are going to try and wrap
    :param fitted: should we return a TrainedOperator
    :param in_place: should we try to mutate what we can in place, or should we
     aggressively deepcopy everything
    :return: The wrapped object (or the input object if we could not wrap it)
    """
    # Fix: the original assigned `obj = sklearn_obj` and then unconditionally
    # re-assigned it in an if/else (a dead store); collapse into one expression.
    obj = sklearn_obj if in_place else copy.deepcopy(sklearn_obj)
    return _import_from_sklearn_inplace_helper(obj, fitted=fitted, is_nested=False)
def import_from_sklearn_pipeline(sklearn_pipeline: Any, fitted: bool = True):
    """
    Note: Same as import_from_sklearn. This alternative name exists for backwards compatibility.

    This method take an object and tries to wrap sklearn objects
    (at the top level or contained within hyperparameters of other
    sklearn objects).
    It will modify the object to add in the appropriate lale wrappers.
    It may also return a wrapper or different object than given.

    :param sklearn_pipeline: the object that we are going to try and wrap
    :param fitted: should we return a TrainedOperator
    :return: The wrapped object (or the input object if we could not wrap it)
    """
    from typing import cast

    from lale.operators import TrainableOperator

    wrapped = import_from_sklearn(sklearn_pipeline, fitted=fitted, in_place=False)
    # simplify using the returned value in the common case
    return cast(TrainableOperator, wrapped)
class val_wrapper:
    """This is used to wrap values that cause problems for hyper-optimizer backends

    lale will unwrap these when given them as the value of a hyper-parameter"""

    def __init__(self, base):
        self._base = base

    def unwrap_self(self):
        return self._base

    @classmethod
    def unwrap(cls, obj):
        # peel off any number of wrapper layers iteratively
        while isinstance(obj, cls):
            obj = obj.unwrap_self()
        return obj
def append_batch(data, batch_data):
    """Concatenate *batch_data* onto *data*, dispatching on the type of *data*.

    Supports None (returns the batch), numpy arrays, (X, y) tuples, torch
    tensors, and pandas Series/DataFrames.  Any unsupported combination
    falls through to the final ValueError.
    """
    if data is None:
        return batch_data
    elif isinstance(data, np.ndarray):
        # NOTE(review): if batch_data is not an ndarray here, this branch
        # falls through and the function raises the generic ValueError below.
        if isinstance(batch_data, np.ndarray):
            if len(data.shape) == 1 and len(batch_data.shape) == 1:
                return np.concatenate([data, batch_data])
            else:
                return np.vstack((data, batch_data))
    elif isinstance(data, tuple):
        X, y = data
        if isinstance(batch_data, tuple):
            batch_X, batch_y = batch_data
            X = append_batch(X, batch_X)
            y = append_batch(y, batch_y)
            return X, y
    elif torch_installed and isinstance(data, torch.Tensor):
        if isinstance(batch_data, torch.Tensor):
            return torch.cat((data, batch_data))
    elif isinstance(data, (pd.Series, pd.DataFrame)):
        return pd.concat([data, batch_data], axis=0)
    try:
        import h5py

        # NOTE(review): this branch unpacks the batch but never writes it and
        # still reaches the raise below — the h5py case looks incomplete.
        # TODO confirm intended behavior (cf. write_batch_output_to_file).
        if isinstance(data, h5py.File):
            if isinstance(batch_data, tuple):
                batch_X, batch_y = batch_data

    except ModuleNotFoundError:
        pass
    raise ValueError(
        f"{type(data)} is unsupported. Supported types are np.ndarray, torch.Tensor and h5py file"
    )
def create_data_loader(
    X: Any,
    y: Any = None,
    batch_size: int = 1,
    num_workers: int = 0,
    shuffle: bool = True,
):
    """A function that takes a dataset as input and outputs a Pytorch dataloader.

    Parameters
    ----------
    X : Input data.
        The formats supported are Pandas DataFrame, Numpy array,
        a sparse matrix, torch.tensor, torch.utils.data.Dataset, path to a HDF5 file,
        lale.util.batch_data_dictionary_dataset.BatchDataDict,
        a Python dictionary of the format `{"dataset": torch.utils.data.Dataset,
        "collate_fn":collate_fn for torch.utils.data.DataLoader}`
    y : Labels., optional
        Supported formats are Numpy array or Pandas series, by default None
    batch_size : int, optional
        Number of samples in each batch, by default 1
    num_workers : int, optional
        Number of workers used by the data loader, by default 0
    shuffle: boolean, optional, default True
        Whether to use SequentialSampler or RandomSampler for creating batches

    Returns
    -------
    torch.utils.data.DataLoader

    Raises
    ------
    TypeError
        Raises a TypeError if the input format is not supported.
    """
    from torch.utils.data import DataLoader, Dataset, TensorDataset

    from lale.util.batch_data_dictionary_dataset import BatchDataDict
    from lale.util.hdf5_to_torch_dataset import HDF5TorchDataset
    from lale.util.numpy_torch_dataset import NumpyTorchDataset, numpy_collate_fn
    from lale.util.pandas_torch_dataset import PandasTorchDataset, pandas_collate_fn

    collate_fn = None
    worker_init_fn = None

    if isinstance(X, Dataset) and not isinstance(X, BatchDataDict):
        # already a torch Dataset: use as-is
        dataset = X
    elif isinstance(X, pd.DataFrame):
        dataset = PandasTorchDataset(X, y)
        collate_fn = pandas_collate_fn
    elif isinstance(X, scipy.sparse.csr_matrix):
        # unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
        X = X.toarray()  # type: ignore
        if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
            y = y.view(np.ndarray)
        dataset = NumpyTorchDataset(X, y)
        collate_fn = numpy_collate_fn
    elif isinstance(X, np.ndarray):
        # unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
        if isinstance(X, lale.datasets.data_schemas.NDArrayWithSchema):
            X = X.view(np.ndarray)
        if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
            y = y.view(np.ndarray)
        dataset = NumpyTorchDataset(X, y)
        collate_fn = numpy_collate_fn
    elif isinstance(X, str):  # Assume that this is path to hdf5 file
        dataset = HDF5TorchDataset(X)
    elif isinstance(X, BatchDataDict):
        dataset = X

        def my_collate_fn(batch):
            return batch[
                0
            ]  # because BatchDataDict's get_item returns a batch, so no collate is required.

        # early return: BatchDataDict already yields whole batches, so force batch_size=1
        return DataLoader(
            dataset, batch_size=1, collate_fn=my_collate_fn, shuffle=shuffle
        )
    elif isinstance(X, dict):  # Assumed that it is data indexed by batch number
        if "dataset" in X:
            dataset = X["dataset"]
            collate_fn = X.get("collate_fn", None)
            worker_init_fn = getattr(dataset, "worker_init_fn", None)
        else:
            # plain dict without a "dataset" key: treat the dict itself as one batch
            return [X]
    elif isinstance(X, torch.Tensor) and y is not None:
        if isinstance(y, np.ndarray):
            y = torch.from_numpy(y)
        dataset = TensorDataset(X, y)
    elif isinstance(X, torch.Tensor):
        dataset = TensorDataset(X)
    else:
        raise TypeError(
            f"Can not create a data loader for a dataset with type {type(X)}"
        )
    return DataLoader(
        dataset,
        batch_size=batch_size,
        collate_fn=collate_fn,
        num_workers=num_workers,
        worker_init_fn=worker_init_fn,
        shuffle=shuffle,
    )
def write_batch_output_to_file(
    file_obj,
    file_path,
    total_len,
    batch_idx,
    batch_X,
    batch_y,
    batch_out_X,
    batch_out_y,
):
    """Write one batch of transformed output into an HDF5 file.

    On the first call (file_obj is None) the file is created at *file_path*
    and its "X" (and optionally "y") datasets are pre-sized from the first
    batch; subsequent calls reuse the returned file_obj and write the batch
    at the slice determined by *batch_idx*.  Returns the (possibly newly
    created) h5py file object.
    """
    if file_obj is None and file_path is None:
        # NOTE(review): message reads oddly — the condition rejects BOTH being None.
        raise ValueError("Only one of the file object or file path can be None.")
    if file_obj is None:
        import h5py

        file_obj = h5py.File(file_path, "w")
        # estimate the size of the dataset based on the first batch output size
        transform_ratio = int(len(batch_out_X) / len(batch_X))
        if len(batch_out_X.shape) == 1:
            h5_data_shape = (transform_ratio * total_len,)
        elif len(batch_out_X.shape) == 2:
            h5_data_shape = (transform_ratio * total_len, batch_out_X.shape[1])
        elif len(batch_out_X.shape) == 3:
            h5_data_shape = (
                transform_ratio * total_len,
                batch_out_X.shape[1],
                batch_out_X.shape[2],
            )
        else:
            raise ValueError(
                "batch_out_X is expected to be a 1-d, 2-d or 3-d array. Any other data types are not handled."
            )
        dataset = file_obj.create_dataset(
            name="X", shape=h5_data_shape, chunks=True, compression="gzip"
        )
        if batch_out_y is None and batch_y is not None:
            # no transformed labels: pass the input labels through
            batch_out_y = batch_y
        if batch_out_y is not None:
            if len(batch_out_y.shape) == 1:
                h5_labels_shape = (transform_ratio * total_len,)
            elif len(batch_out_y.shape) == 2:
                h5_labels_shape = (transform_ratio * total_len, batch_out_y.shape[1])
            else:
                raise ValueError(
                    "batch_out_y is expected to be a 1-d or 2-d array. Any other data types are not handled."
                )
            dataset = file_obj.create_dataset(
                name="y", shape=h5_labels_shape, chunks=True, compression="gzip"
            )
    # write this batch into its slot of the pre-sized datasets
    dataset = file_obj["X"]
    dataset[
        batch_idx * len(batch_out_X) : (batch_idx + 1) * len(batch_out_X)
    ] = batch_out_X
    if batch_out_y is not None or batch_y is not None:
        labels = file_obj["y"]
        if batch_out_y is not None:
            labels[
                batch_idx * len(batch_out_y) : (batch_idx + 1) * len(batch_out_y)
            ] = batch_out_y
        else:
            labels[batch_idx * len(batch_y) : (batch_idx + 1) * len(batch_y)] = batch_y
    return file_obj
def add_missing_values(orig_X, missing_rate=0.1, seed=None):
    """Return a copy of *orig_X* (ndarray or DataFrame) with NaNs injected.

    A fraction *missing_rate* of the rows each get exactly one randomly
    chosen feature set to NaN.  The input is never mutated.
    """
    # see scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html
    n_samples, n_features = orig_X.shape
    n_missing = int(n_samples * missing_rate)
    rng = np.random.RandomState() if seed is None else np.random.RandomState(seed)

    # pick which rows get a missing value (RNG call order matters: shuffle first)
    row_mask = np.zeros(n_samples, dtype=bool)
    row_mask[:n_missing] = True
    rng.shuffle(row_mask)
    # then pick one feature per affected row
    feature_picks = rng.randint(0, n_features, n_missing)

    result = orig_X.copy()
    if isinstance(result, np.ndarray):
        result[row_mask, feature_picks] = np.nan
    else:
        assert isinstance(result, pd.DataFrame)
        pick_idx = 0
        for row in range(n_samples):
            if row_mask[row]:
                result.iloc[row, feature_picks[pick_idx]] = np.nan
                pick_idx += 1
    return result
# helpers for manipulating (extended) sklearn style paths.
# documentation of the path format is part of the operators module docstring
def partition_sklearn_params(
    d: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]:
    """Split sklearn-style params into top-level and nested groups.

    Keys without ``__`` go into the first dict unchanged; a key like
    ``"step__param"`` is grouped under ``"step"`` in the second dict with
    the remainder (``"param"``) as its key.
    """
    main_parts: Dict[str, Any] = {}
    sub_parts: Dict[str, Dict[str, Any]] = {}
    for key, value in d.items():
        group, sep, rest = key.partition("__")
        if not sep:
            assert key not in main_parts
            main_parts[key] = value
        else:
            bucket = sub_parts.setdefault(group, {})
            assert rest not in bucket
            bucket[rest] = value
    return (main_parts, sub_parts)
def partition_sklearn_choice_params(d: Dict[str, Any]) -> Tuple[int, Dict[str, Any]]:
    """Split the params of a choice: extract the discriminant ("?" key) as an
    int and strip the choice prefix from every remaining key.

    Exactly one discriminant entry must be present (checked by assertions).
    """
    discriminant: int = -1
    rest: Dict[str, Any] = {}
    for key, value in d.items():
        if key != discriminant_name:
            rest[unnest_choice(key)] = value
        else:
            assert discriminant == -1
            discriminant = int(value)
    assert discriminant != -1
    return (discriminant, rest)


# Special markers used when encoding search spaces as flat string keys.
DUMMY_SEARCH_SPACE_GRID_PARAM_NAME: str = "$"  # placeholder param for paramless grids
discriminant_name: str = "?"  # key that selects which choice branch is active
choice_prefix: str = "?"  # prefix for params nested under a choice
structure_type_name: str = "#"  # key that records a container's structure type
structure_type_list: str = "list"
structure_type_tuple: str = "tuple"
structure_type_dict: str = "dict"


def get_name_and_index(name: str) -> Tuple[str, int]:
    """given a name of the form "name@i", returns (name, i)
    if given a name of the form "name", returns (name, 0)
    """
    base, sep, idx = name.partition("@")
    return (base, int(idx)) if sep else (base, 0)


def make_degen_indexed_name(name, index):
    """Always attach ``@index``, even for index 0."""
    return f"{name}@{index}"


def make_indexed_name(name, index):
    """Attach ``@index`` except for the first occurrence (index 0)."""
    return name if index == 0 else f"{name}@{index}"


def make_array_index_name(index, is_tuple: bool = False):
    """Encode a positional index, using ``##`` for tuples and ``#`` for lists."""
    marker = "##" if is_tuple else "#"
    return marker + str(index)


def is_numeric_structure(structure_type: str):
    """True for positionally indexed containers (list/tuple), False for dict."""
    if structure_type in ["list", "tuple"]:
        return True
    if structure_type == "dict":
        return False
    assert False, f"Unknown structure type {structure_type} found"


V = TypeVar("V")


def nest_HPparam(name: str, key: str):
    """Prefix ``key`` with ``name__``; the dummy placeholder collapses to just ``name``."""
    if key == DUMMY_SEARCH_SPACE_GRID_PARAM_NAME:
        # we can get rid of the dummy now, since we have a name for it
        return name
    return f"{name}__{key}"


def nest_HPparams(name: str, grid: Mapping[str, V]) -> Dict[str, V]:
    """Apply :func:`nest_HPparam` to every key of one grid."""
    return {nest_HPparam(name, key): value for key, value in grid.items()}


def nest_all_HPparams(
    name: str, grids: Iterable[Mapping[str, V]]
) -> List[Dict[str, V]]:
    """Given the name of an operator in a pipeline, this transforms every key(parameter name) in the grids
    to use the operator name as a prefix (separated by __). This is the convention in scikit-learn pipelines.
    """
    return [nest_HPparams(name, g) for g in grids]


def nest_choice_HPparam(key: str):
    """Prefix one key with the choice marker."""
    return choice_prefix + key


def nest_choice_HPparams(grid: Mapping[str, V]) -> Dict[str, V]:
    """Apply :func:`nest_choice_HPparam` to every key of one grid."""
    return {nest_choice_HPparam(key): value for key, value in grid.items()}


def nest_choice_all_HPparams(grids: Iterable[Mapping[str, V]]) -> List[Dict[str, V]]:
    """this transforms every key(parameter name) in the grids
    to be nested under a choice, using a ? as a prefix (separated by __). This is the convention in scikit-learn pipelines.
    """
    return [nest_choice_HPparams(g) for g in grids]


def unnest_choice(k: str) -> str:
    """Strip the choice marker; the key must carry it."""
    assert k.startswith(choice_prefix)
    return k[len(choice_prefix) :]


def unnest_HPparams(k: str) -> List[str]:
    """Split a nested key back into its path components."""
    return k.split("__")
def are_hyperparameters_equal(hyperparam1, hyperparam2):
    """Compare two hyperparameter values, treating numpy arrays elementwise.

    hyperparam2 may come from a schema default, so it is not necessarily an
    array even when hyperparam1 is one; ``np.all`` covers that comparison.
    """
    if not isinstance(hyperparam1, np.ndarray):
        return hyperparam1 == hyperparam2
    return np.all(hyperparam1 == hyperparam2)
def _is_ast_subscript(expr):
return isinstance(expr, ast.Subscript)
def _is_ast_attribute(expr):
return isinstance(expr, ast.Attribute)
def _is_ast_constant(expr):
return isinstance(expr, ast.Constant)
def _is_ast_subs_or_attr(expr):
return isinstance(expr, (ast.Subscript, ast.Attribute))
def _is_ast_call(expr):
return isinstance(expr, ast.Call)
def _is_ast_name(expr):
return isinstance(expr, ast.Name)
def _ast_func_id(expr):
if isinstance(expr, ast.Name):
return expr.id
else:
raise ValueError("function name expected")
def _is_df(df):
return _is_pandas_df(df) or _is_spark_df(df)
def _is_pandas_series(df):
return isinstance(df, pd.Series)
def _is_pandas_df(df):
return isinstance(df, pd.DataFrame)
def _is_pandas(df):
return isinstance(df, (pd.Series, pd.DataFrame))
def _is_spark_df(df):
if spark_installed:
return isinstance(df, lale.datasets.data_schemas.SparkDataFrameWithIndex)
else:
return False
def _is_spark_df_without_index(df):
if spark_installed:
return isinstance(df, spark_df) and not _is_spark_df(df)
else:
return False
def _ensure_pandas(df) -> pd.DataFrame:
if _is_spark_df(df):
return df.toPandas()
assert _is_pandas(df), type(df)
return df
def _get_subscript_value(subscript_expr):
if isinstance(subscript_expr.slice, ast.Constant): # for Python 3.9
subscript_value = subscript_expr.slice.value
else:
subscript_value = subscript_expr.slice.value.s # type: ignore
return subscript_value
class GenSym:
    """Generate identifiers that are unique with respect to a set of taken names.

    Every generated name is recorded, so successive calls never collide with
    each other or with the initial name set.
    """

    def __init__(self, names: Set[str]):
        # names already in use; grows with every __call__
        self._names = names

    def __call__(self, prefix):
        """Return ``prefix`` if free, else the first free ``prefix_<k>`` for k = 0, 1, ..."""
        candidate = prefix
        if prefix in self._names:
            counter = 0
            candidate = f"{prefix}_{counter}"
            while candidate in self._names:
                counter += 1
                candidate = f"{prefix}_{counter}"
        self._names |= {candidate}
        return candidate
def get_sklearn_estimator_name() -> str:
    """Some higher order sklearn operators changed the name of the nested estimatator in later versions.
    This returns the appropriate version dependent paramater name
    """
    import lale.operators
    from packaging import version

    # "base_estimator" was renamed to "estimator" in scikit-learn 1.2
    is_old = lale.operators.sklearn_version < version.Version("1.2")
    return "base_estimator" if is_old else "estimator"
def get_estimator_param_name_from_hyperparams(hyperparams):
    """Pick "estimator" vs "base_estimator" based on what the hyperparams carry.

    Returns "estimator" when "base_estimator" is absent (sentinel
    "deprecated"), or when it is None while an "estimator" entry exists;
    otherwise returns "base_estimator".
    """
    base = hyperparams.get("base_estimator", "deprecated")
    use_new_name = base == "deprecated" or (
        base is None and "estimator" in hyperparams
    )
    return "estimator" if use_new_name else "base_estimator"
| 46,947 | 34.459215 | 150 | py |
lale | lale-master/lale/util/pandas_torch_dataset.py | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class PandasTorchDataset(Dataset):
    """Pytorch Dataset subclass that takes a pandas DataFrame and an optional label pandas Series."""

    def __init__(self, X, y=None):
        """X and y are the dataset and labels respectively.

        Parameters
        ----------
        X : pandas DataFrame
            Two dimensional dataset of input features.
        y : pandas Series
            Labels
        """
        self.X = X
        self.y = y

    def __len__(self):
        # number of rows in the feature frame
        return self.X.shape[0]

    def __getitem__(self, idx):
        # pair the row with its label when labels were supplied
        if self.y is None:
            return self.X.iloc[idx]
        return self.X.iloc[idx], self.y.iloc[idx]

    def get_data(self):
        """Return the underlying data: (X, y) if labels exist, else X alone."""
        if self.y is not None:
            return self.X, self.y
        return self.X
def pandas_collate_fn(batch):
    """Collate dataset items back into a batch for a pytorch DataLoader.

    Each item is either a (pandas row, label) tuple or a bare pandas row.
    Rows are gathered into a DataFrame; labels, when present, into a Series.
    """
    feature_dicts = None
    labels = None
    for elem in batch:
        if isinstance(elem, tuple):
            row_dict = elem[0].to_dict()
            if feature_dicts is None:
                feature_dicts = [row_dict]
            else:
                feature_dicts.append(row_dict)
            if labels is None:
                labels = [elem[1]]
            else:
                labels.append(elem[1])
        else:
            row_dict = elem.to_dict()
            if feature_dicts is None:
                feature_dicts = [row_dict]
            else:
                feature_dicts.append(row_dict)
    if labels is None:
        return pd.DataFrame(feature_dicts)
    return (pd.DataFrame(feature_dicts), pd.Series(labels))
| 2,475 | 28.831325 | 101 | py |
lale | lale-master/lale/util/hdf5_to_torch_dataset.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as import_exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from import_exc
try:
import h5py
except ModuleNotFoundError as import_exc:
raise ModuleNotFoundError(
"""Your Python environment does not have h5py installed. You can install it with
pip install h5py
or with
pip install 'lale[full]'"""
) from import_exc
class HDF5TorchDataset(Dataset):
    """Pytorch Dataset subclass that reads items lazily from an hdf5 file."""

    def __init__(self, file_path):
        """.

        Parameters
        ----------
        file_path : str
            Path of the hdf5 file; it must contain a dataset named "X" and
            may contain a dataset named "y" with the labels.
        """
        self.file_path = file_path
        # Open only to read the length, then close again: the original code
        # left this handle open forever (a file-descriptor leak), while every
        # accessor below re-opens the file with a context manager anyway.
        with h5py.File(file_path) as h5_file:
            self.length = h5_file["X"].shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Return X[idx], or the pair (X[idx], y[idx]) if labels are present."""
        with h5py.File(self.file_path) as h5_file:
            X = h5_file["X"]
            try:
                y = h5_file["y"]
            except KeyError:
                # no "y" dataset: unlabeled data
                y = None
            if y is None:
                element = X[idx]
            else:
                element = X[idx], y[idx]
        return element

    def get_data(self):
        """Return the full contents: X, or (X, y) if labels are present."""
        with h5py.File(self.file_path) as h5_file:
            X = h5_file["X"][:]
            try:
                y = h5_file["y"][:]
            except KeyError:
                y = None
            if y is None:
                return X
            else:
                return X, y
| 2,412 | 30.75 | 89 | py |
lale | lale-master/lale/util/batch_data_dictionary_dataset.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.utils.data import Dataset
class BatchDataDict(Dataset):
    """Pytorch Dataset subclass that takes a dictionary of format {'<batch_idx>': <batch_data>}."""

    def __init__(self, X, y=None):
        """X is the dictionary dataset and y is ignored.

        Parameters
        ----------
        X : dict
            Dictionary of format {'<batch_idx>': <batch_data>}
        y : None
            Ignored.
        """
        self.data_dict = X

    def __len__(self):
        # one entry per batch, so the dict size is the dataset length
        return len(self.data_dict)

    def __getitem__(self, idx):
        """Return the whole batch stored at ``idx`` (not a single element)."""
        return self.data_dict[idx]
| 1,227 | 30.487179 | 99 | py |
lale | lale-master/lale/util/numpy_to_torch_dataset.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class NumpyTorchDataset(Dataset):
    """Pytorch Dataset subclass that takes a numpy array and an optional label array."""

    def __init__(self, X, y=None):
        """X and y are the dataset and labels respectively.

        Parameters
        ----------
        X : numpy array
            Two dimensional dataset of input features.
        y : numpy array
            Labels
        """
        self.X = X
        self.y = y

    def __len__(self):
        # number of rows along the first axis
        return self.X.shape[0]

    def __getitem__(self, idx):
        # (row, label) pair when labels were given, bare row otherwise
        if self.y is None:
            return self.X[idx]
        return self.X[idx], self.y[idx]

    def get_data(self):
        """Return the underlying arrays: (X, y) if labels exist, else X alone."""
        if self.y is not None:
            return self.X, self.y
        return self.X
def numpy_collate_fn(batch):
    """Collate dataset items into a numpy batch for a pytorch DataLoader.

    Items are either (features, label) tuples or bare feature rows.  Rows and
    labels are stacked with np.vstack; a single-column label matrix is
    flattened to a 1-d vector before returning.
    """
    stacked_X = None
    stacked_y = None
    for elem in batch:
        if isinstance(elem, tuple):
            stacked_X = elem[0] if stacked_X is None else np.vstack((stacked_X, elem[0]))
            stacked_y = elem[1] if stacked_y is None else np.vstack((stacked_y, elem[1]))  # type: ignore
        else:
            stacked_X = elem if stacked_X is None else np.vstack((stacked_X, elem))  # type: ignore
    if stacked_y is None:
        return stacked_X
    if len(stacked_y.shape) > 1 and stacked_y.shape[1] == 1:
        # flatten an (n, 1) label matrix into an (n,) vector
        stacked_y = np.reshape(stacked_y, (len(stacked_y),))
    return stacked_X, stacked_y
| 2,557 | 29.452381 | 89 | py |
lale | lale-master/lale/util/numpy_torch_dataset.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class NumpyTorchDataset(Dataset):
    """Pytorch Dataset subclass that takes a numpy array and an optional label array."""

    def __init__(self, X, y=None):
        """X and y are the dataset and labels respectively.

        Parameters
        ----------
        X : numpy array
            Two dimensional dataset of input features.
        y : numpy array
            Labels
        """
        self.X = X
        self.y = y

    def __len__(self):
        # length == number of rows along the first axis
        return self.X.shape[0]

    def __getitem__(self, idx):
        # yield (features, label) pairs only when labels were supplied
        has_labels = self.y is not None
        return (self.X[idx], self.y[idx]) if has_labels else self.X[idx]

    def get_data(self):
        """Return X, or the (X, y) pair when labels were supplied."""
        return self.X if self.y is None else (self.X, self.y)
def numpy_collate_fn(batch):
    """Collate dataset items into a numpy batch for a pytorch DataLoader.

    Each item is either a (features, label) tuple or a bare feature row.
    Rows (and labels) are stacked with np.vstack; a single-column label
    matrix is flattened to a 1-d vector before returning.
    """
    xs = None
    ys = None
    for entry in batch:
        is_pair = isinstance(entry, tuple)
        x_part = entry[0] if is_pair else entry
        xs = x_part if xs is None else np.vstack((xs, x_part))  # type: ignore
        if is_pair:
            ys = entry[1] if ys is None else np.vstack((ys, entry[1]))  # type: ignore
    if ys is None:
        return xs
    if len(ys.shape) > 1 and ys.shape[1] == 1:
        # flatten an (n, 1) label matrix into an (n,) vector
        ys = np.reshape(ys, (len(ys),))
    return xs, ys
| 2,557 | 29.452381 | 89 | py |
lale | lale-master/lale/util/pandas_to_torch_dataset.py | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class PandasTorchDataset(Dataset):
    """Pytorch Dataset subclass that takes a pandas DataFrame and an optional label pandas Series."""

    def __init__(self, X, y=None):
        """X and y are the dataset and labels respectively.

        Parameters
        ----------
        X : pandas DataFrame
            Two dimensional dataset of input features.
        y : pandas Series
            Labels
        """
        self.X = X
        self.y = y

    def __len__(self):
        # row count of the feature frame
        return self.X.shape[0]

    def __getitem__(self, idx):
        # pair the row with its label when labels were supplied
        has_labels = self.y is not None
        return (self.X.iloc[idx], self.y.iloc[idx]) if has_labels else self.X.iloc[idx]

    def get_data(self):
        """Return X, or the (X, y) pair when labels were supplied."""
        return self.X if self.y is None else (self.X, self.y)
def pandas_collate_fn(batch):
    """Collate dataset items back into a batch for a pytorch DataLoader.

    Each item is either a (pandas row, label) tuple or a bare pandas row.
    Rows are gathered into a DataFrame; labels, when present, into a Series.
    """
    return_X = None
    return_y = None
    for item in batch:
        if isinstance(item, tuple):
            if return_X is None:
                return_X = [item[0].to_dict()]
            else:
                return_X.append(item[0].to_dict())
            if return_y is None:
                return_y = [item[1]]
            else:
                return_y.append(item[1])
        else:
            # BUG FIX: an unlabeled item is the row itself, not a tuple, so it
            # must be converted directly; ``item[0].to_dict()`` indexed into
            # the row and raised AttributeError on the scalar it returned.
            if return_X is None:
                return_X = [item.to_dict()]
            else:
                return_X.append(item.to_dict())
    if return_y is not None:
        return (pd.DataFrame(return_X), pd.Series(return_y))
    else:
        return pd.DataFrame(return_X)
| 2,481 | 28.903614 | 101 | py |
lale | lale-master/lale/datasets/data_schemas.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple, Type, Union
import numpy as np
from numpy import issubdtype, ndarray
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from scipy.sparse import csr_matrix
import lale.type_checking
from lale.helpers import _is_spark_df
from lale.type_checking import JSON_TYPE
try:
import torch
from torch import Tensor
torch_installed = True
except ImportError:
torch_installed = False
try:
import py4j.protocol
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql import GroupedData as SparkGroupedData
spark_installed = True
except ImportError:
spark_installed = False
# See instructions for subclassing numpy ndarray:
# https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
class NDArrayWithSchema(ndarray):
    """Numpy ndarray subclass that can carry a JSON schema and a table name.

    Both extras default to None and survive numpy view/slice operations via
    ``__array_finalize__``.
    """

    def __new__(
        cls,
        shape,
        dtype=float,
        buffer=None,
        offset=0,
        strides=None,
        order=None,
        json_schema=None,
        table_name=None,
    ):
        # Standard ndarray construction, then attach the two extra attributes.
        result = super(  # pylint:disable=too-many-function-args
            NDArrayWithSchema, cls
        ).__new__(
            cls, shape, dtype, buffer, offset, strides, order  # type: ignore
        )
        result.json_schema = json_schema
        result.table_name = table_name
        return result

    def __array_finalize__(self, obj):
        # Called by numpy for views/copies; propagate the extras from the
        # source array (obj is None only during explicit construction).
        if obj is None:
            return
        self.json_schema = getattr(obj, "json_schema", None)
        self.table_name = getattr(obj, "table_name", None)
# See instructions for subclassing pandas DataFrame:
# https://pandas.pydata.org/pandas-docs/stable/development/extending.html#extending-subclassing-pandas
class DataFrameWithSchema(DataFrame):
    """Pandas DataFrame subclass that can carry a JSON schema and a table name.

    Per the pandas subclassing guide, the extra attribute names are registered
    in ``_internal_names`` so pandas treats them as internal metadata.
    """

    _internal_names = DataFrame._internal_names + ["json_schema", "table_name"]
    _internal_names_set = set(_internal_names)

    @property
    def _constructor(self):
        # pandas calls this to build results of operations on this frame
        return DataFrameWithSchema
class SeriesWithSchema(Series):
    """Pandas Series subclass that can carry a JSON schema, a table name, and
    cross-validation fold bookkeeping (``folds_for_monoid``).

    NOTE(review): the list starts from ``DataFrame._internal_names`` even
    though this is a Series subclass — presumably the two lists coincide, but
    confirm this was intended rather than ``Series._internal_names``.
    """

    _internal_names = DataFrame._internal_names + [
        "json_schema",
        "table_name",
        "folds_for_monoid",
    ]
    _internal_names_set = set(_internal_names)

    @property
    def _constructor(self):
        # pandas calls this to build results of operations on this series
        return SeriesWithSchema
if spark_installed:
def _gen_index_name(df, cpt=None):
name = f"index{cpt if cpt is not None else ''}"
if name in df.columns:
return _gen_index_name(df, cpt=cpt + 1 if cpt is not None else 0)
else:
return name
    class SparkDataFrameWithIndex(SparkDataFrame):  # type: ignore
        """Spark DataFrame wrapper that materializes explicit index column(s).

        When no index columns are given, a fresh one (named "index", possibly
        suffixed to avoid clashes) is populated from the row order.
        """

        def __init__(self, df, index_names=None):
            # Decide the single index column name, if any.
            if index_names is not None and len(index_names) == 1:
                index_name = index_names[0]
            elif index_names is None or len(index_names) == 0:
                index_name = _gen_index_name(df)
                index_names = [index_name]
            else:
                index_name = None  # multiple index columns: no single name
            # Materialize a missing index column from the row order.
            if index_name is not None and index_name not in df.columns:
                df_with_index = (
                    df.rdd.zipWithIndex()
                    .map(lambda row: row[0] + (row[1],))
                    .toDF(df.columns + [index_name])
                )
            else:
                df_with_index = df
            # Re-apply the table name, which is stored as the Spark alias.
            table_name = get_table_name(df)
            if table_name is not None:
                df_with_index = df_with_index.alias(table_name)
            super().__init__(df_with_index._jdf, df_with_index.sql_ctx)
            self.index_name = index_name
            self.index_names = index_names
            # Copy the per-field metadata from the original schema.
            for f in df.schema.fieldNames():
                self.schema[f].metadata = df.schema[f].metadata

        def drop_indexes(self):
            """Return a dataframe without the index columns, keeping the table name."""
            result = self.drop(*self.index_names)
            result = add_table_name(result, get_table_name(self))
            return result

        @property
        def columns_without_indexes(self):
            """Column names excluding the index columns."""
            cols = list(self.columns)
            for name in self.index_names:
                cols.remove(name)
            return cols

        def toPandas(self, *args, **kwargs):
            """Convert to pandas, turning the index columns into the pandas index."""
            df = super().toPandas(*args, **kwargs)
            return df.set_index(self.index_names)
else:
    class SparkDataFrameWithIndex:  # type: ignore
        """Stub used when pyspark is not installed; every member raises."""

        def __init__(self, df, index_names=None) -> None:
            raise ValueError("pyspark is not installed")  # type: ignore

        @property
        def index_name(self) -> Union[str, None]:
            raise ValueError("pyspark is not installed")  # type: ignore

        @property
        def index_names(self) -> List[str]:
            raise ValueError("pyspark is not installed")  # type: ignore

        def toPandas(self, *args, **kwargs) -> DataFrame:
            raise ValueError("pyspark is not installed")  # type: ignore

        @property
        def schema(self) -> Any:
            raise ValueError("pyspark is not installed")  # type: ignore
def add_schema(obj, schema=None, raise_on_failure=False, recalc=False) -> Any:
    """Attach a JSON schema to a dataset by wrapping it in a schema-carrying subclass.

    Parameters
    ----------
    obj : ndarray, Series, DataFrame, or regular nested list
        Dataset to annotate.  Returned unchanged when schema validation is
        globally disabled, when it already carries a schema (unless
        ``recalc``), or — if ``raise_on_failure`` is False — when the type
        is not recognized.
    schema : JSON schema, optional
        Schema to attach; inferred from the data (``to_schema``) when None.
    raise_on_failure : bool, default False
        If True, raise ValueError for unrecognized types.
    recalc : bool, default False
        If True, discard any previously attached schema first.
    """
    from lale.settings import disable_data_schema_validation

    if disable_data_schema_validation:
        # schema validation is globally disabled, so skip the wrapping cost
        return obj
    if obj is None:
        return None
    # wrap obj in the matching *WithSchema type (no-op if already wrapped)
    if isinstance(obj, NDArrayWithSchema):
        result = obj
    elif isinstance(obj, ndarray):
        result = obj.view(NDArrayWithSchema)
    elif isinstance(obj, SeriesWithSchema):
        result = obj
    elif isinstance(obj, Series):
        result = SeriesWithSchema(obj)
    elif isinstance(obj, DataFrameWithSchema):
        result = obj
    elif isinstance(obj, DataFrame):
        result = DataFrameWithSchema(obj)
    elif is_list_tensor(obj):
        obj = np.array(obj)
        result = obj.view(NDArrayWithSchema)
    elif raise_on_failure:
        raise ValueError(f"unexpected type(obj) {type(obj)}")
    else:
        return obj
    if recalc:
        setattr(result, "json_schema", None)
    if getattr(result, "json_schema", None) is None:
        # no schema attached yet: infer it or validate and use the given one
        if schema is None:
            setattr(result, "json_schema", to_schema(obj))
        else:
            lale.type_checking.validate_is_schema(schema)
            setattr(result, "json_schema", schema)
    return result
def add_schema_adjusting_n_rows(obj, schema):
    """Attach ``schema`` after pinning its minItems/maxItems to obj's row count."""
    assert isinstance(obj, (ndarray, DataFrame, Series)), type(obj)
    assert schema.get("type", None) == "array", schema
    n_rows = obj.shape[0]
    sized_schema = {**schema, "minItems": n_rows, "maxItems": n_rows}
    return add_schema(obj, sized_schema)
def add_table_name(obj, name) -> Any:
    """Attach a table name to a dataset, wrapping it in a carrier type if needed.

    Returns obj unchanged when name is None, and None when obj is None.  For
    Spark dataframes the name is stored as the dataframe's alias; for numpy
    and pandas data it is stored in a ``table_name`` attribute on the
    corresponding *WithSchema subclass.  GroupBy objects get the attribute
    set directly.
    """
    if obj is None:
        return None
    if name is None:
        return obj
    if spark_installed and isinstance(obj, SparkDataFrame):
        # alias method documentation: https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.alias.html
        # Python class DataFrame with method alias(self, alias): https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
        # Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
        # Scala class DataSet with method as(alias: String): https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
        o = obj.alias(name)
        # copy per-field metadata onto the aliased frame, and re-wrap the
        # index carrier so the index column names are not lost
        for f in obj.schema.fieldNames():
            o.schema[f].metadata = obj.schema[f].metadata
        if isinstance(obj, SparkDataFrameWithIndex):
            o = SparkDataFrameWithIndex(o, obj.index_names)
        return o
    # non-Spark cases: wrap (or shallow-copy) into an attribute-carrying type
    if isinstance(obj, NDArrayWithSchema):
        result = obj.view(NDArrayWithSchema)
        if hasattr(obj, "json_schema"):
            result.json_schema = obj.json_schema
    elif isinstance(obj, ndarray):
        result = obj.view(NDArrayWithSchema)
    elif isinstance(obj, SeriesWithSchema):
        result = obj.copy(deep=False)
        if hasattr(obj, "json_schema"):
            result.json_schema = obj.json_schema
    elif isinstance(obj, Series):
        result = SeriesWithSchema(obj)
    elif isinstance(obj, DataFrameWithSchema):
        result = obj.copy(deep=False)
        if hasattr(obj, "json_schema"):
            result.json_schema = obj.json_schema
    elif isinstance(obj, DataFrame):
        result = DataFrameWithSchema(obj)
    elif is_list_tensor(obj):
        obj = np.array(obj)
        result = obj.view(NDArrayWithSchema)
    elif isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)):
        result = obj
    elif spark_installed and isinstance(obj, SparkGroupedData):
        result = obj
    else:
        raise ValueError(f"unexpected type(obj) {type(obj)}")
    setattr(result, "table_name", name)
    return result
def get_table_name(obj):
    """Retrieve the table name attached by :func:`add_table_name`, or None.

    For Spark dataframes this digs the alias out of the analyzed logical
    plan via py4j; for the other carrier types it reads the ``table_name``
    attribute.
    """
    if spark_installed and isinstance(obj, SparkDataFrame):
        # Python class DataFrame with field self._jdf: https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
        # Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
        # Scala class DataSet with field queryExecution: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
        # Scala fields turn into Java nullary methods
        # Py4J exposes Java methods as Python methods
        # Scala class QueryExecution with field analyzed: LogicalPlan: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
        spark_query = obj._jdf.queryExecution().analyzed()  # type: ignore
        try:
            # calling spark_df.explain("extended") shows the analyzed contents
            # after spark_df.alias("foo"), analyzed contents should be SubqueryAlias
            # Scala class SuqueryAlias with field identifier: https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
            # str(..) converts the Java string into a Python string
            result = str(spark_query.identifier())
        except py4j.protocol.Py4JError:
            # plan is not a SubqueryAlias: the dataframe carries no alias
            result = None
        return result
    if isinstance(
        obj,
        (
            NDArrayWithSchema,
            SeriesWithSchema,
            DataFrameWithSchema,
            DataFrameGroupBy,
            SeriesGroupBy,
        ),
    ) or (spark_installed and isinstance(obj, SparkGroupedData)):
        return getattr(obj, "table_name", None)
    return None
def get_index_name(obj):
    """Return the name of obj's index column if it carries one, else None."""
    if spark_installed and isinstance(obj, SparkDataFrameWithIndex):
        return obj.index_name
    if isinstance(
        obj,
        (
            SeriesWithSchema,
            DataFrameWithSchema,
            DataFrameGroupBy,
            SeriesGroupBy,
        ),
    ):
        return obj.index.name
    return None


def get_index_names(obj):
    """Return the list of obj's index column names if it carries any, else None."""
    if spark_installed and isinstance(obj, SparkDataFrameWithIndex):
        return obj.index_names
    if isinstance(
        obj,
        (
            SeriesWithSchema,
            DataFrameWithSchema,
            DataFrameGroupBy,
            SeriesGroupBy,
        ),
    ):
        return obj.index.names
    return None


def forward_metadata(old, new):
    """Copy lale metadata (table name and, for Spark, index columns) from old to new."""
    named = add_table_name(new, get_table_name(old))
    if isinstance(old, SparkDataFrameWithIndex):
        named = SparkDataFrameWithIndex(named, index_names=get_index_names(old))
    return named
def strip_schema(obj):
    """Return a plain ndarray/Series/DataFrame, dropping any schema-carrying subclass."""
    if isinstance(obj, NDArrayWithSchema):
        plain = np.array(obj)
        assert type(plain) == ndarray  # pylint:disable=unidiomatic-typecheck
        return plain
    if isinstance(obj, SeriesWithSchema):
        plain = Series(obj)
        assert type(plain) == Series  # pylint:disable=unidiomatic-typecheck
        return plain
    if isinstance(obj, DataFrameWithSchema):
        plain = DataFrame(obj)
        assert type(plain) == DataFrame  # pylint:disable=unidiomatic-typecheck
        return plain
    return obj
def _dtype_to_schema(typ) -> JSON_TYPE:
    """Translate a numpy dtype (or scalar type) into a JSON schema fragment.

    Handles booleans, unsigned/signed integers, floats, and strings; for
    structured dtypes it recurses into the fields and for sub-array dtypes
    into the element shape.  Object dtypes are mapped to "string".
    """
    result: JSON_TYPE
    if typ is bool or issubdtype(typ, np.bool_):
        result = {"type": "boolean"}
    elif issubdtype(typ, np.unsignedinteger):
        # must come before the general integer case to keep the minimum bound
        result = {"type": "integer", "minimum": 0}
    elif issubdtype(typ, np.integer):
        result = {"type": "integer"}
    elif issubdtype(typ, np.number):
        result = {"type": "number"}
    elif issubdtype(typ, np.string_) or issubdtype(typ, np.unicode_):
        result = {"type": "string"}
    elif isinstance(typ, np.dtype):
        if typ.fields:
            # structured dtype: one property per named field
            props = {k: _dtype_to_schema(t) for k, t in typ.fields.items()}
            result = {"type": "object", "properties": props}
        elif typ.shape:
            # sub-array dtype: nested array schema over the element shape
            result = _shape_and_dtype_to_schema(typ.shape, typ.subdtype)
        elif issubdtype(typ, np.object_):
            result = {"type": "string"}
        else:
            assert False, f"unexpected dtype {typ}"
    else:
        assert False, f"unexpected non-dtype {typ}"
    return result
def dtype_to_schema(typ) -> JSON_TYPE:
    """Validated variant of :func:`_dtype_to_schema`."""
    schema = _dtype_to_schema(typ)
    lale.type_checking.validate_is_schema(schema)
    return schema


def _shape_and_dtype_to_schema(shape, dtype) -> JSON_TYPE:
    """Wrap the dtype's schema in one nested array schema per dimension."""
    schema = _dtype_to_schema(dtype)
    for dim in reversed(shape):
        schema = {"type": "array", "minItems": dim, "maxItems": dim, "items": schema}
    return schema


def shape_and_dtype_to_schema(shape, dtype) -> JSON_TYPE:
    """Validated variant of :func:`_shape_and_dtype_to_schema`."""
    schema = _shape_and_dtype_to_schema(shape, dtype)
    lale.type_checking.validate_is_schema(schema)
    return schema
def list_tensor_to_shape_and_dtype(ls) -> Optional[Tuple[Tuple[int, ...], Type]]:
    """Infer (shape, dtype) of a regular nested list, or None if it is ragged.

    Scalars (int, float, str) have shape ().  A list is regular when all of
    its elements agree on shape and dtype; an empty list gets dtype int.
    """
    if isinstance(ls, (int, float, str)):
        return ((), type(ls))
    if not isinstance(ls, list):
        return None
    elem_summary: Any = "Any"  # sentinel: no element inspected yet
    for elem in ls:
        summary = list_tensor_to_shape_and_dtype(elem)
        if summary is None:
            return None  # ragged or unsupported element
        if elem_summary == "Any":
            elem_summary = summary
        elif elem_summary != summary:
            return None  # elements disagree on shape or dtype
    if elem_summary == "Any" and len(ls) == 0:
        return ((len(ls),) + (), int)
    elem_shape, elem_dtype = elem_summary
    return ((len(ls),) + elem_shape, elem_dtype)


def is_list_tensor(obj) -> bool:
    """True if obj is a regular (non-ragged) nested list."""
    return isinstance(obj, list) and list_tensor_to_shape_and_dtype(obj) is not None
def _list_tensor_to_schema(ls) -> Optional[JSON_TYPE]:
    """Schema for a regular nested list, or None if it is ragged."""
    summary = list_tensor_to_shape_and_dtype(ls)
    if summary is None:
        return None
    return _shape_and_dtype_to_schema(*summary)


def list_tensor_to_schema(ls) -> Optional[JSON_TYPE]:
    """Validated variant of :func:`_list_tensor_to_schema`; None stays None."""
    schema = _list_tensor_to_schema(ls)
    if schema is None:
        return None
    lale.type_checking.validate_is_schema(schema)
    return schema
def _ndarray_to_schema(array) -> JSON_TYPE:
    """Schema for a numpy array, reusing an attached schema when present."""
    assert isinstance(array, ndarray)
    if isinstance(array, NDArrayWithSchema):
        attached = getattr(array, "json_schema", None)
        if attached is not None:
            return attached
    return _shape_and_dtype_to_schema(array.shape, array.dtype)


def ndarray_to_schema(array) -> JSON_TYPE:
    """Validated variant of :func:`_ndarray_to_schema`."""
    schema = _ndarray_to_schema(array)
    lale.type_checking.validate_is_schema(schema)
    return schema


def _csr_matrix_to_schema(matrix) -> JSON_TYPE:
    """Schema for a scipy CSR matrix: the dense schema plus an isSparse marker."""
    assert isinstance(matrix, csr_matrix)
    schema = _shape_and_dtype_to_schema(matrix.shape, matrix.dtype)
    schema["isSparse"] = {}  # true schema
    return schema


def csr_matrix_to_schema(matrix) -> JSON_TYPE:
    """Validated variant of :func:`_csr_matrix_to_schema`."""
    schema = _csr_matrix_to_schema(matrix)
    lale.type_checking.validate_is_schema(schema)
    return schema
def _dataframe_to_schema(df) -> JSON_TYPE:
    """Schema for a pandas DataFrame, reusing an attached schema when present."""
    assert isinstance(df, DataFrame)
    if isinstance(df, DataFrameWithSchema):
        attached = getattr(df, "json_schema", None)
        if attached is not None:
            return attached
    n_rows, n_columns = df.shape
    dtypes = df.dtypes
    assert n_columns == len(df.columns) and n_columns == len(dtypes)
    # one item schema per column, labeled with the column name
    column_items = [
        {"description": str(col), **_dtype_to_schema(dtypes[col])}
        for col in df.columns
    ]
    return {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": {
            "type": "array",
            "minItems": n_columns,
            "maxItems": n_columns,
            "items": column_items,
        },
    }


def dataframe_to_schema(df) -> JSON_TYPE:
    """Validated variant of :func:`_dataframe_to_schema`."""
    schema = _dataframe_to_schema(df)
    lale.type_checking.validate_is_schema(schema)
    return schema


def _series_to_schema(series) -> JSON_TYPE:
    """Schema for a pandas Series, reusing an attached schema when present."""
    assert isinstance(series, Series)
    if isinstance(series, SeriesWithSchema):
        attached = getattr(series, "json_schema", None)
        if attached is not None:
            return attached
    (n_rows,) = series.shape
    return {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": {"description": str(series.name), **_dtype_to_schema(series.dtype)},
    }


def series_to_schema(series) -> JSON_TYPE:
    """Validated variant of :func:`_series_to_schema`."""
    schema = _series_to_schema(series)
    lale.type_checking.validate_is_schema(schema)
    return schema
def _torch_tensor_to_schema(tensor) -> JSON_TYPE:
assert torch_installed, """Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
assert isinstance(tensor, Tensor)
result: JSON_TYPE
# https://pytorch.org/docs/stable/tensor_attributes.html#torch-dtype
if tensor.dtype == torch.bool:
result = {"type": "boolean"}
elif tensor.dtype == torch.uint8:
result = {"type": "integer", "minimum": 0, "maximum": 255}
elif torch.is_floating_point(tensor):
result = {"type": "number"}
else:
result = {"type": "integer"}
for dim in reversed(tensor.shape):
result = {"type": "array", "minItems": dim, "maxItems": dim, "items": result}
return result
def torch_tensor_to_schema(tensor) -> JSON_TYPE:
    """Schema of a torch Tensor, checked to be a valid JSON schema."""
    schema = _torch_tensor_to_schema(tensor)
    lale.type_checking.validate_is_schema(schema)
    return schema
def is_liac_arff(obj) -> bool:
    """Check whether obj looks like a dataset decoded by the liac-arff library.

    Such a dataset is a dict with (at least) a string "description", a string
    "relation", a list "attributes", and a list "data".
    """
    if not isinstance(obj, dict):
        return False
    required = {
        "description": str,
        "relation": str,
        "attributes": list,
        "data": list,
    }
    return all(
        key in obj and isinstance(obj[key], expected)
        for key, expected in required.items()
    )
def _liac_arff_to_schema(larff) -> JSON_TYPE:
    """Compute a JSON schema for a dataset in liac-arff dictionary format.

    Row count comes from ``larff["data"]`` and per-column schemas from
    ``larff["attributes"]``; an attribute type is either a primitive ARFF
    type name (mapped to a JSON type) or a list of nominal values (mapped
    to an "enum").
    """
    assert is_liac_arff(
        larff
    ), """Your Python environment might contain an 'arff' package different from 'liac-arff'. You can install it with
    pip install 'liac-arff>=2.4.0'
or with
    pip install 'lale[full]'"""
    n_rows, n_columns = len(larff["data"]), len(larff["attributes"])
    def larff_type_to_schema(larff_type) -> JSON_TYPE:
        # Map a single ARFF attribute type to a JSON schema fragment.
        if isinstance(larff_type, str):
            a2j = {
                "numeric": "number",
                "real": "number",
                "integer": "integer",
                "string": "string",
            }
            return {"type": a2j[larff_type.lower()]}
        # A list of values denotes a nominal attribute.
        assert isinstance(larff_type, list)
        return {"enum": [*larff_type]}
    # attr is an (name, type) pair as produced by liac-arff.
    items = [
        {"description": attr[0], **larff_type_to_schema(attr[1])}
        for attr in larff["attributes"]
    ]
    result = {
        "type": "array",
        "minItems": n_rows,
        "maxItems": n_rows,
        "items": {
            "type": "array",
            "minItems": n_columns,
            "maxItems": n_columns,
            "items": items,
        },
    }
    return result
def liac_arff_to_schema(larff) -> JSON_TYPE:
    """Schema of a liac-arff dataset, checked to be a valid JSON schema."""
    schema = _liac_arff_to_schema(larff)
    lale.type_checking.validate_is_schema(schema)
    return schema
def make_optional_schema(schema: JSON_TYPE) -> JSON_TYPE:
    """Relax a schema so that None is also an accepted value."""
    none_schema = {"enum": [None]}
    return {"anyOf": [schema, none_schema]}
def _spark_df_to_schema(df) -> JSON_TYPE:
    """Compute a JSON schema for a Spark DataFrame with index columns.

    Translates the Spark schema (StructType) into a JSON schema over the
    non-index columns; field nullability becomes an "anyOf" with None, and
    the table name (when known) becomes the top-level "description".
    """
    assert spark_installed, """Your Python environment does not have spark installed. You can install it with
    pip install pyspark
"""
    assert isinstance(df, SparkDataFrameWithIndex)
    import pyspark.sql.types as stypes
    from pyspark.sql.types import StructField, StructType
    def maybe_make_optional(schema: JSON_TYPE, is_option: bool) -> JSON_TYPE:
        # Only nullable fields get wrapped in an optional schema.
        if is_option:
            return make_optional_schema(schema)
        return schema
    def spark_datatype_to_json_schema(dtype: stypes.DataType) -> JSON_TYPE:
        # Map a Spark data type to a JSON schema fragment; types without a
        # mapping fall through to the permissive empty schema {} at the end.
        if isinstance(dtype, stypes.ArrayType):
            return {
                "type": "array",
                "items": maybe_make_optional(
                    spark_datatype_to_json_schema(dtype.elementType), dtype.containsNull
                ),
            }
        if isinstance(dtype, stypes.BooleanType):
            return {"type": "boolean"}
        if isinstance(dtype, stypes.DoubleType):
            return {"type": "number"}
        if isinstance(dtype, stypes.FloatType):
            return {"type": "number"}
        if isinstance(dtype, stypes.IntegerType):
            return {"type": "integer"}
        if isinstance(dtype, stypes.LongType):
            return {"type": "integer"}
        if isinstance(dtype, stypes.ShortType):
            return {"type": "integer"}
        if isinstance(dtype, stypes.NullType):
            return {"enum": [None]}
        if isinstance(dtype, stypes.StringType):
            return {"type": "string"}
        return {}
    def spark_struct_field_to_json_schema(f: StructField) -> JSON_TYPE:
        # One column: data type schema, optionally nullable, named via "description".
        type_schema = spark_datatype_to_json_schema(f.dataType)
        result = maybe_make_optional(type_schema, f.nullable)
        if f.name is not None:
            result["description"] = f.name
        return result
    def spark_struct_to_json_schema(
        s: StructType, index_names, table_name: Optional[str] = None
    ) -> JSON_TYPE:
        # Index columns are bookkeeping, not features, so they are skipped.
        items = [
            spark_struct_field_to_json_schema(f) for f in s if f.name not in index_names
        ]
        num_items = len(items)
        result = {
            "type": "array",
            "items": {
                "type": "array",
                "description": "rows",
                "minItems": num_items,
                "maxItems": num_items,
                "items": items,
            },
        }
        if table_name is not None:
            result["description"] = table_name
        return result
    return spark_struct_to_json_schema(df.schema, df.index_names, get_table_name(df))
def spark_df_to_schema(df) -> JSON_TYPE:
    """Schema of a Spark DataFrame, checked to be a valid JSON schema."""
    schema = _spark_df_to_schema(df)
    lale.type_checking.validate_is_schema(schema)
    return schema
def _to_schema(obj) -> JSON_TYPE:
    """Dispatch to the schema computation matching the type of obj.

    Supports None, numpy ndarrays, scipy CSR matrices, pandas DataFrames
    and Series, torch Tensors, liac-arff datasets, lists, Spark DataFrames,
    and values that already are JSON schemas.  Raises ValueError for any
    other type.
    """
    result = None
    if obj is None:
        result = {"enum": [None]}
    elif isinstance(obj, ndarray):
        result = _ndarray_to_schema(obj)
    elif isinstance(obj, csr_matrix):
        result = _csr_matrix_to_schema(obj)
    elif isinstance(obj, DataFrame):
        result = _dataframe_to_schema(obj)
    elif isinstance(obj, Series):
        result = _series_to_schema(obj)
    elif torch_installed and isinstance(obj, Tensor):
        result = _torch_tensor_to_schema(obj)
    elif is_liac_arff(obj):
        result = _liac_arff_to_schema(obj)
    elif isinstance(obj, list):
        result = _list_tensor_to_schema(obj)
    elif _is_spark_df(obj):
        result = _spark_df_to_schema(obj)
    elif lale.type_checking.is_schema(obj):
        result = obj
        # Does not need to validate again the schema
        return result  # type: ignore
    if result is None:
        raise ValueError(f"to_schema(obj), type {type(obj)}, value {obj}")
    return result
def to_schema(obj) -> JSON_TYPE:
    """Schema of any supported dataset type, checked to be a valid JSON schema."""
    schema = _to_schema(obj)
    lale.type_checking.validate_is_schema(schema)
    return schema
| 24,607 | 32.389417 | 205 | py |
lale | lale-master/lale/lib/rasl/batching.py | # Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from .task_graphs import PrioBatch, PrioResourceAware, PrioStep, fit_with_batches
class _BatchingImpl:
def __init__(
self,
operator=None,
batch_size=32,
shuffle=True,
num_workers=0,
inmemory=False,
num_epochs=None,
max_resident=None,
scoring=None,
progress_callback=None,
partial_transform=False,
priority="resource_aware",
verbose=0,
):
self.operator = operator
self.batch_size = batch_size
self.shuffle = shuffle
self.num_workers = num_workers
self.inmemory = inmemory
self.num_epochs = num_epochs
self.max_resident = max_resident
self.scoring = scoring
self.progress_callback = progress_callback
self.partial_transform = partial_transform
self.priority = priority
self.verbose = verbose
def fit(self, X, y=None, classes=None):
if self.operator is None:
raise ValueError("The pipeline object can't be None at the time of fit.")
if hasattr(X, "__next__") and hasattr(
X, "__iter__"
): # allow an iterable that is not a torch data loader
assert y is None, "When X is an Iterable, y should be None"
data_loader = X
else:
try:
from torch.utils.data import DataLoader
except ImportError as exc:
raise ImportError(
"""Batching uses Pytorch for data loading. It is not
installed in the current environment, please install
the package and try again."""
) from exc
if isinstance(X, DataLoader):
assert (
y is None
), "When X is a torch.utils.data.DataLoader, y should be None"
data_loader = X
else:
data_loader = lale.helpers.create_data_loader(
X=X,
y=y,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=self.shuffle,
)
if y is not None and classes is None:
classes = np.unique(y)
if self.priority == "batch":
prio = PrioBatch()
elif self.priority == "step":
prio = PrioStep()
else:
prio = PrioResourceAware()
self.operator = fit_with_batches(
pipeline=self.operator,
batches_train=data_loader, # type:ignore
batches_valid=None,
unique_class_labels=classes, # type:ignore
max_resident=self.max_resident,
prio=prio,
partial_transform=self.partial_transform,
scoring=self.scoring,
progress_callback=self.progress_callback,
verbose=self.verbose,
)
return self
def transform(self, X, y=None):
if hasattr(X, "__next__") and hasattr(
X, "__iter__"
): # allow an iterable that is not a torch data loader
assert y is None, "When X is an Iterable, y should be None"
data_loader = X
else:
try:
from torch.utils.data import DataLoader
except ImportError as exc:
raise ImportError(
"""Batching uses Pytorch for data loading. It is not
installed in the current environment, please install
the package and try again."""
) from exc
if isinstance(X, DataLoader):
assert (
y is None
), "When X is a torch.utils.data.DataLoader, y should be None"
data_loader = X
else:
data_loader = lale.helpers.create_data_loader(
X=X,
y=y,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=self.shuffle,
)
op = self.operator
assert op is not None
transformed_data = op.transform_with_batches(
data_loader, serialize=self.inmemory
)
return transformed_data
def predict(self, X, y=None):
return self.transform(X, y)
_input_fit_schema = {
"description": "Input data schema for fit.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
},
{"type": "object"},
],
},
"y": {
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "integer"},
{"type": "number"},
{"type": "string"},
]
},
},
{"enum": [None]},
],
},
"classes": {
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{"enum": [None]},
],
"description": """The total number of classes in the entire training dataset.""",
},
},
}
_input_predict_transform_schema = { # TODO: separate predict vs. transform
"description": "Input data schema for predictions.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
},
{},
],
},
"y": {
"type": "array",
"items": {"anyOf": [{"type": "integer"}, {"type": "number"}]},
},
},
}
_output_schema = { # TODO: separate predict vs. transform
"description": "Output data schema for transformed data.",
"laleType": "Any",
}
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["batch_size"],
"properties": {
"operator": {
"description": "A lale pipeline object to be used inside of batching",
"laleType": "operator",
},
"batch_size": {
"description": "Batch size used for transform.",
"type": "integer",
"default": 64,
"minimum": 1,
"distribution": "uniform",
"minimumForOptimizer": 32,
"maximumForOptimizer": 128,
},
"shuffle": {
"type": "boolean",
"default": False,
"description": "Shuffle dataset before batching or not.",
},
"num_workers": {
"type": "integer",
"default": 0,
"description": "Number of workers for pytorch dataloader.",
},
"inmemory": {
"type": "boolean",
"default": False,
"description": """Whether all the computations are done in memory
or intermediate outputs are serialized. Only applies to transform/predict.
For fit, use the `max_resident` argument.""",
},
"num_epochs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of epochs. If the operator has `num_epochs` as a parameter, that takes precedence.",
},
"max_resident": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Amount of memory to be used in bytes.",
},
"scoring": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "Batch-wise scoring metrics from `lale.lib.rasl`.",
},
"progress_callback": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "Callback function to get performance metrics per batch.",
},
"partial_transform": {
"type": "boolean",
"default": False,
"description": """Whether to allow partially-trained upstream operators
to transform data for training downstream operators even before the upstream operator has been fully trained.""",
},
"priority": {
"description": """Scheduling priority in task graphs.
"batch" will execute tasks from earlier batches first.
"step" will execute tasks from earlier steps first, like nested-loop algorithm.
And "resource_aware" will execute tasks with less non-resident data first.""",
"enum": ["batch", "step", "resource_aware"],
"default": "resource_aware",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Verbosity level, higher values mean more information.",
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Batching trains the given pipeline using batches.
The batch_size is used across all steps of the pipeline, serializing
the intermediate outputs if specified.""",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
Batching = lale.operators.make_operator(_BatchingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Batching)
| 13,230 | 34.953804 | 127 | py |
lale | lale-master/lale/lib/rasl/concat_features.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import reduce
from typing import Optional
import numpy as np
import pandas as pd
import scipy.sparse
import lale.docstrings
import lale.operators
import lale.pretty_print
from lale.datasets.data_schemas import add_table_name, get_index_names, get_table_name
from lale.expressions import it
from lale.helpers import _is_spark_df
from lale.json_operator import JSON_TYPE
from lale.lib.rasl.alias import Alias
from lale.lib.rasl.join import Join
from lale.type_checking import is_subschema, join_schemas, validate_is_schema
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
try:
import torch
torch_installed = True
except ImportError:
torch_installed = False
def _is_pandas_df(d):
return isinstance(d, pd.DataFrame)
def _is_pandas_series(d):
return isinstance(d, pd.Series)
def _is_pandas(d):
    """Return True when d is either a pandas DataFrame or a pandas Series."""
    return _is_pandas_df(d) or _is_pandas_series(d)
def _gen_table_name(avoid, cpt=0):
name = f"tbl{cpt}"
if name in avoid:
return _gen_table_name(avoid, cpt=cpt + 1)
else:
return name
class _ConcatFeaturesImpl:
    """Pretrained transformer that horizontally concatenates datasets."""
    def transform(self, X):
        """Concatenate the columns of the datasets in list X.

        Four cases: all-pandas (column-wise concat, keeping only the last
        occurrence of a duplicated column name), all-Spark (successive joins
        on index columns), mixed pandas/Spark (Spark inputs are converted to
        pandas first), and anything else (inputs coerced to 2-d numpy arrays
        and concatenated).  The table name is preserved when all inputs
        share it.
        """
        if all(_is_pandas(d) for d in X):
            # Group all columns/series by name to detect duplicates.
            name2series = {}
            for dataset in X:
                if _is_pandas_df(dataset):
                    for name in dataset.columns:
                        name2series[name] = name2series.get(name, []) + [dataset[name]]
                elif _is_pandas_series(dataset):
                    name = dataset.name
                    name2series[name] = name2series.get(name, []) + [dataset]
                else:
                    assert False
            duplicates = [name for name, ls in name2series.items() if len(ls) > 1]
            if len(duplicates) == 0:
                result = pd.concat(X, axis=1)
            else:
                logger.info(f"ConcatFeatures duplicate column names {duplicates}")
                # For a duplicated name, keep only its last occurrence.
                deduplicated = [ls[-1] for _, ls in name2series.items()]
                result = pd.concat(deduplicated, axis=1)
        elif all(_is_spark_df(d) for d in X):
            def join(d1, d2):
                # Equi-join two Spark dataframes on their index columns,
                # inventing table aliases when inputs are unnamed.
                n1 = get_table_name(d1)
                n2 = get_table_name(d2)
                if n1 is None:
                    n1 = _gen_table_name([n2])
                    d1 = Alias(name=n1).transform(d1)
                if n2 is None:
                    n2 = _gen_table_name([n1])
                    d2 = Alias(name=n2).transform(d2)
                indexes_col1 = get_index_names(d1)
                indexes_col2 = get_index_names(d2)
                if indexes_col1 is None or indexes_col2 is None:
                    raise ValueError(
                        "Index columns are required to concatenate features of Spark dataframes (see SparkDataFrameWithIndex)"
                    )
                transformer = Join(
                    pred=[
                        it[n1][index_col1] == it[n2][index_col2]
                        for index_col1, index_col2 in zip(indexes_col1, indexes_col2)
                    ]
                )
                return transformer.transform([d1, d2])
            result = reduce(join, X)
        elif all(_is_pandas(d) or _is_spark_df(d) for d in X):
            # Mixed inputs: convert Spark to pandas and retry.
            X = [d.toPandas() if _is_spark_df(d) else d for d in X]
            result = self.transform(X)
        else:
            np_datasets = []
            # Preprocess the datasets to convert them to 2-d numpy arrays
            for dataset in X:
                if _is_pandas(dataset):
                    np_dataset = dataset.values
                elif _is_spark_df(dataset):
                    np_dataset = dataset.toPandas().values
                elif isinstance(dataset, scipy.sparse.csr_matrix):
                    np_dataset = dataset.toarray()
                elif torch_installed and isinstance(dataset, torch.Tensor):
                    np_dataset = dataset.detach().cpu().numpy()
                else:
                    np_dataset = dataset
                if hasattr(np_dataset, "shape"):
                    if len(np_dataset.shape) == 1:  # To handle numpy column vectors
                        np_dataset = np.reshape(np_dataset, (np_dataset.shape[0], 1))
                np_datasets.append(np_dataset)
            result = np.concatenate(np_datasets, axis=1)
        # Keep the table name only if all inputs agree on it.
        # NOTE(review): after the first reduction step the accumulator is a
        # table name (or None) rather than a dataset, so for three or more
        # inputs this comparison may not behave as intended -- verify.
        name = reduce(
            (
                lambda x, y: get_table_name(x)
                if get_table_name(x) == get_table_name(y)
                else None
            ),
            X,
        )
        return add_table_name(result, name)
    def transform_schema(self, s_X):
        """Used internally by Lale for type-checking downstream operators."""
        min_cols, max_cols, elem_schema = 0, 0, None
        def add_ranges(min_a, max_a, min_b, max_b):
            # Add two (min, max) column-count ranges, where a max may be the
            # sentinel string "unbounded".
            min_ab = min_a + min_b
            if max_a == "unbounded" or max_b == "unbounded":
                max_ab = "unbounded"
            else:
                max_ab = max_a + max_b
            return min_ab, max_ab
        elem_schema: Optional[JSON_TYPE] = None
        for s_dataset in s_X["items"]:
            if s_dataset.get("laleType", None) == "Any":
                # One untyped input makes the whole result untyped.
                return {"laleType": "Any"}
            arr_1d_num = {"type": "array", "items": {"type": "number"}}
            arr_2d_num = {"type": "array", "items": arr_1d_num}
            s_decision_func = {"anyOf": [arr_1d_num, arr_2d_num]}
            if is_subschema(s_decision_func, s_dataset):
                # Normalize decision-function-style outputs to 2-d numbers.
                s_dataset = arr_2d_num
            assert "items" in s_dataset, lale.pretty_print.to_string(s_dataset)
            s_rows = s_dataset["items"]
            if "type" in s_rows and "array" == s_rows["type"]:
                s_cols = s_rows["items"]
                if isinstance(s_cols, dict):
                    # Homogeneous columns: width comes from min/maxItems.
                    min_c = s_rows["minItems"] if "minItems" in s_rows else 1
                    max_c = s_rows["maxItems"] if "maxItems" in s_rows else "unbounded"
                    if elem_schema is None:
                        elem_schema = s_cols
                    else:
                        elem_schema = join_schemas(elem_schema, s_cols)
                else:
                    # Tuple-style column list: width is its length.
                    min_c, max_c = len(s_cols), len(s_cols)
                    for s_col in s_cols:
                        if elem_schema is None:
                            elem_schema = s_col
                        else:
                            elem_schema = join_schemas(elem_schema, s_col)
                min_cols, max_cols = add_ranges(min_cols, max_cols, min_c, max_c)
            else:
                # 1-d dataset contributes a single column.
                if elem_schema is None:
                    elem_schema = s_rows
                else:
                    elem_schema = join_schemas(elem_schema, s_rows)
                min_cols, max_cols = add_ranges(min_cols, max_cols, 1, 1)
        s_result = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "array",
            "items": {"type": "array", "minItems": min_cols, "items": elem_schema},
        }
        if max_cols != "unbounded":
            s_result["items"]["maxItems"] = max_cols
        validate_is_schema(s_result)
        return s_result
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Outermost array dimension is over datasets.",
"type": "array",
"items": {
"description": "Middle array dimension is over samples (aka rows).",
"type": "array",
"items": {
"description": "Innermost array dimension is over features (aka columns).",
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"type": "number"},
],
},
},
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"description": "Outer array dimension is over samples (aka rows).",
"items": {
"description": "Inner array dimension is over features (aka columns).",
"laleType": "Any",
},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Horizontal stacking concatenates features (aka columns) of input datasets.
Examples
--------
>>> A = [ [11, 12, 13],
... [21, 22, 23],
... [31, 32, 33] ]
>>> B = [ [14, 15],
... [24, 25],
... [34, 35] ]
>>> ConcatFeatures.transform([A, B])
NDArrayWithSchema([[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35]])""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.concat_features.html",
"import_from": "lale.lib.rasl",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
ConcatFeatures = lale.operators.make_pretrained_operator(
_ConcatFeaturesImpl, _combined_schemas
)
lale.docstrings.set_docstrings(ConcatFeatures)
| 10,375 | 35.407018 | 126 | py |
lale | lale-master/lale/lib/xgboost/xgb_regressor.py | # Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from packaging import version
import lale.docstrings
import lale.helpers
import lale.operators
import lale.schemas
from ._common_schemas import schema_silent
try:
import xgboost # type: ignore
xgboost_version = version.parse(getattr(xgboost, "__version__"))
except ImportError:
xgboost_version = None
if TYPE_CHECKING:
import xgboost # type: ignore
# xgboost does not like column names with some characters (which are legal in pandas)
# so we encode them
def _rename_one_feature(name):
mapping = {"[": "[", "]": "]", "<": "<"}
for old, new in mapping.items():
name = name.replace(old, new)
return name
def _rename_all_features(X):
    """Return X with xgboost-illegal characters encoded in its column names.

    Non-DataFrame inputs, and DataFrames whose names need no encoding, are
    returned unchanged (no copy is made in that case).
    """
    if not isinstance(X, pd.DataFrame):
        return X
    renamed = [_rename_one_feature(col) for col in X.columns]
    if renamed == list(X.columns):
        return X
    return pd.DataFrame(data=X, columns=renamed)
class _XGBRegressorImpl:
    """Lale wrapper around xgboost.XGBRegressor."""
    _wrapped_model: xgboost.XGBRegressor
    @classmethod
    def validate_hyperparams(cls, **hyperparams):
        # Only checks that xgboost is importable; the hyperparameter values
        # themselves are validated against the JSON schemas by lale.
        assert (
            xgboost_version is not None
        ), """Your Python environment does not have xgboost installed. You can install it with
    pip install xgboost
or with
    pip install 'lale[full]'"""
    def __init__(self, **hyperparams):
        self.validate_hyperparams(**hyperparams)
        self._wrapped_model = xgboost.XGBRegressor(**hyperparams)
    def fit(self, X, y, **fit_params):
        """Fit the underlying XGBRegressor, encoding column names xgboost rejects."""
        renamed_X = _rename_all_features(X)
        self._wrapped_model.fit(renamed_X, y, **fit_params)
        return self
    def partial_fit(self, X, y, **fit_params):
        """Continue training from the existing booster, if any."""
        # "classes" only makes sense for classifiers; drop it in case a
        # generic caller passed it along.
        fit_params = lale.helpers.dict_without(fit_params, "classes")
        if self._wrapped_model.__sklearn_is_fitted__():
            # Warm-start from the already-trained booster.
            booster = self._wrapped_model.get_booster()
            fit_params = {**fit_params, "xgb_model": booster}
        return self.fit(X, y, **fit_params)
    def predict(self, X, **predict_params):
        """Predict with the fitted model, encoding column names as in fit."""
        renamed_X = _rename_all_features(X)
        result = self._wrapped_model.predict(renamed_X, **predict_params)
        return result
    def score(self, X, y):
        """R^2 of the predictions on X against ground truth y."""
        from sklearn.metrics import r2_score
        y_pred = self.predict(X)
        return r2_score(y, y_pred)
_hyperparams_schema = {
"description": "Hyperparameter schema for a Lale wrapper for XGBoost.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"max_depth",
"learning_rate",
"n_estimators",
"verbosity",
"objective",
"booster",
"tree_method",
"n_jobs",
"gamma",
"min_child_weight",
"max_delta_step",
"subsample",
"colsample_bytree",
"colsample_bylevel",
"colsample_bynode",
"reg_alpha",
"reg_lambda",
"scale_pos_weight",
"base_score",
"random_state",
"missing",
],
"relevantToOptimizer": [
"max_depth",
"learning_rate",
"n_estimators",
"gamma",
"min_child_weight",
"subsample",
"reg_alpha",
"reg_lambda",
],
"properties": {
"max_depth": {
"description": "Maximum tree depth for base learners.",
"type": "integer",
"default": 4,
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
"learning_rate": {
"description": """Boosting learning rate (xgb's "eta")""",
"type": "number",
"default": 0.1,
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
"n_estimators": {
"description": "Number of trees to fit.",
"type": "integer",
"default": 200,
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
},
"verbosity": {
"description": "The degree of verbosity.",
"type": "integer",
"default": 1,
"minimum": 0,
"maximum": 3,
},
"silent": schema_silent,
"objective": {
"description": "Specify the learning task and the corresponding "
"learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"reg:linear",
"reg:logistic",
"reg:gamma",
"reg:tweedie",
]
},
{"laleType": "callable"},
],
"default": "reg:linear",
},
"booster": {
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart"],
"default": "gbtree",
},
"tree_method": {
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist"],
"default": "auto",
},
"n_jobs": {
"type": "integer",
"description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
"default": 1,
},
"nthread": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of parallel threads used to run xgboost. Deprecated, please use n_jobs",
},
"gamma": {
"type": "number",
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"default": 0,
"minimum": 0,
"maximumForOptimizer": 1.0,
},
"min_child_weight": {
"type": "integer",
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"default": 10,
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
"max_delta_step": {
"type": "integer",
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"default": 0,
},
"subsample": {
"type": "number",
"description": "Subsample ratio of the training instance.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
"colsample_bytree": {
"type": "number",
"description": "Subsample ratio of columns when constructing each tree.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bylevel": {
"type": "number",
"description": "Subsample ratio of columns for each split, in each level.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bynode": {
"type": "number",
"description": "Subsample ratio of columns for each split.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
"reg_alpha": {
"type": "number",
"description": "L1 regularization term on weights",
"default": 0,
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
"reg_lambda": {
"type": "number",
"description": "L2 regularization term on weights",
"default": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
"scale_pos_weight": {
"type": "number",
"description": "Balancing of positive and negative weights.",
"default": 1,
},
"base_score": {
"type": "number",
"description": "The initial prediction score of all instances, global bias.",
"default": 0.5,
},
"random_state": {
"type": "integer",
"description": "Random number seed. (replaces seed)",
"default": 0,
},
"missing": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"default": None,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
"importance_type": {
"enum": [
"gain",
"weight",
"cover",
"total_gain",
"total_cover",
None,
],
"default": "gain",
"description": "The feature importance type for the `feature_importances_` property.",
},
"seed": {
"default": None,
"description": "deprecated and replaced with random_state, but adding to be backward compatible. ",
},
},
}
],
}
# fit-argument schema: feature matrix, labels, and the optional xgboost
# training controls (eval sets, early stopping, warm start, callbacks).
_input_fit_schema = {
    "description": "Fit gradient boosting classifier",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "type": "array",
            "items": {
                "type": "array",
                "items": {"type": "number"},
            },
            "description": "Feature matrix",
        },
        "y": {
            "type": "array",
            "items": {"type": "number"},
            "description": "Labels",
        },
        "sample_weight": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"type": "number"},
                },
                {"enum": [None]},
            ],
            "description": "Weight for each instance",
            "default": None,
        },
        "eval_set": {
            "anyOf": [
                {
                    "type": "array",
                },
                {
                    "enum": [None],
                },
            ],
            "default": None,
            "description": "A list of (X, y) pairs to use as a validation set for",
        },
        "sample_weight_eval_set": {
            "anyOf": [
                {
                    "type": "array",
                },
                {
                    "enum": [None],
                },
            ],
            "default": None,
            "description": "A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of",
        },
        "eval_metric": {
            "anyOf": [
                {"type": "array", "items": {"type": "string"}},
                {"type": "string"},
                {"enum": [None]},
                {"type": "object"},
            ],
            "default": None,
            "description": "If a str, should be a built-in evaluation metric to use. See",
        },
        "early_stopping_rounds": {
            "anyOf": [
                {
                    "type": "integer",
                },
                {
                    "enum": [None],
                },
            ],
            "default": None,
            "description": "Activates early stopping. Validation error needs to decrease at",
        },
        "verbose": {
            "type": "boolean",
            "description": "If `verbose` and an evaluation set is used, writes the evaluation",
            "default": True,
        },
        "xgb_model": {
            "anyOf": [{"type": "string"}, {"enum": [None]}],
            "description": "file name of stored xgb model or 'Booster' instance Xgb model to be",
            "default": None,
        },
        "callbacks": {
            "anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
            "default": None,
            "description": "List of callback functions that are applied at each iteration. ",
        },
    },
}
# predict-argument schema and the schema of its output (one number per row).
_input_predict_schema = {
    "description": "Predict with `data`.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "type": "array",
            "items": {
                "type": "array",
                "items": {"type": "number"},
            },
            "description": "The dmatrix storing the input.",
        },
        "output_margin": {
            "type": "boolean",
            "default": False,
            "description": "Whether to output the raw untransformed margin value.",
        },
        "ntree_limit": {
            "anyOf": [{"type": "integer"}, {"enum": [None]}],
            "description": "Limit number of trees in the prediction; defaults to best_ntree_limit if defined",
        },
        "validate_features": {
            "type": "boolean",
            "default": True,
            "description": "When this is True, validate that the Booster's and data's feature_names are identical.",
        },
    },
}
_output_predict_schema = {
    "description": "Output data schema for predictions (target class labels).",
    "type": "array",
    "items": {"type": "number"},
}
# Top-level operator schema that lale.operators.make_operator consumes: ties
# together the hyperparameter schema and the per-method input/output schemas.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """`XGBRegressor`_ gradient boosted decision trees.
.. _`XGBRegressor`: https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRegressor
""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.xgboost.xgb_regressor.html",
    "import_from": "xgboost",
    "tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        # partial_fit accepts the same arguments as fit.
        "input_partial_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}
# Wrap the implementation class and its schemas into a lale planned operator.
XGBRegressor: lale.operators.PlannedIndividualOp
XGBRegressor = lale.operators.make_operator(_XGBRegressorImpl, _combined_schemas)
# For xgboost >= 0.90, widen the `objective` schema to the regression
# objectives that release added (notably reg:squarederror) and also allow a
# custom callable objective.
if xgboost_version is not None and xgboost_version >= version.Version("0.90"):
    # page 58 of https://readthedocs.org/projects/xgboost/downloads/pdf/release_0.90/
    XGBRegressor = XGBRegressor.customize_schema(
        objective=lale.schemas.JSON(
            {
                "description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used.",
                "anyOf": [
                    {
                        "enum": [
                            "reg:linear",
                            "reg:logistic",
                            "reg:gamma",
                            "reg:tweedie",
                            "reg:squarederror",
                        ]
                    },
                    {"laleType": "callable"},
                ],
                "default": "reg:linear",
            }
        ),
        set_as_available=True,
    )
if xgboost_version is not None and xgboost_version >= version.Version("1.3"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBRegressor = XGBRegressor.customize_schema(
monotone_constraints={
"description": "Constraint of variable monotonicity.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
interaction_constraints={
"description": "Constraints for interaction representing permitted interactions. The constraints must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]], where each inner list is a group of indices of features that are allowed to interact with each other.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
num_parallel_tree={
"description": "Used for boosting random forest.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
validate_parameters={
"description": "Give warnings for unknown parameter.",
"anyOf": [{"enum": [None]}, {"type": "boolean"}, {"type": "integer"}],
"default": None,
},
gpu_id={
"description": "Device ordinal.",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
max_depth={
"description": "Maximum tree depth for base learners.",
"anyOf": [
{
"type": "integer",
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
learning_rate={
"description": """Boosting learning rate (xgb's "eta")""",
"anyOf": [
{
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
booster={
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart", None],
"default": None,
},
tree_method={
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist", None],
"default": None,
},
gamma={
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
min_child_weight={
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"anyOf": [
{
"type": "integer",
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_delta_step={
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
subsample={
"description": "Subsample ratio of the training instance.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bytree={
"description": "Subsample ratio of columns when constructing each tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bylevel={
"description": "Subsample ratio of columns for each split, in each level.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bynode={
"description": "Subsample ratio of columns for each split.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_alpha={
"description": "L1 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_lambda={
"description": "L2 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
scale_pos_weight={
"description": "Balancing of positive and negative weights.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
base_score={
"description": "The initial prediction score of all instances, global bias.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
missing={
"anyOf": [
{
"type": "number",
},
{
"enum": [None, np.NaN],
},
],
"default": np.NaN,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
verbosity={
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"minimum": 0,
"maximum": 3,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.5"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBRegressor = XGBRegressor.customize_schema(
enable_categorical={
"type": "boolean",
"description": """Experimental support for categorical data.
Do not set to true unless you are interested in development.
Only valid when gpu_hist and dataframe are used.""",
"default": False,
},
predictor={
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": """Force XGBoost to use specific predictor,
available choices are [cpu_predictor, gpu_predictor].""",
"default": None,
},
set_as_available=True,
)
# For xgboost >= 1.6, add the constructor arguments that release introduced
# (xgboost 1.6 moved eval_metric / early_stopping_rounds / callbacks from
# fit() to the constructor) and refresh n_jobs / random_state.
if xgboost_version is not None and xgboost_version >= version.Version("1.6"):
    # https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
    # NOTE(review): unlike the earlier customize_schema calls in this file,
    # this one does not pass set_as_available=True — confirm that is intended.
    XGBRegressor = XGBRegressor.customize_schema(
        max_leaves={
            "description": """Maximum number of leaves; 0 indicates no limit.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None], "forOptimizer": False},
            ],
            "default": None,
        },
        max_bin={
            "description": """If using histogram-based algorithm, maximum number of bins per feature.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None], "forOptimizer": False},
            ],
            "default": None,
        },
        grow_policy={
            "description": """Tree growing policy.
0 or depthwise: favor splitting at nodes closest to the node, i.e. grow depth-wise.
1 or lossguide: favor splitting at nodes with highest loss change.""",
            "enum": [0, 1, "depthwise", "lossguide", None],
            "default": None,
        },
        sampling_method={
            "description": """Sampling method. Used only by gpu_hist tree method.
- uniform: select random training instances uniformly.
- gradient_based select random training instances with higher probability when the gradient and hessian are larger. (cf. CatBoost)""",
            # Bug fix: the schema previously listed the typo "gadient_based",
            # which made validation reject the value xgboost actually accepts.
            "enum": ["uniform", "gradient_based", None],
            "default": None,
        },
        max_cat_to_onehot={
            "description": """A threshold for deciding whether XGBoost should use
one-hot encoding based split for categorical data.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None]},
            ],
            "default": None,
        },
        eval_metric={
            "description": """Metric used for monitoring the training result and early stopping.""",
            "anyOf": [
                {"type": "string"},
                {"type": "array", "items": {"type": "string"}},
                {"type": "array", "items": {"laleType": "callable"}},
                {"enum": [None]},
            ],
            "default": None,
        },
        early_stopping_rounds={
            "description": """Activates early stopping.
Validation metric needs to improve at least once in every early_stopping_rounds round(s)
to continue training.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None]},
            ],
            "default": None,
        },
        callbacks={
            "description": """List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using Callback API.""",
            "anyOf": [
                {"type": "array", "items": {"laleType": "callable"}},
                {"enum": [None]},
            ],
            "default": None,
        },
        n_jobs={
            "anyOf": [{"type": "integer"}, {"enum": [None]}],
            "description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
            "default": 1,
        },
        random_state={
            "anyOf": [{"type": "integer"}, {"enum": [None]}],
            "description": "Random number seed. (replaces seed)",
            "default": 0,
        },
    )
lale.docstrings.set_docstrings(XGBRegressor)
| 30,974 | 35.963007 | 284 | py |
lale | lale-master/lale/lib/xgboost/xgb_classifier.py | # Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from packaging import version
import lale.docstrings
import lale.helpers
import lale.operators
import lale.schemas
from ._common_schemas import schema_silent
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
try:
import xgboost # type: ignore
xgboost_version = version.parse(getattr(xgboost, "__version__"))
except ImportError:
xgboost_version = None
if TYPE_CHECKING:
import xgboost # type: ignore
# xgboost does not like column names with some characters (which are legal in pandas)
# so we encode them
def _rename_one_feature(name):
mapping = {"[": "[", "]": "]", "<": "<"}
for old, new in mapping.items():
name = name.replace(old, new)
return name
def _rename_all_features(X):
if not isinstance(X, pd.DataFrame):
return X
mapped = [_rename_one_feature(f) for f in X.columns]
if list(X.columns) == mapped:
return X
return pd.DataFrame(data=X, columns=mapped)
class _XGBClassifierImpl:
    """Lale wrapper around ``xgboost.XGBClassifier``.

    Encodes xgboost-incompatible characters in feature names before
    fitting/predicting and suppresses spurious xgboost warnings.
    """

    # The underlying scikit-learn-compatible xgboost estimator.
    _wrapped_model: xgboost.XGBClassifier

    @classmethod
    def validate_hyperparams(cls, **hyperparams):
        # Fail fast with an actionable install message if xgboost is absent
        # (xgboost_version is None only when the import at module load failed).
        assert (
            xgboost_version is not None
        ), """Your Python environment does not have xgboost installed. You can install it with
    pip install xgboost
or with
    pip install 'lale[full]'"""

    def __init__(self, **hyperparams):
        self.validate_hyperparams(**hyperparams)
        self._wrapped_model = xgboost.XGBClassifier(**hyperparams)

    def fit(self, X, y, **fit_params):
        """Fit the wrapped classifier; returns self."""
        # xgboost dislikes some characters in pandas column names.
        renamed_X = _rename_all_features(X)
        assert xgboost_version is not None
        if (
            xgboost_version >= version.Version("1.3.0")
            and "eval_metric" not in fit_params
        ):
            # set eval_metric explicitly to avoid spurious warning
            fit_params = {"eval_metric": "logloss", **fit_params}
        with warnings.catch_warnings():
            # The label-encoder deprecation surfaces as a UserWarning; only
            # silence it when the (deprecated) encoder is actually in use.
            if fit_params.get("use_label_encoder", True):
                warnings.filterwarnings("ignore", category=UserWarning)
            warnings.filterwarnings("ignore", category=FutureWarning)
            self._wrapped_model.fit(renamed_X, y, **fit_params)
        return self

    def partial_fit(self, X, y, **fit_params):
        """Incrementally train by continuing from the previous booster.

        The sklearn-style ``classes`` argument is dropped because
        ``xgboost.XGBClassifier.fit`` does not accept it.
        """
        fit_params = lale.helpers.dict_without(fit_params, "classes")
        if self._wrapped_model.__sklearn_is_fitted__():
            # Resume boosting from the already-fitted model.
            booster = self._wrapped_model.get_booster()
            fit_params = {**fit_params, "xgb_model": booster}
        return self.fit(X, y, **fit_params)

    def predict(self, X, **predict_params):
        """Predict class labels, with feature-name encoding applied."""
        renamed_X = _rename_all_features(X)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            result = self._wrapped_model.predict(renamed_X, **predict_params)
        return result

    def predict_proba(self, X):
        # NOTE(review): unlike fit/predict, X is passed through without
        # feature-name encoding here — confirm whether that is intended.
        return self._wrapped_model.predict_proba(X)

    def score(self, X, y):
        """Return mean accuracy of self.predict(X) against y."""
        from sklearn.metrics import accuracy_score

        y_pred = self.predict(X)
        return accuracy_score(y, y_pred)
_hyperparams_schema = {
"description": "Hyperparameter schema for a Lale wrapper for XGBoost.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"max_depth",
"learning_rate",
"n_estimators",
"verbosity",
"objective",
"booster",
"tree_method",
"n_jobs",
"gamma",
"min_child_weight",
"max_delta_step",
"subsample",
"colsample_bytree",
"colsample_bylevel",
"colsample_bynode",
"reg_alpha",
"reg_lambda",
"scale_pos_weight",
"base_score",
"random_state",
"missing",
],
"relevantToOptimizer": [
"gamma",
"max_depth",
"learning_rate",
"n_estimators",
"min_child_weight",
"subsample",
"reg_alpha",
"reg_lambda",
],
"properties": {
"max_depth": {
"description": "Maximum tree depth for base learners.",
"type": "integer",
"default": 4,
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
"learning_rate": {
"description": "Boosting learning rate (xgb’s “eta”)",
"type": "number",
"default": 0.1,
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
"n_estimators": {
"description": "Number of trees to fit.",
"type": "integer",
"default": 100,
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
},
"verbosity": {
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"minimum": 0,
"maximum": 3,
},
"objective": {
"description": "Specify the learning task and the corresponding "
"learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"binary:logistic",
"binary:logitraw",
"binary:hinge",
"multi:softprob",
"multi:softmax",
]
},
{"laleType": "callable"},
],
"default": "binary:logistic",
},
"booster": {
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart"],
"default": "gbtree",
},
"tree_method": {
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist"],
"default": "auto",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
"default": 1,
},
"nthread": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of parallel threads used to run xgboost. Deprecated, please use n_jobs",
},
"gamma": {
"type": "number",
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"default": 0,
"minimum": 0,
"maximumForOptimizer": 1.0,
},
"min_child_weight": {
"type": "integer",
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"default": 10,
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
"max_delta_step": {
"type": "integer",
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"default": 0,
},
"subsample": {
"type": "number",
"description": "Subsample ratio of the training instance.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
"colsample_bytree": {
"type": "number",
"description": "Subsample ratio of columns when constructing each tree.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bylevel": {
"type": "number",
"description": "Subsample ratio of columns for each split, in each level.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bynode": {
"type": "number",
"description": "Subsample ratio of columns for each split.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
"reg_alpha": {
"type": "number",
"description": "L1 regularization term on weights",
"default": 0,
"distribution": "uniform",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
},
"reg_lambda": {
"type": "number",
"description": "L2 regularization term on weights",
"default": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"scale_pos_weight": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"description": "Balancing of positive and negative weights.",
"default": 1,
},
"base_score": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"description": "The initial prediction score of all instances, global bias.",
"default": 0.5,
},
"random_state": {
"anyOf": [
{
"type": "integer",
},
{
"enum": [None],
},
],
"description": "Random number seed. (replaces seed)",
"default": 0,
},
"missing": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"default": None,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
"silent": schema_silent,
"seed": {
"default": None,
"description": "deprecated and replaced with random_state, but adding to be backward compatible. ",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit gradient boosting classifier",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Feature matrix",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Labels",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Weight for each instance",
"default": None,
},
"eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of (X, y) pairs to use as a validation set for",
},
"sample_weight_eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of",
},
"eval_metric": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "string"},
{"enum": [None]},
{"type": "object"},
],
"default": None,
"description": "If a str, should be a built-in evaluation metric to use. See",
},
"early_stopping_rounds": {
"anyOf": [
{
"type": "integer",
},
{
"enum": [None],
},
],
"default": None,
"description": "Activates early stopping. Validation error needs to decrease at",
},
"verbose": {
"type": "boolean",
"description": "If `verbose` and an evaluation set is used, writes the evaluation",
"default": True,
},
"xgb_model": {
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": "file name of stored xgb model or 'Booster' instance Xgb model to be",
"default": None,
},
"callbacks": {
"anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
"default": None,
"description": "List of callback functions that are applied at each iteration. ",
},
},
}
_input_predict_schema = {
"description": "Predict with `data`.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The dmatrix storing the input.",
},
"output_margin": {
"type": "boolean",
"default": False,
"description": "Whether to output the raw untransformed margin value.",
},
"ntree_limit": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Limit number of trees in the prediction; defaults to best_ntree_limit if defined",
},
"validate_features": {
"type": "boolean",
"default": True,
"description": "When this is True, validate that the Booster's and data's feature_names are identical.",
},
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`XGBClassifier`_ gradient boosted decision trees.
.. _`XGBClassifier`: https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBClassifier
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.xgboost.xgb_classifier.html",
"import_from": "xgboost",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
XGBClassifier: lale.operators.PlannedIndividualOp
XGBClassifier = lale.operators.make_operator(_XGBClassifierImpl, _combined_schemas)
if xgboost_version is not None and xgboost_version >= version.Version("0.90"):
# page 58 of https://readthedocs.org/projects/xgboost/downloads/pdf/release_0.90/
XGBClassifier = XGBClassifier.customize_schema(
objective=lale.schemas.JSON(
{
"description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"binary:hinge",
"binary:logistic",
"binary:logitraw",
"multi:softmax",
"multi:softprob",
]
},
{"laleType": "callable"},
],
"default": "binary:logistic",
}
),
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.3"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBClassifier = XGBClassifier.customize_schema(
monotone_constraints={
"description": "Constraint of variable monotonicity.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
interaction_constraints={
"description": "Constraints for interaction representing permitted interactions. The constraints must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]], where each inner list is a group of indices of features that are allowed to interact with each other.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
num_parallel_tree={
"description": "Used for boosting random forest.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
validate_parameters={
"description": "Give warnings for unknown parameter.",
"anyOf": [{"enum": [None]}, {"type": "boolean"}, {"type": "integer"}],
"default": None,
},
gpu_id={
"description": "Device ordinal.",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
max_depth={
"description": "Maximum tree depth for base learners.",
"anyOf": [
{
"type": "integer",
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
learning_rate={
"description": """Boosting learning rate (xgb's "eta")""",
"anyOf": [
{
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
booster={
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart", None],
"default": None,
},
tree_method={
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist", None],
"default": None,
},
gamma={
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
min_child_weight={
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"anyOf": [
{
"type": "integer",
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_delta_step={
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
subsample={
"description": "Subsample ratio of the training instance.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bytree={
"description": "Subsample ratio of columns when constructing each tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bylevel={
"description": "Subsample ratio of columns for each split, in each level.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bynode={
"description": "Subsample ratio of columns for each split.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_alpha={
"description": "L1 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_lambda={
"description": "L2 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
scale_pos_weight={
"description": "Balancing of positive and negative weights.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
base_score={
"description": "The initial prediction score of all instances, global bias.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
importance_type={
"description": "The feature importance type for the `feature_importances_` property.",
"enum": ["gain", "weight", "cover", "total_gain", "total_cover", None],
"default": "gain",
},
use_label_encoder={
"description": """(Deprecated) Use the label encoder from scikit-learn to encode the labels.
For new code, we recommend that you set this parameter to False.""",
"type": "boolean",
"default": True,
},
missing={
"anyOf": [
{
"type": "number",
},
{
"enum": [None, np.NaN],
},
],
"default": np.NaN,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
verbosity={
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"minimum": 0,
"maximum": 3,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.5"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBClassifier = XGBClassifier.customize_schema(
enable_categorical={
"type": "boolean",
"description": """Experimental support for categorical data.
Do not set to true unless you are interested in development.
Only valid when gpu_hist and dataframe are used.""",
"default": False,
},
predictor={
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": """Force XGBoost to use specific predictor,
available choices are [cpu_predictor, gpu_predictor].""",
"default": None,
},
set_as_available=True,
)
# For xgboost >= 1.6, flip the use_label_encoder default to False (matching
# upstream) and add the constructor arguments that release introduced
# (xgboost 1.6 moved eval_metric / early_stopping_rounds / callbacks from
# fit() to the constructor).
if xgboost_version is not None and xgboost_version >= version.Version("1.6"):
    # https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
    # NOTE(review): unlike the earlier customize_schema calls in this file,
    # this one does not pass set_as_available=True — confirm that is intended.
    XGBClassifier = XGBClassifier.customize_schema(
        use_label_encoder={
            "description": """(Deprecated) Use the label encoder from scikit-learn to encode the labels.
For new code, we recommend that you set this parameter to False.""",
            "type": "boolean",
            "default": False,
        },
        max_leaves={
            "description": """Maximum number of leaves; 0 indicates no limit.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None], "forOptimizer": False},
            ],
            "default": None,
        },
        max_bin={
            "description": """If using histogram-based algorithm, maximum number of bins per feature.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None], "forOptimizer": False},
            ],
            "default": None,
        },
        grow_policy={
            "description": """Tree growing policy.
0 or depthwise: favor splitting at nodes closest to the node, i.e. grow depth-wise.
1 or lossguide: favor splitting at nodes with highest loss change.""",
            "enum": [0, 1, "depthwise", "lossguide", None],
            "default": None,
        },
        sampling_method={
            "description": """Sampling method. Used only by gpu_hist tree method.
- uniform: select random training instances uniformly.
- gradient_based select random training instances with higher probability when the gradient and hessian are larger. (cf. CatBoost)""",
            # Bug fix: the schema previously listed the typo "gadient_based",
            # which made validation reject the value xgboost actually accepts.
            "enum": ["uniform", "gradient_based", None],
            "default": None,
        },
        max_cat_to_onehot={
            "description": """A threshold for deciding whether XGBoost should use
one-hot encoding based split for categorical data.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None]},
            ],
            "default": None,
        },
        eval_metric={
            "description": """Metric used for monitoring the training result and early stopping.""",
            "anyOf": [
                {"type": "string"},
                {"type": "array", "items": {"type": "string"}},
                {"type": "array", "items": {"laleType": "callable"}},
                {"enum": [None]},
            ],
            "default": None,
        },
        early_stopping_rounds={
            "description": """Activates early stopping.
Validation metric needs to improve at least once in every early_stopping_rounds round(s)
to continue training.""",
            "anyOf": [
                {"type": "integer"},
                {"enum": [None]},
            ],
            "default": None,
        },
        callbacks={
            "description": """List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using Callback API.""",
            "anyOf": [
                {"type": "array", "items": {"laleType": "callable"}},
                {"enum": [None]},
            ],
            "default": None,
        },
    )
lale.docstrings.set_docstrings(XGBClassifier)
| 33,369 | 36.326622 | 284 | py |
lale | lale-master/lale/lib/xgboost/__init__.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scikit-learn compatible wrappers for XGBoost_ along with schemas to enable hyperparameter tuning.
.. _XGBoost: https://xgboost.readthedocs.io/en/latest/
Operators:
==========
* `XGBClassifier`_
* `XGBRegressor`_
.. _`XGBClassifier`: lale.lib.xgboost.xgb_classifier.html
.. _`XGBRegressor`: lale.lib.xgboost.xgb_regressor.html
"""
from lale import register_lale_wrapper_modules
from .xgb_classifier import XGBClassifier as XGBClassifier
from .xgb_regressor import XGBRegressor as XGBRegressor
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
register_lale_wrapper_modules(__name__)
| 1,325 | 31.341463 | 97 | py |
lale | lale-master/lale/lib/lale/auto_pipeline.py | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import warnings
from typing import Optional
import hyperopt
import pandas as pd
import sklearn.metrics
import sklearn.model_selection
import lale.docstrings
import lale.helpers
import lale.operators
from lale.lib._common_schemas import (
schema_best_score_single,
schema_cv,
schema_max_opt_time,
schema_scoring_single,
)
# Probe for optional gradient-boosting backends once at import time; the two
# flags below steer auto_gbt() toward the best available library.  Any
# FutureWarnings raised by these imports are suppressed so they do not leak
# to users of this module.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    try:
        import xgboost  # noqa: F401
        xgboost_installed = True  # XGBoost is the first choice when present
    except ImportError:
        xgboost_installed = False
    try:
        import lightgbm.sklearn  # noqa: F401
        lightgbm_installed = True  # LightGBM is the second choice
    except ImportError:
        lightgbm_installed = False
def auto_prep(X):
    """Build a preprocessing sub-pipeline for ``X``.

    Numeric columns are mean-imputed; categorical columns are
    most-frequent-imputed and one-hot encoded.  When the data mixes both
    kinds, the two branches run side by side and are concatenated.
    """
    from lale.lib.lale import ConcatFeatures, Project, categorical
    from lale.lib.sklearn import OneHotEncoder, SimpleImputer

    total_cols = X.shape[1]
    cat_cols = len(categorical()(X))
    impute_num = SimpleImputer(strategy="mean")
    impute_cat = SimpleImputer(strategy="most_frequent") >> OneHotEncoder(
        handle_unknown="ignore"
    )
    if cat_cols == 0:
        # purely numeric data
        return impute_num
    if cat_cols == total_cols:
        # purely categorical data
        return impute_cat
    # mixed data: route numeric and categorical columns separately, then join
    numeric_branch = (
        Project(columns={"type": "number"}, drop_columns=categorical()) >> impute_num
    )
    categorical_branch = Project(columns=categorical()) >> impute_cat
    return (numeric_branch & categorical_branch) >> ConcatFeatures
def auto_gbt(prediction_type):
    """Pick a gradient-boosted-trees estimator for the given task.

    Preference order is XGBoost, then LightGBM, then scikit-learn's
    GradientBoosting*, based on the module-level availability flags.
    """
    if prediction_type == "regression":
        if xgboost_installed:
            from lale.lib.xgboost import XGBRegressor

            return XGBRegressor(verbosity=0)
        if lightgbm_installed:
            from lale.lib.lightgbm import LGBMRegressor

            return LGBMRegressor()
        from lale.lib.sklearn import GradientBoostingRegressor

        return GradientBoostingRegressor()
    # anything else must be one of the classification task kinds
    assert prediction_type in ["binary", "multiclass", "classification"]
    if xgboost_installed:
        from lale.lib.xgboost import XGBClassifier

        return XGBClassifier(verbosity=0)
    if lightgbm_installed:
        from lale.lib.lightgbm import LGBMClassifier

        return LGBMClassifier()
    from lale.lib.sklearn import GradientBoostingClassifier

    return GradientBoostingClassifier()
class _AutoPipelineImpl:
    """Implementation behind the AutoPipeline operator.

    ``fit`` tries a cascade of increasingly sophisticated candidates within
    a global time budget (``max_opt_time``): a baseline dummy model, a
    gradient-boosted-trees model on numeric columns only, a GBT model on all
    (preprocessed) columns, and finally a Hyperopt search over a small
    planned pipeline space.  Every candidate is recorded in ``_summary`` and
    ``_pipelines``; only the incumbent best gets refit on the full data.
    """

    # per-trial results table (indexed by pipeline name); None until fit()
    _summary: Optional[pd.DataFrame]
    def __init__(
        self,
        *,
        prediction_type="classification",
        scoring=None,
        best_score=0.0,
        verbose=False,
        max_evals=100,
        max_opt_time=600.0,
        max_eval_time=120.0,
        cv=5,
    ):
        self.prediction_type = prediction_type
        self.max_opt_time = max_opt_time
        self.max_eval_time = max_eval_time
        self.max_evals = max_evals
        self.verbose = verbose
        if scoring is None:
            # default metric depends on the task kind
            scoring = "r2" if prediction_type == "regression" else "accuracy"
        self.scoring = scoring
        self._scorer = sklearn.metrics.get_scorer(scoring)
        self.best_score = best_score
        self._summary = None
        self.cv = cv
    def _try_and_add(self, name, trainable, X, y):
        """Cross-validate ``trainable``, record it under ``name``, and refit
        it on all of (X, y) if it becomes the new incumbent best."""
        assert name not in self._pipelines
        if self._name_of_best is not None:
            # at least one candidate already succeeded; honor the time budget
            if time.time() > self._start_fit + self.max_opt_time:
                return
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cv = sklearn.model_selection.check_cv(
                cv=self.cv, classifier=(self.prediction_type != "regression")
            )
            (
                cv_score,
                logloss,
                execution_time,
            ) = lale.helpers.cross_val_score_track_trials(
                trainable, X, y, self.scoring, cv
            )
            # loss is measured relative to best_score so lower is better
            loss = self.best_score - cv_score
        if self._name_of_best is None or (
            self._summary is None or loss < self._summary.at[self._name_of_best, "loss"]
        ):
            self._name_of_best = name
        record = {
            "name": name,
            "loss": loss,
            "time": execution_time,
            "log_loss": logloss,
            "status": hyperopt.STATUS_OK,
        }
        singleton_summary = pd.DataFrame.from_records([record], index="name")
        if self._summary is None:
            self._summary = singleton_summary
        else:
            self._summary = pd.concat([self._summary, singleton_summary])
        # only the incumbent best is actually (re)trained on the full data;
        # non-best candidates are stored untrained
        if name == self._name_of_best:
            self._pipelines[name] = trainable.fit(X, y)
        else:
            self._pipelines[name] = trainable
    def _fit_dummy(self, X, y):
        # baseline candidate: trivial mean/majority predictor
        from lale.lib.sklearn import DummyClassifier, DummyRegressor
        if self.prediction_type == "regression":
            trainable = DummyRegressor()
        else:
            trainable = DummyClassifier()
        self._try_and_add("dummy", trainable, X, y)
    def _fit_gbt_num(self, X, y):
        # candidate: gradient-boosted trees on the numeric columns only
        from lale.lib.lale import Project
        from lale.lib.sklearn import SimpleImputer
        gbt = auto_gbt(self.prediction_type)
        trainable = (
            Project(columns={"type": "number"}) >> SimpleImputer(strategy="mean") >> gbt
        )
        self._try_and_add("gbt_num", trainable, X, y)
    def _fit_gbt_all(self, X, y):
        # candidate: gradient-boosted trees on all columns with full prep
        prep = auto_prep(X)
        gbt = auto_gbt(self.prediction_type)
        trainable = prep >> gbt
        self._try_and_add("gbt_all", trainable, X, y)
    def _fit_hyperopt(self, X, y):
        """Spend whatever remains of the time budget on a Hyperopt search
        over a small planned space (prep >> [scale >>] reduce >> estimator)."""
        from lale.lib.lale import Hyperopt, NoOp
        from lale.lib.sklearn import (
            PCA,
            DecisionTreeClassifier,
            DecisionTreeRegressor,
            KNeighborsClassifier,
            KNeighborsRegressor,
            MinMaxScaler,
            RandomForestClassifier,
            RandomForestRegressor,
            RobustScaler,
            SelectKBest,
            SGDClassifier,
            SGDRegressor,
            StandardScaler,
        )
        remaining_time = self.max_opt_time - (time.time() - self._start_fit)
        if remaining_time <= 0:
            return
        prep = auto_prep(X)
        scale = MinMaxScaler | StandardScaler | RobustScaler | NoOp
        reduce_dims = PCA | SelectKBest | NoOp
        gbt = auto_gbt(self.prediction_type)
        if self.prediction_type == "regression":
            estim_trees = gbt | DecisionTreeRegressor | RandomForestRegressor
            estim_notree = SGDRegressor | KNeighborsRegressor
        else:
            estim_trees = gbt | DecisionTreeClassifier | RandomForestClassifier
            estim_notree = SGDClassifier | KNeighborsClassifier
        # tree models skip the scaler; the others get scale >> reduce_dims
        model_trees = reduce_dims >> estim_trees
        model_notree = scale >> reduce_dims >> estim_notree
        planned = prep >> (model_trees | model_notree)
        # earlier candidates count against the overall max_evals budget
        prior_evals = self._summary.shape[0] if self._summary is not None else 0
        trainable = Hyperopt(
            estimator=planned,
            max_evals=self.max_evals - prior_evals,
            scoring=self.scoring,
            best_score=self.best_score,
            max_opt_time=remaining_time,
            max_eval_time=self.max_eval_time,
            verbose=self.verbose,
            show_progressbar=False,
            cv=self.cv,
        )
        trained = trainable.fit(X, y)
        # The static types are not currently smart enough to verify
        # that the conditionally defined summary method is actually present
        # But it must be, since the hyperopt impl type provides it
        summary: pd.DataFrame = trained.summary()  # type: ignore
        if list(summary.status) == ["new"]:
            return  # only one trial and that one timed out
        best_trial = trained._impl._trials.best_trial
        if "loss" in best_trial["result"]:
            if (
                self._summary is None
                or best_trial["result"]["loss"]
                < self._summary.at[self._name_of_best, "loss"]
            ):
                # hyperopt trials appear as p<tid> in the summary table
                self._name_of_best = f'p{best_trial["tid"]}'
        if self._summary is None:
            self._summary = summary
        else:
            self._summary = pd.concat([self._summary, summary])
        for name in summary.index:
            assert name not in self._pipelines
            if summary.at[name, "status"] == hyperopt.STATUS_OK:
                self._pipelines[name] = trained.get_pipeline(name)
    def fit(self, X, y):
        """Try all candidates in order of increasing cost; return self."""
        self._start_fit = time.time()
        self._name_of_best = None
        self._summary = None
        self._pipelines = {}
        self._fit_dummy(X, y)
        self._fit_gbt_num(X, y)
        self._fit_gbt_all(X, y)
        self._fit_hyperopt(X, y)
        return self
    def predict(self, X, **predict_params):
        """Predict with the best pipeline found during fit()."""
        best_pipeline = self._pipelines[self._name_of_best]
        result = best_pipeline.predict(X, **predict_params)
        return result
    def summary(self):
        """Table summarizing the trial results (name, tid, loss, time, log_loss, status).
        Returns
        -------
        result : DataFrame"""
        if self._summary is not None:
            self._summary.sort_values(by="loss", inplace=True)
        return self._summary
    def get_pipeline(
        self,
        pipeline_name: Optional[str] = None,
        astype: lale.helpers.astype_type = "lale",
    ):
        """Retrieve one of the trials.
        Parameters
        ----------
        pipeline_name : union type, default None
        - string
            Key for table returned by summary(), return a trainable pipeline.
        - None
            When not specified, return the best trained pipeline found.
        astype : 'lale' or 'sklearn', default 'lale'
            Type of resulting pipeline.
        Returns
        -------
        result : Trained operator if best, trainable operator otherwise."""
        if pipeline_name is None:
            pipeline_name = self._name_of_best
        result = self._pipelines[pipeline_name]
        if result is None or astype == "lale":
            return result
        assert astype == "sklearn", astype
        return result.export_to_sklearn_pipeline()
# JSON schema for the constructor hyperparameters of AutoPipeline.
_hyperparams_schema = {
    "allOf": [
        {
            "type": "object",
            "required": [
                "prediction_type",
                "scoring",
                "max_evals",
                "max_opt_time",
                "max_eval_time",
                "cv",
            ],
            "relevantToOptimizer": [],
            "additionalProperties": False,
            "properties": {
                "prediction_type": {
                    "description": "The kind of learning problem.",
                    "enum": ["binary", "multiclass", "classification", "regression"],
                    "default": "classification",
                },
                "scoring": schema_scoring_single,
                "best_score": schema_best_score_single,
                "verbose": {
                    "description": """Whether to print errors from each of the trials if any.
This is also logged using logger.warning in Hyperopt.""",
                    "type": "boolean",
                    "default": False,
                },
                "max_evals": {
                    "description": "Number of trials of Hyperopt search.",
                    "type": "integer",
                    "minimum": 1,
                    "default": 100,
                },
                "max_opt_time": {
                    **schema_max_opt_time,
                    "default": 600.0,
                },
                "max_eval_time": {
                    "description": "Maximum time in seconds for each evaluation.",
                    "anyOf": [
                        {"type": "number", "minimum": 0.0, "exclusiveMinimum": True},
                        {"description": "No runtime bound.", "enum": [None]},
                    ],
                    "default": 120.0,
                },
                "cv": schema_cv,
            },
        }
    ]
}
# Schema for the arguments of fit(X, y).
_input_fit_schema = {
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {"type": "array", "items": {"type": "string"}},
                {"type": "array", "items": {"type": "boolean"}},
            ]
        },
    },
}
# Schema for the arguments of predict(X).
_input_predict_schema = {
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
    },
}
# Schema for the value returned by predict(X).
_output_predict_schema = {
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "string"}},
        {"type": "array", "items": {"type": "boolean"}},
    ]
}
# Combined operator metadata: description, docs link, and sub-schemas.
_combined_schemas = {
    "description": """Automatically find a pipeline for a dataset.
This is a high-level entry point to get an initial trained pipeline
without having to specify your own planned pipeline first. It is
designed to be simple at the expense of not offering much control.
For an example, see `demo_auto_pipeline.ipynb`_.
.. _`demo_auto_pipeline.ipynb`: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_auto_pipeline.ipynb
""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.auto_pipeline.html",
    "import_from": "lale.lib.lale",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}
# Wrap the implementation class and schemas into a lale operator.
AutoPipeline = lale.operators.make_operator(_AutoPipelineImpl, _combined_schemas)
lale.docstrings.set_docstrings(AutoPipeline)
| 14,599 | 32.87471 | 122 | py |
lale | lale-master/docs/conf.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import builtins
import os
import sys
from typing import Dict, List
import lale
from lale.settings import set_disable_hyperparams_schema_validation
# -- Project information -----------------------------------------------------
project = "LALE"
project_copyright = "2019-2022, IBM AI Research"
author = "IBM AI Research"
# The short X.Y version
version = lale.__version__
# The full version, including alpha/beta/rc tags
release = f"{lale.__version__}-dev"
sys.path.append(os.path.join(os.path.dirname(__name__), "../lale"))
import sphinx_rtd_theme # isort:skip # noqa:E402 # pylint:disable=wrong-import-position,wrong-import-order
# For packages with mock imports, if we have wrappers without our impl classes,
# schema validation fails as the mocking adds methods such as `transform`, `predict` etc.
# when the schema may not have those tags. So we disable schema validation during doc generation.
set_disable_hyperparams_schema_validation(True)
# This is so that we can detect if we are running a sphinx build
# and so generate pseudo-classes for documentation
setattr(builtins, "__sphinx_build__", True)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinxcontrib.rsvgconverter",
"m2r2",
"sphinxcontrib.apidoc",
]
apidoc_module_dir = "../lale"
apidoc_output_dir = "modules"
apidoc_separate_modules = True
autoclass_content = "both"
# Mock requirements to save resources during doc build machine setup
autodoc_mock_imports = [
"aif360",
"autoai_libs",
"fairlearn",
"pytorch",
"tensorflow",
"torch",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README-*.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path: List[str] = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "LALEdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "LALE.tex", "LALE Documentation", "IBM AI Research", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "lale", "LALE Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"LALE",
"LALE Documentation",
author,
"LALE",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 7,789 | 30.538462 | 108 | py |
PoolNet | PoolNet-master/main.py | import argparse
import os
from dataset.dataset import get_loader
from solver import Solver
def get_test_info(sal_mode='e'):
    """Map a one-letter dataset code to its image directory and list file.

    Args:
        sal_mode: dataset code -- 'e' ECSSD, 'p' PASCALS, 'd' DUT-OMRON,
            'h' HKU-IS, 's' SOD, 't' DUTS-TE, 'm_r' resized MSRA (speed test).

    Returns:
        Tuple (image_root, image_source) of the image directory and the
        .lst file enumerating the test images.

    Raises:
        ValueError: for an unknown code.  (The original if/elif ladder left
        both variables unbound and crashed with UnboundLocalError at the
        return statement instead.)
    """
    datasets = {
        'e': ('./data/ECSSD/Imgs/', './data/ECSSD/test.lst'),
        'p': ('./data/PASCALS/Imgs/', './data/PASCALS/test.lst'),
        'd': ('./data/DUTOMRON/Imgs/', './data/DUTOMRON/test.lst'),
        'h': ('./data/HKU-IS/Imgs/', './data/HKU-IS/test.lst'),
        's': ('./data/SOD/Imgs/', './data/SOD/test.lst'),
        't': ('./data/DUTS-TE/Imgs/', './data/DUTS-TE/test.lst'),
        # resized MSRA images, used for speed benchmarking
        'm_r': ('./data/MSRA/Imgs_resized/', './data/MSRA/test_resized.lst'),
    }
    if sal_mode not in datasets:
        raise ValueError('unknown sal_mode: %r' % sal_mode)
    return datasets[sal_mode]
def main(config):
    """Dispatch to training or testing based on ``config.mode``."""
    if config.mode == 'train':
        loader = get_loader(config)
        # Pick the first unused <save_folder>/run-<k> directory for this run.
        run_id = 0
        while os.path.exists("%s/run-%d" % (config.save_folder, run_id)):
            run_id += 1
        run_dir = "%s/run-%d" % (config.save_folder, run_id)
        os.mkdir(run_dir)
        os.mkdir("%s/models" % run_dir)
        config.save_folder = run_dir
        Solver(loader, None, config).train()
    elif config.mode == 'test':
        config.test_root, config.test_list = get_test_info(config.sal_mode)
        loader = get_loader(config, mode='test')
        if not os.path.exists(config.test_fold):
            os.mkdir(config.test_fold)
        Solver(None, loader, config).test()
    else:
        raise IOError("illegal input!!!")
if __name__ == '__main__':
    # Backbone weight snapshots; pick the one matching --arch.
    vgg_path = './dataset/pretrained/vgg16_20M.pth'
    resnet_path = './dataset/pretrained/resnet50_caffe.pth'
    parser = argparse.ArgumentParser()
    # Hyper-parameters
    parser.add_argument('--n_color', type=int, default=3)
    parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4
    parser.add_argument('--wd', type=float, default=0.0005) # Weight decay
    parser.add_argument('--no-cuda', dest='cuda', action='store_false') # pass to run on CPU; config.cuda defaults to True
    # Training settings
    parser.add_argument('--arch', type=str, default='resnet') # resnet or vgg
    # NOTE(review): default weights match --arch resnet; with --arch vgg also
    # pass --pretrained_model equal to vgg_path above -- confirm.
    parser.add_argument('--pretrained_model', type=str, default=resnet_path)
    parser.add_argument('--epoch', type=int, default=24)
    parser.add_argument('--batch_size', type=int, default=1) # only support 1 now
    parser.add_argument('--num_thread', type=int, default=1)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--save_folder', type=str, default='./results')
    parser.add_argument('--epoch_save', type=int, default=3)
    parser.add_argument('--iter_size', type=int, default=10)
    parser.add_argument('--show_every', type=int, default=50)
    # Train data
    parser.add_argument('--train_root', type=str, default='')
    parser.add_argument('--train_list', type=str, default='')
    # Testing settings
    parser.add_argument('--model', type=str, default=None) # Snapshot
    parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder
    parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset
    # Misc
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    config = parser.parse_args()
    if not os.path.exists(config.save_folder):
        os.mkdir(config.save_folder)
    # Get test set info (main() recomputes this in test mode)
    test_root, test_list = get_test_info(config.sal_mode)
    config.test_root = test_root
    config.test_list = test_list
    main(config)
| 3,827 | 38.061224 | 95 | py |
PoolNet | PoolNet-master/joint_main.py | import argparse
import os
from dataset.joint_dataset import get_loader
from joint_solver import Solver
def get_test_info(sal_mode='e'):
    """Map a one-letter dataset code to its image directory and list file.

    Args:
        sal_mode: dataset code -- 'e' ECSSD, 'p' PASCALS, 'd' DUT-OMRON,
            'h' HKU-IS, 's' SOD, 't' DUTS-TE, 'm_r' resized MSRA (speed
            test), 'b' BSDS (edge evaluation).

    Returns:
        Tuple (image_root, image_source) of the image directory and the
        .lst file enumerating the test images.

    Raises:
        ValueError: for an unknown code.  (The original if/elif ladder left
        both variables unbound and crashed with UnboundLocalError at the
        return statement instead.)
    """
    datasets = {
        'e': ('./data/ECSSD/Imgs/', './data/ECSSD/test.lst'),
        'p': ('./data/PASCALS/Imgs/', './data/PASCALS/test.lst'),
        'd': ('./data/DUTOMRON/Imgs/', './data/DUTOMRON/test.lst'),
        'h': ('./data/HKU-IS/Imgs/', './data/HKU-IS/test.lst'),
        's': ('./data/SOD/Imgs/', './data/SOD/test.lst'),
        't': ('./data/DUTS-TE/Imgs/', './data/DUTS-TE/test.lst'),
        # resized MSRA images, used for speed benchmarking
        'm_r': ('./data/MSRA/Imgs_resized/', './data/MSRA/test_resized.lst'),
        # BSDS dataset for edge evaluation
        'b': ('./data/HED-BSDS_PASCAL/HED-BSDS/test/', './data/HED-BSDS_PASCAL/HED-BSDS/test.lst'),
    }
    if sal_mode not in datasets:
        raise ValueError('unknown sal_mode: %r' % sal_mode)
    return datasets[sal_mode]
def main(config):
    """Dispatch to joint edge/saliency training or testing via ``config.mode``."""
    if config.mode == 'train':
        loader = get_loader(config)
        # Pick the first unused <save_folder>/run-<k> directory for this run.
        run_id = 0
        while os.path.exists("%s/run-%d" % (config.save_folder, run_id)):
            run_id += 1
        run_dir = "%s/run-%d" % (config.save_folder, run_id)
        os.mkdir(run_dir)
        os.mkdir("%s/models" % run_dir)
        config.save_folder = run_dir
        Solver(loader, None, config).train()
    elif config.mode == 'test':
        config.test_root, config.test_list = get_test_info(config.sal_mode)
        loader = get_loader(config, mode='test')
        if not os.path.exists(config.test_fold):
            os.mkdir(config.test_fold)
        Solver(None, loader, config).test(test_mode=config.test_mode)
    else:
        raise IOError("illegal input!!!")
if __name__ == '__main__':
    # Backbone weight snapshots; pick the one matching --arch.
    vgg_path = './dataset/pretrained/vgg16_20M.pth'
    resnet_path = './dataset/pretrained/resnet50_caffe.pth'
    parser = argparse.ArgumentParser()
    # Hyper-parameters
    parser.add_argument('--n_color', type=int, default=3)
    parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4
    parser.add_argument('--wd', type=float, default=0.0005) # Weight decay
    parser.add_argument('--no-cuda', dest='cuda', action='store_false') # pass to run on CPU; config.cuda defaults to True
    # Training settings
    parser.add_argument('--arch', type=str, default='resnet') # resnet or vgg
    # NOTE(review): default weights match --arch resnet; with --arch vgg also
    # pass --pretrained_model equal to vgg_path above -- confirm.
    parser.add_argument('--pretrained_model', type=str, default=resnet_path)
    parser.add_argument('--epoch', type=int, default=11)
    parser.add_argument('--batch_size', type=int, default=1) # only support 1 now
    parser.add_argument('--num_thread', type=int, default=1)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--save_folder', type=str, default='./results')
    parser.add_argument('--epoch_save', type=int, default=3)
    parser.add_argument('--iter_size', type=int, default=10)
    parser.add_argument('--show_every', type=int, default=50)
    # Train data
    parser.add_argument('--train_root', type=str, default='')
    parser.add_argument('--train_list', type=str, default='')
    parser.add_argument('--train_edge_root', type=str, default='') # path for edge data
    parser.add_argument('--train_edge_list', type=str, default='') # list file for edge data
    # Testing settings
    parser.add_argument('--model', type=str, default=None) # Snapshot
    parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder
    parser.add_argument('--test_mode', type=int, default=1) # 0->edge, 1->saliency
    parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset
    # Misc
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    config = parser.parse_args()
    if not os.path.exists(config.save_folder):
        os.mkdir(config.save_folder)
    # Get test set info (main() recomputes this in test mode)
    test_root, test_list = get_test_info(config.sal_mode)
    config.test_root = test_root
    config.test_list = test_list
    main(config)
| 4,316 | 40.912621 | 95 | py |
PoolNet | PoolNet-master/solver.py | import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torch.backends import cudnn
from networks.poolnet import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
import torchvision.utils as vutils
import cv2
import math
import time
class Solver(object):
    """Training/testing driver for PoolNet saliency detection.

    In 'train' mode it optimizes the network with Adam over the training
    loader; in 'test' mode it loads a snapshot and writes one predicted
    saliency map per test image into config.test_fold.
    """
    def __init__(self, train_loader, test_loader, config):
        """Store loaders/config, build the network; in test mode also load
        the snapshot given by config.model."""
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.config = config
        self.iter_size = config.iter_size
        self.show_every = config.show_every
        self.lr_decay_epoch = [15,]  # epochs after which lr is multiplied by 0.1
        self.build_model()
        if config.mode == 'test':
            print('Loading pre-trained model from %s...' % self.config.model)
            if self.config.cuda:
                self.net.load_state_dict(torch.load(self.config.model))
            else:
                # remap GPU-saved tensors onto the CPU
                self.net.load_state_dict(torch.load(self.config.model, map_location='cpu'))
            self.net.eval()
    # print the network information and parameter numbers
    def print_network(self, model, name):
        """Print `name`, the model structure, and the total parameter count."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print(model)
        print("The number of parameters: {}".format(num_params))
    # build the network
    def build_model(self):
        """Create the PoolNet model and Adam optimizer; load backbone weights
        (or a full checkpoint when config.load is set)."""
        self.net = build_model(self.config.arch)
        if self.config.cuda:
            self.net = self.net.cuda()
        # self.net.train()
        self.net.eval() # use_global_stats = True
        self.net.apply(weights_init)
        if self.config.load == '':
            # fresh run: only the backbone gets pretrained weights
            self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model))
        else:
            self.net.load_state_dict(torch.load(self.config.load))
        self.lr = self.config.lr
        self.wd = self.config.wd
        self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
        self.print_network(self.net, 'PoolNet Structure')
    def test(self):
        """Run inference over the test loader, writing <name>_sal_fuse.png
        per image, and print the overall FPS."""
        mode_name = 'sal_fuse'
        time_s = time.time()
        img_num = len(self.test_loader)
        for i, data_batch in enumerate(self.test_loader):
            # batch size is 1 (see main.py), so name[0] is the image filename
            images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
            with torch.no_grad():
                images = Variable(images)
                if self.config.cuda:
                    images = images.cuda()
                preds = self.net(images)
                # sigmoid -> [0,1] saliency, scaled to 8-bit grayscale
                pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
                multi_fuse = 255 * pred
                cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name + '.png'), multi_fuse)
        time_e = time.time()
        print('Speed: %f FPS' % (img_num/(time_e-time_s)))
        print('Test Done!')
    # training phase
    def train(self):
        """Optimize the network, accumulating gradients over iter_size
        iterations before each optimizer step; saves periodic and final
        snapshots under config.save_folder/models."""
        iter_num = len(self.train_loader.dataset) // self.config.batch_size
        aveGrad = 0
        for epoch in range(self.config.epoch):
            r_sal_loss= 0
            self.net.zero_grad()
            for i, data_batch in enumerate(self.train_loader):
                sal_image, sal_label = data_batch['sal_image'], data_batch['sal_label']
                if (sal_image.size(2) != sal_label.size(2)) or (sal_image.size(3) != sal_label.size(3)):
                    print('IMAGE ERROR, PASSING```')
                    continue
                sal_image, sal_label= Variable(sal_image), Variable(sal_label)
                if self.config.cuda:
                    # cudnn.benchmark = True
                    sal_image, sal_label = sal_image.cuda(), sal_label.cuda()
                sal_pred = self.net(sal_image)
                # loss is normalized by the effective (accumulated) batch size
                sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')
                sal_loss = sal_loss_fuse / (self.iter_size * self.config.batch_size)
                r_sal_loss += sal_loss.data
                sal_loss.backward()
                aveGrad += 1
                # accumulate gradients as done in DSS
                if aveGrad % self.iter_size == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    aveGrad = 0
                if i % (self.show_every // self.config.batch_size) == 0:
                    # NOTE(review): x_showEvery is only ever assigned 1 (when
                    # i == 0), so the printed value is the summed loss since
                    # the last report rather than a per-iteration average --
                    # confirm whether that is intended.
                    if i == 0:
                        x_showEvery = 1
                    print('epoch: [%2d/%2d], iter: [%5d/%5d] || Sal : %10.4f' % (
                        epoch, self.config.epoch, i, iter_num, r_sal_loss/x_showEvery))
                    print('Learning rate: ' + str(self.lr))
                    r_sal_loss= 0
            if (epoch + 1) % self.config.epoch_save == 0:
                torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_folder, epoch + 1))
            if epoch in self.lr_decay_epoch:
                # step decay: rebuild the optimizer with lr * 0.1
                self.lr = self.lr * 0.1
                self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
        torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_folder)
def bce2d(input, target, reduction='none'):
    """Class-balanced binary cross-entropy with logits for 2-D edge maps.

    Each class is weighted by the frequency of the *other* class, so the
    rarer class (usually edge pixels) contributes more per pixel:

        target pixel = 1 -> weight num_neg / num_total
        target pixel = 0 -> weight 1.1 * num_pos / num_total

    (The previous inline comment claimed the opposite assignment; the code
    has always given positives the ``alpha`` weight.)

    Args:
        input: raw logits, same shape as ``target``.
        target: binary ground truth (0/1 values), same shape as ``input``.
        reduction: 'none' | 'mean' | 'sum', forwarded to the torch loss.
            The old default ``None`` is not a valid value for
            ``F.binary_cross_entropy_with_logits`` and raised ValueError;
            it is now ``'none'`` (per-element loss).

    Returns:
        The weighted BCE loss, reduced according to ``reduction``.
    """
    assert(input.size() == target.size())
    pos = torch.eq(target, 1).float()
    neg = torch.eq(target, 0).float()
    num_pos = torch.sum(pos)
    num_neg = torch.sum(neg)
    num_total = num_pos + num_neg
    alpha = num_neg / num_total           # weight applied to positive pixels
    beta = 1.1 * num_pos / num_total      # weight applied to negative pixels
    weights = alpha * pos + beta * neg
    return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
| 5,765 | 38.493151 | 129 | py |
PoolNet | PoolNet-master/joint_solver.py | import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torch.backends import cudnn
from networks.joint_poolnet import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
import torchvision.utils as vutils
import cv2
import math
import time
class Solver(object):
    def __init__(self, train_loader, test_loader, config):
        """Store loaders/config, build the joint network; in test mode also
        load the snapshot given by config.model."""
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.config = config
        self.iter_size = config.iter_size
        self.show_every = config.show_every
        self.lr_decay_epoch = [8,]  # epochs after which lr is multiplied by 0.1
        self.build_model()
        if config.mode == 'test':
            print('Loading pre-trained model from %s...' % self.config.model)
            if self.config.cuda:
                self.net.load_state_dict(torch.load(self.config.model))
            else:
                # remap GPU-saved tensors onto the CPU
                self.net.load_state_dict(torch.load(self.config.model, map_location='cpu'))
            self.net.eval()
    # print the network information and parameter numbers
    def print_network(self, model, name):
        """Print `name`, the model structure, and the total parameter count."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print(model)
        print("The number of parameters: {}".format(num_params))
# build the network
def build_model(self):
self.net = build_model(self.config.arch)
if self.config.cuda:
self.net = self.net.cuda()
# self.net.train()
self.net.eval() # use_global_stats = True
self.net.apply(weights_init)
if self.config.load == '':
self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model))
else:
self.net.load_state_dict(torch.load(self.config.load))
self.lr = self.config.lr
self.wd = self.config.wd
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
self.print_network(self.net, 'PoolNet Structure')
def test(self, test_mode=1):
mode_name = ['edge_fuse', 'sal_fuse']
EPSILON = 1e-8
time_s = time.time()
img_num = len(self.test_loader)
for i, data_batch in enumerate(self.test_loader):
images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
if test_mode == 0:
images = images.numpy()[0].transpose((1,2,0))
scale = [0.5, 1, 1.5, 2] # uncomment for multi-scale testing
# scale = [1]
multi_fuse = np.zeros(im_size, np.float32)
for k in range(0, len(scale)):
im_ = cv2.resize(images, None, fx=scale[k], fy=scale[k], interpolation=cv2.INTER_LINEAR)
im_ = im_.transpose((2, 0, 1))
im_ = torch.Tensor(im_[np.newaxis, ...])
with torch.no_grad():
im_ = Variable(im_)
if self.config.cuda:
im_ = im_.cuda()
preds = self.net(im_, mode=test_mode)
pred_0 = np.squeeze(torch.sigmoid(preds[1][0]).cpu().data.numpy())
pred_1 = np.squeeze(torch.sigmoid(preds[1][1]).cpu().data.numpy())
pred_2 = np.squeeze(torch.sigmoid(preds[1][2]).cpu().data.numpy())
pred_fuse = np.squeeze(torch.sigmoid(preds[0]).cpu().data.numpy())
pred = (pred_0 + pred_1 + pred_2 + pred_fuse) / 4
pred = (pred - np.min(pred) + EPSILON) / (np.max(pred) - np.min(pred) + EPSILON)
pred = cv2.resize(pred, (im_size[1], im_size[0]), interpolation=cv2.INTER_LINEAR)
multi_fuse += pred
multi_fuse /= len(scale)
multi_fuse = 255 * (1 - multi_fuse)
cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name[test_mode] + '.png'), multi_fuse)
elif test_mode == 1:
with torch.no_grad():
images = Variable(images)
if self.config.cuda:
images = images.cuda()
preds = self.net(images, mode=test_mode)
pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
multi_fuse = 255 * pred
cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name[test_mode] + '.png'), multi_fuse)
time_e = time.time()
print('Speed: %f FPS' % (img_num/(time_e-time_s)))
print('Test Done!')
# training phase
def train(self):
iter_num = 30000 # each batch only train 30000 iters.(This number is just a random choice...)
aveGrad = 0
for epoch in range(self.config.epoch):
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
self.net.zero_grad()
for i, data_batch in enumerate(self.train_loader):
if (i + 1) == iter_num: break
edge_image, edge_label, sal_image, sal_label = data_batch['edge_image'], data_batch['edge_label'], data_batch['sal_image'], data_batch['sal_label']
if (sal_image.size(2) != sal_label.size(2)) or (sal_image.size(3) != sal_label.size(3)):
print('IMAGE ERROR, PASSING```')
continue
edge_image, edge_label, sal_image, sal_label= Variable(edge_image), Variable(edge_label), Variable(sal_image), Variable(sal_label)
if self.config.cuda:
edge_image, edge_label, sal_image, sal_label = edge_image.cuda(), edge_label.cuda(), sal_image.cuda(), sal_label.cuda()
# edge part
edge_pred = self.net(edge_image, mode=0)
edge_loss_fuse = bce2d(edge_pred[0], edge_label, reduction='sum')
edge_loss_part = []
for ix in edge_pred[1]:
edge_loss_part.append(bce2d(ix, edge_label, reduction='sum'))
edge_loss = (edge_loss_fuse + sum(edge_loss_part)) / (self.iter_size * self.config.batch_size)
r_edge_loss += edge_loss.data
# sal part
sal_pred = self.net(sal_image, mode=1)
sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')
sal_loss = sal_loss_fuse / (self.iter_size * self.config.batch_size)
r_sal_loss += sal_loss.data
loss = sal_loss + edge_loss
r_sum_loss += loss.data
loss.backward()
aveGrad += 1
# accumulate gradients as done in DSS
if aveGrad % self.iter_size == 0:
self.optimizer.step()
self.optimizer.zero_grad()
aveGrad = 0
if i % (self.show_every // self.config.batch_size) == 0:
if i == 0:
x_showEvery = 1
print('epoch: [%2d/%2d], iter: [%5d/%5d] || Edge : %10.4f || Sal : %10.4f || Sum : %10.4f' % (
epoch, self.config.epoch, i, iter_num, r_edge_loss/x_showEvery, r_sal_loss/x_showEvery, r_sum_loss/x_showEvery))
print('Learning rate: ' + str(self.lr))
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
if (epoch + 1) % self.config.epoch_save == 0:
torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_folder, epoch + 1))
if epoch in self.lr_decay_epoch:
self.lr = self.lr * 0.1
self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_folder)
def bce2d(input, target, reduction=None):
    """Class-balanced binary cross-entropy for 2-D edge maps.

    Positive (edge) pixels are weighted by the fraction of negative pixels and
    negative pixels by 1.1x the fraction of positives, so the rarer class
    contributes comparably to the loss.

    Args:
        input: raw logits, same shape as ``target``.
        target: binary ground truth (values 0. or 1.).
        reduction: forwarded to ``F.binary_cross_entropy_with_logits``.
            Defaults to 'none'; the original default of ``None`` is not a
            valid value for torch and would raise when no reduction was given.

    Returns:
        Weighted BCE loss (per-pixel tensor, or a scalar when reduced).
    """
    assert(input.size() == target.size())
    if reduction is None:
        reduction = 'none'  # keep the call valid when callers omit reduction
    pos = torch.eq(target, 1).float()
    neg = torch.eq(target, 0).float()
    num_pos = torch.sum(pos)
    num_neg = torch.sum(neg)
    num_total = num_pos + num_neg
    alpha = num_neg  / num_total
    beta = 1.1 * num_pos  / num_total
    # target pixel = 1 -> weight alpha (share of negatives)
    # target pixel = 0 -> weight beta  (1.1 * share of positives)
    # (the original comments had alpha/beta swapped relative to the code)
    weights = alpha * pos + beta * neg
    return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
| 8,569 | 44.105263 | 163 | py |
PoolNet | PoolNet-master/networks/joint_poolnet.py | import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
# Decoder configuration per backbone: 'convert' = (backbone -> decoder)
# channel lists per level, 'deep_pool' = per-stage (in, out, need_x2,
# need_fuse) for DeepPoolLayer, 'score' = score-head input width,
# 'edgeinfoc' = EdgeInfoLayerC (in, out), 'block' = BlockLayer specs,
# 'fuse' = FuseLayer1 (per-branch widths, deep-supervision flag).
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 256, 'edgeinfoc':[48,128], 'block': [[512, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16], True]}  # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 256, 'edgeinfoc':[64,128], 'block': [[512, [16]], [256, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16, 16], True]}
class ConvertLayer(nn.Module):
    """Per-level 1x1 conv + ReLU mapping backbone channels to decoder channels.

    ``list_k`` is a pair ``[in_channels_per_level, out_channels_per_level]``;
    one converter is built for each level.
    """
    def __init__(self, list_k):
        super(ConvertLayer, self).__init__()
        in_chs, out_chs = list_k[0], list_k[1]
        self.convert0 = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, 1, bias=False),
                nn.ReLU(inplace=True),
            )
            for c_in, c_out in zip(in_chs, out_chs)
        )

    def forward(self, list_x):
        # Apply the i-th converter to the i-th feature map.
        return [conv(feat) for conv, feat in zip(self.convert0, list_x)]
class DeepPoolLayer(nn.Module):
    """PoolNet pyramid-pooling decoder stage.

    Adds multi-receptive-field context (avg-pool at strides 2/4/8 + 3x3 conv,
    upsampled back) to the input, then optionally upsamples to ``x2``'s size
    and fuses with the skip feature ``x2`` and the location feature ``x3``.
    """
    def __init__(self, k, k_out, need_x2, need_fuse):
        super(DeepPoolLayer, self).__init__()
        self.pools_sizes = [2,4,8]
        # need_x2: upsample output to the next (larger) feature's size.
        # need_fuse: add skip (x2) and location (x3) features after conv_sum.
        self.need_x2 = need_x2
        self.need_fuse = need_fuse
        pools, convs = [],[]
        for i in self.pools_sizes:
            pools.append(nn.AvgPool2d(kernel_size=i, stride=i))
            convs.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
        self.pools = nn.ModuleList(pools)
        self.convs = nn.ModuleList(convs)
        self.relu = nn.ReLU()
        self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
        if self.need_fuse:
            self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
    def forward(self, x, x2=None, x3=None):
        """x: current feature; x2: skip feature (required if need_x2/need_fuse);
        x3: location feature (required if need_fuse)."""
        x_size = x.size()
        resl = x
        # Sum the input with each pooled-conv branch, upsampled to x's size.
        for i in range(len(self.pools_sizes)):
            y = self.convs[i](self.pools[i](x))
            resl = torch.add(resl, F.interpolate(y, x_size[2:], mode='bilinear', align_corners=True))
        resl = self.relu(resl)
        if self.need_x2:
            resl = F.interpolate(resl, x2.size()[2:], mode='bilinear', align_corners=True)
        resl = self.conv_sum(resl)
        if self.need_fuse:
            resl = self.conv_sum_c(torch.add(torch.add(resl, x2), x3))
        return resl
class BlockLayer(nn.Module):
    """Residual bottleneck blocks for the edge branch.

    For each entry in ``k_out_list`` it builds two residual 1x1-3x3-1x1
    bottlenecks followed by a 1x1 projection to that output width; ``mode``
    in ``forward`` selects which of these parallel stacks to use.
    """
    def __init__(self, k_in, k_out_list):
        super(BlockLayer, self).__init__()
        up_in1, up_mid1, up_in2, up_mid2, up_out = [], [], [], [], []
        for k in k_out_list:
            # First bottleneck: reduce to k_in/4, 3x3, expand back to k_in.
            up_in1.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
            up_mid1.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
            # Second bottleneck, same shape.
            up_in2.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
            up_mid2.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
            # Final projection to the requested output width k.
            up_out.append(nn.Conv2d(k_in, k, 1, 1, bias=False))
        self.block_in1 = nn.ModuleList(up_in1)
        self.block_in2 = nn.ModuleList(up_in2)
        self.block_mid1 = nn.ModuleList(up_mid1)
        self.block_mid2 = nn.ModuleList(up_mid2)
        self.block_out = nn.ModuleList(up_out)
        self.relu = nn.ReLU()
    def forward(self, x, mode=0):
        # Two residual bottlenecks, then the output projection for `mode`.
        x_tmp = self.relu(x + self.block_mid1[mode](self.block_in1[mode](x)))
        # x_tmp = self.block_mid2[mode](self.block_in2[mode](self.relu(x + x_tmp)))
        x_tmp = self.relu(x_tmp + self.block_mid2[mode](self.block_in2[mode](x_tmp)))
        x_tmp = self.block_out[mode](x_tmp)
        return x_tmp
class EdgeInfoLayerC(nn.Module):
    """Fuse multi-level edge features into one map for the saliency head.

    All inputs are upsampled to a common size, concatenated along channels
    (total must equal ``k_in``) and passed through four conv+ReLU stages.
    """
    def __init__(self, k_in, k_out):
        super(EdgeInfoLayerC, self).__init__()
        stages = [
            nn.Conv2d(k_in, k_in, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(k_in, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
        ]
        self.trans = nn.Sequential(*stages)

    def forward(self, x, x_size):
        target_hw = x_size[2:]
        upsampled = [
            F.interpolate(feat, target_hw, mode='bilinear', align_corners=True)
            for feat in x
        ]
        return self.trans(torch.cat(upsampled, dim=1))
class FuseLayer1(nn.Module):
    """Fuse per-level edge predictions into a single 1-channel map.

    Each input is projected to one channel and upsampled to the target size;
    the fused output is a 1x1 conv over their concatenation. With
    ``deep_sup`` the per-level maps are returned too for deep supervision.
    """
    def __init__(self, list_k, deep_sup):
        super(FuseLayer1, self).__init__()
        self.trans = nn.ModuleList(nn.Conv2d(k, 1, 1, 1) for k in list_k)
        self.fuse = nn.Conv2d(len(list_k), 1, 1, 1)
        self.deep_sup = deep_sup

    def forward(self, list_x, x_size):
        up_x = [
            F.interpolate(trans(feat), x_size[2:], mode='bilinear', align_corners=True)
            for trans, feat in zip(self.trans, list_x)
        ]
        out_fuse = self.fuse(torch.cat(up_x, dim=1))
        if self.deep_sup:
            return [out_fuse, list(up_x)]
        return [out_fuse]
class ScoreLayer(nn.Module):
    """Final 1-channel scoring head (3x3 conv) with optional upsampling."""
    def __init__(self, k):
        super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k, 1, 3, 1, 1)

    def forward(self, x, x_size=None):
        out = self.score(x)
        if x_size is None:
            return out
        # Resize the score map to the requested spatial size.
        return F.interpolate(out, x_size[2:], mode='bilinear', align_corners=True)
def extra_layer(base_model_cfg, base):
    """Build every decoder component for the joint PoolNet.

    Returns (base, convert, deep_pool list, block list, fuse, edgeinfo,
    score) built from the config matching ``base_model_cfg`` ('vgg' or
    'resnet').
    """
    if base_model_cfg == 'vgg':
        config = config_vgg
    elif base_model_cfg == 'resnet':
        config = config_resnet
    convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers = [], [], [], [], [], []
    convert_layers = ConvertLayer(config['convert'])
    for k in config['block']:
        block_layers += [BlockLayer(k[0], k[1])]
    for i in range(len(config['deep_pool'][0])):
        deep_pool_layers += [DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i], config['deep_pool'][2][i], config['deep_pool'][3][i])]
    fuse_layers = FuseLayer1(config['fuse'][0], config['fuse'][1])
    edgeinfo_layers = EdgeInfoLayerC(config['edgeinfoc'][0], config['edgeinfoc'][1])
    score_layers = ScoreLayer(config['score'])
    return base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers
class PoolNet(nn.Module):
    """Joint edge + saliency PoolNet (mode 0 = edge, mode 1 = saliency)."""
    def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers):
        super(PoolNet, self).__init__()
        self.base_model_cfg = base_model_cfg
        self.base = base
        self.block = nn.ModuleList(block_layers)
        self.deep_pool = nn.ModuleList(deep_pool_layers)
        self.fuse = fuse_layers
        self.edgeinfo = edgeinfo_layers
        self.score = score_layers
        # ResNet features need channel conversion before entering the decoder.
        if self.base_model_cfg == 'resnet':
            self.convert = convert_layers
    def forward(self, x, mode):
        """mode 0: return fused edge maps; mode 1: return the saliency map."""
        x_size = x.size()
        conv2merge, infos = self.base(x)
        if self.base_model_cfg == 'resnet':
            conv2merge = self.convert(conv2merge)
        conv2merge = conv2merge[::-1]  # deepest feature first
        edge_merge = []
        merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
        edge_merge.append(merge)
        for k in range(1, len(conv2merge)-1):
            merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
            edge_merge.append(merge)
        if mode == 0:
            edge_merge = [self.block[i](kk) for i, kk in enumerate(edge_merge)]
            merge = self.fuse(edge_merge, x_size)
        elif mode == 1:
            merge = self.deep_pool[-1](merge)
            # detach(): the saliency loss must not update the edge branch.
            edge_merge = [self.block[i](kk).detach() for i, kk in enumerate(edge_merge)]
            edge_merge = self.edgeinfo(edge_merge, merge.size())
            merge = self.score(torch.cat([merge, edge_merge], dim=1), x_size)
        return merge
def build_model(base_model_cfg='vgg'):
    """Construct a joint PoolNet on the requested backbone ('vgg' or 'resnet')."""
    if base_model_cfg == 'vgg':
        backbone = vgg16_locate()
    elif base_model_cfg == 'resnet':
        backbone = resnet50_locate()
    else:
        return None  # mirrors the original implicit None for unknown configs
    return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, backbone))
def weights_init(m):
    """Initialize Conv2d layers: weights ~ N(0, 0.01), biases zeroed.

    Intended for ``nn.Module.apply``; every non-conv module is left untouched.
    """
    if not isinstance(m, nn.Conv2d):
        return
    m.weight.data.normal_(0, 0.01)
    if m.bias is not None:
        m.bias.data.zero_()
| 8,853 | 41.772947 | 344 | py |
PoolNet | PoolNet-master/networks/vgg.py | import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
# vgg16
def vgg(cfg, i, batch_norm=False):
    """Build the VGG feature layers described by ``cfg``.

    Args:
        cfg: sequence of channel counts interleaved with 'M' pooling markers.
        i: number of input channels.
        batch_norm: insert a BatchNorm2d after each conv when True.

    Returns:
        A flat list of nn.Module layers (not wrapped in nn.Sequential).

    The pool after the fifth 'M' (stage 6) uses stride 1, so the deepest
    features keep their spatial size; earlier pools halve it.
    """
    layers = []
    in_channels = i
    stage = 1
    for v in cfg:
        if v == 'M':
            stage += 1
            # Stage-6 pooling keeps resolution for the decoder.
            stride = 1 if stage == 6 else 2
            layers += [nn.MaxPool2d(kernel_size=3, stride=stride, padding=1)]
        else:
            # NOTE: the original special-cased stage 6 here but built an
            # identical conv in both branches; that dead distinction is removed.
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return layers
class vgg16(nn.Module):
    """VGG-16 feature extractor that returns intermediate feature maps."""
    def __init__(self):
        super(vgg16, self).__init__()
        # 'tun': layer spec consumed by vgg(); 'tun_ex' is kept but unused here.
        self.cfg = {'tun': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'tun_ex': [512, 512, 512]}
        # Indices of the ReLU outputs to collect in forward().
        self.extract = [8, 15, 22, 29] # [3, 8, 15, 22, 29]
        self.base = nn.ModuleList(vgg(self.cfg['tun'], 3))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_pretrained_model(self, model):
        # strict=False: only matching layer names are restored.
        self.base.load_state_dict(model, strict=False)
    def forward(self, x):
        """Return the feature maps at the indices listed in self.extract."""
        tmp_x = []
        for k in range(len(self.base)):
            x = self.base[k](x)
            if k in self.extract:
                tmp_x.append(x)
        return tmp_x
class vgg16_locate(nn.Module):
    """VGG-16 backbone plus a pyramid-pooling (PPM) localization module.

    forward() returns the VGG feature maps and a list of PPM-derived
    location features, one per decoder level.
    """
    def __init__(self):
        super(vgg16_locate,self).__init__()
        self.vgg16 = vgg16()
        self.in_planes = 512
        self.out_planes = [512, 256, 128]
        ppms, infos = [], []
        # Pyramid pooling at output sizes 1x1, 3x3, 5x5.
        for ii in [1, 3, 5]:
            ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.ppms = nn.ModuleList(ppms)
        # Merge deepest feature + 3 PPM branches (4 * in_planes channels).
        self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
        # One projection per decoder level width.
        for ii in self.out_planes:
            infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.infos = nn.ModuleList(infos)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_pretrained_model(self, model):
        self.vgg16.load_pretrained_model(model)
    def forward(self, x):
        """Return (backbone feature list, location feature list)."""
        x_size = x.size()[2:]
        xs = self.vgg16(x)
        # Concatenate the deepest feature with all PPM branches at its size.
        xls = [xs[-1]]
        for k in range(len(self.ppms)):
            xls.append(F.interpolate(self.ppms[k](xs[-1]), xs[-1].size()[2:], mode='bilinear', align_corners=True))
        xls = self.ppm_cat(torch.cat(xls, dim=1))
        # Project the merged feature to each shallower level's size/width.
        infos = []
        for k in range(len(self.infos)):
            infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
        return xs, infos
| 3,581 | 35.927835 | 148 | py |
PoolNet | PoolNet-master/networks/poolnet.py | import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
# Decoder configuration per backbone: 'convert' = (backbone -> decoder)
# channel lists per level, 'deep_pool' = per-stage (in, out, need_x2,
# need_fuse) for DeepPoolLayer, 'score' = score-head input width.
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 128} # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 128}
class ConvertLayer(nn.Module):
    """Per-level 1x1 conv + ReLU mapping backbone channels to decoder channels.

    ``list_k`` is a pair ``[in_channels_per_level, out_channels_per_level]``;
    one converter is built for each level.
    """
    def __init__(self, list_k):
        super(ConvertLayer, self).__init__()
        in_chs, out_chs = list_k[0], list_k[1]
        self.convert0 = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, 1, bias=False),
                nn.ReLU(inplace=True),
            )
            for c_in, c_out in zip(in_chs, out_chs)
        )

    def forward(self, list_x):
        # Apply the i-th converter to the i-th feature map.
        return [conv(feat) for conv, feat in zip(self.convert0, list_x)]
class DeepPoolLayer(nn.Module):
    """PoolNet pyramid-pooling decoder stage.

    Adds multi-receptive-field context (avg-pool at strides 2/4/8 + 3x3 conv,
    upsampled back) to the input, then optionally upsamples to ``x2``'s size
    and fuses with the skip feature ``x2`` and the location feature ``x3``.
    """
    def __init__(self, k, k_out, need_x2, need_fuse):
        super(DeepPoolLayer, self).__init__()
        self.pools_sizes = [2,4,8]
        # need_x2: upsample output to the next (larger) feature's size.
        # need_fuse: add skip (x2) and location (x3) features after conv_sum.
        self.need_x2 = need_x2
        self.need_fuse = need_fuse
        pools, convs = [],[]
        for i in self.pools_sizes:
            pools.append(nn.AvgPool2d(kernel_size=i, stride=i))
            convs.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
        self.pools = nn.ModuleList(pools)
        self.convs = nn.ModuleList(convs)
        self.relu = nn.ReLU()
        self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
        if self.need_fuse:
            self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
    def forward(self, x, x2=None, x3=None):
        """x: current feature; x2: skip feature (required if need_x2/need_fuse);
        x3: location feature (required if need_fuse)."""
        x_size = x.size()
        resl = x
        # Sum the input with each pooled-conv branch, upsampled to x's size.
        for i in range(len(self.pools_sizes)):
            y = self.convs[i](self.pools[i](x))
            resl = torch.add(resl, F.interpolate(y, x_size[2:], mode='bilinear', align_corners=True))
        resl = self.relu(resl)
        if self.need_x2:
            resl = F.interpolate(resl, x2.size()[2:], mode='bilinear', align_corners=True)
        resl = self.conv_sum(resl)
        if self.need_fuse:
            resl = self.conv_sum_c(torch.add(torch.add(resl, x2), x3))
        return resl
class ScoreLayer(nn.Module):
    """Final 1-channel scoring head (1x1 conv) with optional upsampling."""
    def __init__(self, k):
        super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k, 1, 1, 1)

    def forward(self, x, x_size=None):
        out = self.score(x)
        if x_size is None:
            return out
        # Resize the score map to the requested spatial size.
        return F.interpolate(out, x_size[2:], mode='bilinear', align_corners=True)
def extra_layer(base_model_cfg, vgg):
    """Build the decoder components for the saliency-only PoolNet.

    ``vgg`` is the backbone module (despite the name it may be the ResNet
    backbone). Returns (backbone, convert, deep_pool list, score).
    """
    if base_model_cfg == 'vgg':
        config = config_vgg
    elif base_model_cfg == 'resnet':
        config = config_resnet
    convert_layers, deep_pool_layers, score_layers = [], [], []
    convert_layers = ConvertLayer(config['convert'])
    for i in range(len(config['deep_pool'][0])):
        deep_pool_layers += [DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i], config['deep_pool'][2][i], config['deep_pool'][3][i])]
    score_layers = ScoreLayer(config['score'])
    return vgg, convert_layers, deep_pool_layers, score_layers
class PoolNet(nn.Module):
    """Saliency-only PoolNet: backbone + deep-pool decoder + score head."""
    def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, score_layers):
        super(PoolNet, self).__init__()
        self.base_model_cfg = base_model_cfg
        self.base = base
        self.deep_pool = nn.ModuleList(deep_pool_layers)
        self.score = score_layers
        # ResNet features need channel conversion before entering the decoder.
        if self.base_model_cfg == 'resnet':
            self.convert = convert_layers
    def forward(self, x):
        """Return the full-resolution saliency logits for input x."""
        x_size = x.size()
        conv2merge, infos = self.base(x)
        if self.base_model_cfg == 'resnet':
            conv2merge = self.convert(conv2merge)
        conv2merge = conv2merge[::-1]  # deepest feature first
        edge_merge = []
        merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
        for k in range(1, len(conv2merge)-1):
            merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
        merge = self.deep_pool[-1](merge)
        merge = self.score(merge, x_size)
        return merge
def build_model(base_model_cfg='vgg'):
    """Construct a PoolNet on the requested backbone ('vgg' or 'resnet')."""
    if base_model_cfg == 'vgg':
        backbone = vgg16_locate()
    elif base_model_cfg == 'resnet':
        backbone = resnet50_locate()
    else:
        return None  # mirrors the original implicit None for unknown configs
    return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, backbone))
def weights_init(m):
    """Initialize Conv2d layers: weights ~ N(0, 0.01), biases zeroed.

    Intended for ``nn.Module.apply``; every non-conv module is left untouched.
    """
    if not isinstance(m, nn.Conv2d):
        return
    m.weight.data.normal_(0, 0.01)
    if m.bias is not None:
        m.bias.data.zero_()
| 4,800 | 37.103175 | 227 | py |
PoolNet | PoolNet-master/networks/deeplab_resnet.py | import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
# Affine flag applied to every BatchNorm2d in this backbone.
affine_par = True
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34 style)."""
    # Output channels = planes * expansion.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
        # downsample projects the residual when shape/stride changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block with optional dilation.

    All BatchNorm parameters are frozen (requires_grad = False), so BN stays
    fixed during fine-tuning.
    """
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation_ = 1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # Padding tracks the dilation so spatial size is preserved.
        padding = 1
        if dilation_ == 2:
            padding = 2
        elif dilation_ == 4:
            padding = 4
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
                               padding=padding, bias=False, dilation = dilation_)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Dilated ResNet backbone that returns features from all five stages.

    layer4 uses stride 1 with dilation 2, keeping the deepest feature at the
    same resolution as layer3's output.
    """
    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64,affine = affine_par)
        # Stem BN is frozen, like the BNs inside the blocks.
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__ = 2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1,dilation__ = 1):
        """Stack ``blocks`` residual blocks; the first may downsample/dilate."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par),
            )
            # The projection's BN is frozen as well.
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation_=dilation__, downsample = downsample ))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,dilation_=dilation__))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return [stem, layer1, layer2, layer3, layer4] feature maps."""
        tmp_x = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        tmp_x.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        tmp_x.append(x)
        x = self.layer2(x)
        tmp_x.append(x)
        x = self.layer3(x)
        tmp_x.append(x)
        x = self.layer4(x)
        tmp_x.append(x)
        return tmp_x
class ResNet_locate(nn.Module):
    """ResNet backbone plus a pyramid-pooling (PPM) localization module.

    forward() returns the ResNet feature maps and a list of PPM-derived
    location features, one per decoder level.
    """
    def __init__(self, block, layers):
        super(ResNet_locate,self).__init__()
        self.resnet = ResNet(block, layers)
        self.in_planes = 512
        self.out_planes = [512, 256, 256, 128]
        # Reduce the 2048-channel layer4 output before pyramid pooling.
        self.ppms_pre = nn.Conv2d(2048, self.in_planes, 1, 1, bias=False)
        ppms, infos = [], []
        # Pyramid pooling at output sizes 1x1, 3x3, 5x5.
        for ii in [1, 3, 5]:
            ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.ppms = nn.ModuleList(ppms)
        # Merge reduced feature + 3 PPM branches (4 * in_planes channels).
        self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
        # One projection per decoder level width.
        for ii in self.out_planes:
            infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.infos = nn.ModuleList(infos)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_pretrained_model(self, model):
        self.resnet.load_state_dict(model, strict=False)
    def forward(self, x):
        """Return (backbone feature list, location feature list)."""
        x_size = x.size()[2:]
        xs = self.resnet(x)
        xs_1 = self.ppms_pre(xs[-1])
        # Concatenate the reduced feature with all PPM branches at its size.
        xls = [xs_1]
        for k in range(len(self.ppms)):
            xls.append(F.interpolate(self.ppms[k](xs_1), xs_1.size()[2:], mode='bilinear', align_corners=True))
        xls = self.ppm_cat(torch.cat(xls, dim=1))
        # Project the merged feature to each shallower level's size/width.
        infos = []
        for k in range(len(self.infos)):
            infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
        return xs, infos
def resnet50_locate():
    """ResNet-50 (Bottleneck x [3, 4, 6, 3]) wrapped with the PPM module."""
    return ResNet_locate(Bottleneck, [3, 4, 6, 3])
| 7,161 | 34.107843 | 148 | py |
PoolNet | PoolNet-master/dataset/dataset.py | import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
    """Saliency training set.

    Each line of ``data_list`` is '<image path> <gt path>' relative to
    ``data_root``. Samples are mean-subtracted CHW images with [0,1] labels,
    jointly flipped at random.
    """
    def __init__(self, data_root, data_list):
        self.sal_root = data_root
        self.sal_source = data_list
        with open(self.sal_source, 'r') as f:
            self.sal_list = [x.strip() for x in f.readlines()]
        self.sal_num = len(self.sal_list)
    def __getitem__(self, item):
        # sal data loading
        # Modulo lets the dataset be indexed past its length safely.
        im_name = self.sal_list[item % self.sal_num].split()[0]
        gt_name = self.sal_list[item % self.sal_num].split()[1]
        sal_image = load_image(os.path.join(self.sal_root, im_name))
        sal_label = load_sal_label(os.path.join(self.sal_root, gt_name))
        # Same random flip is applied to image and label together.
        sal_image, sal_label = cv_random_flip(sal_image, sal_label)
        sal_image = torch.Tensor(sal_image)
        sal_label = torch.Tensor(sal_label)
        sample = {'sal_image': sal_image, 'sal_label': sal_label}
        return sample
    def __len__(self):
        return self.sal_num
class ImageDataTest(data.Dataset):
    """Test set: returns the preprocessed image, its file name, and its
    original (H, W) size for resizing predictions back."""
    def __init__(self, data_root, data_list):
        self.data_root = data_root
        self.data_list = data_list
        with open(self.data_list, 'r') as f:
            self.image_list = [x.strip() for x in f.readlines()]
        self.image_num = len(self.image_list)
    def __getitem__(self, item):
        image, im_size = load_image_test(os.path.join(self.data_root, self.image_list[item]))
        image = torch.Tensor(image)
        return {'image': image, 'name': self.image_list[item % self.image_num], 'size': im_size}
    def __len__(self):
        return self.image_num
def get_loader(config, mode='train', pin=False):
    """Create a DataLoader: shuffled train set for 'train', test set otherwise."""
    shuffle = mode == 'train'
    if shuffle:
        dataset = ImageDataTrain(config.train_root, config.train_list)
    else:
        dataset = ImageDataTest(config.test_root, config.test_list)
    return data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle,
                           num_workers=config.num_thread, pin_memory=pin)
def load_image(path):
    """Load a BGR image as a mean-subtracted float32 CHW array.

    Raises:
        FileNotFoundError: if ``path`` does not exist or cannot be decoded.
            (The original only printed a warning and then crashed on the
            ``None`` returned by ``cv2.imread``.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    if im is None:
        raise FileNotFoundError('File {} could not be read as an image'.format(path))
    in_ = np.array(im, dtype=np.float32)
    # Per-channel means in BGR order (Caffe-style) — presumably the backbone's
    # pretraining statistics; confirm against the pretrained model.
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2,0,1))  # HWC -> CHW
    return in_
def load_image_test(path):
    """Load a BGR test image; return (CHW float32 array, original (H, W)).

    Raises:
        FileNotFoundError: if ``path`` does not exist or cannot be decoded.
            (The original only printed a warning and then crashed on the
            ``None`` returned by ``cv2.imread``.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    if im is None:
        raise FileNotFoundError('File {} could not be read as an image'.format(path))
    in_ = np.array(im, dtype=np.float32)
    im_size = tuple(in_.shape[:2])  # keep (H, W) for resizing predictions back
    # Per-channel means in BGR order (Caffe-style) — presumably the backbone's
    # pretraining statistics; confirm against the pretrained model.
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2,0,1))  # HWC -> CHW
    return in_, im_size
def load_sal_label(path):
    """Load a saliency ground-truth map as a float32 array in [0, 1], shape (1, H, W).

    Raises:
        FileNotFoundError: if ``path`` does not exist. (The original only
        printed a warning and then crashed inside PIL.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = Image.open(path)
    label = np.array(im, dtype=np.float32)
    if len(label.shape) == 3:
        label = label[:,:,0]  # keep a single channel for color-saved masks
    label = label / 255.
    label = label[np.newaxis, ...]  # add the channel dimension
    return label
def cv_random_flip(img, label):
    """Mirror image and label together along the width axis with p = 0.5."""
    if random.randint(0, 1):
        # .copy() gives contiguous arrays (negative strides break torch.Tensor).
        img = img[:, :, ::-1].copy()
        label = label[:, :, ::-1].copy()
    return img, label
| 3,469 | 32.047619 | 148 | py |
PoolNet | PoolNet-master/dataset/joint_dataset.py | import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
    """Joint training set pairing one edge sample with one saliency sample.

    Both list files contain '<image path> <gt path>' lines; the dataset
    length is the larger of the two lists, with modulo indexing so the
    shorter list wraps around.
    """
    def __init__(self, sal_data_root, sal_data_list, edge_data_root, edge_data_list):
        self.sal_root = sal_data_root
        self.sal_source = sal_data_list
        self.edge_root = edge_data_root
        self.edge_source = edge_data_list
        with open(self.sal_source, 'r') as f:
            self.sal_list = [x.strip() for x in f.readlines()]
        with open(self.edge_source, 'r') as f:
            self.edge_list = [x.strip() for x in f.readlines()]
        self.sal_num = len(self.sal_list)
        self.edge_num = len(self.edge_list)
    def __getitem__(self, item):
        # edge data loading
        edge_im_name = self.edge_list[item % self.edge_num].split()[0]
        edge_gt_name = self.edge_list[item % self.edge_num].split()[1]
        edge_image = load_image(os.path.join(self.edge_root, edge_im_name))
        edge_label = load_edge_label(os.path.join(self.edge_root, edge_gt_name))
        edge_image = torch.Tensor(edge_image)
        edge_label = torch.Tensor(edge_label)
        # sal data loading
        sal_im_name = self.sal_list[item % self.sal_num].split()[0]
        sal_gt_name = self.sal_list[item % self.sal_num].split()[1]
        sal_image = load_image(os.path.join(self.sal_root, sal_im_name))
        sal_label = load_sal_label(os.path.join(self.sal_root, sal_gt_name))
        # Only the saliency pair is randomly flipped (jointly).
        sal_image, sal_label = cv_random_flip(sal_image, sal_label)
        sal_image = torch.Tensor(sal_image)
        sal_label = torch.Tensor(sal_label)
        sample = {'edge_image': edge_image, 'edge_label': edge_label, 'sal_image': sal_image, 'sal_label': sal_label}
        return sample
    def __len__(self):
        return max(self.sal_num, self.edge_num)
class ImageDataTest(data.Dataset):
    """Test set: returns the preprocessed image, its file name, and its
    original (H, W) size for resizing predictions back."""
    def __init__(self, data_root, data_list):
        self.data_root = data_root
        self.data_list = data_list
        with open(self.data_list, 'r') as f:
            self.image_list = [x.strip() for x in f.readlines()]
        self.image_num = len(self.image_list)
    def __getitem__(self, item):
        image, im_size = load_image_test(os.path.join(self.data_root, self.image_list[item]))
        image = torch.Tensor(image)
        return {'image': image, 'name': self.image_list[item % self.image_num], 'size': im_size}
    def __len__(self):
        return self.image_num
def get_loader(config, mode='train', pin=False):
    """Create a DataLoader: shuffled joint train set for 'train', test set otherwise."""
    shuffle = mode == 'train'
    if shuffle:
        dataset = ImageDataTrain(config.train_root, config.train_list, config.train_edge_root, config.train_edge_list)
    else:
        dataset = ImageDataTest(config.test_root, config.test_list)
    return data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle,
                           num_workers=config.num_thread, pin_memory=pin)
def load_image(path):
    """Load an image, subtract the per-channel BGR mean, return float32 CHW.

    Parameters:
        path: filesystem path to an image readable by ``cv2.imread``.

    Returns:
        np.ndarray of shape (3, H, W), dtype float32, mean-subtracted
        (VGG-style BGR means 104.00699 / 116.66877 / 122.67892).

    Raises:
        FileNotFoundError: if *path* does not exist.  The original code only
        printed a warning and continued; ``cv2.imread`` then returned None
        and the conversion crashed later with an opaque TypeError.
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    in_ = np.array(im, dtype=np.float32)
    in_ -= np.array((104.00699, 116.66877, 122.67892))  # BGR channel means
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_
def load_image_test(path):
    """Load an image for inference: mean-subtracted float32 CHW plus size.

    Parameters:
        path: filesystem path to an image readable by ``cv2.imread``.

    Returns:
        (array, im_size): the (3, H, W) float32 mean-subtracted image and the
        original (H, W) tuple needed to resize predictions back.

    Raises:
        FileNotFoundError: if *path* does not exist.  The original code only
        printed a warning and then crashed later when ``cv2.imread``
        returned None.
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    in_ = np.array(im, dtype=np.float32)
    im_size = tuple(in_.shape[:2])  # (H, W) before transposing
    in_ -= np.array((104.00699, 116.66877, 122.67892))  # BGR channel means
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_, im_size
def load_sal_label(path):
    """Load a saliency ground-truth map as float32 of shape (1, H, W).

    Multi-channel images are reduced to their first channel; values are
    scaled from [0, 255] to [0, 1].
    """
    if not os.path.exists(path):
        print('File {} not exists'.format(path))
    gt = Image.open(path)
    label = np.array(gt, dtype=np.float32)
    if label.ndim == 3:
        label = label[:, :, 0]  # keep a single channel
    label /= 255.
    return label[np.newaxis, ...]
def load_edge_label(path):
    """Load an edge ground-truth map as float32 of shape (1, H, W).

    Values are scaled to [0, 1]; pixels strictly above 0.5 are then snapped
    to exactly 1.0 (values <= 0.5 keep their scaled value).
    """
    if not os.path.exists(path):
        print('File {} not exists'.format(path))
    gt = Image.open(path)
    label = np.array(gt, dtype=np.float32)
    if label.ndim == 3:
        label = label[:, :, 0]  # keep a single channel
    label /= 255.
    label[label > 0.5] = 1.  # binarize the high side only
    return label[np.newaxis, ...]
def cv_random_flip(img, label):
    """With probability 0.5, flip *img* and *label* together along the last
    (width) axis.  Returns copies when flipped, the originals otherwise."""
    if random.randint(0, 1):
        return img[:, :, ::-1].copy(), label[:, :, ::-1].copy()
    return img, label
| 4,702 | 34.360902 | 148 | py |
GNNDelete | GNNDelete-main/train_node.py | import os
import wandb
import pickle
import torch
from torch_geometric.seed import seed_everything
from torch_geometric.utils import to_undirected, is_undirected
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.training_args import parse_args
from framework.trainer.base import NodeClassificationTrainer
from framework.utils import negative_sampling_kg
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
    """Train the original (pre-unlearning) node-classification GNN on DBLP.

    Checkpoints and logs go to checkpoint_node/DBLP/<gnn>/original/<seed>/.
    Dataset and model name are hard-coded overrides of the CLI arguments.
    """
    args = parse_args()
    # Hard-coded overrides: this script always trains the 'original' model on DBLP.
    args.checkpoint_dir = 'checkpoint_node'
    args.dataset = 'DBLP'
    args.unlearning_model = 'original'
    args.checkpoint_dir = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model, str(args.random_seed))
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    seed_everything(args.random_seed)

    # Dataset: full citation graph with row-normalized features and a random
    # train/val/test node split.
    dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
    data = dataset[0]
    print('Original data', data)
    split = T.RandomNodeSplit()
    data = split(data)
    assert is_undirected(data.edge_index)
    print('Split data', data)
    args.in_dim = data.x.shape[1]
    args.out_dim = dataset.num_classes
    wandb.init(config=args)

    # Model
    model = get_model(args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type).to(device)
    wandb.watch(model, log_freq=100)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)#, weight_decay=args.weight_decay)

    # Train
    trainer = NodeClassificationTrainer(args)
    trainer.train(model, data, optimizer, args)

    # Test
    trainer.test(model, data)
    trainer.save_log()


if __name__ == "__main__":
    main()
| 1,881 | 30.898305 | 129 | py |
GNNDelete | GNNDelete-main/graph_stat.py | import os
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18RR
from ogb.linkproppred import PygLinkPropPredDataset
data_dir = './data'
datasets = ['Cora', 'PubMed', 'DBLP', 'CS', 'Physics', 'ogbl-citation2', 'ogbl-collab', 'FB15k-237', 'WordNet18RR', 'ogbl-biokg', 'ogbl-wikikg2'][-2:]
def get_stat(d):
    """Print basic statistics for dataset *d*: node count, edge count, the
    5% edge-deletion budget, and (for typed graphs) the relation-type count.

    Parameters:
        d: dataset name; one of the CitationFull / Coauthor / Flickr / OGB
           link-prediction datasets handled below.

    Raises:
        ValueError: for unsupported dataset names.  (The original silently
        fell through every branch and crashed with NameError on `dataset`.)
    """
    if d in ['Cora', 'PubMed', 'DBLP']:
        dataset = CitationFull(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
    elif d in ['CS', 'Physics']:
        dataset = Coauthor(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
    elif d in ['Flickr']:
        dataset = Flickr(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
    elif 'ogbl' in d:
        dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
    else:
        raise ValueError('Unsupported dataset: {}'.format(d))
    data = dataset[0]
    print(d)
    print('Number of nodes:', data.num_nodes)
    print('Number of edges:', data.num_edges)
    print('Number of max deleted edges:', int(0.05 * data.num_edges))
    if hasattr(data, 'edge_type'):
        # Bug fix: the original label said 'Number of nodes:' here, but this
        # prints the count of distinct relation types.
        print('Number of edge types:', data.edge_type.unique().shape)
def main():
    """Report statistics for every dataset selected in the module-level list."""
    for name in datasets:
        get_stat(name)


if __name__ == "__main__":
    main()
| 1,292 | 35.942857 | 150 | py |
GNNDelete | GNNDelete-main/delete_node_feature.py | import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.models.deletion import GCNDelete
from framework.training_args import parse_args
from framework.utils import *
from framework.trainer.gnndelete_nodeemb import GNNDeleteNodeClassificationTrainer
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.autograd.set_detect_anomaly(True)
def to_directed(edge_index):
    """Collapse an undirected edge_index of shape (2, E) to one canonical
    direction per edge.

    Keeps only edges with row < col, i.e. a single copy of every undirected
    pair (self-loops are dropped too).

    Returns:
        torch.Tensor of shape (2, E'), indexable as ``out[:, mask]`` like
        any edge_index.

    Bug fix: the original used ``torch.cat([row[mask], col[mask]], dim=0)``,
    which produced a flat 1-D tensor of length 2*E' instead of the (2, E')
    layout that downstream code (``data.directed_df_edge_index``) expects.
    ``torch.stack`` restores the two-row layout.
    """
    row, col = edge_index
    mask = row < col
    return torch.stack([row[mask], col[mask]], dim=0)
def main():
    """Unlearn node *features*: zero out features of random nodes, remove
    their incident edges, and run GNNDelete-style unlearning on DBLP.

    Checkpoints go under checkpoint_node_feature/DBLP/<gnn>/<model>-node_deletion/...
    """
    args = parse_args()
    # Hard-coded overrides: feature-deletion experiments always run on DBLP.
    args.checkpoint_dir = 'checkpoint_node_feature'
    args.dataset = 'DBLP'
    # Paths of the pretrained original model and the MI attack models.
    original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
    attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
    attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
    seed_everything(args.random_seed)

    # GNNDelete variants encode their loss configuration in the path as well.
    if 'gnndelete' in args.unlearning_model:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
            '-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    else:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    os.makedirs(args.checkpoint_dir, exist_ok=True)

    # Dataset: DBLP citation graph with a random node split.
    dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
    data = dataset[0]
    print('Original data', data)
    split = T.RandomNodeSplit()
    data = split(data)
    assert is_undirected(data.edge_index)
    print('Split data', data)
    args.in_dim = data.x.shape[1]
    args.out_dim = dataset.num_classes
    wandb.init(config=args)

    # Df and Dr: df_size >= 100 is an absolute count, otherwise a percentage.
    if args.df_size >= 100:     # df_size is number of nodes/edges to be deleted
        df_size = int(args.df_size)
    else:                       # df_size is the ratio
        # NOTE(review): ratio is taken over train_pos_edge_index, but the
        # deletion below samples *nodes* — confirm this is intended.
        df_size = int(args.df_size / 100 * data.train_pos_edge_index.shape[1])
    print(f'Original size: {data.num_nodes:,}')
    print(f'Df size: {df_size:,}')

    # Delete node features: zero the feature rows of df_size random nodes.
    df_nodes = torch.randperm(data.num_nodes)[:df_size]
    global_node_mask = torch.ones(data.num_nodes, dtype=torch.bool)
    # global_node_mask[df_nodes] = False
    data.x[df_nodes] = 0
    assert data.x[df_nodes].sum() == 0
    dr_mask_node = torch.ones(data.num_nodes, dtype=torch.bool)
    # NOTE(review): since the masking line above is commented out,
    # df_mask_node is all-False here; the assert below was disabled
    # accordingly — verify this is intentional.
    df_mask_node = ~global_node_mask
    # assert df_mask_node.sum() == df_size

    # Delete edges associated with deleted nodes from the training set.
    # NOTE(review): each term ORs torch.eq(...) with itself, so this only
    # tests the row entries; presumably col membership was meant too — confirm.
    res = [torch.eq(data.edge_index, aelem).logical_or_(torch.eq(data.edge_index, aelem)) for aelem in df_nodes]
    df_mask_edge = torch.any(torch.stack(res, dim=0), dim = 0)
    df_mask_edge = df_mask_edge.sum(0).bool()
    dr_mask_edge = ~df_mask_edge
    df_edge = data.edge_index[:, df_mask_edge]
    data.directed_df_edge_index = to_directed(df_edge)
    print('Deleting the following nodes:', df_nodes)
    # (A commented-out block in the original also filtered val/test edge sets
    # for the deleted nodes; it is intentionally disabled.)

    # Edges within the 2-hop enclosing subgraph of the deleted edges (S_Df).
    _, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
        data.edge_index[:, df_mask_edge].flatten().unique(),
        2,
        data.edge_index,
        num_nodes=data.num_nodes)
    # Nodes within 1 hop of the deleted edges.
    _, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
        data.edge_index[:, df_mask_edge].flatten().unique(),
        1,
        data.edge_index,
        num_nodes=data.num_nodes)
    sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_1hop[one_hop_edge.flatten().unique()] = True
    sdf_node_2hop[two_hop_edge.flatten().unique()] = True
    assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
    assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
    data.sdf_node_1hop_mask = sdf_node_1hop
    data.sdf_node_2hop_mask = sdf_node_2hop

    # Make edges undirected for message passing.
    print(is_undirected(data.edge_index))
    if args.gnn in ['rgcn', 'rgat']:
        r, c = data.train_pos_edge_index
        rev_edge_index = torch.stack([c, r], dim=0)
        rev_edge_type = data.train_edge_type + args.num_edge_type
        data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
        data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
        # NOTE(review): df_mask / dr_mask are undefined in this branch (only
        # df_mask_edge / dr_mask_edge exist) — this path raises NameError.
        two_hop_mask = two_hop_mask.repeat(2).view(-1)
        df_mask = df_mask.repeat(2).view(-1)
        dr_mask = dr_mask.repeat(2).view(-1)
        assert is_undirected(data.edge_index)
    else:
        two_hop_mask = two_hop_mask.bool()
        df_mask_edge = df_mask_edge.bool()
        dr_mask_edge = ~df_mask_edge
    print('Undirected dataset:', data)
    data.sdf_mask = two_hop_mask
    data.df_mask = df_mask_edge
    data.dr_mask = dr_mask_edge
    data.dtrain_mask = dr_mask_edge

    # Model: a GCN with GNNDelete's trainable deletion operators.
    model = GCNDelete(args)
    if args.unlearning_model != 'retrain':  # Start from trained GNN model
        # Optionally load the original model's predicted probabilities for
        # the distillation-style losses.
        if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
            logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
            if logits_ori is not None:
                logits_ori = logits_ori.to(device)
        else:
            logits_ori = None
        model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
        # strict=False: the deletion layers are new and absent from the checkpoint.
        model.load_state_dict(model_ckpt['model_state'], strict=False)
    else:  # Initialize a new GNN model
        retrain = None
        logits_ori = None
    model = model.to(device)

    # Optimizer: GNNDelete only trains the deletion ('del') parameters;
    # everything else trains the full model.
    if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
        parameters_to_optimize = [
            {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
        ]
        print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        if 'layerwise' in args.loss_type:
            # One optimizer per deletion layer for layerwise losses.
            optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
            optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
            optimizer = [optimizer1, optimizer2]
        else:
            optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
    else:
        if 'gnndelete' in args.unlearning_model:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        else:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters()])
        optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
    wandb.watch(model, log_freq=100)

    # MI attack models (currently disabled; loading code kept for reference).
    attack_model_all = None
    # attack_model_all = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
    # attack_model_all.load_state_dict(attack_ckpt['model_state'])
    # attack_model_all = attack_model_all.to(device)
    attack_model_sub = None
    # attack_model_sub = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
    # attack_model_sub.load_state_dict(attack_ckpt['model_state'])
    # attack_model_sub = attack_model_sub.to(device)

    # Train
    trainer = GNNDeleteNodeClassificationTrainer(args)
    trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)

    # Test: compare against the fully retrained model when available.
    if args.unlearning_model != 'retrain':
        retrain_path = os.path.join(
            'checkpoint', args.dataset, args.gnn, 'retrain',
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
        retrain_ckpt = torch.load(os.path.join(retrain_path, 'model_best.pt'), map_location=device)
        retrain_args = copy.deepcopy(args)
        retrain_args.unlearning_model = 'retrain'
        retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
        retrain.load_state_dict(retrain_ckpt['model_state'])
        retrain = retrain.to(device)
        retrain.eval()
    else:
        retrain = None

    trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
    trainer.save_log()


if __name__ == "__main__":
    main()
| 11,564 | 40.902174 | 156 | py |
GNNDelete | GNNDelete-main/delete_gnn.py | import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.training_args import parse_args
from framework.utils import *
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_args(path):
    """Re-create an argparse.Namespace from a JSON file of saved arguments.

    Every saved key becomes a CLI option with its saved value as default;
    after parsing, the saved values are re-applied so they win over any
    command-line overrides.
    """
    with open(path, 'r') as fp:
        saved = json.load(fp)
    parser = argparse.ArgumentParser()
    for key, value in saved.items():
        parser.add_argument('--' + key, default=value)
    try:
        # --df_size may already exist among the saved keys; swallow the
        # duplicate-option error in that case.
        parser.add_argument('--df_size', default=0.5)
    except:
        pass
    args = parser.parse_args()
    # Force the saved values regardless of what was parsed.
    for key, value in saved.items():
        setattr(args, key, value)
    return args
@torch.no_grad()
def get_node_embedding(model, data):
    """Run the encoder in eval mode and return node embeddings (no gradients)."""
    model.eval()
    features = data.x.to(device)
    edges = data.edge_index.to(device)
    return model(features, edges)
@torch.no_grad()
def get_output(model, node_embedding, data):
    """Decode edge scores for all edges in *data* from precomputed embeddings.

    Parameters:
        model: a model exposing ``decode(emb, edge_index[, edge_type])``.
        node_embedding: node embeddings, moved to the module-level device.
        data: graph data providing ``edge_index`` (and ``edge_type`` for KGs).

    Bug fix: the original referenced an undefined global ``edge_type`` and
    raised NameError on every call.  Edge types are now taken from *data*
    when present; plain graphs decode without them.
    """
    model.eval()
    node_embedding = node_embedding.to(device)
    edge = data.edge_index.to(device)
    edge_type = getattr(data, 'edge_type', None)
    if edge_type is None:
        output = model.decode(node_embedding, edge)
    else:
        output = model.decode(node_embedding, edge, edge_type.to(device))
    return output
torch.autograd.set_detect_anomaly(True)
def main():
    """Unlearn a set of *edges* (Df) from a pretrained link-prediction GNN.

    Loads the directed pre-split dataset pickle, samples Df from the stored
    candidate mask, builds the 1-/2-hop enclosing subgraph masks GNNDelete
    needs, then trains and tests the chosen unlearning model.
    """
    args = parse_args()
    # Paths of the pretrained original model and the MI attack models.
    original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
    attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
    attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
    seed_everything(args.random_seed)

    # GNNDelete variants encode their loss configuration in the path as well.
    if 'gnndelete' in args.unlearning_model:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model,
            '-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    else:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model,
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    os.makedirs(args.checkpoint_dir, exist_ok=True)

    # Dataset: the directed split produced by prepare_dataset.py.
    with open(os.path.join(args.data_dir, args.dataset, f'd_{args.random_seed}.pkl'), 'rb') as f:
        dataset, data = pickle.load(f)
    print('Directed dataset:', dataset, data)
    if args.gnn not in ['rgcn', 'rgat']:
        args.in_dim = dataset.num_features
    print('Training args', args)
    wandb.init(config=args)

    # Df and Dr: df_size >= 100 is an absolute count, otherwise a percentage
    # of the directed training edges.
    assert args.df != 'none'
    if args.df_size >= 100:     # df_size is number of nodes/edges to be deleted
        df_size = int(args.df_size)
    else:                       # df_size is the ratio
        df_size = int(args.df_size / 100 * data.train_pos_edge_index.shape[1])
    print(f'Original size: {data.train_pos_edge_index.shape[1]:,}')
    print(f'Df size: {df_size:,}')

    # Sample df_size edges from the precomputed candidate mask for args.df.
    df_mask_all = torch.load(os.path.join(args.data_dir, args.dataset, f'df_{args.random_seed}.pt'))[args.df]
    df_nonzero = df_mask_all.nonzero().squeeze()
    idx = torch.randperm(df_nonzero.shape[0])[:df_size]
    df_global_idx = df_nonzero[idx]
    print('Deleting the following edges:', df_global_idx)
    dr_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
    dr_mask[df_global_idx] = False
    df_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
    df_mask[df_global_idx] = True

    # Keep the directed deleted edges for evaluation.
    data.directed_df_edge_index = data.train_pos_edge_index[:, df_mask]
    if args.gnn in ['rgcn', 'rgat']:
        data.directed_df_edge_type = data.train_edge_type[df_mask]

    # Edges in S_Df: the 2-hop enclosing subgraph of the deleted edges.
    _, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
        data.train_pos_edge_index[:, df_mask].flatten().unique(),
        2,
        data.train_pos_edge_index,
        num_nodes=data.num_nodes)
    data.sdf_mask = two_hop_mask

    # Nodes in S_Df: 1-hop and 2-hop node masks.
    _, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
        data.train_pos_edge_index[:, df_mask].flatten().unique(),
        1,
        data.train_pos_edge_index,
        num_nodes=data.num_nodes)
    sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_1hop[one_hop_edge.flatten().unique()] = True
    sdf_node_2hop[two_hop_edge.flatten().unique()] = True
    assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
    assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
    data.sdf_node_1hop_mask = sdf_node_1hop
    data.sdf_node_2hop_mask = sdf_node_2hop

    # To undirected for message passing: relational models get explicit
    # reverse edges/types; plain models use to_undirected with mask tracking.
    assert not is_undirected(data.train_pos_edge_index)
    if args.gnn in ['rgcn', 'rgat']:
        r, c = data.train_pos_edge_index
        rev_edge_index = torch.stack([c, r], dim=0)
        rev_edge_type = data.train_edge_type + args.num_edge_type
        data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
        data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
        if hasattr(data, 'train_mask'):
            data.train_mask = data.train_mask.repeat(2).view(-1)
        two_hop_mask = two_hop_mask.repeat(2).view(-1)
        df_mask = df_mask.repeat(2).view(-1)
        dr_mask = dr_mask.repeat(2).view(-1)
        assert is_undirected(data.edge_index)
    else:
        train_pos_edge_index, [df_mask, two_hop_mask] = to_undirected(data.train_pos_edge_index, [df_mask.int(), two_hop_mask.int()])
        two_hop_mask = two_hop_mask.bool()
        df_mask = df_mask.bool()
        dr_mask = ~df_mask
        data.train_pos_edge_index = train_pos_edge_index
        data.edge_index = train_pos_edge_index
        assert is_undirected(data.train_pos_edge_index)
    print('Undirected dataset:', data)
    data.sdf_mask = two_hop_mask
    data.df_mask = df_mask
    data.dr_mask = dr_mask

    # Model
    model = get_model(args, sdf_node_1hop, sdf_node_2hop, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
    if args.unlearning_model != 'retrain':  # Start from trained GNN model
        # Optionally load the original model's predicted probabilities for
        # the distillation-style losses.
        if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
            logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
            if logits_ori is not None:
                logits_ori = logits_ori.to(device)
        else:
            logits_ori = None
        model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
        # strict=False: the deletion layers are new and absent from the checkpoint.
        model.load_state_dict(model_ckpt['model_state'], strict=False)
    else:  # Initialize a new GNN model
        retrain = None
        logits_ori = None
    model = model.to(device)

    # Optimizer: GNNDelete only trains the deletion ('del') parameters;
    # everything else trains the full model.
    if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
        parameters_to_optimize = [
            {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
        ]
        print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        if 'layerwise' in args.loss_type:
            # One optimizer per deletion layer for layerwise losses.
            optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
            optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
            optimizer = [optimizer1, optimizer2]
        else:
            optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
    else:
        if 'gnndelete' in args.unlearning_model:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        else:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters()])
        optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
    wandb.watch(model, log_freq=100)

    # MI attack models (currently disabled; loading code kept for reference).
    attack_model_all = None
    # attack_model_all = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
    # attack_model_all.load_state_dict(attack_ckpt['model_state'])
    # attack_model_all = attack_model_all.to(device)
    attack_model_sub = None
    # attack_model_sub = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
    # attack_model_sub.load_state_dict(attack_ckpt['model_state'])
    # attack_model_sub = attack_model_sub.to(device)

    # Train
    trainer = get_trainer(args)
    trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)

    # Test: compare against the fully retrained model when its checkpoint exists.
    if args.unlearning_model != 'retrain':
        retrain_path = os.path.join(
            'checkpoint', args.dataset, args.gnn, 'retrain',
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]),
            'model_best.pt')
        if os.path.exists(retrain_path):
            retrain_ckpt = torch.load(retrain_path, map_location=device)
            retrain_args = copy.deepcopy(args)
            retrain_args.unlearning_model = 'retrain'
            retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
            retrain.load_state_dict(retrain_ckpt['model_state'])
            retrain = retrain.to(device)
            retrain.eval()
        else:
            retrain = None
    else:
        retrain = None

    test_results = trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
    print(test_results[-1])
    trainer.save_log()


if __name__ == "__main__":
    main()
| 11,069 | 37.4375 | 147 | py |
GNNDelete | GNNDelete-main/train_gnn.py | import os
import wandb
import pickle
import torch
from torch_geometric.seed import seed_everything
from torch_geometric.utils import to_undirected, is_undirected
from torch_geometric.datasets import RelLinkPredDataset, WordNet18
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.training_args import parse_args
from framework.trainer.base import Trainer
from framework.utils import negative_sampling_kg
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
    """Train the original (pre-unlearning) link-prediction GNN.

    Loads the directed pre-split dataset pickle, symmetrizes the training
    edges for message passing, then trains and tests the full model.
    Checkpoints go to <checkpoint_dir>/<dataset>/<gnn>/original/<seed>/.
    """
    args = parse_args()
    # This script always trains the 'original' (non-unlearned) model.
    args.unlearning_model = 'original'
    args.checkpoint_dir = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, args.unlearning_model, str(args.random_seed))
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    seed_everything(args.random_seed)

    # Dataset: the directed split produced by prepare_dataset.py.
    with open(os.path.join(args.data_dir, args.dataset, f'd_{args.random_seed}.pkl'), 'rb') as f:
        dataset, data = pickle.load(f)
    print('Directed dataset:', dataset, data)
    if args.gnn not in ['rgcn', 'rgat']:
        args.in_dim = dataset.num_features
    wandb.init(config=args)

    # Use proper training data for original and Dr
    if args.gnn in ['rgcn', 'rgat']:
        if not hasattr(data, 'train_mask'):
            data.train_mask = torch.ones(data.edge_index.shape[1], dtype=torch.bool)
        # data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
        # data.edge_index_mask = data.dtrain_mask.repeat(2)
    else:
        data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)

    # To undirected: relational models get explicit reverse edges with
    # shifted relation types; plain models use to_undirected.
    if args.gnn in ['rgcn', 'rgat']:
        r, c = data.train_pos_edge_index
        rev_edge_index = torch.stack([c, r], dim=0)
        rev_edge_type = data.train_edge_type + args.num_edge_type
        data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
        data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
        # data.train_mask = data.train_mask.repeat(2)
        data.dr_mask = torch.ones(data.edge_index.shape[1], dtype=torch.bool)
        assert is_undirected(data.edge_index)
    else:
        train_pos_edge_index = to_undirected(data.train_pos_edge_index)
        data.train_pos_edge_index = train_pos_edge_index
        data.dtrain_mask = torch.ones(data.train_pos_edge_index.shape[1], dtype=torch.bool)
        assert is_undirected(data.train_pos_edge_index)
    print('Undirected dataset:', data)

    # Model
    model = get_model(args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type).to(device)
    wandb.watch(model, log_freq=100)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)#, weight_decay=args.weight_decay)

    # Train
    trainer = get_trainer(args)
    trainer.train(model, data, optimizer, args)

    # Test
    trainer.test(model, data)
    trainer.save_log()


if __name__ == "__main__":
    main()
| 2,977 | 34.035294 | 129 | py |
GNNDelete | GNNDelete-main/prepare_dataset.py | import os
import math
import pickle
import torch
import pandas as pd
import networkx as nx
from tqdm import tqdm
from torch_geometric.seed import seed_everything
import torch_geometric.transforms as T
from torch_geometric.data import Data
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.utils import train_test_split_edges, k_hop_subgraph, negative_sampling, to_undirected, is_undirected, to_networkx
from ogb.linkproppred import PygLinkPropPredDataset
from framework.utils import *
# Root directory for all downloaded / processed datasets.
data_dir = './data'
# Candidate Df sizes: 0.00-0.09, 0.0-0.9, 0-9 (percent of training edges).
df_size = [i / 100 for i in range(10)] + [i / 10 for i in range(10)] + [i for i in range(10)]  # Df_size in percentage
# Random seeds: one independent split is produced per seed.
seeds = [42, 21, 13, 87, 100]
# Slices select which datasets this run actually processes.
graph_datasets = ['Cora', 'PubMed', 'DBLP', 'CS', 'ogbl-citation2', 'ogbl-collab'][4:]
kg_datasets = ['FB15k-237', 'WordNet18', 'WordNet18RR', 'ogbl-biokg'][-1:]
os.makedirs(data_dir, exist_ok=True)
# Number of distinct relation types per knowledge graph.
num_edge_type_mapping = {
    'FB15k-237': 237,
    'WordNet18': 18,
    'WordNet18RR': 11
}
def train_test_split_edges_no_neg_adj_mask(data, val_ratio: float = 0.05, test_ratio: float = 0.1, two_hop_degree=None, kg=False):
    """Split data.edge_index into directed train/val/test positive edges plus
    sampled negatives, without materializing a dense neg_adj_mask.

    Parameters:
        data: a PyG Data object; its edge_index/edge_attr/edge_type are consumed.
        val_ratio, test_ratio: fractions of (directed) edges for val/test.
        two_hop_degree: optional per-edge degree; when given, low-degree edges
            are placed first so val/test come from low-degree edges.
        kg: knowledge-graph mode — keeps both directions and tracks edge_type.

    Returns:
        The same *data* object with train/val/test pos/neg edge fields set.

    Bug fixes vs. the original:
      * `perm` was applied to row/col but never to edge_attr / edge_type, so
        attributes and KG relation types were misaligned with their edges.
      * The branch `data.train_pos_edge_index, data.train_pos_edge_attr = out`
        referenced an undefined `out` (its producing call was commented out)
        and raised NameError whenever edge_attr existed; the train slice of
        edge_attr is stored directly instead (the index stays directed, as
        the assert below requires).
    """
    num_nodes = data.num_nodes
    row, col = data.edge_index
    edge_attr = data.edge_attr
    if kg:
        edge_type = data.edge_type
    data.edge_index = data.edge_attr = data.edge_weight = data.edge_year = data.edge_type = None

    if not kg:
        # Return upper triangular portion (one direction per undirected edge).
        mask = row < col
        row, col = row[mask], col[mask]
        if edge_attr is not None:
            edge_attr = edge_attr[mask]

    n_v = int(math.floor(val_ratio * row.size(0)))
    n_t = int(math.floor(test_ratio * row.size(0)))

    if two_hop_degree is not None:  # Use low degree edges for test sets
        low_degree_mask = two_hop_degree < 50
        low = low_degree_mask.nonzero().squeeze()
        high = (~low_degree_mask).nonzero().squeeze()
        low = low[torch.randperm(low.size(0))]
        high = high[torch.randperm(high.size(0))]
        perm = torch.cat([low, high])
    else:
        perm = torch.randperm(row.size(0))

    row = row[perm]
    col = col[perm]
    # Keep attributes / relation types aligned with the permuted edges.
    if edge_attr is not None:
        edge_attr = edge_attr[perm]
    if kg:
        edge_type = edge_type[perm]

    # Train
    r, c = row[n_v + n_t:], col[n_v + n_t:]
    if kg:
        pos_edge_index = torch.stack([r, c], dim=0)
        train_edge_type = edge_type[n_v + n_t:]
        # edge_index / edge_type are used for message passing;
        # train_pos_edge_index / train_edge_type for decoding.
        data.edge_index = pos_edge_index
        data.edge_type = train_edge_type
        data.train_pos_edge_index = torch.stack([r, c], dim=0)
        data.train_edge_type = train_edge_type
    else:
        data.train_pos_edge_index = torch.stack([r, c], dim=0)
        if edge_attr is not None:
            data.train_pos_edge_attr = edge_attr[n_v + n_t:]
        # The training index deliberately stays directed; callers symmetrize
        # it themselves.
        assert not is_undirected(data.train_pos_edge_index)

    # Test
    r, c = row[:n_t], col[:n_t]
    data.test_pos_edge_index = torch.stack([r, c], dim=0)
    if kg:
        data.test_edge_type = edge_type[:n_t]
        neg_edge_index = negative_sampling_kg(
            edge_index=data.test_pos_edge_index,
            edge_type=data.test_edge_type)
    else:
        neg_edge_index = negative_sampling(
            edge_index=data.test_pos_edge_index,
            num_nodes=data.num_nodes,
            num_neg_samples=data.test_pos_edge_index.shape[1])
    data.test_neg_edge_index = neg_edge_index

    # Valid
    r, c = row[n_t:n_t + n_v], col[n_t:n_t + n_v]
    data.val_pos_edge_index = torch.stack([r, c], dim=0)
    if kg:
        data.val_edge_type = edge_type[n_t:n_t + n_v]
        neg_edge_index = negative_sampling_kg(
            edge_index=data.val_pos_edge_index,
            edge_type=data.val_edge_type)
    else:
        neg_edge_index = negative_sampling(
            edge_index=data.val_pos_edge_index,
            num_nodes=data.num_nodes,
            num_neg_samples=data.val_pos_edge_index.shape[1])
    data.val_neg_edge_index = neg_edge_index

    return data
def process_graph():
    """Preprocess each homogeneous-graph dataset in `graph_datasets`.

    For every dataset: compute per-edge two-hop degrees, then for every random
    seed build a train/val/test edge split (saved to `d_{seed}.pkl`) and the
    in/out-of-2-hop masks used later to sample Df (saved to `df_{seed}.pt`).
    Relies on module-level globals: graph_datasets, data_dir, seeds.
    """
    for d in graph_datasets:
        # Instantiate the PyG dataset object for this name.
        if d in ['Cora', 'PubMed', 'DBLP']:
            dataset = CitationFull(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
        elif d in ['CS', 'Physics']:
            dataset = Coauthor(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
        elif d in ['Flickr']:
            dataset = Flickr(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
        elif 'ogbl' in d:
            dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
        else:
            raise NotImplementedError
        print('Processing:', d)
        print(dataset)
        data = dataset[0]
        data.train_mask = data.val_mask = data.test_mask = None
        graph = to_networkx(data)
        # Get two hop degree for all nodes
        node_to_neighbors = {}
        for n in tqdm(graph.nodes(), desc='Two hop neighbors'):
            neighbor_1 = set(graph.neighbors(n))
            neighbor_2 = sum([list(graph.neighbors(i)) for i in neighbor_1], [])
            neighbor_2 = set(neighbor_2)
            neighbor = neighbor_1 | neighbor_2
            node_to_neighbors[n] = neighbor
        two_hop_degree = []
        # Keep only one direction (row < col) so each undirected edge is counted once.
        row, col = data.edge_index
        mask = row < col
        row, col = row[mask], col[mask]
        for r, c in tqdm(zip(row, col), total=len(row)):
            neighbor_row = node_to_neighbors[r.item()]
            neighbor_col = node_to_neighbors[c.item()]
            neighbor = neighbor_row | neighbor_col
            num = len(neighbor)
            two_hop_degree.append(num)
        two_hop_degree = torch.tensor(two_hop_degree)
        for s in seeds:
            seed_everything(s)
            # D
            data = dataset[0]
            # two_hop_degree is only consumed by the split function for ogbl datasets.
            if 'ogbl' in d:
                data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05, two_hop_degree=two_hop_degree)
            else:
                data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05)
            print(s, data)
            with open(os.path.join(data_dir, d, f'd_{s}.pkl'), 'wb') as f:
                pickle.dump((dataset, data), f)
            # Two ways to sample Df from the training set
            ## 1. Df is within 2 hop local enclosing subgraph of Dtest
            ## 2. Df is outside of 2 hop local enclosing subgraph of Dtest
            # All the candidate edges (train edges)
            # graph = to_networkx(Data(edge_index=data.train_pos_edge_index, x=data.x))
            # Get the 2 hop local enclosing subgraph for all test edges
            _, local_edges, _, mask = k_hop_subgraph(
                data.test_pos_edge_index.flatten().unique(),
                2,
                data.train_pos_edge_index,
                num_nodes=dataset[0].num_nodes)
            # `mask` flags training edges inside the 2-hop subgraph of test nodes.
            distant_edges = data.train_pos_edge_index[:, ~mask]
            print('Number of edges. Local: ', local_edges.shape[1], 'Distant:', distant_edges.shape[1])
            in_mask = mask
            out_mask = ~mask
            # df_in_mask = torch.zeros_like(mask)
            # df_out_mask = torch.zeros_like(mask)
            # df_in_all_idx = in_mask.nonzero().squeeze()
            # df_out_all_idx = out_mask.nonzero().squeeze()
            # df_in_selected_idx = df_in_all_idx[torch.randperm(df_in_all_idx.shape[0])[:df_size]]
            # df_out_selected_idx = df_out_all_idx[torch.randperm(df_out_all_idx.shape[0])[:df_size]]
            # df_in_mask[df_in_selected_idx] = True
            # df_out_mask[df_out_selected_idx] = True
            # assert (in_mask & out_mask).sum() == 0
            # assert (df_in_mask & df_out_mask).sum() == 0
            # local_edges = set()
            # for i in range(data.test_pos_edge_index.shape[1]):
            #     edge = data.test_pos_edge_index[:, i].tolist()
            #     subgraph = get_enclosing_subgraph(graph, edge)
            #     local_edges = local_edges | set(subgraph[2])
            # distant_edges = graph.edges() - local_edges
            # print('aaaaaaa', len(local_edges), len(distant_edges))
            # local_edges = torch.tensor(sorted(list([i for i in local_edges if i[0] < i[1]])))
            # distant_edges = torch.tensor(sorted(list([i for i in distant_edges if i[0] < i[1]])))
            # df_in = torch.randperm(local_edges.shape[1])[:df_size]
            # df_out = torch.randperm(distant_edges.shape[1])[:df_size]
            # df_in = local_edges[:, df_in]
            # df_out = distant_edges[:, df_out]
            # df_in_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
            # df_out_mask = torch.zeros(data.train_pos_edge_index.shape[1], dtype=torch.bool)
            # for row in df_in:
            #     i = (data.train_pos_edge_index.T == row).all(axis=1).nonzero()
            #     df_in_mask[i] = True
            # for row in df_out:
            #     i = (data.train_pos_edge_index.T == row).all(axis=1).nonzero()
            #     df_out_mask[i] = True
            torch.save(
                {'out': out_mask, 'in': in_mask},
                os.path.join(data_dir, d, f'df_{s}.pt')
            )
def process_kg():
    """Preprocess each knowledge-graph dataset in `kg_datasets`.

    Builds a Data object with train/val/test positive edges, edge types and
    sampled negatives, then for each seed saves the data (`d_{seed}.pkl`) and
    the in/out-of-2-hop Df masks (`df_{seed}.pt`).
    Relies on module-level globals: kg_datasets, data_dir, seeds.
    """
    for d in kg_datasets:
        # Create the dataset to calculate node degrees
        if d in ['FB15k-237']:
            dataset = RelLinkPredDataset(os.path.join(data_dir, d), d, transform=T.NormalizeFeatures())
            data = dataset[0]
            data.x = torch.arange(data.num_nodes)
            edge_index = torch.cat([data.train_edge_index, data.valid_edge_index, data.test_edge_index], dim=1)
            edge_type = torch.cat([data.train_edge_type, data.valid_edge_type, data.test_edge_type])
            data = Data(edge_index=edge_index, edge_type=edge_type)
        elif d in ['WordNet18RR']:
            # NOTE(review): this branch never defines train/test_pos_edge_index,
            # yet both are used below — presumably the commented-out split call
            # was meant to provide them. Verify before enabling this dataset.
            dataset = WordNet18RR(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
            data = dataset[0]
            data.x = torch.arange(data.num_nodes)
            data.train_mask = data.val_mask = data.test_mask = None
        elif d in ['WordNet18']:
            dataset = WordNet18(os.path.join(data_dir, d), transform=T.NormalizeFeatures())
            data = dataset[0]
            data.x = torch.arange(data.num_nodes)
            # Use original split
            data.train_pos_edge_index = data.edge_index[:, data.train_mask]
            data.train_edge_type = data.edge_type[data.train_mask]
            data.val_pos_edge_index = data.edge_index[:, data.val_mask]
            data.val_edge_type = data.edge_type[data.val_mask]
            data.val_neg_edge_index = negative_sampling_kg(data.val_pos_edge_index, data.val_edge_type)
            data.test_pos_edge_index = data.edge_index[:, data.test_mask]
            data.test_edge_type = data.edge_type[data.test_mask]
            data.test_neg_edge_index = negative_sampling_kg(data.test_pos_edge_index, data.test_edge_type)
        elif 'ogbl' in d:
            dataset = PygLinkPropPredDataset(root=os.path.join(data_dir, d), name=d)
            split_edge = dataset.get_edge_split()
            train_edge, valid_edge, test_edge = split_edge["train"], split_edge["valid"], split_edge["test"]
            # Map each typed node id onto a single global id range.
            entity_dict = dict()
            cur_idx = 0
            for key in dataset[0]['num_nodes_dict']:
                entity_dict[key] = (cur_idx, cur_idx + dataset[0]['num_nodes_dict'][key])
                cur_idx += dataset[0]['num_nodes_dict'][key]
            nentity = sum(dataset[0]['num_nodes_dict'].values())
            # Pre-sampled negatives are popped so the DataFrames below stay rectangular.
            valid_head_neg = valid_edge.pop('head_neg')
            valid_tail_neg = valid_edge.pop('tail_neg')
            test_head_neg = test_edge.pop('head_neg')
            test_tail_neg = test_edge.pop('tail_neg')
            train = pd.DataFrame(train_edge)
            valid = pd.DataFrame(valid_edge)
            test = pd.DataFrame(test_edge)
            # Convert to global index
            train['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(train['head'], train['head_type'])]
            train['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(train['tail'], train['tail_type'])]
            valid['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(valid['head'], valid['head_type'])]
            valid['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(valid['tail'], valid['tail_type'])]
            test['head'] = [idx + entity_dict[tp][0] for idx, tp in zip(test['head'], test['head_type'])]
            test['tail'] = [idx + entity_dict[tp][0] for idx, tp in zip(test['tail'], test['tail_type'])]
            valid_pos_edge_index = torch.tensor([valid['head'], valid['tail']])
            valid_edge_type = torch.tensor(valid.relation)
            # Negatives reuse the true head with the first pre-sampled corrupt tail.
            valid_neg_edge_index = torch.stack([valid_pos_edge_index[0], valid_tail_neg[:, 0]])
            test_pos_edge_index = torch.tensor([test['head'], test['tail']])
            test_edge_type = torch.tensor(test.relation)
            test_neg_edge_index = torch.stack([test_pos_edge_index[0], test_tail_neg[:, 0]])
            # Deduplicate undirected (same-type) edges: keep only head < tail.
            train_directed = train[train.head_type != train.tail_type]
            train_undirected = train[train.head_type == train.tail_type]
            train_undirected_uni = train_undirected[train_undirected['head'] < train_undirected['tail']]
            train_uni = pd.concat([train_directed, train_undirected_uni], ignore_index=True)
            train_pos_edge_index = torch.tensor([train_uni['head'], train_uni['tail']])
            train_edge_type = torch.tensor(train_uni.relation)
            # Add reverse edges with shifted relation ids for message passing.
            # NOTE(review): 51 looks like the hard-coded relation count of this
            # ogbl dataset — confirm it matches train_edge_type.max() + 1.
            r, c = train_pos_edge_index
            rev_edge_index = torch.stack([c, r])
            rev_edge_type = train_edge_type + 51
            edge_index = torch.cat([train_pos_edge_index, rev_edge_index], dim=1)
            edge_type = torch.cat([train_edge_type, rev_edge_type], dim=0)
            data = Data(
                x=torch.arange(nentity), edge_index=edge_index, edge_type=edge_type,
                train_pos_edge_index=train_pos_edge_index, train_edge_type=train_edge_type,
                val_pos_edge_index=valid_pos_edge_index, val_edge_type=valid_edge_type, val_neg_edge_index=valid_neg_edge_index,
                test_pos_edge_index=test_pos_edge_index, test_edge_type=test_edge_type, test_neg_edge_index=test_neg_edge_index)
        else:
            raise NotImplementedError
        print('Processing:', d)
        print(dataset)
        for s in seeds:
            seed_everything(s)
            # D
            # NOTE(review): the split call is commented out, so every seed dumps
            # the same `data` object built above.
            # data = train_test_split_edges_no_neg_adj_mask(data, test_ratio=0.05, two_hop_degree=two_hop_degree, kg=True)
            print(s, data)
            with open(os.path.join(data_dir, d, f'd_{s}.pkl'), 'wb') as f:
                pickle.dump((dataset, data), f)
            # Two ways to sample Df from the training set
            ## 1. Df is within 2 hop local enclosing subgraph of Dtest
            ## 2. Df is outside of 2 hop local enclosing subgraph of Dtest
            # All the candidate edges (train edges)
            # graph = to_networkx(Data(edge_index=data.train_pos_edge_index, x=data.x))
            # Get the 2 hop local enclosing subgraph for all test edges
            _, local_edges, _, mask = k_hop_subgraph(
                data.test_pos_edge_index.flatten().unique(),
                2,
                data.train_pos_edge_index,
                num_nodes=dataset[0].num_nodes)
            distant_edges = data.train_pos_edge_index[:, ~mask]
            print('Number of edges. Local: ', local_edges.shape[1], 'Distant:', distant_edges.shape[1])
            in_mask = mask
            out_mask = ~mask
            torch.save(
                {'out': out_mask, 'in': in_mask},
                os.path.join(data_dir, d, f'df_{s}.pt')
            )
def main():
    """Entry point: preprocess the homogeneous-graph datasets.

    KG preprocessing is currently disabled (call left commented out).
    """
    process_graph()
    # process_kg()
if __name__ == "__main__":
main()
| 16,717 | 39.97549 | 134 | py |
GNNDelete | GNNDelete-main/delete_node.py | import os
import copy
import json
import wandb
import pickle
import argparse
import torch
import torch.nn as nn
from torch_geometric.utils import to_undirected, to_networkx, k_hop_subgraph, is_undirected
from torch_geometric.data import Data
import torch_geometric.transforms as T
from torch_geometric.datasets import CitationFull, Coauthor, Flickr, RelLinkPredDataset, WordNet18, WordNet18RR
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from torch_geometric.seed import seed_everything
from framework import get_model, get_trainer
from framework.models.gcn import GCN
from framework.models.deletion import GCNDelete
from framework.training_args import parse_args
from framework.utils import *
from framework.trainer.gnndelete_nodeemb import GNNDeleteNodeClassificationTrainer
from train_mi import MLPAttacker
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.autograd.set_detect_anomaly(True)
def to_directed(edge_index):
    """Collapse an undirected (2, E) edge_index into one direction.

    Keeps only the edges with row < col (one copy per undirected pair;
    self-loops are dropped) and returns them as a (2, E') edge index.

    Bug fix: the original `torch.cat` produced a flat 1-D tensor of length
    2*E'; every other edge index in this codebase is built with
    `torch.stack([row, col], dim=0)`, so return that (2, E') shape here too.
    """
    row, col = edge_index
    mask = row < col
    return torch.stack([row[mask], col[mask]], dim=0)
def main():
    """Node-deletion unlearning pipeline on DBLP.

    Steps: load/split the dataset, pick `df_size` nodes to delete, derive the
    deleted-edge (Df) and retained-edge (Dr) masks plus the 1/2-hop S_Df
    neighbourhood masks, load the pretrained GNN, run the chosen unlearning
    trainer, and evaluate (optionally against a retrained reference model).
    """
    args = parse_args()
    # NOTE(review): these two assignments override whatever the CLI supplied.
    args.checkpoint_dir = 'checkpoint_node'
    args.dataset = 'DBLP'
    original_path = os.path.join(args.checkpoint_dir, args.dataset, args.gnn, 'original', str(args.random_seed))
    attack_path_all = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_all', str(args.random_seed))
    attack_path_sub = os.path.join(args.checkpoint_dir, args.dataset, 'member_infer_sub', str(args.random_seed))
    seed_everything(args.random_seed)
    # Checkpoint directory encodes the unlearning configuration.
    if 'gnndelete' in args.unlearning_model:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
            '-'.join([str(i) for i in [args.loss_fct, args.loss_type, args.alpha, args.neg_sample_random]]),
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    else:
        args.checkpoint_dir = os.path.join(
            args.checkpoint_dir, args.dataset, args.gnn, f'{args.unlearning_model}-node_deletion',
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    # Dataset
    dataset = CitationFull(os.path.join(args.data_dir, args.dataset), args.dataset, transform=T.NormalizeFeatures())
    data = dataset[0]
    print('Original data', data)
    split = T.RandomNodeSplit()
    data = split(data)
    assert is_undirected(data.edge_index)
    print('Split data', data)
    args.in_dim = data.x.shape[1]
    args.out_dim = dataset.num_classes
    wandb.init(config=args)
    # Df and Dr
    if args.df_size >= 100:   # df_size is number of nodes/edges to be deleted
        df_size = int(args.df_size)
    else:                     # df_size is the ratio
        df_size = int(args.df_size / 100 * data.train_pos_edge_index.shape[1])
    print(f'Original size: {data.num_nodes:,}')
    print(f'Df size: {df_size:,}')
    # Delete nodes
    df_nodes = torch.randperm(data.num_nodes)[:df_size]
    global_node_mask = torch.ones(data.num_nodes, dtype=torch.bool)
    global_node_mask[df_nodes] = False
    dr_mask_node = global_node_mask
    df_mask_node = ~global_node_mask
    assert df_mask_node.sum() == df_size
    # Delete edges associated with deleted nodes from training set
    # NOTE(review): the logical_or_ combines two identical comparisons — the
    # second argument was presumably meant to differ; verify intent.
    res = [torch.eq(data.edge_index, aelem).logical_or_(torch.eq(data.edge_index, aelem)) for aelem in df_nodes]
    df_mask_edge = torch.any(torch.stack(res, dim=0), dim = 0)
    df_mask_edge = df_mask_edge.sum(0).bool()
    dr_mask_edge = ~df_mask_edge
    df_edge = data.edge_index[:, df_mask_edge]
    data.directed_df_edge_index = to_directed(df_edge)
    # print(df_edge.shape, directed_df_edge_index.shape)
    # raise
    print('Deleting the following nodes:', df_nodes)
    # # Delete edges associated with deleted nodes from valid and test set
    # res = [torch.eq(data.val_pos_edge_index, aelem).logical_or_(torch.eq(data.val_pos_edge_index, aelem)) for aelem in df_nodes]
    # mask = torch.any(torch.stack(res, dim=0), dim = 0)
    # mask = mask.sum(0).bool()
    # mask = ~mask
    # data.val_pos_edge_index = data.val_pos_edge_index[:, mask]
    # data.val_neg_edge_index = data.val_neg_edge_index[:, :data.val_pos_edge_index.shape[1]]
    # res = [torch.eq(data.test_pos_edge_index, aelem).logical_or_(torch.eq(data.test_pos_edge_index, aelem)) for aelem in df_nodes]
    # mask = torch.any(torch.stack(res, dim=0), dim = 0)
    # mask = mask.sum(0).bool()
    # mask = ~mask
    # data.test_pos_edge_index = data.test_pos_edge_index[:, mask]
    # data.test_neg_edge_index = data.test_neg_edge_index[:, :data.test_pos_edge_index.shape[1]]
    # For testing
    # data.directed_df_edge_index = data.train_pos_edge_index[:, df_mask_edge]
    # if args.gnn in ['rgcn', 'rgat']:
    #     data.directed_df_edge_type = data.train_edge_type[df_mask]
    # Edges in S_Df
    _, two_hop_edge, _, two_hop_mask = k_hop_subgraph(
        data.edge_index[:, df_mask_edge].flatten().unique(),
        2,
        data.edge_index,
        num_nodes=data.num_nodes)
    # Nodes in S_Df
    _, one_hop_edge, _, one_hop_mask = k_hop_subgraph(
        data.edge_index[:, df_mask_edge].flatten().unique(),
        1,
        data.edge_index,
        num_nodes=data.num_nodes)
    sdf_node_1hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_2hop = torch.zeros(data.num_nodes, dtype=torch.bool)
    sdf_node_1hop[one_hop_edge.flatten().unique()] = True
    sdf_node_2hop[two_hop_edge.flatten().unique()] = True
    assert sdf_node_1hop.sum() == len(one_hop_edge.flatten().unique())
    assert sdf_node_2hop.sum() == len(two_hop_edge.flatten().unique())
    data.sdf_node_1hop_mask = sdf_node_1hop
    data.sdf_node_2hop_mask = sdf_node_2hop
    # To undirected for message passing
    # print(is_undir0.0175ected(data.train_pos_edge_index), data.train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
    # assert not is_undirected(data.edge_index)
    print(is_undirected(data.edge_index))
    if args.gnn in ['rgcn', 'rgat']:
        # NOTE(review): this branch reads df_mask/dr_mask, which are never
        # defined in this function (only df_mask_edge/dr_mask_edge exist) —
        # it would raise NameError if reached. Confirm before using rgcn/rgat.
        r, c = data.train_pos_edge_index
        rev_edge_index = torch.stack([c, r], dim=0)
        rev_edge_type = data.train_edge_type + args.num_edge_type
        data.edge_index = torch.cat((data.train_pos_edge_index, rev_edge_index), dim=1)
        data.edge_type = torch.cat([data.train_edge_type, rev_edge_type], dim=0)
        # data.train_mask = data.train_mask.repeat(2)
        two_hop_mask = two_hop_mask.repeat(2).view(-1)
        df_mask = df_mask.repeat(2).view(-1)
        dr_mask = dr_mask.repeat(2).view(-1)
        assert is_undirected(data.edge_index)
    else:
        # train_pos_edge_index, [df_mask, two_hop_mask] = to_undirected(data.train_pos_edge_index, [df_mask.int(), two_hop_mask.int()])
        two_hop_mask = two_hop_mask.bool()
        df_mask_edge = df_mask_edge.bool()
        dr_mask_edge = ~df_mask_edge
        # data.train_pos_edge_index = train_pos_edge_index
        # assert is_undirected(data.train_pos_edge_index)
    print('Undirected dataset:', data)
    # print(is_undirected(train_pos_edge_index), train_pos_edge_index.shape, two_hop_mask.shape, df_mask.shape, two_hop_mask.shape)
    data.sdf_mask = two_hop_mask
    data.df_mask = df_mask_edge
    data.dr_mask = dr_mask_edge
    data.dtrain_mask = dr_mask_edge
    # print(is_undirected(data.train_pos_edge_index), data.train_pos_edge_index.shape, data.two_hop_mask.shape, data.df_mask.shape, data.two_hop_mask.shape)
    # raise
    # Model
    model = GCNDelete(args)
    # model = get_model(args, sdf_node_1hop, sdf_node_2hop, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
    if args.unlearning_model != 'retrain':  # Start from trained GNN model
        if os.path.exists(os.path.join(original_path, 'pred_proba.pt')):
            logits_ori = torch.load(os.path.join(original_path, 'pred_proba.pt'))
            if logits_ori is not None:
                logits_ori = logits_ori.to(device)
        else:
            logits_ori = None
        model_ckpt = torch.load(os.path.join(original_path, 'model_best.pt'), map_location=device)
        model.load_state_dict(model_ckpt['model_state'], strict=False)
    else:  # Initialize a new GNN model
        retrain = None
        logits_ori = None
    model = model.to(device)
    # Only the deletion-operator parameters ('del' in name) are optimized for
    # gnndelete variants; everything else stays frozen.
    if 'gnndelete' in args.unlearning_model and 'nodeemb' in args.unlearning_model:
        parameters_to_optimize = [
            {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
        ]
        print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        if 'layerwise' in args.loss_type:
            optimizer1 = torch.optim.Adam(model.deletion1.parameters(), lr=args.lr)
            optimizer2 = torch.optim.Adam(model.deletion2.parameters(), lr=args.lr)
            optimizer = [optimizer1, optimizer2]
        else:
            optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)
    else:
        if 'gnndelete' in args.unlearning_model:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters() if 'del' in n], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters() if 'del' in n])
        else:
            parameters_to_optimize = [
                {'params': [p for n, p in model.named_parameters()], 'weight_decay': 0.0}
            ]
            print('parameters_to_optimize', [n for n, p in model.named_parameters()])
        optimizer = torch.optim.Adam(parameters_to_optimize, lr=args.lr)#, weight_decay=args.weight_decay)
    wandb.watch(model, log_freq=100)
    # MI attack model
    attack_model_all = None
    # attack_model_all = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_all, 'attack_model_best.pt'))
    # attack_model_all.load_state_dict(attack_ckpt['model_state'])
    # attack_model_all = attack_model_all.to(device)
    attack_model_sub = None
    # attack_model_sub = MLPAttacker(args)
    # attack_ckpt = torch.load(os.path.join(attack_path_sub, 'attack_model_best.pt'))
    # attack_model_sub.load_state_dict(attack_ckpt['model_state'])
    # attack_model_sub = attack_model_sub.to(device)
    # Train
    trainer = GNNDeleteNodeClassificationTrainer(args)
    trainer.train(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
    # Test
    if args.unlearning_model != 'retrain':
        retrain_path = os.path.join(
            'checkpoint', args.dataset, args.gnn, 'retrain',
            '-'.join([str(i) for i in [args.df, args.df_size, args.random_seed]]))
        retrain_ckpt = torch.load(os.path.join(retrain_path, 'model_best.pt'), map_location=device)
        retrain_args = copy.deepcopy(args)
        retrain_args.unlearning_model = 'retrain'
        retrain = get_model(retrain_args, num_nodes=data.num_nodes, num_edge_type=args.num_edge_type)
        retrain.load_state_dict(retrain_ckpt['model_state'])
        retrain = retrain.to(device)
        retrain.eval()
    else:
        retrain = None
    trainer.test(model, data, model_retrain=retrain, attack_model_all=attack_model_all, attack_model_sub=attack_model_sub)
    trainer.save_log()
if __name__ == "__main__":
main()
| 11,453 | 40.80292 | 156 | py |
GNNDelete | GNNDelete-main/framework/data_loader.py | import os
import torch
from torch_geometric.data import Data, GraphSAINTRandomWalkSampler
def load_dict(filename):
    '''Load entity and relation to id mapping'''
    with open(filename, 'r') as f:
        pairs = (line.strip().split('\t') for line in f)
        return {fields[0]: fields[1] for fields in pairs}
def load_edges(filename):
    """Return every line of a tab-separated file as a list of its fields."""
    with open(filename, 'r') as f:
        rows = [line.strip().split('\t') for line in f]
    return rows
def generate_true_dict(all_triples):
    """Build true-head and true-tail lookup tables from (h, r, t) triples.

    Returns:
        heads: dict mapping (relation, tail) -> list of true head entities.
        tails: dict mapping (head, relation) -> list of true tail entities.
    """
    heads = {}
    tails = {}
    for h, r, t in all_triples:
        heads.setdefault((r, t), []).append(h)
        tails.setdefault((h, r), []).append(t)
    return heads, tails
def get_loader(args, delete=None):
    """Build a GraphSAINT random-walk loader for a KG link-prediction dataset.

    Reads train/valid/test triples from `./data/{args.dataset}`, adds reverse
    edges, lays edges out as [train | valid | test] with boolean split masks,
    and optionally masks out deleted train edges (both directions).

    Args:
        args: namespace providing dataset, in_dim, batch_size, walk_length,
            num_steps.
        delete: optional list of train-edge indices (into the forward half of
            the train edges) to remove. Defaults to no deletion.
            Fix: the original used a mutable default argument (`delete=[]`).

    Returns:
        (dataloader, valid, test, true_triples, num_nodes, num_edges,
        num_edge_type)
    """
    if delete is None:
        delete = []
    prefix = os.path.join('./data', args.dataset)
    # Edges: each split is a list of (head, relation, tail) integer triples.
    train = load_edges(os.path.join(prefix, 'train.txt'))
    valid = load_edges(os.path.join(prefix, 'valid.txt'))
    test = load_edges(os.path.join(prefix, 'test.txt'))
    train = [(int(i[0]), int(i[1]), int(i[2])) for i in train]
    valid = [(int(i[0]), int(i[1]), int(i[2])) for i in valid]
    test = [(int(i[0]), int(i[1]), int(i[2])) for i in test]
    # Append reverse edges so message passing is bidirectional.
    train_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in train]
    valid_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in valid]
    test_rev = [(int(i[2]), int(i[1]), int(i[0])) for i in test]
    train = train + train_rev
    valid = valid + valid_rev
    test = test + test_rev
    all_edge = train + valid + test
    true_triples = generate_true_dict(all_edge)
    edge = torch.tensor([(int(i[0]), int(i[2])) for i in all_edge], dtype=torch.long).t()
    edge_type = torch.tensor([int(i[1]) for i in all_edge], dtype=torch.long)#.view(-1, 1)
    # Masks: edges are concatenated as [train | valid | test].
    train_size = len(train)
    valid_size = len(valid)
    test_size = len(test)
    total_size = train_size + valid_size + test_size
    train_mask = torch.zeros((total_size,)).bool()
    train_mask[:train_size] = True
    valid_mask = torch.zeros((total_size,)).bool()
    valid_mask[train_size:train_size + valid_size] = True
    test_mask = torch.zeros((total_size,)).bool()
    test_mask[-test_size:] = True
    # Graph size
    num_nodes = edge.flatten().unique().shape[0]
    num_edges = edge.shape[1]
    num_edge_type = edge_type.unique().shape[0]
    # Random node features (the KG provides none of its own).
    x = torch.rand((num_nodes, args.in_dim))
    # Delete edges: mask out both the forward and the reverse copy.
    if len(delete) > 0:
        delete_idx = torch.tensor(delete, dtype=torch.long)
        num_train_edges = train_size // 2
        train_mask[delete_idx] = False
        train_mask[delete_idx + num_train_edges] = False
        train_size -= 2 * len(delete)
    node_id = torch.arange(num_nodes)
    dataset = Data(
        edge_index=edge, edge_type=edge_type, x=x, node_id=node_id,
        train_mask=train_mask, valid_mask=valid_mask, test_mask=test_mask)
    dataloader = GraphSAINTRandomWalkSampler(
        dataset, batch_size=args.batch_size, walk_length=args.walk_length, num_steps=args.num_steps)
    print(f'Dataset: {args.dataset}, Num nodes: {num_nodes}, Num edges: {num_edges//2}, Num relation types: {num_edge_type}')
    print(f'Train edges: {train_size//2}, Valid edges: {valid_size//2}, Test edges: {test_size//2}')
    return dataloader, valid, test, true_triples, num_nodes, num_edges, num_edge_type
| 3,344 | 32.45 | 125 | py |
GNNDelete | GNNDelete-main/framework/utils.py | import numpy as np
import torch
import networkx as nx
def get_node_edge(graph):
    """Return the node with the largest degree (last after ascending sort)."""
    by_degree = sorted(graph.degree, key=lambda pair: pair[1])
    return by_degree[-1][0]
def h_hop_neighbor(G, node, h):
    """Nodes whose Dijkstra shortest-path distance from `node` is exactly h."""
    distances = nx.single_source_dijkstra_path_length(G, node)
    return [target for target, length in distances.items() if length == h]
def get_enclosing_subgraph(graph, edge_to_delete):
    """Edges of the 1- and 2-hop enclosing subgraph around an edge.

    Returns a dict: hop 0 is the edge itself; hop h is the edge list of the
    subgraph induced by all nodes within h hops of either endpoint.
    """
    s, t = edge_to_delete
    subgraph = {0: [edge_to_delete]}
    fringe_s, fringe_t = [], []
    for hop in range(1, 3):
        fringe_s += h_hop_neighbor(graph, s, hop)
        fringe_t += h_hop_neighbor(graph, t, hop)
        covered = fringe_s + fringe_t + [s, t]
        subgraph[hop] = list(graph.subgraph(covered).edges())
    return subgraph
@torch.no_grad()
def get_link_labels(pos_edge_index, neg_edge_index):
    """Binary link labels: 1.0 for positive edges, 0.0 for negatives."""
    device_ = pos_edge_index.device
    return torch.cat([
        torch.ones(pos_edge_index.size(1), dtype=torch.float, device=device_),
        torch.zeros(neg_edge_index.size(1), dtype=torch.float, device=device_),
    ])
@torch.no_grad()
def get_link_labels_kg(pos_edge_index, neg_edge_index):
    """Binary link labels for KG edges: 1.0 for positives, 0.0 for negatives."""
    num_pos = pos_edge_index.size(1)
    total = num_pos + neg_edge_index.size(1)
    labels = torch.zeros(total, dtype=torch.float, device=pos_edge_index.device)
    labels[:num_pos] = 1.
    return labels
@torch.no_grad()
def negative_sampling_kg(edge_index, edge_type):
    '''Generate negative samples but keep the node type the same'''
    # Corrupt heads only: within each relation, permute the head entities so
    # the head of a negative still comes from that relation's head pool.
    corrupted = edge_index.clone()
    for rel in edge_type.unique():
        sel = edge_type == rel
        heads = corrupted[0, sel]
        corrupted[0, sel] = heads[torch.randperm(heads.shape[0])]
    return corrupted
| 1,852 | 30.40678 | 81 | py |
GNNDelete | GNNDelete-main/framework/evaluation.py | import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score
from .utils import get_link_labels
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@torch.no_grad()
def eval_lp(model, stage, data=None, loader=None):
    """Evaluate link prediction for `stage` ('val'/'test'): returns (BCE loss, AUC, AUP).

    Exactly one of `data` (full-batch) or `loader` (mini-batch) should be given.
    NOTE(review): the mini-batch branch looks unfinished — it references
    `model1`/`model2` and `pos_edge_index`/`neg_edge_index` which are not
    defined there, and leaves `logits`/`label` as Python lists that the loss
    call below cannot consume. Only the full-batch path appears usable.
    """
    model.eval()
    # For full batch
    if data is not None:
        pos_edge_index = data[f'{stage}_pos_edge_index']
        neg_edge_index = data[f'{stage}_neg_edge_index']
        # Encode on the retained training edges when a Dr mask is present.
        if hasattr(data, 'dtrain_mask') and data.dtrain_mask is not None:
            embedding = model(data.x.to(device), data.train_pos_edge_index[:, data.dtrain_mask].to(device))
        else:
            embedding = model(data.x.to(device), data.train_pos_edge_index.to(device))
        logits = model.decode(embedding, pos_edge_index, neg_edge_index).sigmoid()
        label = get_link_labels(pos_edge_index, neg_edge_index)
    # For mini batch
    if loader is not None:
        logits = []
        label = []
        for batch in loader:
            edge_index = batch.edge_index.to(device)
            if hasattr(batch, 'edge_type'):
                edge_type = batch.edge_type.to(device)
                embedding1 = model1(edge_index, edge_type)
                embedding2 = model2(edge_index, edge_type)
                s1 = model.decode(embedding1, edge_index, edge_type)
                s2 = model.decode(embedding2, edge_index, edge_type)
            else:
                embedding1 = model1(edge_index)
                embedding2 = model2(edge_index)
                s1 = model.decode(embedding1, edge_index)
                s2 = model.decode(embedding2, edge_index)
            embedding = model(data.train_pos_edge_index.to(device))
            lg = model.decode(embedding, pos_edge_index, neg_edge_index).sigmoid()
            lb = get_link_labels(pos_edge_index, neg_edge_index)
            logits.append(lg)
            label.append(lb)
    loss = F.binary_cross_entropy_with_logits(logits, label)
    auc = roc_auc_score(label.cpu(), logits.cpu())
    aup = average_precision_score(label.cpu(), logits.cpu())
    return loss, auc, aup
@torch.no_grad()
def verification_error(model1, model2):
    '''L2 distance between aproximate model and re-trained model'''
    # Compare on CPU so the two models need not share a device.
    model1 = model1.to('cpu')
    model2 = model2.to('cpu')
    params1 = {name: p for name, p in model1.named_parameters()}
    params2 = {name: p for name, p in model2.named_parameters()}
    shared_names = set(params1) & set(params2)
    print(shared_names)
    total = torch.tensor(0.0).float()
    for name in shared_names:
        total += torch.norm(params1[name] - params2[name])
    return total
@torch.no_grad()
def member_infer_attack(target_model, attack_model, data, logits=None):
    '''Membership inference attack'''
    # Score the deleted (Df) edges with the target model, encoding only on the
    # retained (Dr) edges; feed [P(non-member), P(member)] to the attacker.
    df_edges = data.train_pos_edge_index[:, data.df_mask]
    node_emb = target_model(data.x, data.train_pos_edge_index[:, data.dr_mask])
    prob_member = target_model.decode(node_emb, df_edges).sigmoid()
    feature = torch.stack([1 - prob_member, prob_member], dim=1)
    attack_logits = attack_model(feature)
    _, pred = torch.max(attack_logits, 1)
    # Attack "succeeds" on an edge when the attacker predicts class 0.
    suc_rate = 1 - pred.float().mean()
    return torch.softmax(attack_logits, dim=-1).squeeze().tolist(), suc_rate.cpu().item()
@torch.no_grad()
def member_infer_attack_node(target_model, attack_model, data, logits=None):
    '''Membership inference attack'''
    # Concatenate the two endpoint embeddings of each deleted edge as the
    # attack feature (embeddings come from the retained-edge graph).
    df_edges = data.train_pos_edge_index[:, data.df_mask]
    node_emb = target_model(data.x, data.train_pos_edge_index[:, data.dr_mask])
    feature = torch.cat([node_emb[df_edges[0]], node_emb[df_edges[1]]], dim=-1)
    attack_logits = attack_model(feature)
    _, pred = torch.max(attack_logits, 1)
    suc_rate = 1 - pred.float().mean()
    return torch.softmax(attack_logits, dim=-1).squeeze().tolist(), suc_rate.cpu().item()
@torch.no_grad()
def get_node_embedding_data(model, data):
    """Node embeddings from the training graph (restricted to Dr edges if a
    `dtrain_mask` is present), computed on the module-level `device`."""
    model.eval()
    edge_index = data.train_pos_edge_index
    if getattr(data, 'dtrain_mask', None) is not None:
        edge_index = edge_index[:, data.dtrain_mask]
    return model(data.x.to(device), edge_index.to(device))
@torch.no_grad()
def output_kldiv(model1, model2, data=None, loader=None):
    '''KL-Divergence between output distribution of model and re-trained model'''
    # Exactly one of `data` (full-batch) or `loader` (mini-batch) should be set.
    # NOTE(review): the mini-batch branch references `edge` and `model`, which
    # are never defined in this function — it would raise NameError if taken.
    # Only the full-batch path appears usable.
    model1.eval()
    model2.eval()
    # For full batch
    if data is not None:
        embedding1 = get_node_embedding_data(model1, data).to(device)
        embedding2 = get_node_embedding_data(model2, data).to(device)
        # train_pos_edge_index, when present, overrides edge_index.
        if data.edge_index is not None:
            edge_index = data.edge_index.to(device)
        if data.train_pos_edge_index is not None:
            edge_index = data.train_pos_edge_index.to(device)
        if hasattr(data, 'edge_type'):
            edge_type = data.edge_type.to(device)
            score1 = model1.decode(embedding1, edge_index, edge_type)
            score2 = model2.decode(embedding2, edge_index, edge_type)
        else:
            score1 = model1.decode(embedding1, edge_index)
            score2 = model2.decode(embedding2, edge_index)
    # For mini batch
    if loader is not None:
        score1 = []
        score2 = []
        for batch in loader:
            edge_index = batch.edge_index.to(device)
            if hasattr(batch, 'edge_type'):
                edge_type = batch.edge_type.to(device)
                embedding1 = model1(edge, edge_type)
                embedding2 = model2(edge, edge_type)
                s1 = model.decode(embedding1, edge, edge_type)
                s2 = model.decode(embedding2, edge, edge_type)
            else:
                embedding1 = model1(edge)
                embedding2 = model2(edge)
                s1 = model.decode(embedding1, edge)
                s2 = model.decode(embedding2, edge)
            score1.append(s1)
            score2.append(s2)
        score1 = torch.hstack(score1)
        score2 = torch.hstack(score2)
    # NOTE(review): F.kl_div is called with default reduction ('mean') and a
    # non-log second argument (log_target defaults to False) — confirm this is
    # the intended normalization.
    kldiv = F.kl_div(
        F.log_softmax(score1, dim=-1),
        F.softmax(score2, dim=-1)
    )
    return kldiv
| 6,151 | 32.254054 | 108 | py |
GNNDelete | GNNDelete-main/framework/trainer/base.py | import os
import time
import json
import wandb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import trange, tqdm
from ogb.graphproppred import Evaluator
from torch_geometric.data import DataLoader
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score
from ..evaluation import *
from ..training_args import parse_args
from ..utils import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# device = 'cpu'
class Trainer:
    """Base trainer for link-prediction GNN unlearning experiments.

    Provides full-batch and mini-batch training loops, evaluation on the
    retained (Dr) and deleted (Df) edge sets, best/final checkpointing, and
    the post-unlearning test pass (verification error, membership-inference
    attack ratios).
    """

    def __init__(self, args):
        self.args = args
        # Running experiment log; serialized to trainer_log.json by save_log().
        self.trainer_log = {
            'unlearning_model': args.unlearning_model,
            'dataset': args.dataset,
            'log': []}
        self.logit_all_pair = None
        # Cached masks of sampled positive (retained) edges so repeated eval()
        # calls score Df against the same positive samples.
        self.df_pos_edge = []

        # Snapshot the run configuration next to the checkpoints.
        with open(os.path.join(self.args.checkpoint_dir, 'training_args.json'), 'w') as f:
            json.dump(vars(args), f)

    def freeze_unused_weights(self, model, mask):
        """Restrict updates of the two deletion-operator weights to the entries
        selected by `mask`; gradients outside the mask are zeroed by a hook."""
        grad_mask = torch.zeros_like(mask)
        grad_mask[mask] = 1

        model.deletion1.deletion_weight.register_hook(lambda grad: grad.mul_(grad_mask))
        model.deletion2.deletion_weight.register_hook(lambda grad: grad.mul_(grad_mask))

    @torch.no_grad()
    def get_link_labels(self, pos_edge_index, neg_edge_index):
        """Return a float label vector: 1 for every positive edge followed by 0
        for every negative edge (must match the logit concatenation order)."""
        E = pos_edge_index.size(1) + neg_edge_index.size(1)
        link_labels = torch.zeros(E, dtype=torch.float, device=pos_edge_index.device)
        link_labels[:pos_edge_index.size(1)] = 1.
        return link_labels

    @torch.no_grad()
    def get_embedding(self, model, data, on_cpu=False):
        """Encode all nodes using only the retained training edges.

        When `on_cpu` is True the forward pass runs on CPU; the model is moved
        back to its original device before returning.
        """
        original_device = next(model.parameters()).device

        if on_cpu:
            model = model.cpu()
            data = data.cpu()

        z = model(data.x, data.train_pos_edge_index[:, data.dtrain_mask])

        model = model.to(original_device)

        return z

    def train(self, model, data, optimizer, args):
        """Dispatch to full-batch or mini-batch training by dataset size."""
        if self.args.dataset in ['Cora', 'PubMed', 'DBLP', 'CS']:
            return self.train_fullbatch(model, data, optimizer, args)

        if self.args.dataset in ['Physics']:
            return self.train_minibatch(model, data, optimizer, args)

        if 'ogbl' in self.args.dataset:
            return self.train_minibatch(model, data, optimizer, args)

    def train_fullbatch(self, model, data, optimizer, args):
        """Full-batch link-prediction training with periodic validation and
        best/final checkpointing (selection criterion: validation loss)."""
        start_time = time.time()
        best_valid_loss = 1000000

        data = data.to(device)
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            # Fresh negatives each epoch, one per retained positive edge.
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index,
                num_nodes=data.num_nodes,
                num_neg_samples=data.dtrain_mask.sum())

            z = model(data.x, data.train_pos_edge_index)
            logits = model.decode(z, data.train_pos_edge_index, neg_edge_index)
            label = get_link_labels(data.train_pos_edge_index, neg_edge_index)
            loss = F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item()
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if valid_loss < best_valid_loss:
                    best_valid_loss = valid_loss
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                    torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_valid_loss'] = best_valid_loss

    def train_minibatch(self, model, data, optimizer, args):
        """Mini-batch training via GraphSAINT random-walk sampling; negatives
        are drawn per sampled subgraph."""
        start_time = time.time()
        best_valid_loss = 1000000

        data.edge_index = data.train_pos_edge_index
        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
        )
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            epoch_loss = 0
            for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                # Positive and negative sample (negatives within the subgraph).
                train_pos_edge_index = batch.edge_index.to(device)
                z = model(batch.x.to(device), train_pos_edge_index)

                neg_edge_index = negative_sampling(
                    edge_index=train_pos_edge_index,
                    num_nodes=z.size(0))

                logits = model.decode(z, train_pos_edge_index, neg_edge_index)
                label = get_link_labels(train_pos_edge_index, neg_edge_index)
                loss = F.binary_cross_entropy_with_logits(logits, label)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                log = {
                    'epoch': epoch,
                    'step': step,
                    'train_loss': loss.item(),
                }
                wandb.log(log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                tqdm.write(' | '.join(msg))

                epoch_loss += loss.item()

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': epoch_loss / step
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if valid_loss < best_valid_loss:
                    best_valid_loss = valid_loss
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                    torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_valid_loss'] = best_valid_loss
        # NOTE(review): no 'epoch_time' entries are ever logged in this loop,
        # so this overwrites training_time with np.mean([]) == nan. Kept as-is
        # to preserve behavior — confirm whether wall-clock time was intended.
        self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])

    @torch.no_grad()
    def eval(self, model, data, stage='val', pred_all=False):
        """Evaluate on `stage` edges.

        Returns (loss, dt_auc, dt_aup, df_auc, df_aup, df_logit,
        logit_all_pair, log_dict). Df metrics are nan for the 'original'
        (non-unlearned) model.
        """
        model.eval()

        pos_edge_index = data[f'{stage}_pos_edge_index']
        neg_edge_index = data[f'{stage}_neg_edge_index']

        if self.args.eval_on_cpu:
            model = model.to('cpu')

        # Message passing uses only the retained training edges.
        if hasattr(data, 'dtrain_mask'):
            mask = data.dtrain_mask
        else:
            mask = data.dr_mask
        z = model(data.x, data.train_pos_edge_index[:, mask])
        logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
        label = self.get_link_labels(pos_edge_index, neg_edge_index)

        # DT AUC AUP
        # NOTE(review): `logits` is already sigmoid-activated, so
        # binary_cross_entropy_with_logits applies a second sigmoid here. The
        # value is only used for checkpoint selection; left unchanged to
        # preserve selection behavior — confirm before reusing as a metric.
        loss = F.binary_cross_entropy_with_logits(logits, label).cpu().item()
        dt_auc = roc_auc_score(label.cpu(), logits.cpu())
        dt_aup = average_precision_score(label.cpu(), logits.cpu())

        # DF AUC AUP
        if self.args.unlearning_model in ['original']:
            df_logit = []
        else:
            df_logit = model.decode(z, data.directed_df_edge_index).sigmoid().tolist()

        if len(df_logit) > 0:
            df_auc = []
            df_aup = []

            # Sample 500 random sets of retained positives of |Df| size once,
            # then reuse them on every later call for comparable metrics.
            if len(self.df_pos_edge) == 0:
                for i in range(500):
                    mask = torch.zeros(data.train_pos_edge_index[:, data.dr_mask].shape[1], dtype=torch.bool)
                    idx = torch.randperm(data.train_pos_edge_index[:, data.dr_mask].shape[1])[:len(df_logit)]
                    mask[idx] = True
                    self.df_pos_edge.append(mask)

            # Use cached pos samples
            for mask in self.df_pos_edge:
                pos_logit = model.decode(z, data.train_pos_edge_index[:, data.dr_mask][:, mask]).sigmoid().tolist()

                logit = df_logit + pos_logit
                label = [0] * len(df_logit) + [1] * len(df_logit)
                df_auc.append(roc_auc_score(label, logit))
                df_aup.append(average_precision_score(label, logit))

            df_auc = np.mean(df_auc)
            df_aup = np.mean(df_aup)
        else:
            df_auc = np.nan
            df_aup = np.nan

        # Logits for all node pairs (only when requested: O(N^2) memory).
        if pred_all:
            logit_all_pair = (z @ z.t()).cpu()
        else:
            logit_all_pair = None

        log = {
            f'{stage}_loss': loss,
            f'{stage}_dt_auc': dt_auc,
            f'{stage}_dt_aup': dt_aup,
            f'{stage}_df_auc': df_auc,
            f'{stage}_df_aup': df_aup,
            f'{stage}_df_logit_mean': np.mean(df_logit) if len(df_logit) > 0 else np.nan,
            f'{stage}_df_logit_std': np.std(df_logit) if len(df_logit) > 0 else np.nan
        }

        if self.args.eval_on_cpu:
            model = model.to(device)

        return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, log

    @torch.no_grad()
    def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
        """Final test pass: Dt/Df metrics, optional verification error against
        a retrained model, and optional membership-inference attacks."""
        if ckpt == 'best':    # Load best ckpt
            ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
            model.load_state_dict(ckpt['model_state'])

        if 'ogbl' in self.args.dataset:
            pred_all = False
        else:
            pred_all = True
        loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log = self.eval(model, data, 'test', pred_all)

        self.trainer_log['dt_loss'] = loss
        self.trainer_log['dt_auc'] = dt_auc
        self.trainer_log['dt_aup'] = dt_aup
        self.trainer_log['df_logit'] = df_logit
        self.logit_all_pair = logit_all_pair
        self.trainer_log['df_auc'] = df_auc
        self.trainer_log['df_aup'] = df_aup
        self.trainer_log['auc_sum'] = dt_auc + df_auc
        self.trainer_log['aup_sum'] = dt_aup + df_aup
        self.trainer_log['auc_gap'] = abs(dt_auc - df_auc)
        self.trainer_log['aup_gap'] = abs(dt_aup - df_aup)

        if model_retrain is not None:    # Deletion
            self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()

        # MI Attack after unlearning. NOTE(review): the '*_before' entries are
        # assumed to have been written into trainer_log by an earlier phase.
        if attack_model_all is not None:
            mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
            self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
        if attack_model_sub is not None:
            mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
            self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after

            self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
            self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
            print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
            print(self.trainer_log['df_auc'], self.trainer_log['df_aup'])

        return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log

    @torch.no_grad()
    def get_output(self, model, node_embedding, data):
        """Decode edge logits for data.edge_index from precomputed embeddings."""
        model.eval()
        node_embedding = node_embedding.to(device)
        edge = data.edge_index.to(device)
        # Fix: `edge_type` was referenced here but never defined (NameError on
        # every call). The base decoder is relation-free — eval() calls
        # model.decode(z, edge_index) with a single edge argument — so decode
        # with the edge index only.
        output = model.decode(node_embedding, edge)

        return output

    def save_log(self):
        """Persist the accumulated trainer log and the all-pairs logits."""
        with open(os.path.join(self.args.checkpoint_dir, 'trainer_log.json'), 'w') as f:
            json.dump(self.trainer_log, f)

        torch.save(self.logit_all_pair, os.path.join(self.args.checkpoint_dir, 'pred_proba.pt'))
class KGTrainer(Trainer):
    """Trainer for knowledge-graph (relational) link prediction.

    Differs from the base Trainer in that message passing and decoding take an
    edge-type argument and negatives come from `negative_sampling_kg`.
    Checkpoint selection uses validation Dt AUP instead of loss.
    """

    def train(self, model, data, optimizer, args):
        """Mini-batch KG training via GraphSAINT random-walk sampling."""
        model = model.to(device)
        start_time = time.time()
        best_metric = 0

        print('Num workers:', len(os.sched_getaffinity(0)))
        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=128, walk_length=2, num_steps=args.num_steps, num_workers=len(os.sched_getaffinity(0))
        )
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            epoch_loss = 0
            epoch_time = 0
            for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                # Fix: this per-step timer previously reassigned the outer
                # `start_time`, so the final wall-clock measurement covered
                # only the last step. Use a separate name.
                step_start_time = time.time()
                batch = batch.to(device)

                # Message passing over all sampled edges (all relation types).
                edge_index = batch.edge_index
                edge_type = batch.edge_type
                z = model(batch.x, edge_index, edge_type)

                # Only select directed edges for link prediction.
                decoding_mask = (edge_type < args.num_edge_type)
                decoding_edge_index = edge_index[:, decoding_mask]
                decoding_edge_type = edge_type[decoding_mask]

                neg_edge_index = negative_sampling_kg(
                    edge_index=decoding_edge_index,
                    edge_type=decoding_edge_type)

                pos_logits = model.decode(z, decoding_edge_index, decoding_edge_type)
                neg_logits = model.decode(z, neg_edge_index, decoding_edge_type)
                logits = torch.cat([pos_logits, neg_logits], dim=-1)
                label = get_link_labels(decoding_edge_index, neg_edge_index)

                loss = F.binary_cross_entropy_with_logits(logits, label)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                log = {
                    'epoch': epoch,
                    'step': step,
                    'train_loss': loss.item(),
                }
                wandb.log(log)

                epoch_loss += loss.item()
                epoch_time += time.time() - step_start_time

            if (epoch + 1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': epoch_loss / step,
                    'epoch_time': epoch_time
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                # Select on validation Dt AUP (higher is better).
                if dt_aup > best_metric:
                    best_metric = dt_aup
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid aup = {best_metric:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_metric'] = best_metric
        self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])

    @torch.no_grad()
    def eval(self, model, data, stage='val', pred_all=False):
        """Evaluate relational link prediction on `stage` edges; same return
        signature as Trainer.eval."""
        model.eval()

        pos_edge_index = data[f'{stage}_pos_edge_index']
        neg_edge_index = data[f'{stage}_neg_edge_index']
        # NOTE(review): negatives reuse the positive edges' relation types —
        # presumably corrupted triples keep their original relation; confirm
        # against the negative-sampling scheme.
        pos_edge_type = data[f'{stage}_edge_type']
        neg_edge_type = data[f'{stage}_edge_type']

        if self.args.eval_on_cpu:
            model = model.to('cpu')

        # Message passing over retained (Dr) edges only.
        z = model(data.x, data.edge_index[:, data.dr_mask], data.edge_type[data.dr_mask])
        decoding_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        decoding_edge_type = torch.cat([pos_edge_type, neg_edge_type], dim=-1)
        logits = model.decode(z, decoding_edge_index, decoding_edge_type)
        label = get_link_labels(pos_edge_index, neg_edge_index)

        # DT AUC AUP
        loss = F.binary_cross_entropy_with_logits(logits, label).cpu().item()
        dt_auc = roc_auc_score(label.cpu(), logits.cpu())
        dt_aup = average_precision_score(label.cpu(), logits.cpu())

        # DF AUC AUP
        if self.args.unlearning_model in ['original']:
            df_logit = []
        else:
            df_logit = model.decode(z, data.directed_df_edge_index, data.directed_df_edge_type).sigmoid().tolist()

        # dr_mask covers both edge directions; keep only the first half so it
        # indexes the directed train_pos_edge_index.
        dr_mask = data.dr_mask[:data.dr_mask.shape[0] // 2]
        if len(df_logit) > 0:
            df_auc = []
            df_aup = []
            for i in range(500):
                mask = torch.zeros(data.train_pos_edge_index[:, dr_mask].shape[1], dtype=torch.bool)
                idx = torch.randperm(data.train_pos_edge_index[:, dr_mask].shape[1])[:len(df_logit)]
                mask[idx] = True
                pos_logit = model.decode(z, data.train_pos_edge_index[:, dr_mask][:, mask], data.train_edge_type[dr_mask][mask]).sigmoid().tolist()

                logit = df_logit + pos_logit
                label = [0] * len(df_logit) + [1] * len(df_logit)
                df_auc.append(roc_auc_score(label, logit))
                df_aup.append(average_precision_score(label, logit))

            df_auc = np.mean(df_auc)
            df_aup = np.mean(df_aup)
        else:
            df_auc = np.nan
            df_aup = np.nan

        # Logits for all node pairs
        if pred_all:
            logit_all_pair = (z @ z.t()).cpu()
        else:
            logit_all_pair = None

        log = {
            f'{stage}_loss': loss,
            f'{stage}_dt_auc': dt_auc,
            f'{stage}_dt_aup': dt_aup,
            f'{stage}_df_auc': df_auc,
            f'{stage}_df_aup': df_aup,
            f'{stage}_df_logit_mean': np.mean(df_logit) if len(df_logit) > 0 else np.nan,
            f'{stage}_df_logit_std': np.std(df_logit) if len(df_logit) > 0 else np.nan
        }

        if self.args.eval_on_cpu:
            model = model.to(device)

        return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, log

    @torch.no_grad()
    def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='ckpt'):
        """Final test pass. NOTE(review): the default `ckpt='ckpt'` (unlike the
        base class's 'best') means the best checkpoint is NOT loaded unless the
        caller passes 'best' explicitly; kept for interface compatibility."""
        # Fix: was `ckpt is 'best'` — identity comparison with a string
        # literal is interning-dependent and raises SyntaxWarning; use ==.
        if ckpt == 'best':    # Load best ckpt
            ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
            model.load_state_dict(ckpt['model_state'])

        if 'ogbl' in self.args.dataset:
            pred_all = False
        else:
            pred_all = True
        loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log = self.eval(model, data, 'test', pred_all)

        self.trainer_log['dt_loss'] = loss
        self.trainer_log['dt_auc'] = dt_auc
        self.trainer_log['dt_aup'] = dt_aup
        self.trainer_log['df_logit'] = df_logit
        self.logit_all_pair = logit_all_pair
        self.trainer_log['df_auc'] = df_auc
        self.trainer_log['df_aup'] = df_aup

        # MI Attack after unlearning
        if attack_model_all is not None:
            mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
            self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
        if attack_model_sub is not None:
            mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
            self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after

            self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
            self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
            print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
            print(self.trainer_log['df_auc'], self.trainer_log['df_aup'])

        return loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, test_log

    def _train(self, model, data, optimizer, args):
        """Legacy full-batch KG training loop (apparently unused; kept for
        reference with minimal fixes)."""
        model = model.to(device)
        data = data.to(device)

        start_time = time.time()
        best_valid_loss = 1000000

        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            # Message passing
            z = model(data.x, data.edge_index, data.edge_type)

            # Positive and negative sample
            neg_edge_index = negative_sampling_kg(
                edge_index=data.train_pos_edge_index,
                edge_type=data.train_edge_type)

            pos_logits = model.decode(z, data.train_pos_edge_index, data.train_edge_type)
            neg_logits = model.decode(z, neg_edge_index, data.train_edge_type)
            logits = torch.cat([pos_logits, neg_logits], dim=-1)
            label = get_link_labels(data.train_pos_edge_index, neg_edge_index)

            reg_loss = z.pow(2).mean() + model.W.pow(2).mean()
            loss = F.binary_cross_entropy_with_logits(logits, label) + 1e-2 * reg_loss

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            log = {
                'epoch': epoch,
                'train_loss': loss.item(),
            }
            wandb.log(log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
            tqdm.write(' | '.join(msg))

            if (epoch + 1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    # Fix: previously logged `epoch_loss / step`, two names
                    # that do not exist in this full-batch loop (NameError).
                    'train_loss': loss.item()
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if valid_loss < best_valid_loss:
                    # NOTE(review): the "best loss" is overwritten with an AUC
                    # sum here, mixing scales in the comparison above. Kept
                    # as-is (method appears unused) — confirm intent.
                    best_valid_loss = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                    torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_valid_loss:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_valid_loss'] = best_valid_loss
        self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
class NodeClassificationTrainer(Trainer):
    """Trainer for node classification (NLL loss over log-softmax outputs);
    checkpoint selection uses validation accuracy."""

    def train(self, model, data, optimizer, args):
        """Full-batch node-classification training with periodic validation."""
        start_time = time.time()
        best_epoch = 0
        best_valid_acc = 0

        data = data.to(device)
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            z = F.log_softmax(model(data.x, data.edge_index), dim=1)
            loss = F.nll_loss(z[data.train_mask], data.y[data.train_mask])

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, dt_acc, dt_f1, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item()
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if dt_acc > best_valid_acc:
                    best_valid_acc = dt_acc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid Acc = {dt_acc:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                    torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid acc = {best_valid_acc:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_valid_acc'] = best_valid_acc

    @torch.no_grad()
    def eval(self, model, data, stage='val', pred_all=False):
        """Evaluate classification loss/accuracy/F1 on the `stage` node mask.

        Returns (loss, dt_acc, dt_f1, log_dict).
        """
        model.eval()

        if self.args.eval_on_cpu:
            model = model.to('cpu')

        z = F.log_softmax(model(data.x, data.edge_index), dim=1)

        # Fix: metrics were always computed on data.val_mask, so the test pass
        # silently re-evaluated the validation split. Select the mask for the
        # requested stage instead ('val_mask' / 'test_mask'; assumes the data
        # object carries both, as is standard for node-classification splits).
        eval_mask = data[f'{stage}_mask']

        # DT loss / accuracy / F1
        loss = F.nll_loss(z[eval_mask], data.y[eval_mask]).cpu().item()
        pred = torch.argmax(z[eval_mask], dim=1).cpu()
        dt_acc = accuracy_score(data.y[eval_mask].cpu(), pred)
        dt_f1 = f1_score(data.y[eval_mask].cpu(), pred, average='micro')

        # Logits for all node pairs
        if pred_all:
            logit_all_pair = (z @ z.t()).cpu()
        else:
            logit_all_pair = None

        log = {
            f'{stage}_loss': loss,
            f'{stage}_dt_acc': dt_acc,
            f'{stage}_dt_f1': dt_f1,
        }

        if self.args.eval_on_cpu:
            model = model.to(device)

        return loss, dt_acc, dt_f1, log

    @torch.no_grad()
    def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
        """Final test pass: classification metrics plus optional verification
        error and membership-inference attacks."""
        if ckpt == 'best':    # Load best ckpt
            ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
            model.load_state_dict(ckpt['model_state'])

        if 'ogbl' in self.args.dataset:
            pred_all = False
        else:
            pred_all = True
        loss, dt_acc, dt_f1, test_log = self.eval(model, data, 'test', pred_all)

        self.trainer_log['dt_loss'] = loss
        self.trainer_log['dt_acc'] = dt_acc
        self.trainer_log['dt_f1'] = dt_f1

        if model_retrain is not None:    # Deletion
            self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()

        # MI Attack after unlearning
        if attack_model_all is not None:
            mi_logit_all_after, mi_sucrate_all_after = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_after'] = mi_logit_all_after
            self.trainer_log['mi_sucrate_all_after'] = mi_sucrate_all_after
        if attack_model_sub is not None:
            mi_logit_sub_after, mi_sucrate_sub_after = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_after'] = mi_logit_sub_after
            self.trainer_log['mi_sucrate_sub_after'] = mi_sucrate_sub_after

            self.trainer_log['mi_ratio_all'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_all_after'], self.trainer_log['mi_logit_all_before'])])
            self.trainer_log['mi_ratio_sub'] = np.mean([i[1] / j[1] for i, j in zip(self.trainer_log['mi_logit_sub_after'], self.trainer_log['mi_logit_sub_before'])])
            print(self.trainer_log['mi_ratio_all'], self.trainer_log['mi_ratio_sub'], self.trainer_log['mi_sucrate_all_after'], self.trainer_log['mi_sucrate_sub_after'])
            # Fix: this trainer never computes Df AUC/AUP, so direct key access
            # raised KeyError whenever this branch ran; use .get() instead.
            print(self.trainer_log.get('df_auc'), self.trainer_log.get('df_aup'))

        return loss, dt_acc, dt_f1, test_log
class GraphTrainer(Trainer):
    """Trainer for graph-level property prediction (OGB molecule-style data);
    checkpoint selection uses validation ROC-AUC."""

    def train(self, model, dataset, split_idx, optimizer, args):
        """Mini-batch graph-classification training over the OGB split."""
        self.train_loader = DataLoader(dataset[split_idx["train"]], batch_size=32, shuffle=True)
        self.valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=32, shuffle=False)
        self.test_loader = DataLoader(dataset[split_idx["test"]], batch_size=32, shuffle=False)

        start_time = time.time()
        best_epoch = 0
        best_valid_auc = 0

        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            for batch in tqdm(self.train_loader, desc="Iteration", leave=False):
                batch = batch.to(device)
                pred = model(batch)
                optimizer.zero_grad()

                ## ignore nan targets (unlabeled) when computing training loss.
                is_labeled = batch.y == batch.y
                loss = F.binary_cross_entropy_with_logits(pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled])
                loss.backward()
                optimizer.step()

            if (epoch+1) % args.valid_freq == 0:
                valid_auc, valid_log = self.eval(model, dataset, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item()
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if valid_auc > best_valid_auc:
                    best_valid_auc = valid_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid auc = {valid_auc:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid auc = {best_valid_auc:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_valid_auc'] = best_valid_auc

    @torch.no_grad()
    def eval(self, model, data, stage='val', pred_all=False):
        """Score ROC-AUC on the cached valid/test loader for `stage`.

        Note: `data` is unused here; evaluation always runs over the loaders
        built in train().
        """
        model.eval()

        y_true = []
        y_pred = []

        if stage == 'val':
            loader = self.valid_loader
        else:
            loader = self.test_loader

        if self.args.eval_on_cpu:
            model = model.to('cpu')

        # Fix: batches were always moved to the global `device` even when the
        # model had just been moved to CPU, crashing under eval_on_cpu on a
        # CUDA machine. Keep batch and model on the same device.
        eval_device = 'cpu' if self.args.eval_on_cpu else device

        for batch in tqdm(loader):
            batch = batch.to(eval_device)
            pred = model(batch)

            y_true.append(batch.y.view(pred.shape).detach().cpu())
            y_pred.append(pred.detach().cpu())

        y_true = torch.cat(y_true, dim = 0).numpy()
        y_pred = torch.cat(y_pred, dim = 0).numpy()

        # NOTE(review): the evaluator is hard-coded to ogbg-molhiv regardless
        # of args.dataset — confirm this trainer is molhiv-only.
        evaluator = Evaluator('ogbg-molhiv')
        auc = evaluator.eval({"y_true": y_true, "y_pred": y_pred})['rocauc']

        log = {
            # Fix: key was the placeholder-free f-string 'val_auc' even at the
            # test stage; report the metric under the stage actually evaluated.
            f'{stage}_auc': auc,
        }

        if self.args.eval_on_cpu:
            model = model.to(device)

        return auc, log

    @torch.no_grad()
    def test(self, model, data, model_retrain=None, attack_model_all=None, attack_model_sub=None, ckpt='best'):
        """Final test pass on the held-out split; optionally compares against
        a retrained model via verification error."""
        if ckpt == 'best':    # Load best ckpt
            ckpt = torch.load(os.path.join(self.args.checkpoint_dir, 'model_best.pt'))
            model.load_state_dict(ckpt['model_state'])

        dt_auc, test_log = self.eval(model, data, 'test')

        self.trainer_log['dt_auc'] = dt_auc

        if model_retrain is not None:    # Deletion
            self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()

        return dt_auc, test_log
| 41,518 | 41.366327 | 169 | py |
GNNDelete | GNNDelete-main/framework/trainer/member_infer.py | import os
import os
import json

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score, f1_score
from torch_geometric.utils import negative_sampling
from tqdm import trange, tqdm

from .base import Trainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class MIAttackTrainer(Trainer):
'''This code is adapted from https://github.com/iyempissy/rebMIGraph'''
def __init__(self, args):
self.args = args
self.trainer_log = {
'unlearning_model': 'member_infer',
'dataset': args.dataset,
'seed': args.random_seed,
'shadow_log': [],
'attack_log': []}
self.logit_all_pair = None
with open(os.path.join(self.args.checkpoint_dir, 'training_args.json'), 'w') as f:
json.dump(vars(args), f)
    def train_shadow(self, model, data, optimizer, args):
        """Train the shadow link-prediction model on the *test* edges.

        The shadow model imitates the target model's behavior on data the
        target never trained on; its scores later supply training examples
        for the membership-inference attack model. Checkpoints the best
        model (by validation loss) to shadow_model_best.pt and returns the
        concatenation of every negative edge index sampled across epochs
        (shape [2, epochs * |test_pos|]), to be reused as known non-member
        edges.
        """
        best_valid_loss = 1000000

        # Negative samples drawn in each epoch, accumulated for the caller.
        all_neg = []

        # Train shadow model using the test data
        for epoch in trange(args.epochs, desc='Train shadow model'):
            model.train()

            # Positive and negative sample
            neg_edge_index = negative_sampling(
                edge_index=data.test_pos_edge_index,
                num_nodes=data.num_nodes,
                num_neg_samples=data.test_pos_edge_index.shape[1])

            z = model(data.x, data.test_pos_edge_index)
            logits = model.decode(z, data.test_pos_edge_index, neg_edge_index)
            # get_link_labels: 1s for positives then 0s for negatives
            # (presumably re-exported by `from ..utils import *`; same for F —
            # NOTE(review): no explicit torch.nn.functional import is visible
            # in this module).
            label = get_link_labels(data.test_pos_edge_index, neg_edge_index)
            loss = F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            all_neg.append(neg_edge_index.cpu())

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, auc, aup, df_logit, logit_all_pair = self.eval_shadow(model, data, 'val')

                log = {
                    'shadow_epoch': epoch,
                    'shadow_train_loss': loss.item(),
                    'shadow_valid_loss': valid_loss,
                    'shadow_valid_auc': auc,
                    'shadow_valid_aup': aup,
                    'shadow_df_logit': df_logit
                }
                wandb.log(log)
                self.trainer_log['shadow_log'].append(log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                tqdm.write(' | '.join(msg))

                # Keep the checkpoint with the lowest validation loss.
                if valid_loss < best_valid_loss:
                    best_valid_loss = valid_loss
                    best_epoch = epoch
                    self.trainer_log['shadow_best_epoch'] = best_epoch
                    self.trainer_log['shadow_best_valid_loss'] = best_valid_loss

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'shadow_model_best.pt'))

        return torch.cat(all_neg, dim=-1)
@torch.no_grad()
def eval_shadow(self, model, data, stage='val'):
model.eval()
pos_edge_index = data[f'{stage}_pos_edge_index']
neg_edge_index = data[f'{stage}_neg_edge_index']
z = model(data.x, data.val_pos_edge_index)
logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
label = self.get_link_labels(pos_edge_index, neg_edge_index)
loss = F.binary_cross_entropy_with_logits(logits, label).cpu().item()
auc = roc_auc_score(label.cpu(), logits.cpu())
aup = average_precision_score(label.cpu(), logits.cpu())
df_logit = float('nan')
logit_all_pair = (z @ z.t()).cpu()
log = {
f'{stage}_loss': loss,
f'{stage}_auc': auc,
f'{stage}_aup': aup,
f'{stage}_df_logit': df_logit,
}
wandb.log(log)
msg = [f'{i}: {j:.4f}' if isinstance(j, (np.floating, float)) else f'{i}: {j:>4d}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
return loss, auc, aup, df_logit, logit_all_pair
    def train_attack(self, model, train_loader, valid_loader, optimizer, args):
        """Train the membership-inference attack classifier.

        The attack model consumes concatenated endpoint embeddings and predicts
        whether the edge was present in the shadow model's training set.
        Checkpoints the state with the best validation AUC.
        """
        loss_fct = nn.CrossEntropyLoss()
        best_auc = 0
        best_epoch = 0

        # NOTE: the epoch count is hard-coded to 50 rather than args.epochs.
        for epoch in trange(50, desc='Train attack model'):
            model.train()

            train_loss = 0
            for x, y in train_loader:
                logits = model(x.to(device))
                loss = loss_fct(logits, y.to(device))

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                train_loss += loss.item()

            valid_loss, valid_acc, valid_auc, valid_f1 = self.eval_attack(model, valid_loader)

            log = {
                'attack_train_loss': train_loss / len(train_loader),
                'attack_valid_loss': valid_loss,
                'attack_valid_acc': valid_acc,
                'attack_valid_auc': valid_auc,
                'attack_valid_f1': valid_f1}
            wandb.log(log)
            self.trainer_log['attack_log'].append(log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
            tqdm.write(' | '.join(msg))

            # Keep the checkpoint with the best validation AUC.
            if valid_auc > best_auc:
                best_auc = valid_auc
                best_epoch = epoch
                self.trainer_log['attack_best_auc'] = valid_auc
                self.trainer_log['attack_best_epoch'] = epoch

                ckpt = {
                    'model_state': model.state_dict(),
                    'optimizer_state': optimizer.state_dict(),
                }
                torch.save(ckpt, os.path.join(args.checkpoint_dir, 'attack_model_best.pt'))
@torch.no_grad()
def eval_attack(self, model, eval_loader):
loss_fct = nn.CrossEntropyLoss()
pred = []
label = []
for x, y in eval_loader:
logits = model(x.to(device))
loss = loss_fct(logits, y.to(device))
_, p = torch.max(logits, 1)
pred.extend(p.cpu())
label.extend(y)
pred = torch.stack(pred)
label = torch.stack(label)
return loss.item(), accuracy_score(label.numpy(), pred.numpy()), roc_auc_score(label.numpy(), pred.numpy()), f1_score(label.numpy(), pred.numpy(), average='macro')
@torch.no_grad()
def prepare_attack_training_data(self, model, data, all_neg=None):
'''Prepare the training data of attack model (Present vs. Absent)
Present edges (label = 1): training data of shadow model (Test pos and neg edges)
Absent edges (label = 0): validation data of shadow model (Valid pos and neg edges)
'''
z = model(data.x, data.test_pos_edge_index)
# Sample same size of neg as pos
sample_idx = torch.randperm(all_neg.shape[1])[:data.test_pos_edge_index.shape[1]]
neg_subset = all_neg[:, sample_idx]
present_edge_index = torch.cat([data.test_pos_edge_index, data.test_neg_edge_index], dim=-1)
if 'sub' in self.args.unlearning_model:
absent_edge_index = torch.cat([data.val_pos_edge_index, data.val_neg_edge_index], dim=-1)
else: #if 'all' in self.args.unlearning_model:
absent_edge_index = torch.cat([data.val_pos_edge_index, data.val_neg_edge_index, data.train_pos_edge_index, neg_subset.to(device)], dim=-1)
edge_index = torch.cat([present_edge_index, absent_edge_index], dim=-1)
feature = torch.cat([z[edge_index[0]], z[edge_index[1]]], dim=-1).cpu()
label = get_link_labels(present_edge_index, absent_edge_index).long().cpu()
return feature, label
| 8,132 | 37.728571 | 171 | py |
GNNDelete | GNNDelete-main/framework/trainer/gradient_ascent_with_mp.py | import os
import json
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from .base import Trainer
from ..evaluation import *
from ..utils import *
class GradientAscentWithMessagePassingTrainer(Trainer):
    """Unlearning by gradient *ascent* on the edges to delete (Df), combined
    with a standard message-passing (descent) objective on the retained
    neighbourhood so the model forgets Df without degrading elsewhere.
    """

    def __init__(self,):
        self.trainer_log = {'unlearning_model': 'gradient_ascent_with_mp', 'log': []}

    def freeze_unused_mask(self, model, edge_to_delete, subgraph, h):
        """Restrict gradient flow on `model.operator` to entries for the edges
        in `subgraph[h]` (with s < t); all other entries are zeroed via a hook.
        """
        # Bug fix: this referenced the undefined names `delete_model` and
        # `device` (NameError at runtime); use `model.operator` and its own
        # device instead.
        gradient_mask = torch.zeros_like(model.operator)
        edges = subgraph[h]
        for s, t in edges:
            if s < t:
                gradient_mask[s, t] = 1
        gradient_mask = gradient_mask.to(model.operator.device)
        model.operator.register_hook(lambda grad: grad.mul_(gradient_mask))

    def train(self, model_retrain, model, data, optimizer, args):
        """Run gradient-ascent unlearning, then evaluate against the retrained
        reference model (`model_retrain`) and save model + logs.
        """
        best_loss = 100000

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()
            total_step = 0
            total_loss = 0

            ## Gradient Ascent
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index[:, data.ga_mask],
                num_nodes=data.num_nodes,
                num_neg_samples=data.ga_mask.sum())

            # print('data train to unlearn', data.train_pos_edge_index[:, data.ga_mask])
            z = model(data.x, data.train_pos_edge_index[:, data.ga_mask])
            logits = model.decode(z, data.train_pos_edge_index[:, data.ga_mask])
            # NOTE(review): a single-element label is broadcast against all Df
            # logits here — confirm the intended target shape.
            label = torch.tensor([1], dtype=torch.float, device='cuda')
            # Negated BCE == gradient ascent on the deleted edges.
            loss_ga = -F.binary_cross_entropy_with_logits(logits, label)

            ## Message Passing
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index[:, data.mp_mask],
                num_nodes=data.num_nodes,
                num_neg_samples=data.mp_mask.sum())
            z = model(data.x, data.train_pos_edge_index[:, data.mp_mask])
            logits = model.decode(z, data.train_pos_edge_index[:, data.mp_mask])
            # NOTE(review): self.get_link_labels is called with dtype/device
            # arguments instead of a negative edge index — this differs from
            # every other call site; verify the base-class signature.
            label = self.get_link_labels(data.train_pos_edge_index[:, data.mp_mask], dtype=torch.float, device='cuda')
            loss_mp = F.binary_cross_entropy_with_logits(logits, label)

            loss = loss_ga + loss_mp

            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            optimizer.zero_grad()

            total_step += 1
            total_loss += loss.item()

            msg = [
                f'Epoch: {epoch:>4d}',
                f'train loss: {total_loss / total_step:.6f}'
            ]
            tqdm.write(' | '.join(msg))

            valid_loss, auc, aup = self.eval(model, data, 'val')
            self.trainer_log['log'].append({
                'dt_loss': valid_loss,
                'dt_auc': auc,
                'dt_aup': aup
            })

        # Eval unlearn: utility on Dr and closeness to the retrained model.
        loss, auc, aup = self.test(model, data)
        self.trainer_log['dt_loss'] = loss
        self.trainer_log['dt_auc'] = auc
        self.trainer_log['dt_aup'] = aup
        self.trainer_log['ve'] = verification_error(model, model_retrain).cpu().item()
        self.trainer_log['dr_kld'] = output_kldiv(model, model_retrain, data=data).cpu().item()

        embedding = get_node_embedding_data(model, data)
        logits = model.decode(embedding, data.train_pos_edge_index[:, data.dtrain_mask]).sigmoid().detach().cpu()
        self.trainer_log['df_score'] = logits[:1].cpu().item()

        # Save model weights, final node embeddings, and the trainer log.
        ckpt = {
            'model_state': model.state_dict(),
            'node_emb': z,
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model.pt'))

        print(self.trainer_log)
        with open(os.path.join(args.checkpoint_dir, 'trainer_log.json'), 'w') as f:
            json.dump(self.trainer_log, f)
| 3,937 | 36.865385 | 118 | py |
GNNDelete | GNNDelete-main/framework/trainer/retrain.py | import os
import time
import wandb
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
class RetrainTrainer(Trainer):
    """Unlearning baseline: retrain the GNN from scratch on Dr (the retained
    edges) only — the gold-standard "train without Df" reference model."""

    def freeze_unused_mask(self, model, edge_to_delete, subgraph, h):
        """Restrict gradient flow on `model.operator` to entries for the edges
        in `subgraph[h]` (with s < t); all other entries are zeroed via a hook.
        """
        # Bug fix: this referenced the undefined name `delete_model`
        # (NameError at runtime); the mask must match the operator of the
        # model being trained.
        gradient_mask = torch.zeros_like(model.operator)
        edges = subgraph[h]
        for s, t in edges:
            if s < t:
                gradient_mask[s, t] = 1
        gradient_mask = gradient_mask.to(device)
        model.operator.register_hook(lambda grad: grad.mul_(gradient_mask))

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Dispatch to mini-batch training for OGB-scale graphs, full-batch otherwise."""
        if 'ogbl' in self.args.dataset:
            return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
        else:
            return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)

    def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Retrain on all of Dr in one batch per epoch; checkpoint on the best
        dt_auc + df_auc validation metric."""
        model = model.to('cuda')
        data = data.to('cuda')

        best_metric = 0

        # MI Attack before unlearning
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        for epoch in trange(args.epochs, desc='Unlearning'):
            model.train()

            start_time = time.time()
            total_step = 0
            total_loss = 0

            # One fresh negative per retained positive edge.
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index[:, data.dr_mask],
                num_nodes=data.num_nodes,
                num_neg_samples=data.dr_mask.sum())

            z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
            logits = model.decode(z, data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
            label = self.get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
            loss = F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            optimizer.zero_grad()

            total_step += 1
            total_loss += loss.item()

            end_time = time.time()
            epoch_time = end_time - start_time

            step_log = {
                'Epoch': epoch,
                'train_loss': loss.item(),
                'train_time': epoch_time
            }
            wandb.log(step_log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
            tqdm.write(' | '.join(msg))

            if (epoch + 1) % self.args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(log)

                # Model selection: maximise utility (dt) + forgetting (df) AUC.
                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # NOTE(review): start_time here is from the *last* epoch, so this
        # records only the tail of training, not total wall time.
        self.trainer_log['training_time'] = time.time() - start_time

        # Save
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best metric = {best_metric:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_metric'] = best_metric

    def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Retrain on Dr with GraphSAINT random-walk mini-batches; checkpoint
        on the best dt_auc + df_auc validation metric."""
        start_time = time.time()
        best_metric = 0

        # MI Attack before unlearning
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        data.edge_index = data.train_pos_edge_index
        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
        )
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            start_time = time.time()
            epoch_loss = 0
            for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                batch = batch.to(device)

                # Positive and negative sample
                train_pos_edge_index = batch.edge_index[:, batch.dr_mask]
                z = model(batch.x, train_pos_edge_index)

                neg_edge_index = negative_sampling(
                    edge_index=train_pos_edge_index,
                    num_nodes=z.size(0))

                logits = model.decode(z, train_pos_edge_index, neg_edge_index)
                label = get_link_labels(train_pos_edge_index, neg_edge_index)
                loss = F.binary_cross_entropy_with_logits(logits, label)

                loss.backward()
                # torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                optimizer.step()
                optimizer.zero_grad()

                step_log = {
                    'epoch': epoch,
                    'step': step,
                    'train_loss': loss.item(),
                }
                wandb.log(step_log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
                tqdm.write(' | '.join(msg))

                epoch_loss += loss.item()

            end_time = time.time()
            epoch_time = end_time - start_time

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(log)

                # Model selection: maximise utility (dt) + forgetting (df) AUC.
                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                    # NOTE(review): `z` is the embedding of the last mini-batch,
                    # not of the full graph — confirm this is the intent.
                    torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best metric = {best_metric:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_metric'] = best_metric
class KGRetrainTrainer(KGTrainer):
    """Retrain-from-scratch baseline for knowledge-graph (relational) models:
    trains on the retained edges (dr_mask) with typed-edge negative sampling."""

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Retrain the relational GNN on Dr using GraphSAINT mini-batches.

        Checkpoints on the best dt_auc + df_auc validation metric and records
        MI-attack results before unlearning when attack models are supplied.
        """
        model = model.to(device)
        start_time = time.time()
        best_metric = 0

        # MI Attack before unlearning
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=128, walk_length=2, num_steps=args.num_steps,
        )
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            epoch_loss = 0
            for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                batch = batch.to(device)

                # Message passing over retained (Dr) edges only.
                edge_index = batch.edge_index[:, batch.dr_mask]
                edge_type = batch.edge_type[batch.dr_mask]
                z = model(batch.x, edge_index, edge_type)

                # Positive and negative sample
                decoding_mask = (edge_type < args.num_edge_type)        # Only select directed edges for link prediction
                decoding_edge_index = edge_index[:, decoding_mask]
                decoding_edge_type = edge_type[decoding_mask]
                neg_edge_index = negative_sampling_kg(
                    edge_index=decoding_edge_index,
                    edge_type=decoding_edge_type)

                pos_logits = model.decode(z, decoding_edge_index, decoding_edge_type)
                neg_logits = model.decode(z, neg_edge_index, decoding_edge_type)
                logits = torch.cat([pos_logits, neg_logits], dim=-1)
                label = get_link_labels(decoding_edge_index, neg_edge_index)

                # reg_loss = z.pow(2).mean() + model.W.pow(2).mean()
                loss = F.binary_cross_entropy_with_logits(logits, label)# + 1e-2 * reg_loss

                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                optimizer.step()
                optimizer.zero_grad()

                log = {
                    'epoch': epoch,
                    'step': step,
                    'train_loss': loss.item(),
                }
                wandb.log(log)
                # msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                # tqdm.write(' | '.join(msg))

                epoch_loss += loss.item()

            if (epoch + 1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': epoch_loss / step
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                # Model selection: maximise utility (dt) + forgetting (df) AUC.
                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_metric:.4f}')

        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_metric'] = best_metric
        # NOTE(review): no log entry ever contains an 'epoch_time' key above,
        # so this overwrites the wall-clock time recorded earlier with
        # np.mean([]) (NaN + RuntimeWarning) — verify which value is wanted.
        self.trainer_log['training_time'] = np.mean([i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i])
| 14,611 | 41.976471 | 127 | py |
GNNDelete | GNNDelete-main/framework/trainer/gnndelete_nodeemb.py | import os
import copy
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer, NodeClassificationTrainer
from ..evaluation import *
from ..utils import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def BoundedKLDMean(logits, truth):
    """Bounded KL divergence, 1 - exp(-KL), with 'batchmean' reduction; in [0, 1)."""
    log_p = F.log_softmax(logits, dim=-1)
    q = truth.softmax(dim=-1)
    return 1 - torch.exp(-F.kl_div(log_p, q, reduction='batchmean'))
def BoundedKLDSum(logits, truth):
    """Bounded KL divergence, 1 - exp(-KL), with 'sum' reduction; in [0, 1)."""
    log_p = F.log_softmax(logits, dim=-1)
    q = truth.softmax(dim=-1)
    return 1 - torch.exp(-F.kl_div(log_p, q, reduction='sum'))
def CosineDistanceMean(logits, truth):
    """Mean cosine distance (1 - cosine similarity) between corresponding rows."""
    distances = 1 - F.cosine_similarity(logits, truth)
    return distances.mean()
def CosineDistanceSum(logits, truth):
    """Summed cosine distance (1 - cosine similarity) between corresponding rows."""
    distances = 1 - F.cosine_similarity(logits, truth)
    return distances.sum()
def centering(K):
    """Double-center the square matrix K: H @ K @ H with H = I - (1/n) 11^T."""
    n = K.shape[0]
    H = torch.eye(n, device=K.device) - torch.full((n, n), 1.0 / n, device=K.device)
    return H @ K @ H
def rbf(X, sigma=None):
    """RBF (Gaussian) kernel matrix of the rows of X.

    If `sigma` is None it defaults to the square root of the median of the
    non-zero pairwise squared distances (the "median heuristic").
    """
    GX = torch.matmul(X, X.T)
    # Pairwise squared distances via the Gram matrix: ||xi||^2 - 2 xi.xj + ||xj||^2.
    KX = torch.diag(GX) - GX + (torch.diag(GX) - GX).T
    if sigma is None:
        mdist = torch.median(KX[KX != 0])
        # Bug fix: this called math.sqrt(), but `math` is never imported in
        # this module (NameError at runtime). Use the tensor's own sqrt.
        sigma = mdist.sqrt()
    KX *= - 0.5 / (sigma * sigma)
    KX = torch.exp(KX)
    return KX
def kernel_HSIC(X, Y, sigma=None):
    """Unnormalised HSIC between X and Y under RBF kernels."""
    Kx = centering(rbf(X, sigma))
    Ky = centering(rbf(Y, sigma))
    return (Kx * Ky).sum()
def linear_HSIC(X, Y):
    """Unnormalised HSIC between X and Y under linear (Gram-matrix) kernels."""
    gram_x = X @ X.T
    gram_y = Y @ Y.T
    return (centering(gram_x) * centering(gram_y)).sum()
def LinearCKA(X, Y):
    """Linear Centered Kernel Alignment: cross-HSIC normalised by self-HSICs."""
    cross = linear_HSIC(X, Y)
    self_x = torch.sqrt(linear_HSIC(X, X))
    self_y = torch.sqrt(linear_HSIC(Y, Y))
    return cross / (self_x * self_y)
def RBFCKA(X, Y, sigma=None):
    """RBF-kernel Centered Kernel Alignment: cross-HSIC normalised by self-HSICs."""
    cross = kernel_HSIC(X, Y, sigma)
    self_x = torch.sqrt(kernel_HSIC(X, X, sigma))
    self_y = torch.sqrt(kernel_HSIC(Y, Y, sigma))
    return cross / (self_x * self_y)
def get_loss_fct(name):
    """Look up a representation-distance loss by name.

    Supported names: kld_mean, kld_sum, mse_mean, mse_sum, cosine_mean,
    cosine_sum, linear_cka, rbf_cka. Raises NotImplementedError otherwise.
    """
    registry = {
        'kld_mean': BoundedKLDMean,
        'kld_sum': BoundedKLDSum,
        'mse_mean': nn.MSELoss(reduction='mean'),
        'mse_sum': nn.MSELoss(reduction='sum'),
        'cosine_mean': CosineDistanceMean,
        'cosine_sum': CosineDistanceSum,
        'linear_cka': LinearCKA,
        'rbf_cka': RBFCKA,
    }
    try:
        return registry[name]
    except KeyError:
        raise NotImplementedError
class GNNDeleteNodeembTrainer(Trainer):
    """GNNDelete unlearning trainer (node-embedding objective).

    Optimises only the deletion operators so that (a) deleted-edge endpoint
    embeddings become indistinguishable from random-pair embeddings of the
    original model ("randomness" loss, loss_r) and (b) embeddings of nodes in
    the enclosing subgraph of Df, excluding Df endpoints, stay close to the
    original model's ("local causality" loss, loss_l).
    """

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Dispatch to mini-batch training for OGB-scale graphs, full-batch otherwise."""
        if 'ogbl' in self.args.dataset:
            return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)

        else:
            return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)

    def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Full-batch unlearning: one forward over the whole graph per epoch.

        `args.loss_type` selects which layers contribute and whether the two
        layers are optimised jointly or layer-wise (with `optimizer` being a
        single optimizer or a pair, respectively).
        """
        model = model.to('cuda')
        data = data.to('cuda')

        best_metric = 0

        # MI Attack before unlearning
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        # All node pairs in S_Df without Df
        ## S_Df 1 hop all pair mask
        sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
        sdf1_all_pair_mask[idx[0], idx[1]] = True
        sdf1_all_pair_mask[idx[1], idx[0]] = True

        assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()

        ## Remove Df itself
        sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
        sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False

        ## S_Df 2 hop all pair mask
        sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
        sdf2_all_pair_mask[idx[0], idx[1]] = True
        sdf2_all_pair_mask[idx[1], idx[0]] = True

        assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()

        ## Remove Df itself
        sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
        sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False

        ## Lower triangular mask
        idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
        lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        lower_mask[idx[0], idx[1]] = True

        ## The final mask is the intersection
        sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
        sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask

        # print(data.sdf_node_2hop_mask.sum())
        # print(sdf_all_pair_mask.nonzero())
        # print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
        # print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
        # print('aaaaaaaaaaaa', lower_mask.sum())
        # print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
        # print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
        # assert sdf_all_pair_without_df_mask.sum() == \
        #        data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()

        # Nodes in the k-hop subgraph of Df that are NOT endpoints of Df:
        # these are the nodes whose embeddings should stay unchanged.
        non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
        non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
        data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
        data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask

        # Original node embeddings (frozen reference, layers 1 and 2).
        with torch.no_grad():
            z1_ori, z2_ori = model.get_original_embeddings(data.x, data.train_pos_edge_index[:, data.dr_mask], return_all_emb=True)

        loss_fct = get_loss_fct(self.args.loss_fct)
        # Random node pairs used as the "forgotten" target; sampled once,
        # before the epoch loop, and reused every epoch.
        neg_edge = neg_edge_index = negative_sampling(
            edge_index=data.train_pos_edge_index,
            num_nodes=data.num_nodes,
            num_neg_samples=data.df_mask.sum())

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()

            start_time = time.time()
            z1, z2 = model(data.x, data.train_pos_edge_index[:, data.sdf_mask], return_all_emb=True)
            # print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
            # print('aaaaaa', z[data.sdf_node_2hop_mask].sum())

            # Randomness: pull Df endpoint embeddings toward the original
            # model's embeddings of *random* node pairs.
            pos_edge = data.train_pos_edge_index[:, data.df_mask]
            # neg_edge = torch.randperm(data.num_nodes)[:pos_edge.view(-1).shape[0]].view(2, -1)

            embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
            embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
            embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
            embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
            loss_r1 = loss_fct(embed1, embed1_ori)
            loss_r2 = loss_fct(embed2, embed2_ori)

            # Local causality: keep non-Df subgraph nodes close to original.
            loss_l1 = loss_fct(z1[data.sdf_node_1hop_mask_non_df_mask], z1_ori[data.sdf_node_1hop_mask_non_df_mask])
            loss_l2 = loss_fct(z2[data.sdf_node_2hop_mask_non_df_mask], z2_ori[data.sdf_node_2hop_mask_non_df_mask])

            # Total loss
            '''both_all, both_layerwise, only2_layerwise, only2_all, only1'''
            if self.args.loss_type == 'both_all':
                loss_l = loss_l1 + loss_l2
                loss_r = loss_r1 + loss_r2

                #### alpha * loss_r + (1 - alpha) * loss_l
                loss = self.args.alpha * loss_r + (1 - self.args.alpha) * loss_l

                #### loss_r + lambda * loss_l
                # loss = loss_l + self.args.alpha * loss_r

                # NOTE(review): no optimizer.zero_grad() in this branch —
                # gradients accumulate across epochs; confirm intended.
                loss.backward()
                optimizer.step()

            elif self.args.loss_type == 'both_layerwise':
                # Layer-wise: optimizer is a pair, one per deletion operator.
                #### alpha * loss_r + (1 - alpha) * loss_l
                loss_l = loss_l1 + loss_l2
                loss_r = loss_r1 + loss_r2

                loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
                loss1.backward(retain_graph=True)
                optimizer[0].step()
                optimizer[0].zero_grad()

                loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
                loss2.backward(retain_graph=True)
                optimizer[1].step()
                optimizer[1].zero_grad()

                loss = loss1 + loss2

                #### loss_r + lambda * loss_l
                # loss_l = loss_l1 + loss_l2
                # loss_r = loss_r1 + loss_r2

                # loss1 = loss_r1 + self.args.alpha * loss_l1
                # loss1.backward(retain_graph=True)
                # optimizer[0].step()
                # optimizer[0].zero_grad()

                # loss2 = loss_r2 + self.args.alpha * loss_l2
                # loss2.backward()
                # optimizer[1].step()
                # optimizer[1].zero_grad()

                # loss = loss1 + loss2

            elif self.args.loss_type == 'only2_layerwise':
                loss_l = loss_l1 + loss_l2
                loss_r = loss_r1 + loss_r2

                optimizer[0].zero_grad()
                #### alpha * loss_r + (1 - alpha) * loss_l
                loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2

                #### loss_r + lambda * loss_l
                # loss2 = loss_r2 + self.args.alpha * loss_l2

                loss2.backward()
                optimizer[1].step()
                optimizer[1].zero_grad()

                loss = loss2

            elif self.args.loss_type == 'only2_all':
                loss_l = loss_l2
                loss_r = loss_r2

                loss = loss_l + self.args.alpha * loss_r
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            elif self.args.loss_type == 'only1':
                loss_l = loss_l1
                loss_r = loss_r1

                loss = loss_l + self.args.alpha * loss_r
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            else:
                raise NotImplementedError

            end_time = time.time()
            epoch_time = end_time - start_time

            step_log = {
                'Epoch': epoch,
                'train_loss': loss.item(),
                'loss_r': loss_r.item(),
                'loss_l': loss_l.item(),
                'train_time': epoch_time
            }
            wandb.log(step_log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
            tqdm.write(' | '.join(msg))

            if (epoch + 1) % self.args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'loss_r': loss_r.item(),
                    'loss_l': loss_l.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(log)

                # Model selection: maximise utility (dt) + forgetting (df) AUC.
                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        # 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # Save
        ckpt = {
            'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
            # 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

    def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Mini-batch unlearning with GraphSAINT random-walk batches; always
        optimises both layers layer-wise (`optimizer` is a pair).
        """
        best_metric = 0

        # NOTE(review): `BoundedKLD` is not defined in this module (only
        # BoundedKLDMean / BoundedKLDSum are) — taking this branch would
        # raise NameError; confirm the intended loss.
        if 'kld' in args.unlearning_model:
            loss_fct = BoundedKLD
        else:
            loss_fct = nn.MSELoss()
        # neg_size = 10

        # MI Attack before unlearning
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        # Nodes in the k-hop subgraph of Df that are NOT endpoints of Df.
        non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
        non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
        data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
        data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask

        data.edge_index = data.train_pos_edge_index
        data.node_id = torch.arange(data.x.shape[0])
        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
        )
        # all_neg_edge = negative_sampling(
        #     edge_index=data.train_pos_edge_index,
        #     num_nodes=data.num_nodes,
        #     num_neg_samples=100000
        # )
        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()

            epoch_loss_l = 0
            epoch_loss_r = 0
            epoch_loss = 0
            epoch_time = 0
            for step, batch in enumerate(tqdm(loader, leave=False)):
                batch = batch.to(device)
                start_time = time.time()

                # Original embedding (frozen reference for this batch).
                with torch.no_grad():
                    z1_ori, z2_ori = model.get_original_embeddings(batch.x, batch.edge_index, return_all_emb=True)
                    # z1_ori = z1_ori[batch.sdf_node_2hop_mask]
                    # z2_ori = z2_ori[batch.sdf_node_2hop_mask]

                z1, z2 = model(batch.x, batch.edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask, return_all_emb=True)

                # Randomness: fresh random pairs per batch (unlike full-batch).
                pos_edge = batch.edge_index[:, batch.df_mask]
                neg_edge = negative_sampling(
                    edge_index=batch.edge_index,
                    num_nodes=batch.x.shape[0],
                    num_neg_samples=pos_edge.shape[1]
                )
                # neg_edge = all_neg_edge[:, :pos_edge.shape[1]]

                embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
                embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
                embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
                embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
                loss_r1 = loss_fct(embed1, embed1_ori)
                loss_r2 = loss_fct(embed2, embed2_ori)

                # Local causality
                loss_l1 = loss_fct(z1[batch.sdf_node_1hop_mask_non_df_mask], z1_ori[batch.sdf_node_1hop_mask_non_df_mask])
                loss_l2 = loss_fct(z2[batch.sdf_node_2hop_mask_non_df_mask], z2_ori[batch.sdf_node_2hop_mask_non_df_mask])

                # Total loss (layer-wise optimisation of the two operators).
                loss_l = loss_l1 + loss_l2
                loss_r = loss_r1 + loss_r2

                loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
                loss1.backward(retain_graph=True)
                optimizer[0].step()
                optimizer[0].zero_grad()

                loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
                loss2.backward(retain_graph=True)
                optimizer[1].step()
                optimizer[1].zero_grad()

                loss = loss1 + loss2

                end_time = time.time()
                epoch_loss_l += loss_l.item()
                epoch_loss_r += loss_r.item()
                epoch_loss += loss.item()
                epoch_time += end_time - start_time

                step_log = {
                    'Epoch': epoch,
                    'train_loss': loss.item(),
                    'train_loss_l': loss_l.item(),
                    'train_loss_r': loss_r.item(),
                    'train_time': end_time - start_time
                }
                wandb.log(step_log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
                tqdm.write(' | '.join(msg))

            if (epoch+1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    'train_loss': epoch_loss / step,
                    'train_loss_l': epoch_loss_l / step,
                    'train_loss_r': epoch_loss_r / step,
                    'train_time': epoch_time / step,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(log)

                # Model selection: maximise utility (dt) + forgetting (df) AUC.
                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch

                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        # 'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # Save
        ckpt = {
            'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
            # 'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class GNNDeleteNodeClassificationTrainer(NodeClassificationTrainer):
    """GNNDelete unlearning for node classification.

    Optimizes the two deletion layers with separate optimizers
    (``optimizer[0]`` for layer 1, ``optimizer[1]`` for layer 2), each with a
    convex combination of a randomness loss (deleted-edge endpoints pushed
    towards random pairs) and a local-causality loss (untouched neighborhood
    embeddings kept close to the originals).
    """

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Run unlearning; logs to wandb, checkpoints best model by dt_acc + dt_f1.

        `optimizer` is a pair of optimizers, one per deletion layer.
        """
        model = model.to('cuda')
        data = data.to('cuda')
        best_metric = 0

        # Membership-inference attack BEFORE unlearning (baseline for the logs).
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        # All node pairs in S_Df without Df.
        # (dead code removed: dense 1-hop/2-hop all-pair mask construction,
        # superseded by the sparse per-node masks built below)

        # Nodes of the enclosing subgraph that are NOT endpoints of a deleted (Df) edge.
        non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
        non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
        data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
        data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask

        # Original node embeddings: frozen targets for the local-causality loss.
        with torch.no_grad():
            z1_ori, z2_ori = model.get_original_embeddings(data.x, data.edge_index[:, data.dr_mask], return_all_emb=True)

        loss_fct = get_loss_fct(self.args.loss_fct)

        # One fixed set of negative edges, sampled once and reused every epoch.
        neg_edge = neg_edge_index = negative_sampling(
            edge_index=data.edge_index,
            num_nodes=data.num_nodes,
            num_neg_samples=data.df_mask.sum())

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()
            start_time = time.time()

            # Forward over the S_Df edges only; z1/z2 are the two layers' embeddings.
            z1, z2 = model(data.x, data.edge_index[:, data.sdf_mask], return_all_emb=True)

            # Randomness: embeddings of deleted-edge endpoints vs. original
            # embeddings of the randomly sampled negative pairs.
            pos_edge = data.edge_index[:, data.df_mask]
            embed1 = torch.cat([z1[pos_edge[0]], z1[pos_edge[1]]], dim=0)
            embed1_ori = torch.cat([z1_ori[neg_edge[0]], z1_ori[neg_edge[1]]], dim=0)
            embed2 = torch.cat([z2[pos_edge[0]], z2[pos_edge[1]]], dim=0)
            embed2_ori = torch.cat([z2_ori[neg_edge[0]], z2_ori[neg_edge[1]]], dim=0)
            loss_r1 = loss_fct(embed1, embed1_ori)
            loss_r2 = loss_fct(embed2, embed2_ori)

            # Local causality: keep the non-deleted neighborhood embeddings
            # close to their pre-unlearning values, layer by layer.
            loss_l1 = loss_fct(z1[data.sdf_node_1hop_mask_non_df_mask], z1_ori[data.sdf_node_1hop_mask_non_df_mask])
            loss_l2 = loss_fct(z2[data.sdf_node_2hop_mask_non_df_mask], z2_ori[data.sdf_node_2hop_mask_non_df_mask])

            # Total loss
            '''both_all, both_layerwise, only2_layerwise, only2_all, only1'''
            loss_l = loss_l1 + loss_l2
            loss_r = loss_r1 + loss_r2

            # Layer-wise updates: each deletion layer gets its own backward/step.
            # retain_graph=True keeps the shared forward graph alive for loss2.
            loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
            loss1.backward(retain_graph=True)
            optimizer[0].step()
            optimizer[0].zero_grad()
            loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
            loss2.backward(retain_graph=True)
            optimizer[1].step()
            optimizer[1].zero_grad()
            loss = loss1 + loss2

            end_time = time.time()
            epoch_time = end_time - start_time

            step_log = {
                'Epoch': epoch,
                'train_loss': loss.item(),
                'loss_r': loss_r.item(),
                'loss_l': loss_l.item(),
                'train_time': epoch_time
            }
            wandb.log(step_log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
            tqdm.write(' | '.join(msg))

            # Periodic validation + best-checkpoint tracking.
            if (epoch + 1) % self.args.valid_freq == 0:
                valid_loss, dt_acc, dt_f1, valid_log = self.eval(model, data, 'val')
                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'loss_r': loss_r.item(),
                    'loss_l': loss_l.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                # NOTE(review): appends only the last `log` (valid_log) — confirm
                # whether train_log was also meant to be recorded.
                self.trainer_log['log'].append(log)

                if dt_acc + dt_f1 > best_metric:
                    best_metric = dt_acc + dt_f1
                    best_epoch = epoch
                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        # 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # Save the final (not necessarily best) weights on CPU.
        ckpt = {
            'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
            # 'optimizer_state': [optimizer[0].state_dict(), optimizer[1].state_dict()],
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class KGGNNDeleteNodeembTrainer(KGTrainer):
    """GNNDelete unlearning on knowledge graphs (relational edges), mini-batched
    with GraphSAINT random-walk sampling and layer-wise deletion updates.
    """

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Run KG unlearning; `optimizer` is a pair, one per deletion layer."""
        best_metric = 0

        # NOTE(review): this loss_fct is unconditionally overwritten by
        # get_loss_fct(self.args.loss_fct) below — confirm which is intended.
        if 'kld' in args.unlearning_model:
            loss_fct = BoundedKLD
        else:
            loss_fct = nn.MSELoss()

        # Membership-inference attack BEFORE unlearning (baseline for the logs).
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        # All node pairs in S_Df without Df.
        # (dead code removed: dense all-pair mask construction and debug prints,
        # superseded by the sparse per-node masks built below)

        # Nodes of the enclosing subgraph that are NOT endpoints of a deleted edge.
        non_df_node_mask = torch.ones(data.x.shape[0], dtype=torch.bool, device=data.x.device)
        non_df_node_mask[data.directed_df_edge_index.flatten().unique()] = False
        data.sdf_node_1hop_mask_non_df_mask = data.sdf_node_1hop_mask & non_df_node_mask
        data.sdf_node_2hop_mask_non_df_mask = data.sdf_node_2hop_mask & non_df_node_mask

        # NOTE(review): deep copy appears unused in the visible code — confirm.
        model_ori = copy.deepcopy(model)
        loss_fct = get_loss_fct(self.args.loss_fct)

        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
        )

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()
            epoch_loss_e = 0
            epoch_loss_l = 0
            epoch_loss = 0
            epoch_time = 0
            for step, batch in enumerate(tqdm(loader, leave=False)):
                start_time = time.time()
                batch = batch.to(device)

                # Message passing over the retained (Dr) edges of this subgraph.
                edge_index = batch.edge_index[:, batch.dr_mask]
                edge_type = batch.edge_type[batch.dr_mask]
                z1, z2 = model(batch.x, edge_index, edge_type, batch.sdf_node_1hop_mask_non_df_mask, batch.sdf_node_2hop_mask_non_df_mask, return_all_emb=True)

                # Original node embeddings: frozen targets for the losses.
                with torch.no_grad():
                    z1_ori, z2_ori = model.get_original_embeddings(batch.x, edge_index, edge_type, return_all_emb=True)

                # Randomness: deleted (Df) edges of this batch, restricted to
                # relation ids below num_edge_type before decoding.
                pos_edge_index = batch.edge_index[:, batch.df_mask]
                pos_edge_type = batch.edge_type[batch.df_mask]
                decoding_mask = pos_edge_type < self.args.num_edge_type
                decoding_edge_index = pos_edge_index[:, decoding_mask]
                decoding_edge_type = pos_edge_type[decoding_mask]

                neg_edge_index = negative_sampling_kg(
                    edge_index=decoding_edge_index,
                    edge_type=decoding_edge_type)

                embed1 = torch.cat([z1[decoding_edge_index[0]], z1[decoding_edge_index[1]]], dim=0)
                embed1_ori = torch.cat([z1_ori[neg_edge_index[0]], z1_ori[neg_edge_index[1]]], dim=0)
                embed2 = torch.cat([z2[decoding_edge_index[0]], z2[decoding_edge_index[1]]], dim=0)
                embed2_ori = torch.cat([z2_ori[neg_edge_index[0]], z2_ori[neg_edge_index[1]]], dim=0)
                loss_r1 = loss_fct(embed1, embed1_ori)
                loss_r2 = loss_fct(embed2, embed2_ori)

                # Local causality: untouched neighborhood nodes stay close to originals.
                loss_l1 = loss_fct(z1[batch.sdf_node_1hop_mask_non_df_mask], z1_ori[batch.sdf_node_1hop_mask_non_df_mask])
                loss_l2 = loss_fct(z2[batch.sdf_node_2hop_mask_non_df_mask], z2_ori[batch.sdf_node_2hop_mask_non_df_mask])

                # Layer-wise updates, one optimizer per deletion layer;
                # retain_graph=True keeps the shared forward graph alive for loss2.
                loss_l = loss_l1 + loss_l2
                loss_r = loss_r1 + loss_r2
                loss1 = self.args.alpha * loss_r1 + (1 - self.args.alpha) * loss_l1
                loss1.backward(retain_graph=True)
                optimizer[0].step()
                optimizer[0].zero_grad()
                loss2 = self.args.alpha * loss_r2 + (1 - self.args.alpha) * loss_l2
                loss2.backward(retain_graph=True)
                optimizer[1].step()
                optimizer[1].zero_grad()
                loss = loss1 + loss2

                end_time = time.time()
                # NOTE(review): overwritten each step — only the last step's
                # duration is reported per epoch.
                epoch_time = end_time - start_time

                step_log = {
                    'Epoch': epoch,
                    'train_loss': loss.item(),
                    'loss_r': loss_r.item(),
                    'loss_l': loss_l.item(),
                    'train_time': epoch_time
                }
                wandb.log(step_log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
                tqdm.write(' | '.join(msg))

            # Periodic validation + best-checkpoint tracking (per epoch).
            if (epoch + 1) % self.args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'loss_r': loss_r.item(),
                    'loss_l': loss_l.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                # NOTE(review): appends only the last `log` (valid_log) — confirm intent.
                self.trainer_log['log'].append(log)

                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch
                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # Save the final weights on CPU.
        ckpt = {
            'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
GNNDelete | GNNDelete-main/framework/trainer/gnndelete.py | import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer
from ..evaluation import *
from ..utils import *
def BoundedKLD(logits, truth):
    """Bounded KL divergence in [0, 1): 1 - exp(-KL(softmax(truth) || softmax(logits)))."""
    kld = F.kl_div(F.log_softmax(logits, -1), truth.softmax(-1), None, None, 'batchmean')
    return 1 - torch.exp(-kld)
def CosineDistance(logits, truth):
    """Cosine distance (1 - cosine similarity); 1-D inputs are promoted to single rows."""
    if logits.dim() == 1:
        logits = logits.view(1, -1)
        truth = truth.view(1, -1)
    return 1 - F.cosine_similarity(logits, truth)
def get_loss_fct(name):
    """Resolve a loss name ('kld' | 'mse' | 'cosine') to a callable.

    Raises NotImplementedError for unknown names.
    """
    if name == 'kld':
        return BoundedKLD
    if name == 'mse':
        return nn.MSELoss()
    if name == 'cosine':
        return CosineDistance
    raise NotImplementedError
class GNNDeleteTrainer(Trainer):
    """GNNDelete link-prediction unlearning trainer (full-batch and mini-batch)."""

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Dispatch: OGB link datasets ('ogbl*') are too large for full-batch
        training, so they go through the GraphSAINT mini-batch path.
        """
        if 'ogbl' in self.args.dataset:
            return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
        else:
            return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def compute_loss(self, model, data, random_loss_fct, compute_random_on, random_layer, local_loss_fct, compute_local_on, local_layer,
z1=None, z2=None, z1_ori=None, z2_ori=None, logits_ori=None,
sdf1_all_pair_without_df_mask=None, sdf2_all_pair_without_df_mask=None):
# Randomness
loss_r = 0
if random_layer == '1':
all_z = [z1]
elif random_layer == '2':
all_z = [z2]
elif random_layer == 'both':
all_z = [z1, z2]
else:
raise NotImplementedError
neg_size = data.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=neg_size)
if compute_random_on == 'edgeprob': # Compute Randomness on edge probability
for z in all_z:
df_logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
loss_r += random_loss_fct(df_logits[:neg_size], df_logits[neg_size:])
elif compute_random_on == 'nodeemb':
for z in all_z:
z_random_source, z_random_target = z[neg_edge_index[0]], z[neg_edge_index[1]]
z_source, z_target = z[data.train_pos_edge_index[:, data.df_mask][0]], z[data.train_pos_edge_index[:, data.df_mask][1]]
loss_r += (random_loss_fct(z_source, z_random_source) + random_loss_fct(z_target, z_random_target))
elif compute_random_on == 'none':
loss_r = None
else:
raise NotImplementedError
# Local causality
loss_l = 0
if local_layer == '1':
all_z = [z1]
all_z_ori = [z1_ori]
all_sdf_lower_triangular_mask = [sdf1_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_1hop_mask]
elif local_layer == '2':
all_z = [z2]
all_z_ori = [z2_ori]
all_sdf_lower_triangular_mask = [sdf2_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_2hop_mask]
elif local_layer == 'both':
all_z = [z1, z2]
all_z_ori = [z1_ori, z2_ori]
all_sdf_lower_triangular_mask = [sdf1_all_pair_without_df_mask, sdf2_all_pair_without_df_mask]
all_sdf_node_mask = [data.sdf_node_1hop_mask, data.sdf_node_2hop_mask]
else:
raise NotImplementedError
if compute_local_on == 'edgeprob':
for z_ori, z, sdf_lower_triangular_mask in zip(all_z_ori, all_z, all_sdf_lower_triangular_mask):
logits = (z @ z.t())[sdf_lower_triangular_mask].sigmoid()
logits_ori = (z_ori @ z_ori.t())[sdf_lower_triangular_mask].sigmoid()
loss_l += local_loss_fct(logits, logits_ori)
elif compute_local_on == 'nodeemb':
for z_ori, z, sdf_node_mask in zip(all_z_ori, all_z, all_sdf_node_mask):
print(z_ori.shape, z.shape, sdf_node_mask.shape, sdf_node_mask.sum())
loss_l += local_loss_fct(z_ori[sdf_node_mask], z[sdf_node_mask])
elif compute_local_on == 'none':
loss_l = None
else:
raise NotImplementedError
if compute_random_on == 'none':
loss = loss_l
elif compute_local_on == 'none':
loss = loss_r
else:
alpha = 0.5
loss = alpha * loss_r + (1 - alpha) * loss_l
return loss, loss_r, loss_l
    def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Full-batch GNNDelete unlearning for link prediction.

        Builds dense all-pair masks over the 1-hop/2-hop enclosing subgraphs
        (minus the deleted edges), then each epoch optimizes
        alpha * loss_r + (1 - alpha) * loss_l, where loss_r pushes deleted-edge
        logits towards negative-edge logits and loss_l keeps S_Df pair logits
        close to the pre-unlearning `logits_ori`.
        """
        model = model.to('cuda')
        data = data.to('cuda')
        best_metric = 0

        # Membership-inference attack BEFORE unlearning (baseline for the logs).
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        # All node pairs in S_Df without Df.
        # NOTE: these masks are dense num_nodes x num_nodes booleans — O(N^2)
        # memory, which is why the OGB datasets use train_minibatch instead.
        ## S_Df 1-hop all-pair mask
        sdf1_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_1hop_mask], with_replacement=True).t()
        sdf1_all_pair_mask[idx[0], idx[1]] = True
        sdf1_all_pair_mask[idx[1], idx[0]] = True

        assert sdf1_all_pair_mask.sum().cpu() == data.sdf_node_1hop_mask.sum().cpu() * data.sdf_node_1hop_mask.sum().cpu()

        ## Remove Df itself (both orientations)
        sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
        sdf1_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False

        ## S_Df 2-hop all-pair mask
        sdf2_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
        sdf2_all_pair_mask[idx[0], idx[1]] = True
        sdf2_all_pair_mask[idx[1], idx[0]] = True

        assert sdf2_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()

        ## Remove Df itself (both orientations)
        sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
        sdf2_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False

        ## Lower-triangular mask so each unordered pair is counted once
        idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
        lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
        lower_mask[idx[0], idx[1]] = True

        ## The final mask is the intersection
        sdf1_all_pair_without_df_mask = sdf1_all_pair_mask & lower_mask
        sdf2_all_pair_without_df_mask = sdf2_all_pair_mask & lower_mask

        loss_fct = nn.MSELoss()

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()
            start_time = time.time()

            # Forward over the S_Df edges only.
            z = model(data.x, data.train_pos_edge_index[:, data.sdf_mask])

            # Effectiveness and randomness: deleted-edge logits should be
            # indistinguishable from freshly sampled negative-edge logits.
            neg_size = data.df_mask.sum()
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index,
                num_nodes=data.num_nodes,
                num_neg_samples=neg_size)

            df_logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
            loss_r = loss_fct(df_logits[:neg_size], df_logits[neg_size:])

            # Local causality: S_Df pair probabilities stay close to the
            # pre-unlearning probabilities (`logits_ori`).
            if sdf2_all_pair_without_df_mask.sum() != 0:
                logits_sdf = (z @ z.t())[sdf2_all_pair_without_df_mask].sigmoid()
                loss_l = loss_fct(logits_sdf, logits_ori[sdf2_all_pair_without_df_mask].sigmoid())
            else:
                loss_l = torch.tensor(0)
                print('local proba', 0)

            alpha = 0.5
            loss = alpha * loss_r + (1 - alpha) * loss_l

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            end_time = time.time()
            epoch_time = end_time - start_time

            step_log = {
                'Epoch': epoch,
                'train_loss': loss.item(),
                'loss_r': loss_r.item(),
                'loss_l': loss_l.item(),
                'train_time': epoch_time
            }
            wandb.log(step_log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
            tqdm.write(' | '.join(msg))

            # Periodic validation + best-checkpoint tracking.
            if (epoch + 1) % self.args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
                valid_log['epoch'] = epoch

                train_log = {
                    'epoch': epoch,
                    'train_loss': loss.item(),
                    'train_loss_l': loss_l.item(),
                    'train_loss_r': loss_r.item(),
                    'train_time': epoch_time,
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                # NOTE(review): appends only the last `log` (valid_log) — confirm intent.
                self.trainer_log['log'].append(log)

                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch
                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        # Save the final weights on CPU.
        ckpt = {
            'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
best_metric = 0
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
z_ori = self.get_embedding(model, data, on_cpu=True)
z_ori_two_hop = z_ori[data.sdf_node_2hop_mask]
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Unlerning'):
model.train()
# print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
epoch_loss_e = 0
epoch_loss_l = 0
epoch_loss = 0
epoch_time = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
start_time = time.time()
batch = batch.to('cuda')
train_pos_edge_index = batch.edge_index
z = model(batch.x, train_pos_edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask)
z_two_hop = z[batch.sdf_node_2hop_mask]
# Effectiveness and Randomness
neg_size = batch.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0),
num_neg_samples=neg_size)
df_logits = model.decode(z, train_pos_edge_index[:, batch.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# Local causality
# Only take the lower triangular part
# mask = torch.zeros(data.x.shape[0], dtype=torch.bool)
# mask[batch.node_id[batch.sdf_node_2hop_mask]] = True
# z_ori_subset = z_ori[mask].to('cuda')
# num_nodes = z_ori_subset.shape[0]
# idx = torch.tril_indices(num_nodes, num_nodes, -1)
# local_lower_mask = torch.zeros(num_nodes, num_nodes, dtype=torch.bool)
# local_lower_mask[idx[0], idx[1]] = True
# logits_ori = (z_ori_subset @ z_ori_subset.t())[local_lower_mask]#.sigmoid()
# logits = (z_two_hop @ z_two_hop.t())[local_lower_mask]#.sigmoid()
edge = batch.edge_index[:, batch.sdf_mask]
lower_mask = edge[0] < edge[1]
row, col = edge[0][lower_mask], edge[1][lower_mask]
logits_ori = (z_ori[row] * z_ori[col]).sum(dim=-1).to('cuda')
logits = (z[row] * z[col]).sum(dim=-1)
loss_l = loss_fct(logits, logits_ori)
# print(loss_e, loss_l, z_ori.device, z.device)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_loss_e += loss_e.item()
epoch_loss_l += loss_l.item()
epoch_loss += loss.item()
epoch_time += end_time - start_time
epoch_loss_e /= step
epoch_loss_l /= step
epoch_loss /= step
epoch_time /= step
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': epoch_loss / step,
'train_loss_l': epoch_loss_e / step,
'train_loss_e': epoch_loss_l / step,
'train_time': epoch_time / step,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
| 19,850 | 43.015521 | 154 | py |
GNNDelete | GNNDelete-main/framework/trainer/gradient_ascent.py | import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer, KGTrainer
from ..evaluation import *
from ..utils import *
# Select the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def weight(model):
    """Return the sum of the L2 norms of all parameter tensors of *model*."""
    return sum(torch.norm(p) for p in model.parameters())
class GradientAscentTrainer(Trainer):
    """Gradient-ascent unlearning baseline: maximizes the BCE loss on the
    edges marked for deletion by descending on its negation.
    """

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Dispatch: OGB link datasets ('ogbl*') use the mini-batch path,
        everything else trains full-batch.
        """
        if 'ogbl' in self.args.dataset:
            return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
        else:
            return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
start_time = time.time()
best_metric = 0
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
for epoch in trange(args.epochs, desc='Unlerning'):
model.train()
start_time = time.time()
# Positive and negative sample
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index[:, data.df_mask],
num_nodes=data.num_nodes,
num_neg_samples=data.df_mask.sum())
z = model(data.x, data.train_pos_edge_index)
logits = model.decode(z, data.train_pos_edge_index[:, data.df_mask])
label = torch.ones_like(logits, dtype=torch.float, device='cuda')
loss = -F.binary_cross_entropy_with_logits(logits, label)
# print('aaaaaaaaaaaaaa', data.df_mask.sum(), weight(model))
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
epoch_time = end_time - start_time
step_log = {
'Epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time
}
wandb.log(step_log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in step_log.items()]
tqdm.write(' | '.join(msg))
if (epoch + 1) % self.args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
valid_log['epoch'] = epoch
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_time': epoch_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
    """Gradient-ascent unlearning with GraphSAINT random-walk mini-batches.

    Maximizes the BCE loss (note the negated loss) on the edges to delete
    (``df_mask``) while message-passing only over the retained edges
    (``dr_mask``). Validates every ``args.valid_freq`` epochs and keeps the
    checkpoint with the best ``dt_auc + df_auc``.

    Args:
        model: GNN with ``forward(x, edge_index)`` and ``decode(z, edge_index)``.
        data: PyG data object carrying ``train_pos_edge_index``, ``df_mask``, ``dr_mask``.
        optimizer: optimizer over ``model`` parameters.
        args: run config (``batch_size``, ``num_steps``, ``epochs``, ``valid_freq``, ``checkpoint_dir``).
        logits_ori, attack_model_all, attack_model_sub: optional MI-attack inputs.
    """
    best_metric = 0

    # Membership-inference attack scores before unlearning, for later comparison.
    if attack_model_all is not None:
        mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
        self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
        self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
    if attack_model_sub is not None:
        mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
        self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
        self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

    data.edge_index = data.train_pos_edge_index
    data.node_id = torch.arange(data.x.shape[0])
    loader = GraphSAINTRandomWalkSampler(
        data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
    )
    for epoch in trange(args.epochs, desc='Unlerning'):
        model.train()

        epoch_loss = 0
        epoch_time = 0
        num_batches = 0
        for step, batch in enumerate(tqdm(loader, leave=False)):
            start_time = time.time()
            batch = batch.to(device)

            # Message passing over retained edges only.
            z = model(batch.x, batch.edge_index[:, batch.dr_mask])

            # Gradient ascent on the edges to forget: push their link
            # probability away from 1 by maximizing BCE against label 1.
            logits = model.decode(z, batch.edge_index[:, batch.df_mask])
            label = torch.ones_like(logits, dtype=torch.float, device=device)
            loss = -F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            epoch_loss += loss.item()
            epoch_time += time.time() - start_time
            num_batches += 1

        # BUG FIX: the original divided by `step` (the last 0-based index) and
        # then divided AGAIN when building the log dict; it also raised
        # ZeroDivisionError with a single batch. Average exactly once by count.
        epoch_loss /= max(num_batches, 1)
        epoch_time /= max(num_batches, 1)

        if (epoch + 1) % args.valid_freq == 0:
            valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

            train_log = {
                'epoch': epoch,
                'train_loss': epoch_loss,
                'train_time': epoch_time,
            }

            for log in [train_log, valid_log]:
                wandb.log(log)
                msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                tqdm.write(' | '.join(msg))
                self.trainer_log['log'].append(log)

            if dt_auc + df_auc > best_metric:
                best_metric = dt_auc + df_auc
                best_epoch = epoch
                print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                ckpt = {
                    'model_state': model.state_dict(),
                    'optimizer_state': optimizer.state_dict(),
                }
                torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
                # NOTE(review): `z` here is the embedding of the LAST mini-batch
                # only, not of the full graph — confirm this is intended.
                torch.save(z, os.path.join(args.checkpoint_dir, 'node_embeddings.pt'))

    # Save final model on CPU.
    ckpt = {
        'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
        'optimizer_state': optimizer.state_dict(),
    }
    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
class KGGradientAscentTrainer(KGTrainer):
    """Gradient-ascent unlearning for knowledge-graph link prediction.

    Maximizes the BCE loss (negated loss) on the edges marked for deletion
    (``df_mask``) while message-passing over the retained edges (``dr_mask``),
    using GraphSAINT random-walk mini-batches.
    """

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        model = model.to(device)
        start_time = time.time()
        best_metric = 0
        # BUG FIX: `best_epoch` was unbound if validation never improved
        # (or never ran); the final print would then raise NameError.
        best_epoch = -1

        # Membership-inference attack scores before unlearning.
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        loader = GraphSAINTRandomWalkSampler(
            data, batch_size=128, walk_length=args.walk_length, num_steps=args.num_steps,
        )
        for epoch in trange(args.epochs, desc='Epoch'):
            model.train()

            epoch_loss = 0
            num_batches = 0
            for step, batch in enumerate(tqdm(loader, desc='Step', leave=False)):
                batch = batch.to(device)

                # Message passing over retained (Dr) triples.
                edge_index = batch.edge_index[:, batch.dr_mask]
                edge_type = batch.edge_type[batch.dr_mask]
                z = model(batch.x, edge_index, edge_type)

                # Decode only the Df triples; keep directed edges only.
                decoding_edge_index = batch.edge_index[:, batch.df_mask]
                decoding_edge_type = batch.edge_type[batch.df_mask]
                decoding_mask = (decoding_edge_type < args.num_edge_type)  # Only select directed edges for link prediction
                decoding_edge_index = decoding_edge_index[:, decoding_mask]
                decoding_edge_type = decoding_edge_type[decoding_mask]

                logits = model.decode(z, decoding_edge_index, decoding_edge_type)
                label = torch.ones_like(logits, dtype=torch.float, device=device)
                # Negated BCE = gradient ascent away from predicting these edges.
                loss = -F.binary_cross_entropy_with_logits(logits, label)

                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                optimizer.step()
                optimizer.zero_grad()

                log = {
                    'epoch': epoch,
                    'step': step,
                    'train_loss': loss.item(),
                }
                wandb.log(log)
                epoch_loss += loss.item()
                num_batches += 1

            if (epoch + 1) % args.valid_freq == 0:
                valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')

                train_log = {
                    'epoch': epoch,
                    # BUG FIX: was `epoch_loss / step` — off by one (step is the
                    # last 0-based index) and ZeroDivisionError for one batch.
                    'train_loss': epoch_loss / max(num_batches, 1)
                }

                for log in [train_log, valid_log]:
                    wandb.log(log)
                    msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
                    tqdm.write(' | '.join(msg))

                self.trainer_log['log'].append(train_log)
                self.trainer_log['log'].append(valid_log)

                if dt_auc + df_auc > best_metric:
                    best_metric = dt_auc + df_auc
                    best_epoch = epoch
                    print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
                    ckpt = {
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                    }
                    torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))

        self.trainer_log['training_time'] = time.time() - start_time

        # Save models and node embeddings
        print('Saving final checkpoint')
        ckpt = {
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))

        print(f'Training finished. Best checkpoint at epoch = {best_epoch:04d}, best valid loss = {best_metric:.4f}')
        self.trainer_log['best_epoch'] = best_epoch
        self.trainer_log['best_metric'] = best_metric
        # BUG FIX: the original unconditionally overwrote the wall-clock
        # training time with np.mean of 'epoch_time' entries that no log dict
        # in this trainer ever contains — np.mean([]) is nan. Only overwrite
        # when such entries actually exist.
        epoch_times = [i['epoch_time'] for i in self.trainer_log['log'] if 'epoch_time' in i]
        if epoch_times:
            self.trainer_log['training_time'] = np.mean(epoch_times)
| 13,223 | 42.074919 | 128 | py |
GNNDelete | GNNDelete-main/framework/trainer/graph_eraser.py | import os
import json
import copy
import math
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling, subgraph
from .base import Trainer
from ..evaluation import *
from ..utils import *
class ConstrainedKmeans:
    '''Capacity-constrained (balanced) k-means over node features/embeddings.

    Each cluster holds at most `node_threshold` nodes; nodes are assigned
    greedily by increasing distance to the centroids.
    This code is from https://github.com/MinChen00/Graph-Unlearning
    '''
    def __init__(self, args, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
        self.args = args                          # experiment config (used by generate_shard_data only)
        self.data_feat = data_feat                # (num_nodes, dim) feature matrix
        self.num_clusters = num_clusters
        self.node_threshold = node_threshold      # maximum nodes per cluster
        self.terminate_delta = terminate_delta    # convergence threshold on centroid movement
        self.max_iteration = max_iteration

    def initialization(self):
        """Pick `num_clusters` distinct data points as the initial centroids."""
        centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
        self.centroid = {}
        for i in range(self.num_clusters):
            self.centroid[i] = self.data_feat[centroids[i]]

    def clustering(self):
        """Run Lloyd iterations until centroids move less than `terminate_delta`.

        Returns:
            (clusters, km_delta): dict cluster_id -> node-index array, and the
            per-iteration centroid-movement history.
        """
        centroid = copy.deepcopy(self.centroid)
        km_delta = []

        for i in trange(self.max_iteration, desc='Graph partition'):
            self._node_reassignment()
            self._centroid_updating()

            # record the average change of centroids, if the change is smaller than a very small value, then terminate
            delta = self._centroid_delta(centroid, self.centroid)
            km_delta.append(delta)
            centroid = copy.deepcopy(self.centroid)

            if delta <= self.terminate_delta:
                break
            print("delta: %s" % delta)
        return self.clusters, km_delta

    def _node_reassignment(self):
        """Greedy capacity-constrained assignment of every node to a cluster."""
        self.clusters = {}
        for i in range(self.num_clusters):
            self.clusters[i] = np.zeros(0, dtype=np.uint64)

        # Squared Euclidean distance of every node to every centroid.
        distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
        for i in range(self.num_clusters):
            distance[i] = np.sum(np.power((self.data_feat - self.centroid[i]), 2), axis=1)

        # Process (cluster, node) pairs in order of increasing distance.
        sort_indices = np.unravel_index(np.argsort(distance, axis=None), distance.shape)
        clusters = sort_indices[0]
        users = sort_indices[1]
        selected_nodes = np.zeros(0, dtype=np.int64)
        counter = 0

        while len(selected_nodes) < self.data_feat.shape[0]:
            cluster = int(clusters[counter])
            user = users[counter]
            if self.clusters[cluster].size < self.node_threshold:
                self.clusters[cluster] = np.append(self.clusters[cluster], np.array(int(user)))
                selected_nodes = np.append(selected_nodes, np.array(int(user)))

                # delete all the following pairs for the selected user
                user_indices = np.where(users == user)[0]
                a = np.arange(users.size)
                b = user_indices[user_indices > counter]
                remain_indices = a[np.where(np.logical_not(np.isin(a, b)))[0]]
                clusters = clusters[remain_indices]
                users = users[remain_indices]
            counter += 1

    def _centroid_updating(self):
        """Move each centroid to the mean of its currently assigned nodes."""
        for i in range(self.num_clusters):
            self.centroid[i] = np.mean(self.data_feat[self.clusters[i].astype(int)], axis=0)

    def _centroid_delta(self, centroid_pre, centroid_cur):
        """Total L1 movement between two centroid dicts."""
        delta = 0.0
        for i in range(len(centroid_cur)):
            delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
        return delta

    def generate_shard_data(self, data):
        """Build one train/test Data object per shard from the clustering.

        NOTE(review): relies on `self.community_to_node`, `self.test_indices`,
        `self.data_store`, the `utils` module and the `Data` class, none of
        which are defined in this module — confirm they are set by the caller.
        """
        shard_data = {}
        for shard in trange(self.args['num_shards'], desc='Generate shard data'):
            train_shard_indices = list(self.community_to_node[shard])
            shard_indices = np.union1d(train_shard_indices, self.test_indices)

            x = data.x[shard_indices]
            y = data.y[shard_indices]
            edge_index = utils.filter_edge_index_1(data, shard_indices)

            # BUG FIX: the original rebound the shard graph to `data`, clobbering
            # the full input graph for every subsequent shard iteration.
            shard_graph = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
            shard_graph.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
            shard_graph.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))

            shard_data[shard] = shard_graph

        # BUG FIX: `self.shard_data` was never assigned before being saved,
        # which raised AttributeError.
        self.shard_data = shard_data
        self.data_store.save_shard_data(self.shard_data)
class OptimalAggregator:
    """Learn per-shard aggregation weights by minimizing CE of the weighted
    posterior mixture (GraphEraser optimal aggregation).

    NOTE(review): `nn`, `optim`, `MultiStepLR`, `DataLoader`, `OptDataset`,
    `DataStore`, `Data`, `utils` and `self.logger` are not imported/defined in
    this module — confirm they are provided by the importing context.
    """

    def __init__(self, run, target_model, data, args):
        self.args = args
        self.run = run
        self.target_model = target_model
        self.data = data
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.num_shards = args.num_clusters

    def generate_train_data(self):
        """Sample training nodes and collect each shard model's posterior on them."""
        data_store = DataStore(self.args)
        train_indices, _ = data_store.load_train_test_split()

        # sample a set of nodes from train_indices
        # NOTE(review): args is indexed dict-style here but attribute-style in
        # __init__ — confirm which interface the config object really has.
        if self.args["num_opt_samples"] == 1000:
            train_indices = np.random.choice(train_indices, size=1000, replace=False)
        elif self.args["num_opt_samples"] == 10000:
            train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0] * 0.1), replace=False)
        elif self.args["num_opt_samples"] == 1:
            train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0]), replace=False)

        train_indices = np.sort(train_indices)
        self.logger.info("Using %s samples for optimization" % (int(train_indices.shape[0])))

        x = self.data.x[train_indices]
        y = self.data.y[train_indices]
        edge_index = utils.filter_edge_index(self.data.edge_index, train_indices)

        train_data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
        train_data.train_mask = torch.zeros(train_indices.shape[0], dtype=torch.bool)
        train_data.test_mask = torch.ones(train_indices.shape[0], dtype=torch.bool)
        self.true_labels = y

        self.posteriors = {}
        for shard in range(self.num_shards):
            self.target_model.data = train_data
            data_store.load_target_model(self.run, self.target_model, shard)
            self.posteriors[shard] = self.target_model.posterior().to(self.device)

    def optimization(self):
        """Fit the shard weights; returns the best weights normalized to sum 1."""
        weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
        optimizer = optim.Adam([weight_para], lr=self.args['opt_lr'])
        # NOTE(review): gamma is set to the learning rate — looks like a
        # copy/paste slip; confirm the intended decay factor.
        scheduler = MultiStepLR(optimizer, milestones=[500, 1000], gamma=self.args['opt_lr'])

        train_dset = OptDataset(self.posteriors, self.true_labels)
        train_loader = DataLoader(train_dset, batch_size=32, shuffle=True, num_workers=0)

        min_loss = 1000.0
        # BUG FIX: guarantee `ret_weight_para` is bound even if no epoch beats
        # the initial `min_loss` (it was referenced before any assignment).
        ret_weight_para = copy.deepcopy(weight_para)
        for epoch in range(self.args.epochs):
            loss_all = 0.0

            for posteriors, labels in train_loader:
                labels = labels.to(self.device)

                optimizer.zero_grad()
                loss = self._loss_fn(posteriors, labels, weight_para)
                loss.backward()
                # BUG FIX: `loss_all += loss` accumulated live autograd tensors,
                # keeping every batch's graph alive for the whole epoch.
                loss_all += loss.item()
                optimizer.step()

                # Project weights back onto the non-negative orthant.
                with torch.no_grad():
                    weight_para[:] = torch.clamp(weight_para, min=0.0)

            scheduler.step()

            if loss_all < min_loss:
                ret_weight_para = copy.deepcopy(weight_para)
                min_loss = loss_all

            self.logger.info('epoch: %s, loss: %s' % (epoch, loss_all))

        return ret_weight_para / torch.sum(ret_weight_para)

    def _loss_fn(self, posteriors, labels, weight_para):
        """CE of the weighted posterior mixture plus an L2 penalty on the weights.

        NOTE(review): the mixture is softmax-normalized and then passed to
        F.cross_entropy, which applies log_softmax again — double normalization;
        confirm against the reference implementation before changing.
        """
        aggregate_posteriors = torch.zeros_like(posteriors[0])
        for shard in range(self.num_shards):
            aggregate_posteriors += weight_para[shard] * posteriors[shard]

        aggregate_posteriors = F.softmax(aggregate_posteriors, dim=1)
        loss_1 = F.cross_entropy(aggregate_posteriors, labels)
        loss_2 = torch.sqrt(torch.sum(weight_para ** 2))

        return loss_1 + loss_2
class Aggregator:
    """Combine the per-shard posteriors of a sharded (GraphEraser-style) model."""

    def __init__(self, run, target_model, data, shard_data, args):
        self.run = run
        self.args = args
        self.data = data
        self.shard_data = shard_data
        self.target_model = target_model
        self.num_shards = args.num_clusters

    def generate_posterior(self, suffix=""):
        """Load every shard checkpoint and record its posterior on the test nodes."""
        first_shard = self.shard_data[0]
        self.true_label = first_shard.y[first_shard['test_mask']].detach().cpu().numpy()

        self.posteriors = {}
        for shard_id in range(self.args.num_clusters):
            self.target_model.data = self.shard_data[shard_id]
            self.data_store.load_target_model(self.run, self.target_model, shard_id, suffix)
            self.posteriors[shard_id] = self.target_model.posterior()

    def _optimal_aggregator(self):
        """Learn shard weights, persist them, and score the weighted vote."""
        optimal = OptimalAggregator(self.run, self.target_model, self.data, self.args)
        optimal.generate_train_data()
        weight_para = optimal.optimization()
        self.data_store.save_optimal_weight(weight_para, run=self.run)

        mixture = self.posteriors[0] * weight_para[0]
        for shard_id in range(1, self.num_shards):
            mixture = mixture + self.posteriors[shard_id] * weight_para[shard_id]

        return f1_score(self.true_label, mixture.argmax(axis=1).cpu().numpy(), average="micro")
class GraphEraserTrainer(Trainer):
    """GraphEraser baseline: partition nodes with constrained k-means, retrain
    one model per shard from scratch, then aggregate shard predictions."""

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        # Node embeddings of the current model drive the balanced partition.
        with torch.no_grad():
            z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])

        # Retrain from scratch: reset all layer parameters.
        for c in model.children():
            print('before', torch.norm(c.lin.weight), torch.norm(c.bias))
        for c in model.children():
            c.reset_parameters()
        for c in model.children():
            print('after', torch.norm(c.lin.weight), torch.norm(c.bias))
        model = model.cpu()

        num_nodes = data.num_nodes
        # Balanced-partition capacity per shard (GraphEraser's delta slack).
        node_threshold = math.ceil(
            num_nodes / args.num_clusters + args.shard_size_delta * (num_nodes - num_nodes / args.num_clusters))
        print(f'Number of nodes: {num_nodes}. Shard threshold: {node_threshold}')

        cluster = ConstrainedKmeans(
            args,
            z.cpu().numpy(),
            args.num_clusters,
            node_threshold,
            args.terminate_delta,
            args.kmeans_max_iters)
        cluster.initialization()
        community, km_deltas = cluster.clustering()

        community_to_node = {}
        for i in range(args.num_clusters):
            community_to_node[i] = np.array(community[i].astype(int))

        for shard_id in trange(args.num_clusters, desc='Sharded retraining'):
            model_shard_id = copy.deepcopy(model).to('cuda')
            optimizer = torch.optim.Adam(model_shard_id.parameters(), lr=args.lr)
            # Edge set induced by this shard's node community.
            # NOTE(review): `device` is not defined in this module — confirm.
            subset_train, _ = subgraph(
                torch.tensor(community[shard_id], dtype=torch.long, device=device),
                data.train_pos_edge_index,
                num_nodes=data.num_nodes)

            self.train_model(model_shard_id, data, subset_train, optimizer, args, shard_id)

            with torch.no_grad():
                z = model_shard_id(data.x, subset_train)
                # BUG FIX: decode() takes the node embeddings as its first
                # argument (see train_model/eval_model); it was omitted here.
                logits = model_shard_id.decode(z, data.test_pos_edge_index, data.test_neg_edge_index)

        # NOTE(review): the aggregation below references names (`nn`, `optim`,
        # `aggregator`, `start_time`, `time`, `self.num_shards`, `self.logger`)
        # that are not defined in this module; it appears to be unfinished code
        # lifted from the GraphEraser reference — confirm before relying on it.
        weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
        optimizer = optim.Adam([weight_para], lr=self.args.lr)
        aggregator.generate_posterior()
        self.aggregate_f1_score = aggregator.aggregate()
        aggregate_time = time.time() - start_time
        self.logger.info("Partition cost %s seconds." % aggregate_time)
        self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))

    def train_model(self, model, data, subset_train, optimizer, args, shard_id):
        """Train one shard model on its subgraph's positive edges and save it."""
        for epoch in range(args.epochs):
            model.train()

            neg_edge_index = negative_sampling(
                edge_index=subset_train,
                num_nodes=data.num_nodes,
                num_neg_samples=subset_train.shape[1])

            z = model(data.x, subset_train)
            logits = model.decode(z, subset_train, neg_edge_index)
            label = self.get_link_labels(subset_train, neg_edge_index)
            loss = F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            valid_loss, auc, aup, _, _ = self.eval_model(model, data, subset_train, 'val')

            log = {
                'train_loss': loss.item(),
                'valid_loss': valid_loss,
                'valid_auc': auc,
                'valid_aup': aup,
            }
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
            tqdm.write(' | '.join(msg))

        # Only the last epoch's log is kept for this shard.
        self.trainer_log[f'shard_{shard_id}'] = log
        torch.save(model.state_dict(), os.path.join(args.checkpoint_dir, f'model_{shard_id}.pt'))

    @torch.no_grad()
    def eval_model(self, model, data, subset_train, stage='val', pred_all=False):
        """Evaluate a shard model on the given split.

        Returns:
            (loss, auc, aup, df_logit, logit_all_pair)
        """
        model.eval()
        pos_edge_index = data[f'{stage}_pos_edge_index']
        neg_edge_index = data[f'{stage}_neg_edge_index']

        z = model(data.x, subset_train)
        logits = model.decode(z, pos_edge_index, neg_edge_index).sigmoid()
        label = self.get_link_labels(pos_edge_index, neg_edge_index)

        # BUG FIX: `logits` already went through sigmoid, so use the plain BCE;
        # the original applied binary_cross_entropy_with_logits to
        # probabilities, which re-applies sigmoid and skews the loss.
        loss = F.binary_cross_entropy(logits, label).cpu().item()
        auc = roc_auc_score(label.cpu(), logits.cpu())
        aup = average_precision_score(label.cpu(), logits.cpu())

        if self.args.unlearning_model in ['original', 'retrain']:
            df_logit = float('nan')
        else:
            # NOTE(review): .item() assumes `subset_train` holds exactly one
            # edge at this point — confirm against the caller.
            df_logit = model.decode(z, subset_train).sigmoid().detach().cpu().item()

        if pred_all:
            logit_all_pair = (z @ z.t()).cpu()
        else:
            logit_all_pair = None

        log = {
            f'{stage}_loss': loss,
            f'{stage}_auc': auc,
            f'{stage}_aup': aup,
            f'{stage}_df_logit': df_logit,
        }
        # NOTE(review): `wandb` is not imported in this module — confirm.
        wandb.log(log)
        msg = [f'{i}: {j:.4f}' if isinstance(j, (np.floating, float)) else f'{i}: {j:>4d}' for i, j in log.items()]
        tqdm.write(' | '.join(msg))

        return loss, auc, aup, df_logit, logit_all_pair
| 14,714 | 38.24 | 120 | py |
GNNDelete | GNNDelete-main/framework/trainer/descent_to_delete.py | import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from .base import Trainer
from ..evaluation import *
from ..utils import *
class DtdTrainer(Trainer):
    '''Descent-to-Delete unlearning baseline.

    Fine-tunes on the retained edges only, then perturbs the final weights
    with calibrated Gaussian noise for a deletion guarantee.
    This code is adapted from https://github.com/ChrisWaites/descent-to-delete
    '''
    def compute_sigma(self, num_examples, iterations, lipshitz, smooth, strong, epsilon, delta):
        """Noise scale per Theorem 3.1 of https://arxiv.org/pdf/2007.02923.pdf

        num_examples: number of retained training edges
        iterations:   number of fine-tuning epochs
        lipshitz / smooth / strong: loss constants (Lipschitz, smoothness,
            strong convexity) used by the theorem
        epsilon, delta: privacy-style budget of the deletion guarantee
        """
        print('delta', delta)
        # Contraction factor of gradient descent on a smooth, strongly convex loss.
        gamma = (smooth - strong) / (smooth + strong)
        numerator = 4 * np.sqrt(2) * lipshitz * np.power(gamma, iterations)
        denominator = (strong * num_examples * (1 - np.power(gamma, iterations))) * ((np.sqrt(np.log(1 / delta) + epsilon)) - np.sqrt(np.log(1 / delta)))
        # print('sigma', numerator, denominator, numerator / denominator)
        return numerator / denominator

    def publish(self, model, sigma):
        """Publishing function which adds Gaussian noise with scale sigma to every parameter, in place."""
        with torch.no_grad():
            for n, p in model.named_parameters():
                p.copy_(p + torch.empty_like(p).normal_(0, sigma))

    def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
        """Fine-tune on the retained edges (data.dr_mask), then publish with noise.

        NOTE(review): self.eval(...) is unpacked into 5 values here but into 8
        in sibling trainers — confirm which base-class signature applies.
        """
        start_time = time.time()
        best_valid_loss = 100000  # currently unused

        # Membership-inference attack scores before unlearning, for comparison.
        if attack_model_all is not None:
            mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
            self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
            self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
        if attack_model_sub is not None:
            mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
            self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
            self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before

        for epoch in trange(args.epochs, desc='Unlerning'):
            model.train()

            # Standard link-prediction objective on the retained (Dr) edges only.
            neg_edge_index = negative_sampling(
                edge_index=data.train_pos_edge_index[:, data.dr_mask],
                num_nodes=data.num_nodes,
                num_neg_samples=data.dr_mask.sum())

            z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
            logits = model.decode(z, data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
            label = get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
            loss = F.binary_cross_entropy_with_logits(logits, label)

            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            optimizer.zero_grad()

            log = {
                'Epoch': epoch,
                'train_loss': loss.item(),
            }
            wandb.log(log)
            msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
            tqdm.write(' | '.join(msg))

            valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
            self.trainer_log['log'].append({
                'dt_loss': valid_loss,
                'dt_auc': auc,
                'dt_aup': aup
            })

        train_size = data.dr_mask.sum().cpu().item()
        # Calibrate the publishing-noise scale. NOTE(review): the constants
        # (1 + wd, 4 - wd, wd, epsilon=5, delta=1/n^2) presumably follow the
        # paper's choices derived from weight decay — confirm.
        sigma = self.compute_sigma(
            train_size,
            args.epochs,
            1 + args.weight_decay,
            4 - args.weight_decay,
            args.weight_decay,
            5,
            1 / train_size / train_size)
        self.publish(model, sigma)

        self.trainer_log['sigma'] = sigma
        self.trainer_log['training_time'] = time.time() - start_time

        # Save the perturbed (published) model.
        ckpt = {
            'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
            'optimizer_state': optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 4,135 | 38.390476 | 153 | py |
GNNDelete | GNNDelete-main/framework/trainer/approx_retrain.py | import os
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
from torch_geometric.utils import negative_sampling
from torch.utils.data import DataLoader, TensorDataset
from .base import Trainer
from ..evaluation import *
from ..utils import *
DTYPE = np.float16
class ApproxTrainer(Trainer):
    '''Approximate retraining via the projective residual update.

    This code is adapted from https://github.com/zleizzo/datadeletion
    '''
    def gram_schmidt(self, X):
        """
        Uses numpy's qr factorization method to perform Gram-Schmidt.
        Args:
            X: (k x d matrix) X[i] = i-th vector
        Returns:
            U: (k x d matrix) U[i] = i-th orthonormal vector
            C: (k x k matrix) Coefficient matrix, C[i] = coeffs for X[i], X = CU
        """
        (k, d) = X.shape
        # QR of X^T orthonormalizes the rows of X; 'complete' mode is required
        # when there are more vectors than dimensions (k > d).
        if k <= d:
            q, r = np.linalg.qr(np.transpose(X))
        else:
            q, r = np.linalg.qr(np.transpose(X), mode='complete')
        U = np.transpose(q)
        C = np.transpose(r)
        return U, C

    def LKO_pred(self, X, Y, ind, H=None, reg=1e-4):
        """
        Computes the LKO (leave-k-out) model's prediction values on the left-out points.
        Args:
            X: (n x d matrix) Covariate matrix
            Y: (n x 1 vector) Response vector
            ind: (k x 1 list) List of indices to be removed
            H: (n x n matrix, optional) Hat matrix X (X^T X)^{-1} X^T
            reg: (float) ridge term that keeps X^T X invertible when H is built here
        Returns:
            LKO: (k x 1 vector) Retrained model's predictions on X[i], i in ind
        """
        n = len(Y)
        k = len(ind)
        d = len(X[0, :])
        if H is None:
            # Regularized hat matrix; solve() avoids forming the explicit inverse.
            H = np.matmul(X, np.linalg.solve(np.matmul(X.T, X) + reg * np.eye(d), X.T))
        LOO = np.zeros(k)
        for i in range(k):
            idx = ind[i]
            # This is the LOO residual y_i - \hat{y}^{LOO}_i
            LOO[i] = (Y[idx] - np.matmul(H[idx, :], Y)) / (1 - H[idx, idx])
        # S = I - T from the paper
        S = np.eye(k)
        for i in range(k):
            for j in range(k):
                if j != i:
                    idx_i = ind[i]
                    idx_j = ind[j]
                    S[i, j] = -H[idx_i, idx_j] / (1 - H[idx_i, idx_i])
        # Jointly convert the leave-one-out residuals into leave-k-out residuals.
        LKO = np.linalg.solve(S, LOO)
        return Y[ind] - LKO

    def lin_res(self, X, Y, theta, ind, H=None, reg=1e-4):
        """
        Approximate retraining via the projective residual update.
        Args:
            X: (n x d matrix) Covariate matrix
            Y: (n x 1 vector) Response vector
            theta: (d x 1 vector) Current value of parameters to be updated
            ind: (k x 1 list) List of indices to be removed
            H: (n x n matrix, optional) Hat matrix X (X^T X)^{-1} X^T
        Returns:
            step: (d x 1 vector) Update step; the caller subtracts it from theta
        """
        d = len(X[0])
        k = len(ind)

        # Step 1: Compute LKO predictions
        LKO = self.LKO_pred(X, Y, ind, H, reg)

        # Step 2: Eigendecompose B
        # 2.I: orthonormal basis of the span of the removed rows
        U, C = self.gram_schmidt(X[ind, :])
        # 2.II: eigenbasis of C^T C expressed in that span
        Cmatrix = np.matmul(C.T, C)
        eigenval, a = np.linalg.eigh(Cmatrix)
        V = np.matmul(a.T, U)

        # Step 3: Perform the update
        # 3.I: gradient of the residual objective restricted to the removed rows
        grad = np.zeros_like(theta) # 2D grad
        for i in range(k):
            grad += (X[ind[i], :] * theta - LKO[i]) * X[ind[i], :]
        # 3.II: scale the gradient along each eigen-direction (pseudo-inverse of B)
        # NOTE(review): `V[i, :] * grad * V[i, :]` multiplies elementwise rather
        # than projecting with a dot product ((V[i] @ grad) * V[i]); confirm
        # against the projective residual update in the reference implementation.
        step = np.zeros_like(theta) # 2D grad
        for i in range(k):
            factor = 1 / eigenval[i] if eigenval[i] > 1e-10 else 0
            step += factor * V[i, :] * grad * V[i, :]
        # 3.III: return the step only; caller applies `theta - step`
        return step
        # update = theta - step
        # return update

    @torch.no_grad()
    def train(self, model, data, optimizer, args, logits_ori=None, attack_model=None):
        """Apply the projective residual update to the last GNN layer's weights.

        Treats (z_u * z_v, label) pairs over retained positive edges plus fresh
        negative samples as a linear regression problem and updates
        model.conv2.lin.weight accordingly. No gradient-based training occurs.
        """
        model.eval()
        best_loss = 100000  # currently unused

        neg_edge_index = negative_sampling(
            edge_index=data.train_pos_edge_index[:, data.dr_mask],
            num_nodes=data.num_nodes,
            num_neg_samples=data.dr_mask.sum())

        z = model(data.x, data.train_pos_edge_index[:, data.dr_mask])
        # Edge features: elementwise product of the two endpoint embeddings.
        edge_index_all = torch.cat([data.train_pos_edge_index[:, data.dr_mask], neg_edge_index], dim=1)
        X = z[edge_index_all[0]] * z[edge_index_all[1]]
        Y = self.get_link_labels(data.train_pos_edge_index[:, data.dr_mask], neg_edge_index)
        X = X.cpu()
        Y = Y.cpu()

        # According to the code, theta should be of (d, d). So only update the weights of the last layer
        theta = model.conv2.lin.weight.cpu().numpy()
        # Indices of the edges to delete, as passed on the command line.
        ind = [int(i) for i in self.args.df_idx.split(',')]

        # Not enough RAM for solving matrix inverse. So break into multiple batches
        update = []
        loader = DataLoader(TensorDataset(X, Y), batch_size=4096, num_workers=8)
        for x, y in tqdm(loader, desc='Unlearning'):
            x = x.numpy()
            y = y.numpy()
            update_step = self.lin_res(x, y, theta.T, ind)
            update.append(torch.tensor(update_step))
        # Average the per-batch steps and apply a single weight update.
        update = torch.stack(update).mean(0)

        model.conv2.lin.weight = torch.nn.Parameter(model.conv2.lin.weight - update.t().cuda())
        print(f'Update model weights from {torch.norm(torch.tensor(theta))} to {torch.norm(model.conv2.lin.weight)}')

        valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
        self.trainer_log['log'].append({
            'dt_loss': valid_loss,
            'dt_auc': auc,
            'dt_aup': aup
        })

        # Save
        ckpt = {
            'model_state': {k: v.cpu() for k, v in model.state_dict().items()},
            'node_emb': None,
            'optimizer_state': None,
        }
        torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 5,736 | 33.14881 | 117 | py |
GNNDelete | GNNDelete-main/framework/trainer/gnndelete_embdis.py | import os
import time
import wandb
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch_geometric.utils import negative_sampling, k_hop_subgraph
from torch_geometric.loader import GraphSAINTRandomWalkSampler
from .base import Trainer
from ..evaluation import *
from ..utils import *
def BoundedKLD(logits, truth):
    """Bounded KL divergence in [0, 1): 1 - exp(-KL(softmax(truth) || softmax(logits))),
    with 'batchmean' reduction."""
    log_q = F.log_softmax(logits, -1)
    p = truth.softmax(-1)
    kld = F.kl_div(log_q, p, None, None, 'batchmean')
    return 1 - torch.exp(-kld)
class GNNDeleteEmbeddingDistanceTrainer(Trainer):
def train(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
if 'ogbl' in self.args.dataset:
return self.train_minibatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
else:
return self.train_fullbatch(model, data, optimizer, args, logits_ori, attack_model_all, attack_model_sub)
def train_fullbatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
model = model.to('cuda')
data = data.to('cuda')
best_metric = 0
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
# All node paris in S_Df without Df. For Local Causality
## S_Df all pair mask
sdf_all_pair_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
idx = torch.combinations(torch.arange(data.num_nodes)[data.sdf_node_2hop_mask], with_replacement=True).t()
sdf_all_pair_mask[idx[0], idx[1]] = True
sdf_all_pair_mask[idx[1], idx[0]] = True
# print(data.sdf_node_2hop_mask.sum())
# print(sdf_all_pair_mask.nonzero())
# print(data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1])
assert sdf_all_pair_mask.sum().cpu() == data.sdf_node_2hop_mask.sum().cpu() * data.sdf_node_2hop_mask.sum().cpu()
## Remove Df itself
sdf_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][0], data.train_pos_edge_index[:, data.df_mask][1]] = False
sdf_all_pair_mask[data.train_pos_edge_index[:, data.df_mask][1], data.train_pos_edge_index[:, data.df_mask][0]] = False
## Lower triangular mask
idx = torch.tril_indices(data.num_nodes, data.num_nodes, -1)
lower_mask = torch.zeros(data.num_nodes, data.num_nodes, dtype=torch.bool)
lower_mask[idx[0], idx[1]] = True
## The final mask is the intersection
sdf_all_pair_without_df_mask = sdf_all_pair_mask & lower_mask
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum(), a, sdf_all_pair_mask.sum())
# print('aaaaaaaaaaaa', lower_mask.sum())
# print('aaaaaaaaaaaa', sdf_all_pair_without_df_mask.sum())
# print('aaaaaaaaaaaa', data.sdf_node_2hop_mask.sum())
# assert sdf_all_pair_without_df_mask.sum() == \
# data.sdf_node_2hop_mask.sum().cpu() * (data.sdf_node_2hop_mask.sum().cpu() - 1) // 2 - data.df_mask.sum().cpu()
# Node representation for local causality
with torch.no_grad():
z1_ori, z2_ori = model.get_original_embeddings(data.x, data.train_pos_edge_index[:, data.dtrain_mask], return_all_emb=True)
total_time = 0
for epoch in trange(args.epochs, desc='Unlerning'):
model.train()
start_time = time.time()
z1, z2 = model(data.x, data.train_pos_edge_index[:, data.sdf_mask], return_all_emb=True)
print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
# Effectiveness and Randomness
neg_size = data.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=data.train_pos_edge_index,
num_nodes=data.num_nodes,
num_neg_samples=neg_size)
df_logits = model.decode(z2, data.train_pos_edge_index[:, data.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# df_logits = model.decode(
# z,
# data.train_pos_edge_index[:, data.df_mask].repeat(1, neg_size),
# neg_edge_index).sigmoid()
# loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# print('df_logits', df_logits)
# raise
# Local causality
if sdf_all_pair_without_df_mask.sum() != 0:
loss_l = loss_fct(z1_ori[data.sdf_node_1hop_mask], z1[data.sdf_node_1hop_mask]) + \
loss_fct(z2_ori[data.sdf_node_2hop_mask], z2[data.sdf_node_2hop_mask])
print('local proba', loss_l.item())
else:
loss_l = torch.tensor(0)
print('local proba', 0)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_loss_l': loss_l.item(),
'train_loss_e': loss_e.item(),
'train_time': end_time - start_time,
}
# wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
if (epoch+1) % args.valid_freq == 0:
valid_loss, dt_auc, dt_aup, df_auc, df_aup, df_logit, logit_all_pair, valid_log = self.eval(model, data, 'val')
train_log = {
'epoch': epoch,
'train_loss': loss.item(),
'train_loss_l': loss_e.item(),
'train_loss_e': loss_l.item(),
'train_time': end_time - start_time,
}
for log in [train_log, valid_log]:
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
if dt_auc + df_auc > best_metric:
best_metric = dt_auc + df_auc
best_epoch = epoch
print(f'Save best checkpoint at epoch {epoch:04d}. Valid loss = {valid_loss:.4f}')
ckpt = {
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_final.pt'))
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
def train_minibatch(self, model, data, optimizer, args, logits_ori=None, attack_model_all=None, attack_model_sub=None):
start_time = time.time()
best_loss = 100000
if 'kld' in args.unlearning_model:
loss_fct = BoundedKLD
else:
loss_fct = nn.MSELoss()
# neg_size = 10
# MI Attack before unlearning
if attack_model_all is not None:
mi_logit_all_before, mi_sucrate_all_before = member_infer_attack(model, attack_model_all, data)
self.trainer_log['mi_logit_all_before'] = mi_logit_all_before
self.trainer_log['mi_sucrate_all_before'] = mi_sucrate_all_before
if attack_model_sub is not None:
mi_logit_sub_before, mi_sucrate_sub_before = member_infer_attack(model, attack_model_sub, data)
self.trainer_log['mi_logit_sub_before'] = mi_logit_sub_before
self.trainer_log['mi_sucrate_sub_before'] = mi_sucrate_sub_before
z_ori = self.get_embedding(model, data, on_cpu=True)
z_ori_two_hop = z_ori[data.sdf_node_2hop_mask]
data.edge_index = data.train_pos_edge_index
data.node_id = torch.arange(data.x.shape[0])
loader = GraphSAINTRandomWalkSampler(
data, batch_size=args.batch_size, walk_length=2, num_steps=args.num_steps,
)
for epoch in trange(args.epochs, desc='Unlerning'):
model.train()
print('current deletion weight', model.deletion1.deletion_weight.sum(), model.deletion2.deletion_weight.sum())
epoch_loss_e = 0
epoch_loss_l = 0
epoch_loss = 0
for step, batch in enumerate(tqdm(loader, leave=False)):
# print('data', batch)
# print('two hop nodes', batch.sdf_node_2hop_mask.sum())
batch = batch.to('cuda')
train_pos_edge_index = batch.edge_index
z = model(batch.x, train_pos_edge_index[:, batch.sdf_mask], batch.sdf_node_1hop_mask, batch.sdf_node_2hop_mask)
z_two_hop = z[batch.sdf_node_2hop_mask]
# Effectiveness and Randomness
neg_size = batch.df_mask.sum()
neg_edge_index = negative_sampling(
edge_index=train_pos_edge_index,
num_nodes=z.size(0),
num_neg_samples=neg_size)
df_logits = model.decode(z, train_pos_edge_index[:, batch.df_mask], neg_edge_index)
loss_e = loss_fct(df_logits[:neg_size], df_logits[neg_size:])
# Local causality
mask = torch.zeros(data.x.shape[0], dtype=torch.bool)
mask[batch.node_id[batch.sdf_node_2hop_mask]] = True
z_ori_subset = z_ori[mask].to('cuda')
# Only take the lower triangular part
num_nodes = z_ori_subset.shape[0]
idx = torch.tril_indices(num_nodes, num_nodes, -1)
local_lower_mask = torch.zeros(num_nodes, num_nodes, dtype=torch.bool)
local_lower_mask[idx[0], idx[1]] = True
logits_ori = (z_ori_subset @ z_ori_subset.t())[local_lower_mask].sigmoid()
logits = (z_two_hop @ z_two_hop.t())[local_lower_mask].sigmoid()
loss_l = loss_fct(logits, logits_ori)
alpha = 0.5
if 'ablation_random' in self.args.unlearning_model:
loss_l = torch.tensor(0)
loss = loss_e
elif 'ablation_locality' in self.args.unlearning_model:
loss_e = torch.tensor(0)
loss = loss_l
else:
loss = alpha * loss_e + (1 - alpha) * loss_l
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
optimizer.zero_grad()
epoch_loss_e += loss_e.item()
epoch_loss_l += loss_l.item()
epoch_loss += loss.item()
epoch_loss_e /= step
epoch_loss_l /= step
epoch_loss /= step
if (epoch+1) % args.valid_freq == 0:
valid_loss, auc, aup, df_logt, logit_all_pair = self.eval(model, data, 'val')
log = {
'epoch': epoch,
'train_loss': epoch_loss,
'train_loss_e': epoch_loss_e,
'train_loss_l': epoch_loss_l,
'valid_dt_loss': valid_loss,
'valid_dt_auc': auc,
'valid_dt_aup': aup,
}
wandb.log(log)
msg = [f'{i}: {j:>4d}' if isinstance(j, int) else f'{i}: {j:.4f}' for i, j in log.items()]
tqdm.write(' | '.join(msg))
self.trainer_log['log'].append(log)
self.trainer_log['training_time'] = time.time() - start_time
# Save
ckpt = {
'model_state': {k: v.to('cpu') for k, v in model.state_dict().items()},
'optimizer_state': optimizer.state_dict(),
}
torch.save(ckpt, os.path.join(args.checkpoint_dir, 'model_best.pt'))
| 13,600 | 42.453674 | 135 | py |
GNNDelete | GNNDelete-main/framework/models/gin.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GINConv
class GIN(nn.Module):
    """Two-layer GIN encoder with a dot-product link-prediction decoder."""

    def __init__(self, args, **kwargs):
        super().__init__()
        self.conv1 = GINConv(nn.Linear(args.in_dim, args.hidden_dim))
        self.conv2 = GINConv(nn.Linear(args.hidden_dim, args.out_dim))

    def forward(self, x, edge_index, return_all_emb=False):
        """Encode nodes; optionally return both layer embeddings."""
        h1 = self.conv1(x, edge_index)
        h2 = self.conv2(F.relu(h1), edge_index)
        if return_all_emb:
            return h1, h2
        return h2

    def decode(self, z, pos_edge_index, neg_edge_index=None):
        """Score node pairs by the dot product of their embeddings.

        Negative edges, when given, are appended after the positives.
        """
        if neg_edge_index is None:
            edge_index = pos_edge_index
        else:
            edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        src, dst = edge_index[0], edge_index[1]
        return (z[src] * z[dst]).sum(dim=-1)
| 1,373 | 28.869565 | 76 | py |
GNNDelete | GNNDelete-main/framework/models/rgat.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter, ReLU
from torch_scatter import scatter_add
from torch_sparse import SparseTensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import glorot, ones, zeros
from torch_geometric.typing import Adj, OptTensor, Size
from torch_geometric.utils import softmax
# Source: torch_geometric
class RGATConv(MessagePassing):
    """Relational graph attention convolution (adapted from torch_geometric).

    Supports basis- and block-diagonal weight decomposition, additive or
    multiplicative self-attention, within- or across-relation softmax, and
    several attention modulation ("mod") variants.
    """

    _alpha: OptTensor

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_relations: int,
        num_bases: Optional[int] = None,
        num_blocks: Optional[int] = None,
        mod: Optional[str] = None,
        attention_mechanism: str = "across-relation",
        attention_mode: str = "additive-self-attention",
        heads: int = 1,
        dim: int = 1,
        concat: bool = True,
        negative_slope: float = 0.2,
        dropout: float = 0.0,
        edge_dim: Optional[int] = None,
        bias: bool = True,
        **kwargs,
    ):
        kwargs.setdefault('aggr', 'add')
        super().__init__(node_dim=0, **kwargs)

        self.heads = heads
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.mod = mod
        self.activation = ReLU()
        self.concat = concat
        self.attention_mode = attention_mode
        self.attention_mechanism = attention_mechanism
        self.dim = dim
        self.edge_dim = edge_dim

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases
        self.num_blocks = num_blocks

        mod_types = ['additive', 'scaled', 'f-additive', 'f-scaled']

        if (self.attention_mechanism != "within-relation"
                and self.attention_mechanism != "across-relation"):
            raise ValueError('attention mechanism must either be '
                             '"within-relation" or "across-relation"')

        if (self.attention_mode != "additive-self-attention"
                and self.attention_mode != "multiplicative-self-attention"):
            raise ValueError('attention mode must either be '
                             '"additive-self-attention" or '
                             '"multiplicative-self-attention"')

        if self.attention_mode == "additive-self-attention" and self.dim > 1:
            raise ValueError('"additive-self-attention" mode cannot be '
                             'applied when value of d is greater than 1. '
                             'Use "multiplicative-self-attention" instead.')

        if self.dropout > 0.0 and self.mod in mod_types:
            raise ValueError('mod must be None with dropout value greater '
                             'than 0 in order to sample attention '
                             'coefficients stochastically')

        if num_bases is not None and num_blocks is not None:
            raise ValueError('Can not apply both basis-decomposition and '
                             'block-diagonal-decomposition at the same time.')

        # The learnable parameters to compute both attention logits and
        # attention coefficients:
        self.q = Parameter(
            torch.Tensor(self.heads * self.out_channels,
                         self.heads * self.dim))
        self.k = Parameter(
            torch.Tensor(self.heads * self.out_channels,
                         self.heads * self.dim))

        if bias and concat:
            self.bias = Parameter(
                torch.Tensor(self.heads * self.dim * self.out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(self.dim * self.out_channels))
        else:
            self.register_parameter('bias', None)

        if edge_dim is not None:
            self.lin_edge = Linear(self.edge_dim,
                                   self.heads * self.out_channels, bias=False,
                                   weight_initializer='glorot')
            self.e = Parameter(
                torch.Tensor(self.heads * self.out_channels,
                             self.heads * self.dim))
        else:
            self.lin_edge = None
            self.register_parameter('e', None)

        if num_bases is not None:
            self.att = Parameter(
                torch.Tensor(self.num_relations, self.num_bases))
            self.basis = Parameter(
                torch.Tensor(self.num_bases, self.in_channels,
                             self.heads * self.out_channels))
        elif num_blocks is not None:
            assert (
                self.in_channels % self.num_blocks == 0
                and (self.heads * self.out_channels) % self.num_blocks == 0), (
                    "both 'in_channels' and 'heads * out_channels' must be "
                    "multiple of 'num_blocks' used")
            self.weight = Parameter(
                torch.Tensor(self.num_relations, self.num_blocks,
                             self.in_channels // self.num_blocks,
                             (self.heads * self.out_channels) //
                             self.num_blocks))
        else:
            self.weight = Parameter(
                torch.Tensor(self.num_relations, self.in_channels,
                             self.heads * self.out_channels))

        # Parameters of the degree-based "scaled"/"additive" modulation.
        self.w = Parameter(torch.ones(self.out_channels))
        self.l1 = Parameter(torch.Tensor(1, self.out_channels))
        self.b1 = Parameter(torch.Tensor(1, self.out_channels))
        self.l2 = Parameter(torch.Tensor(self.out_channels, self.out_channels))
        self.b2 = Parameter(torch.Tensor(1, self.out_channels))

        self._alpha = None

        self.reset_parameters()

    def reset_parameters(self):
        if self.num_bases is not None:
            glorot(self.basis)
            glorot(self.att)
        else:
            glorot(self.weight)
        glorot(self.q)
        glorot(self.k)
        zeros(self.bias)
        ones(self.l1)
        zeros(self.b1)
        # Bug fix: the original called `torch.full(self.l2.size(), ...)` and
        # discarded the result, leaving `l2` uninitialized; fill it in place.
        self.l2.data.fill_(1 / self.out_channels)
        zeros(self.b2)
        if self.lin_edge is not None:
            glorot(self.lin_edge)
            glorot(self.e)

    def forward(self, x: Tensor, edge_index: Adj, edge_type: OptTensor = None,
                edge_attr: OptTensor = None, size: Size = None,
                return_attention_weights=None):
        """Run message passing; optionally also return attention weights."""
        # propagate_type: (x: Tensor, edge_type: OptTensor, edge_attr: OptTensor)  # noqa
        out = self.propagate(edge_index=edge_index, edge_type=edge_type, x=x,
                             size=size, edge_attr=edge_attr)

        alpha = self._alpha
        assert alpha is not None
        self._alpha = None

        if isinstance(return_attention_weights, bool):
            if isinstance(edge_index, Tensor):
                return out, (edge_index, alpha)
            elif isinstance(edge_index, SparseTensor):
                return out, edge_index.set_value(alpha, layout='coo')
        else:
            return out

    def message(self, x_i: Tensor, x_j: Tensor, edge_type: Tensor,
                edge_attr: OptTensor, index: Tensor, ptr: OptTensor,
                size_i: Optional[int]) -> Tensor:
        if self.num_bases is not None:  # Basis-decomposition =================
            w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))
            w = w.view(self.num_relations, self.in_channels,
                       self.heads * self.out_channels)
        if self.num_blocks is not None:  # Block-diagonal-decomposition =======
            if (x_i.dtype == torch.long and x_j.dtype == torch.long
                    and self.num_blocks is not None):
                raise ValueError('Block-diagonal decomposition not supported '
                                 'for non-continuous input features.')
            w = self.weight
            x_i = x_i.view(-1, 1, w.size(1), w.size(2))
            x_j = x_j.view(-1, 1, w.size(1), w.size(2))
            w = torch.index_select(w, 0, edge_type)
            outi = torch.einsum('abcd,acde->ace', x_i, w)
            outi = outi.contiguous().view(-1, self.heads * self.out_channels)
            outj = torch.einsum('abcd,acde->ace', x_j, w)
            outj = outj.contiguous().view(-1, self.heads * self.out_channels)
        else:  # No regularization/Basis-decomposition ========================
            if self.num_bases is None:
                w = self.weight
            w = torch.index_select(w, 0, edge_type)
            outi = torch.bmm(x_i.unsqueeze(1), w).squeeze(-2)
            outj = torch.bmm(x_j.unsqueeze(1), w).squeeze(-2)

        qi = torch.matmul(outi, self.q)
        kj = torch.matmul(outj, self.k)

        alpha_edge, alpha = 0, torch.tensor([0])
        if edge_attr is not None:
            if edge_attr.dim() == 1:
                edge_attr = edge_attr.view(-1, 1)
            assert self.lin_edge is not None, (
                "Please set 'edge_dim = edge_attr.size(-1)' while calling the "
                "RGATConv layer")
            edge_attributes = self.lin_edge(edge_attr).view(
                -1, self.heads * self.out_channels)
            if edge_attributes.size(0) != edge_attr.size(0):
                edge_attributes = torch.index_select(edge_attributes, 0,
                                                     edge_type)
            alpha_edge = torch.matmul(edge_attributes, self.e)

        if self.attention_mode == "additive-self-attention":
            if edge_attr is not None:
                alpha = torch.add(qi, kj) + alpha_edge
            else:
                alpha = torch.add(qi, kj)
            alpha = F.leaky_relu(alpha, self.negative_slope)
        elif self.attention_mode == "multiplicative-self-attention":
            if edge_attr is not None:
                alpha = (qi * kj) * alpha_edge
            else:
                alpha = qi * kj

        if self.attention_mechanism == "within-relation":
            # Softmax separately over the edges of each relation type.
            across_out = torch.zeros_like(alpha)
            for r in range(self.num_relations):
                mask = edge_type == r
                across_out[mask] = softmax(alpha[mask], index[mask])
            alpha = across_out
        elif self.attention_mechanism == "across-relation":
            alpha = softmax(alpha, index, ptr, size_i)

        self._alpha = alpha

        if self.mod == "additive":
            if self.attention_mode == "additive-self-attention":
                ones = torch.ones_like(alpha)
                h = (outj.view(-1, self.heads, self.out_channels) *
                     ones.view(-1, self.heads, 1))
                h = torch.mul(self.w, h)

                return (outj.view(-1, self.heads, self.out_channels) *
                        alpha.view(-1, self.heads, 1) + h)
            elif self.attention_mode == "multiplicative-self-attention":
                ones = torch.ones_like(alpha)
                h = (outj.view(-1, self.heads, 1, self.out_channels) *
                     ones.view(-1, self.heads, self.dim, 1))
                h = torch.mul(self.w, h)

                return (outj.view(-1, self.heads, 1, self.out_channels) *
                        alpha.view(-1, self.heads, self.dim, 1) + h)

        elif self.mod == "scaled":
            if self.attention_mode == "additive-self-attention":
                ones = alpha.new_ones(index.size())
                degree = scatter_add(ones, index,
                                     dim_size=size_i)[index].unsqueeze(-1)
                degree = torch.matmul(degree, self.l1) + self.b1
                degree = self.activation(degree)
                degree = torch.matmul(degree, self.l2) + self.b2

                return torch.mul(
                    outj.view(-1, self.heads, self.out_channels) *
                    alpha.view(-1, self.heads, 1),
                    degree.view(-1, 1, self.out_channels))
            elif self.attention_mode == "multiplicative-self-attention":
                ones = alpha.new_ones(index.size())
                degree = scatter_add(ones, index,
                                     dim_size=size_i)[index].unsqueeze(-1)
                degree = torch.matmul(degree, self.l1) + self.b1
                degree = self.activation(degree)
                degree = torch.matmul(degree, self.l2) + self.b2

                return torch.mul(
                    outj.view(-1, self.heads, 1, self.out_channels) *
                    alpha.view(-1, self.heads, self.dim, 1),
                    degree.view(-1, 1, 1, self.out_channels))

        elif self.mod == "f-additive":
            alpha = torch.where(alpha > 0, alpha + 1, alpha)

        elif self.mod == "f-scaled":
            ones = alpha.new_ones(index.size())
            degree = scatter_add(ones, index,
                                 dim_size=size_i)[index].unsqueeze(-1)
            alpha = alpha * degree

        elif self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)

        else:
            alpha = alpha  # original

        if self.attention_mode == "additive-self-attention":
            return alpha.view(-1, self.heads, 1) * outj.view(
                -1, self.heads, self.out_channels)
        else:
            return (alpha.view(-1, self.heads, self.dim, 1) *
                    outj.view(-1, self.heads, 1, self.out_channels))

    def update(self, aggr_out: Tensor) -> Tensor:
        if self.attention_mode == "additive-self-attention":
            if self.concat is True:
                aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
            else:
                aggr_out = aggr_out.mean(dim=1)

            if self.bias is not None:
                aggr_out = aggr_out + self.bias

            return aggr_out
        else:
            if self.concat is True:
                aggr_out = aggr_out.view(
                    -1, self.heads * self.dim * self.out_channels)
            else:
                aggr_out = aggr_out.mean(dim=1)
                aggr_out = aggr_out.view(-1, self.dim * self.out_channels)

            if self.bias is not None:
                aggr_out = aggr_out + self.bias

            return aggr_out

    def __repr__(self) -> str:
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)
class RGAT(nn.Module):
    """Two-layer relational GAT encoder with a DistMult link decoder."""

    def __init__(self, args, num_nodes, num_edge_type, **kwargs):
        super().__init__()
        self.args = args
        self.num_edge_type = num_edge_type

        # Encoder: learned node embeddings followed by two RGAT layers.
        self.node_emb = nn.Embedding(num_nodes, args.in_dim)
        if num_edge_type > 20:
            # Block-diagonal decomposition keeps parameters manageable
            # when there are many relation types.
            self.conv1 = RGATConv(args.in_dim, args.hidden_dim, num_edge_type * 2, num_blocks=4)
            self.conv2 = RGATConv(args.hidden_dim, args.out_dim, num_edge_type * 2, num_blocks=4)
        else:
            self.conv1 = RGATConv(args.in_dim, args.hidden_dim, num_edge_type * 2)
            self.conv2 = RGATConv(args.hidden_dim, args.out_dim, num_edge_type * 2)
        self.relu = nn.ReLU()

        # Decoder: DistMult relation vectors, one per relation type.
        self.W = nn.Parameter(torch.Tensor(num_edge_type, args.out_dim))
        nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))

    def forward(self, x, edge, edge_type, return_all_emb=False):
        """Encode node ids; optionally return both layer embeddings."""
        h0 = self.node_emb(x)
        h1 = self.conv1(h0, edge, edge_type)
        h2 = self.conv2(self.relu(h1), edge, edge_type)
        if return_all_emb:
            return h1, h2
        return h2

    def decode(self, z, edge_index, edge_type):
        """DistMult score <head, rel, tail> for each (edge, type) pair."""
        head = z[edge_index[0]]
        tail = z[edge_index[1]]
        rel = self.W[edge_type]
        return torch.sum(head * rel * tail, dim=1)
| 16,095 | 40.061224 | 97 | py |
GNNDelete | GNNDelete-main/framework/models/deletion.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from . import GCN, GAT, GIN, RGCN, RGAT
class DeletionLayer(nn.Module):
    """Trainable deletion operator applied to a masked subset of node rows."""

    def __init__(self, dim, mask):
        super().__init__()
        self.dim = dim
        self.mask = mask
        # Near-zero initialization: the operator starts by (almost) erasing
        # the masked representations.
        self.deletion_weight = nn.Parameter(torch.ones(dim, dim) / 1000)

    def forward(self, x, mask=None):
        '''Only apply deletion operator to the local nodes identified by mask'''
        active = self.mask if mask is None else mask
        if active is None:
            return x
        out = x.clone()
        out[active] = torch.matmul(out[active], self.deletion_weight)
        return out
class DeletionLayerKG(nn.Module):
    """Deletion operator for knowledge-graph models (same contract as DeletionLayer)."""

    def __init__(self, dim, mask):
        super().__init__()
        self.dim = dim
        self.mask = mask
        # Near-zero initialization so masked rows start close to erased.
        self.deletion_weight = nn.Parameter(torch.ones(dim, dim) / 1000)

    def forward(self, x, mask=None):
        '''Only apply deletion operator to the local nodes identified by mask'''
        active = self.mask if mask is None else mask
        if active is None:
            return x
        out = x.clone()
        out[active] = torch.matmul(out[active], self.deletion_weight)
        return out
class GCNDelete(GCN):
    """GCN backbone extended with per-layer deletion operators (GNNDelete).

    Only the two DeletionLayer modules are intended to be trained; the
    convolution weights come from a previously trained GCN.
    """
    def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
        super().__init__(args)
        # Deletion operators for the 1-hop / 2-hop affected node sets.
        self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
        self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)

        # NOTE(review): assigning `.requires_grad` on a Module object looks
        # intended to freeze the convs, but presumably does not touch their
        # parameters (`requires_grad_(False)` would) — confirm the convs are
        # excluded from the optimizer elsewhere.
        self.conv1.requires_grad = False
        self.conv2.requires_grad = False

    def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
        # with torch.no_grad():
        # NOTE(review): unlike the GAT/GIN delete variants in this file, conv1
        # here runs *with* gradient tracking (the no_grad guard is commented
        # out) — confirm whether this asymmetry is intentional.
        x1 = self.conv1(x, edge_index)
        # Deletion applied between conv output and the next layer's input.
        x1 = self.deletion1(x1, mask_1hop)

        x = F.relu(x1)
        x2 = self.conv2(x, edge_index)
        x2 = self.deletion2(x2, mask_2hop)

        if return_all_emb:
            return x1, x2

        return x2

    def get_original_embeddings(self, x, edge_index, return_all_emb=False):
        # Backbone forward pass without any deletion operator applied.
        return super().forward(x, edge_index, return_all_emb)
class GATDelete(GAT):
    """GAT backbone with trainable per-layer deletion operators (GNNDelete)."""

    def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
        super().__init__(args)
        self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
        self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)

        self.conv1.requires_grad = False
        self.conv2.requires_grad = False

    def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
        # First conv runs without gradient tracking; only the deletion
        # operators (and the path through conv2) receive gradients.
        with torch.no_grad():
            h1 = self.conv1(x, edge_index)
        h1 = self.deletion1(h1, mask_1hop)

        h2 = self.conv2(F.relu(h1), edge_index)
        h2 = self.deletion2(h2, mask_2hop)

        return (h1, h2) if return_all_emb else h2

    def get_original_embeddings(self, x, edge_index, return_all_emb=False):
        """Backbone embeddings without deletion operators."""
        return super().forward(x, edge_index, return_all_emb)
class GINDelete(GIN):
    """GIN backbone with trainable per-layer deletion operators (GNNDelete)."""

    def __init__(self, args, mask_1hop=None, mask_2hop=None, **kwargs):
        super().__init__(args)
        self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
        self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)

        self.conv1.requires_grad = False
        self.conv2.requires_grad = False

    def forward(self, x, edge_index, mask_1hop=None, mask_2hop=None, return_all_emb=False):
        # First conv runs without gradient tracking; only the deletion
        # operators (and the path through conv2) receive gradients.
        with torch.no_grad():
            h1 = self.conv1(x, edge_index)
        h1 = self.deletion1(h1, mask_1hop)

        h2 = self.conv2(F.relu(h1), edge_index)
        h2 = self.deletion2(h2, mask_2hop)

        return (h1, h2) if return_all_emb else h2

    def get_original_embeddings(self, x, edge_index, return_all_emb=False):
        """Backbone embeddings without deletion operators."""
        return super().forward(x, edge_index, return_all_emb)
class RGCNDelete(RGCN):
    """RGCN backbone with trainable per-layer deletion operators (GNNDelete)."""

    def __init__(self, args, num_nodes, num_edge_type, mask_1hop=None, mask_2hop=None, **kwargs):
        super().__init__(args, num_nodes, num_edge_type)
        self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
        self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)

        self.node_emb.requires_grad = False
        self.conv1.requires_grad = False
        self.conv2.requires_grad = False

    def forward(self, x, edge_index, edge_type, mask_1hop=None, mask_2hop=None, return_all_emb=False):
        # Embedding lookup and first conv run without gradient tracking; only
        # the deletion operators (and the path through conv2) get gradients.
        with torch.no_grad():
            h = self.node_emb(x)
            h1 = self.conv1(h, edge_index, edge_type)
        h1 = self.deletion1(h1, mask_1hop)

        h2 = self.conv2(F.relu(h1), edge_index, edge_type)
        h2 = self.deletion2(h2, mask_2hop)

        return (h1, h2) if return_all_emb else h2

    def get_original_embeddings(self, x, edge_index, edge_type, return_all_emb=False):
        """Backbone embeddings without deletion operators."""
        return super().forward(x, edge_index, edge_type, return_all_emb)
class RGATDelete(RGAT):
    """RGAT backbone with trainable per-layer deletion operators (GNNDelete)."""

    def __init__(self, args, num_nodes, num_edge_type, mask_1hop=None, mask_2hop=None, **kwargs):
        super().__init__(args, num_nodes, num_edge_type)
        self.deletion1 = DeletionLayer(args.hidden_dim, mask_1hop)
        self.deletion2 = DeletionLayer(args.out_dim, mask_2hop)

        self.node_emb.requires_grad = False
        self.conv1.requires_grad = False
        self.conv2.requires_grad = False

    def forward(self, x, edge_index, edge_type, mask_1hop=None, mask_2hop=None, return_all_emb=False):
        # Embedding lookup and first conv run without gradient tracking; only
        # the deletion operators (and the path through conv2) get gradients.
        with torch.no_grad():
            h = self.node_emb(x)
            h1 = self.conv1(h, edge_index, edge_type)
        h1 = self.deletion1(h1, mask_1hop)

        h2 = self.conv2(F.relu(h1), edge_index, edge_type)
        h2 = self.deletion2(h2, mask_2hop)

        return (h1, h2) if return_all_emb else h2

    def get_original_embeddings(self, x, edge_index, edge_type, return_all_emb=False):
        """Backbone embeddings without deletion operators."""
        return super().forward(x, edge_index, edge_type, return_all_emb)
| 6,273 | 31.340206 | 102 | py |
GNNDelete | GNNDelete-main/framework/models/rgcn.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import RGCNConv, FastRGCNConv
from sklearn.metrics import roc_auc_score, average_precision_score
class RGCN(nn.Module):
    """Two-layer RGCN encoder with a DistMult link decoder."""

    def __init__(self, args, num_nodes, num_edge_type, **kwargs):
        super().__init__()
        self.args = args
        self.num_edge_type = num_edge_type

        # Encoder: learned node embeddings followed by two RGCN layers.
        self.node_emb = nn.Embedding(num_nodes, args.in_dim)
        if num_edge_type > 20:
            # Block-diagonal decomposition keeps parameters manageable
            # when there are many relation types.
            self.conv1 = RGCNConv(args.in_dim, args.hidden_dim, num_edge_type * 2, num_blocks=4)
            self.conv2 = RGCNConv(args.hidden_dim, args.out_dim, num_edge_type * 2, num_blocks=4)
        else:
            self.conv1 = RGCNConv(args.in_dim, args.hidden_dim, num_edge_type * 2)
            self.conv2 = RGCNConv(args.hidden_dim, args.out_dim, num_edge_type * 2)
        self.relu = nn.ReLU()

        # Decoder: DistMult relation vectors, one per relation type.
        self.W = nn.Parameter(torch.Tensor(num_edge_type, args.out_dim))
        nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))

    def forward(self, x, edge, edge_type, return_all_emb=False):
        """Encode node ids; optionally return both layer embeddings."""
        h0 = self.node_emb(x)
        h1 = self.conv1(h0, edge, edge_type)
        h2 = self.conv2(self.relu(h1), edge, edge_type)
        if return_all_emb:
            return h1, h2
        return h2

    def decode(self, z, edge_index, edge_type):
        """DistMult score <head, rel, tail> for each (edge, type) pair."""
        head = z[edge_index[0]]
        tail = z[edge_index[1]]
        rel = self.W[edge_type]
        return torch.sum(head * rel * tail, dim=1)
class RGCNDelete(RGCN):
    # NOTE(review): placeholder stub — __init__ neither calls
    # super().__init__() nor builds any layers, so instances are unusable.
    # A functional RGCNDelete exists in models/deletion.py; confirm this stub
    # is unused and consider removing it.
    def __init__(self):
        pass
| 1,689 | 31.5 | 97 | py |
GNNDelete | GNNDelete-main/framework/models/gcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCN(nn.Module):
    """Two-layer GCN encoder with a dot-product link-prediction decoder."""

    def __init__(self, args, **kwargs):
        super().__init__()
        self.conv1 = GCNConv(args.in_dim, args.hidden_dim)
        self.conv2 = GCNConv(args.hidden_dim, args.out_dim)

    def forward(self, x, edge_index, return_all_emb=False):
        """Encode nodes; optionally return both layer embeddings."""
        h1 = self.conv1(x, edge_index)
        h2 = self.conv2(F.relu(h1), edge_index)
        if return_all_emb:
            return h1, h2
        return h2

    def decode(self, z, pos_edge_index, neg_edge_index=None):
        """Score node pairs by the dot product of their embeddings.

        Negative edges, when given, are appended after the positives.
        """
        if neg_edge_index is None:
            edge_index = pos_edge_index
        else:
            edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        src, dst = edge_index[0], edge_index[1]
        return (z[src] * z[dst]).sum(dim=-1)
| 1,039 | 27.888889 | 76 | py |
GNNDelete | GNNDelete-main/framework/models/gat.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GAT(nn.Module):
    """Two-layer GAT encoder with a dot-product link-prediction decoder."""

    def __init__(self, args, **kwargs):
        super().__init__()
        self.conv1 = GATConv(args.in_dim, args.hidden_dim)
        self.conv2 = GATConv(args.hidden_dim, args.out_dim)

    def forward(self, x, edge_index, return_all_emb=False):
        """Encode nodes; optionally return both layer embeddings."""
        h1 = self.conv1(x, edge_index)
        h2 = self.conv2(F.relu(h1), edge_index)
        if return_all_emb:
            return h1, h2
        return h2

    def decode(self, z, pos_edge_index, neg_edge_index=None):
        """Score node pairs by the dot product of their embeddings.

        Negative edges, when given, are appended after the positives.
        """
        if neg_edge_index is None:
            edge_index = pos_edge_index
        else:
            edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        src, dst = edge_index[0], edge_index[1]
        return (z[src] * z[dst]).sum(dim=-1)
| 1,039 | 27.888889 | 76 | py |
GNNDelete | GNNDelete-main/framework/models/graph_classification/gcn_delete.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder
from torch_geometric.nn import GCNConv, MessagePassing, global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.utils import degree
from torch_geometric.nn.inits import uniform
from torch_scatter import scatter_mean
from ..deletion import DeletionLayer
def remove_edges(edge_index, edge_attr=None, ratio=0.025):
    """Randomly drop a fraction of the deduplicated undirected edges.

    Keeps only one direction (row < col) of each edge, then removes
    ``max(1, int(num_edges * ratio))`` of them uniformly at random.

    Args:
        edge_index: LongTensor of shape [2, E].
        edge_attr: optional per-edge attributes aligned with edge_index.
        ratio: fraction of (deduplicated) edges to remove.

    Returns:
        Tuple of (filtered edge_index [2, E'], filtered edge_attr or None).
    """
    row, col = edge_index
    # Keep one direction only so each undirected edge is considered once.
    mask = row < col
    row, col = row[mask], col[mask]
    if edge_attr is not None:
        edge_attr = edge_attr[mask]

    num_edges = len(row)
    num_remove = max(1, int(num_edges * ratio))
    selected = torch.randperm(num_edges)[:num_edges - num_remove]

    row = row[selected]
    col = col[selected]
    # Bug fix: the original unconditionally indexed edge_attr here, raising
    # TypeError whenever it was left at its default of None.
    if edge_attr is not None:
        edge_attr = edge_attr[selected]

    return torch.stack([row, col], dim=0), edge_attr
'''
Source: OGB github
https://github.com/snap-stanford/ogb/blob/master/examples/graphproppred/mol/main_pyg.py
'''
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
    """GCN convolution with bond-feature edge embeddings (OGB mol variant).

    Messages are ReLU(x_j + bond_embedding) scaled by the symmetric GCN
    normalization; the self contribution uses a learned "root" embedding
    with 1/deg scaling instead of explicit self-loop edges.
    """
    def __init__(self, emb_dim):
        super().__init__(aggr='add')  # sum-aggregate incoming messages
        self.linear = nn.Linear(emb_dim, emb_dim)
        # Learned embedding standing in for the self-loop ("root") edge feature.
        self.root_emb = nn.Embedding(1, emb_dim)
        self.bond_encoder = BondEncoder(emb_dim=emb_dim)

    def forward(self, x, edge_index, edge_attr):
        x = self.linear(x)
        edge_embedding = self.bond_encoder(edge_attr)
        row, col = edge_index
        #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
        # +1 accounts for the implicit self-loop handled via root_emb below.
        deg = degree(row, x.size(0), dtype=x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0  # guard isolated nodes
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        # Aggregated neighbor messages plus degree-normalized self term.
        return self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)

    def message(self, x_j, edge_attr, norm):
        # Per-edge message: normalized ReLU of neighbor + edge embedding.
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
### GNN to generate node embedding
class GNN_node_delete(nn.Module):
    """Node-embedding GNN with a DeletionLayer after every conv (GNNDelete)."""

    def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last", residual=False, mask_1hop=None, mask_2hop=None):
        super().__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK  # jumping-knowledge mode: "last" or "sum"
        ### add residual connection or not
        self.residual = residual

        self.atom_encoder = AtomEncoder(emb_dim)

        ###List of GNNs
        # NOTE(review): exactly two deletion layers with mask=None are built,
        # even though mask_1hop/mask_2hop are accepted (and unused) above;
        # `self.deletes[layer]` in forward() assumes num_layer <= 2 — confirm.
        self.deletes = nn.ModuleList([
            DeletionLayer(emb_dim, None),
            DeletionLayer(emb_dim, None)
        ])
        self.convs = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        for layer in range(num_layer):
            self.convs.append(GCNConv(emb_dim))
            self.batch_norms.append(nn.BatchNorm1d(emb_dim))

    def forward(self, batched_data):
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch

        # NOTE(review): edges are randomly re-dropped on *every* forward call
        # (training and eval alike), making outputs stochastic — confirm intent.
        edge_index, edge_attr = remove_edges(edge_index, edge_attr)

        ### computing input node embedding
        h_list = [self.atom_encoder(x)]
        for layer in range(self.num_layer):
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            # Deletion operator applied between conv and batch norm.
            h = self.deletes[layer](h)
            h = self.batch_norms[layer](h)

            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training=self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)

            if self.residual:
                h += h_list[layer]

            h_list.append(h)

        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layer + 1):
                node_representation += h_list[layer]

        return node_representation
class GNN(torch.nn.Module):
    """Graph-level predictor: GNN_node_delete encoder + pooling + linear head.

    Args:
        num_tasks: number of prediction targets (output dimension).
        num_layer: number of message-passing layers in the node encoder.
        emb_dim: node embedding dimension.
        virtual_node: accepted for interface compatibility; unused here.
        residual: whether the node encoder uses residual connections.
        drop_ratio: dropout probability in the node encoder.
        JK: jumping-knowledge mode ("last" or "sum").
        graph_pooling: one of "sum", "mean", "max", "attention", "set2set".

    Raises:
        ValueError: if ``graph_pooling`` is not a recognized type.
    """

    def __init__(self, num_tasks, num_layer=2, emb_dim=300, virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"):
        super().__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling

        ### GNN to generate node embeddings
        self.gnn_node = GNN_node_delete(num_layer, emb_dim, JK=JK, drop_ratio=drop_ratio, residual=residual)

        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            # Bug fix: the original used a bare `Linear`, which is not imported
            # anywhere in this module and raised NameError for this branch.
            self.pool = GlobalAttention(gate_nn=nn.Sequential(nn.Linear(emb_dim, 2*emb_dim), nn.BatchNorm1d(2*emb_dim), nn.ReLU(), nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps = 2)
        else:
            raise ValueError("Invalid graph pooling type.")

        # set2set doubles the pooled dimension.
        if graph_pooling == "set2set":
            self.graph_pred_linear = nn.Linear(2*self.emb_dim, self.num_tasks)
        else:
            self.graph_pred_linear = nn.Linear(self.emb_dim, self.num_tasks)

    def forward(self, batched_data):
        h_node = self.gnn_node(batched_data)
        h_graph = self.pool(h_node, batched_data.batch)

        return self.graph_pred_linear(h_graph)
| 5,857 | 34.719512 | 153 | py |
GNNDelete | GNNDelete-main/framework/models/graph_classification/gcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder
from torch_geometric.nn import GCNConv, MessagePassing, global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.utils import degree
from torch_geometric.nn.inits import uniform
from torch_scatter import scatter_mean
'''
Source: OGB github
https://github.com/snap-stanford/ogb/blob/master/examples/graphproppred/mol/main_pyg.py
'''
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
    """GCN convolution with bond-feature edge embeddings (OGB mol variant).

    Messages are ReLU(x_j + bond_embedding) scaled by the symmetric GCN
    normalization; the self contribution uses a learned "root" embedding
    with 1/deg scaling instead of explicit self-loop edges.
    """
    def __init__(self, emb_dim):
        super().__init__(aggr='add')  # sum-aggregate incoming messages
        self.linear = nn.Linear(emb_dim, emb_dim)
        # Learned embedding standing in for the self-loop ("root") edge feature.
        self.root_emb = nn.Embedding(1, emb_dim)
        self.bond_encoder = BondEncoder(emb_dim=emb_dim)

    def forward(self, x, edge_index, edge_attr):
        x = self.linear(x)
        edge_embedding = self.bond_encoder(edge_attr)
        row, col = edge_index
        #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
        # +1 accounts for the implicit self-loop handled via root_emb below.
        deg = degree(row, x.size(0), dtype=x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0  # guard isolated nodes
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        # Aggregated neighbor messages plus degree-normalized self term.
        return self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)

    def message(self, x_j, edge_attr, norm):
        # Per-edge message: normalized ReLU of neighbor + edge embedding.
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
### GNN to generate node embedding
class GNN_node(nn.Module):
    """Stack of GCNConv layers turning an OGB molecular batch into node embeddings.

    Args:
        num_layer: number of message-passing layers.
        emb_dim: embedding width used throughout.
        drop_ratio: dropout probability applied after every layer.
        JK: jumping-knowledge mode, "last" (final layer only) or "sum"
            (element-wise sum over all layer outputs including the input).
        residual: add a skip connection around every layer.

    Raises:
        ValueError: if ``JK`` is not one of the supported modes.
    """
    def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last", residual=False):
        super().__init__()
        # Fail fast on an unsupported JK mode; previously an invalid value only
        # surfaced as an UnboundLocalError at the end of forward().
        if JK not in ("last", "sum"):
            raise ValueError("Invalid JK mode: {}".format(JK))
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        self.atom_encoder = AtomEncoder(emb_dim)
        ### List of GNN layers and their batch norms
        self.convs = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        for layer in range(num_layer):
            self.convs.append(GCNConv(emb_dim))
            self.batch_norms.append(nn.BatchNorm1d(emb_dim))
    def forward(self, batched_data):
        """Return per-node representations for the batched graph."""
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### computing input node embedding
        h_list = [self.atom_encoder(x)]
        for layer in range(self.num_layer):
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                # remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training=self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
            if self.residual:
                h += h_list[layer]
            h_list.append(h)
        ### Different implementations of JK-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layer + 1):
                node_representation += h_list[layer]
        return node_representation
class GNN(torch.nn.Module):
    """Graph-property prediction model: GCN node encoder + pooling + linear head.

    Args:
        num_tasks: number of output targets.
        num_layer: number of GCNConv layers in the node encoder.
        emb_dim: node embedding width.
        virtual_node: accepted for API compatibility; not used by this GCN variant.
        residual: add skip connections between encoder layers.
        drop_ratio: dropout probability inside the encoder.
        JK: jumping-knowledge mode forwarded to GNN_node ("last" or "sum").
        graph_pooling: one of "sum", "mean", "max", "attention", "set2set".

    Raises:
        ValueError: if ``graph_pooling`` is not a supported type.
    """
    def __init__(self, num_tasks, num_layer=2, emb_dim=300, virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"):
        super().__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling
        ### GNN to generate node embeddings
        self.gnn_node = GNN_node(num_layer, emb_dim, JK=JK, drop_ratio=drop_ratio, residual=residual)
        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            # Fix: the gate network must use nn.Linear -- the bare `Linear` name
            # was never imported and raised a NameError for this pooling type.
            self.pool = GlobalAttention(gate_nn=nn.Sequential(nn.Linear(emb_dim, 2*emb_dim), nn.BatchNorm1d(2*emb_dim), nn.ReLU(), nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")
        # Set2Set concatenates two vectors, doubling the pooled embedding width.
        if graph_pooling == "set2set":
            self.graph_pred_linear = nn.Linear(2*self.emb_dim, self.num_tasks)
        else:
            self.graph_pred_linear = nn.Linear(self.emb_dim, self.num_tasks)
    def forward(self, batched_data):
        """Return (num_graphs, num_tasks) predictions for the batched graphs."""
        h_node = self.gnn_node(batched_data)
        h_graph = self.pool(h_node, batched_data.batch)
        return self.graph_pred_linear(h_graph)
| 5,019 | 35.642336 | 153 | py |
DeeBERT | DeeBERT-master/setup.py | """
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from io import open
from setuptools import find_packages, setup
# Distribution metadata for the `transformers` package; the release checklist
# lives in the module docstring above. Kept declarative -- values here define
# what is published to PyPI.
setup(
    name="transformers",
    version="2.1.1",
    author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
    author_email="thomas@huggingface.co",
    description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
    long_description=open("README.md", "r", encoding='utf-8').read(),
    long_description_content_type="text/markdown",
    keywords='NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU',
    license='Apache',
    url="https://github.com/huggingface/transformers",
    # Ship the library packages but never the test suites.
    packages=find_packages(exclude=["*.tests", "*.tests.*",
                                    "tests.*", "tests"]),
    install_requires=['numpy',
                      'boto3',
                      'requests',
                      'tqdm',
                      'regex',
                      'sentencepiece',
                      'sacremoses'],
    # Installs the `transformers` command-line tool.
    entry_points={
        'console_scripts': [
            "transformers=transformers.__main__:main",
        ]
    },
    # python_requires='>=3.5.0',
    tests_require=['pytest'],
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
| 2,923 | 39.054795 | 183 | py |
DeeBERT | DeeBERT-master/hubconf.py | from transformers import (
AutoTokenizer, AutoConfig, AutoModel, AutoModelWithLMHead, AutoModelForSequenceClassification, AutoModelForQuestionAnswering
)
from transformers.file_utils import add_start_docstrings
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex', 'sentencepiece', 'sacremoses']
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoConfig.from_pretrained``.

    # Using torch.hub !
    import torch
    config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased')  # Download configuration from S3 and cache.
    config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')  # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
    config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json')
    config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False)
    assert config.output_attention == True
    config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True)
    assert config.output_attention == True
    assert unused_kwargs == {'foo': False}
    """
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoTokenizer.from_pretrained``.

    # Using torch.hub !
    import torch
    tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased')  # Download vocabulary from S3 and cache.
    tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')  # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
    """
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoModel.from_pretrained``.

    # Using torch.hub !
    import torch
    model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelWithLMHead.__doc__)
def modelWithLMHead(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoModelWithLMHead.from_pretrained``.

    # Using torch.hub !
    import torch
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """
    return AutoModelWithLMHead.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoModelForSequenceClassification.from_pretrained``.

    # Using torch.hub !
    import torch
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    r"""
    torch.hub entry point: forwards all arguments to ``AutoModelForQuestionAnswering.from_pretrained``.

    # Using torch.hub !
    import torch
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 6,489 | 56.433628 | 189 | py |
DeeBERT | DeeBERT-master/examples/run_lm_finetuning.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
logger = logging.getLogger(__name__)
# Maps the --model_type CLI value to its (config, LM-head model, tokenizer) classes.
# BERT/RoBERTa/DistilBERT carry masked-LM heads (require --mlm, checked in main());
# GPT/GPT-2 carry causal-LM heads.
MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
class TextDataset(Dataset):
    """A text file tokenized and chopped into contiguous blocks of ``block_size`` token ids.

    The tokenized blocks are pickled next to the source file so repeated runs
    skip re-tokenization. Each item is a LongTensor of token ids including the
    tokenizer's special tokens.
    """
    def __init__(self, tokenizer, args, file_path='train', block_size=512):
        assert os.path.isfile(file_path)
        directory, filename = os.path.split(file_path)
        # Fix: args.model_name_or_path may itself be a filesystem path
        # (e.g. './out/checkpoint-500'); embedding it verbatim produced an
        # invalid/nested cache filename. Use only its last path component,
        # which is identical to the old behavior for plain hub names.
        model_tag = os.path.basename(os.path.normpath(args.model_name_or_path))
        cached_features_file = os.path.join(directory, model_tag + '_cached_lm_' + str(block_size) + '_' + filename)
        if os.path.exists(cached_features_file):
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, 'rb') as handle:
                self.examples = pickle.load(handle)
        else:
            logger.info("Creating features from dataset file at %s", directory)
            self.examples = []
            with open(file_path, encoding="utf-8") as f:
                text = f.read()
            tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
            for i in range(0, len(tokenized_text)-block_size+1, block_size):  # Truncate in block of block_size
                self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size]))
            # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
            # If your dataset is small, first you should look for a bigger one :-) and second you
            # can change this behavior by adding (model specific) padding.
            logger.info("Saving features into cached file %s", cached_features_file)
            with open(cached_features_file, 'wb') as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, item):
        return torch.tensor(self.examples[item])
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the TextDataset for the requested split (eval file when *evaluate*)."""
    source_file = args.eval_data_file if evaluate else args.train_data_file
    return TextDataset(tokenizer, args, file_path=source_file, block_size=args.block_size)
def set_seed(args):
    """Seed every RNG in play (python `random`, numpy, torch, and all CUDA devices)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):
    """Delete the oldest checkpoint directories so at most ``args.save_total_limit`` remain.

    Ordering is by modification time when *use_mtime* is set, otherwise by the
    numeric suffix in the directory name. Directories matching the glob but
    lacking a numeric suffix are never deleted.
    """
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    candidates = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix)))
    if len(candidates) <= limit:
        return
    # Pair each checkpoint with its sort key (mtime or step number).
    keyed = []
    for path in candidates:
        if use_mtime:
            keyed.append((os.path.getmtime(path), path))
            continue
        match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path)
        if match and match.groups():
            keyed.append((int(match.groups()[0]), path))
    keyed.sort()
    # Everything before the newest `limit` entries is stale.
    surplus = max(0, len(keyed) - limit)
    for _, stale in keyed[:surplus]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def mask_tokens(inputs, tokenizer, args):
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

    NOTE(review): the order of the bernoulli/randint draws below fixes the RNG
    stream; reordering these statements would change seeded runs. `inputs` is
    mutated in place and also returned. Returns (inputs, labels) where labels
    is -1 at unmasked positions so loss is computed on masked tokens only.
    """
    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    # Never mask special tokens ([CLS]/[SEP]/padding, per the tokenizer's mask).
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -1  # We only compute loss on masked tokens
    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, we replace masked input tokens with random word
    # (0.5 of the remaining 20% == 10% overall)
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]
    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels
def train(args, train_dataset, model, tokenizer):
    """Run the full training loop.

    Handles gradient accumulation, optional fp16 via apex, multi-GPU
    (DataParallel) and distributed (DDP) setups, periodic evaluation/logging to
    TensorBoard, and periodic checkpointing with rotation.
    Returns (global_step, average training loss per step).
    """
    # tb_writer only exists on the main process; guarded again before each use.
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # t_total is the number of optimizer updates, not forward passes.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    # Bias and LayerNorm weights are exempt from weight decay (standard for BERT).
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # With --mlm the batch is masked; otherwise inputs double as labels (causal LM).
            inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once per accumulation window.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    checkpoint_prefix = 'checkpoint'
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    # Drop the oldest checkpoints if --save_total_limit is set.
                    _rotate_checkpoints(args, checkpoint_prefix)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate on the eval file and return {"perplexity": tensor}.

    Also writes the metrics to <output_dir>/<prefix>/eval_results.txt.
    `prefix` distinguishes multiple checkpoints evaluated in one run.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        # Same masking convention as training: masked-LM if --mlm, else causal LM.
        inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)
        with torch.no_grad():
            outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    eval_loss = eval_loss / nb_eval_steps
    # Perplexity is exp of the mean cross-entropy loss.
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {
        "perplexity": perplexity
    }
    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return result
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling).")
if args.eval_data_file is None and args.do_eval:
raise ValueError("Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
model.to(args.device)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
return results
# Standard script entry point: run main() only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 28,845 | 50.881295 | 165 | py |
DeeBERT | DeeBERT-master/examples/run_squad.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import glob
import timeit
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForQuestionAnswering, BertTokenizer,
XLMConfig, XLMForQuestionAnswering,
XLMTokenizer, XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer,
DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_squad import (read_squad_examples, convert_examples_to_features,
RawResult, write_predictions,
RawResultExtended, write_predictions_extended)
# The follwing import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
# We've added it here for automated tests (see examples/test_examples.py file)
from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# All pretrained-model shortcut names known to the supported config classes,
# used only to build the --model_name_or_path help text.
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \
                    for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
# Maps the --model_type CLI value to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer)
}
def set_seed(args):
    """Seed every RNG in use (python, numpy, torch [+CUDA]) for reproducibility."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def to_list(tensor):
    """Return *tensor* as a (nested) Python list, detached and moved to CPU."""
    return tensor.cpu().detach().tolist()
def train(args, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset`.

    Builds the dataloader, AdamW optimizer and linear warmup/decay schedule,
    optionally wraps the model for fp16 (apex amp), multi-GPU (DataParallel)
    or distributed (DistributedDataParallel) training, then runs the
    epoch/step loop with gradient accumulation, periodic TensorBoard logging
    and periodic checkpointing.

    Returns:
        (global_step, tr_loss / global_step): total optimizer steps taken and
        the average training loss per step.
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # t_total = number of optimizer updates; when --max_steps is set it wins
    # and the epoch count is derived from it instead.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay).
    # Bias and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # Batch layout (from load_and_cache_examples, training mode):
            # 0 input_ids, 1 input_mask, 2 segment_ids, 3 start_positions,
            # 4 end_positions, 5 cls_index, 6 p_mask.
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'start_positions': batch[3],
                      'end_positions': batch[4]}
            if args.model_type != 'distilbert':
                # XLM does not use segment ids at all.
                inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2]
            if args.model_type in ['xlnet', 'xlm']:
                inputs.update({'cls_index': batch[5],
                               'p_mask': batch[6]})
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
    """Run inference over the dev set and score with the official SQuAD script.

    Collects raw start/end logits (or the extended XLNet/XLM outputs) for
    every feature, writes prediction / n-best (and, for SQuAD v2, null-odds)
    JSON files into args.output_dir, then returns the metrics dict produced
    by the official evaluator. `prefix` is appended to the output file names
    (used to distinguish checkpoints).
    """
    dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    all_results = []
    start_time = timeit.default_timer()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1]
                      }
            if args.model_type != 'distilbert':
                inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2]  # XLM don't use segment_ids
            # batch[3] holds the example index used to map outputs back to features.
            example_indices = batch[3]
            if args.model_type in ['xlnet', 'xlm']:
                inputs.update({'cls_index': batch[4],
                               'p_mask': batch[5]})
            outputs = model(**inputs)
        for i, example_index in enumerate(example_indices):
            eval_feature = features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            if args.model_type in ['xlnet', 'xlm']:
                # XLNet uses a more complex post-processing procedure
                result = RawResultExtended(unique_id = unique_id,
                                           start_top_log_probs = to_list(outputs[0][i]),
                                           start_top_index = to_list(outputs[1][i]),
                                           end_top_log_probs = to_list(outputs[2][i]),
                                           end_top_index = to_list(outputs[3][i]),
                                           cls_logits = to_list(outputs[4][i]))
            else:
                result = RawResult(unique_id = unique_id,
                                   start_logits = to_list(outputs[0][i]),
                                   end_logits = to_list(outputs[1][i]))
            all_results.append(result)
    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
    # Compute predictions
    output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
    output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
    if args.version_2_with_negative:
        output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
    else:
        output_null_log_odds_file = None
    if args.model_type in ['xlnet', 'xlm']:
        # XLNet uses a more complex post-processing procedure
        write_predictions_extended(examples, features, all_results, args.n_best_size,
                        args.max_answer_length, output_prediction_file,
                        output_nbest_file, output_null_log_odds_file, args.predict_file,
                        model.config.start_n_top, model.config.end_n_top,
                        args.version_2_with_negative, tokenizer, args.verbose_logging)
    else:
        write_predictions(examples, features, all_results, args.n_best_size,
                        args.max_answer_length, args.do_lower_case, output_prediction_file,
                        output_nbest_file, output_null_log_odds_file, args.verbose_logging,
                        args.version_2_with_negative, args.null_score_diff_threshold)
    # Evaluate with the official SQuAD script
    evaluate_options = EVAL_OPTS(data_file=args.predict_file,
                                 pred_file=output_prediction_file,
                                 na_prob_file=output_null_log_odds_file)
    results = evaluate_on_squad(evaluate_options)
    return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Build (or load from cache) the tensorized SQuAD dataset.

    Converts SQuAD examples to features with `tokenizer`, caching the
    features next to the input file (keyed by split, model name and max
    sequence length). In distributed mode, barriers ensure only the first
    process builds the cache while the others wait and then reuse it.

    Returns the TensorDataset; with output_examples=True also returns the
    raw examples and features (needed by evaluation post-processing).
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Load data features from cache or dataset file
    input_file = args.predict_file if evaluate else args.train_file
    cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length)))
    # The cache cannot be used when examples are requested, since only the
    # features are cached.
    if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", input_file)
        examples = read_squad_examples(input_file=input_file,
                                       is_training=not evaluate,
                                       version_2_with_negative=args.version_2_with_negative)
        # XLNet uses different special-token conventions (CLS at the end,
        # different segment ids, document as sequence A).
        features = convert_examples_to_features(examples=examples,
                                                tokenizer=tokenizer,
                                                max_seq_length=args.max_seq_length,
                                                doc_stride=args.doc_stride,
                                                max_query_length=args.max_query_length,
                                                is_training=not evaluate,
                                                cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
                                                pad_token_segment_id=3 if args.model_type in ['xlnet'] else 0,
                                                cls_token_at_end=True if args.model_type in ['xlnet'] else False,
                                                sequence_a_is_doc=True if args.model_type in ['xlnet'] else False)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
    all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
    if evaluate:
        # Evaluation batches carry an example index instead of gold positions.
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                all_example_index, all_cls_index, all_p_mask)
    else:
        all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                all_start_positions, all_end_positions,
                                all_cls_index, all_p_mask)
    if output_examples:
        return dataset, examples, features
    return dataset
def main():
    """Entry point for SQuAD fine-tuning / evaluation.

    Parses command-line arguments, sets up distributed training, logging and
    seeding, then optionally trains (`--do_train`), saves/reloads the model,
    and optionally evaluates one or all checkpoints (`--do_eval`,
    `--eval_all_checkpoints`).

    Returns:
        dict of evaluation metrics (empty if --do_eval was not given or this
        is not the rank-0 process).
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--train_file", default=None, type=str, required=True,
                        help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--predict_file", default=None, type=str, required=True,
                        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints and predictions will be written.")
    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--version_2_with_negative', action='store_true',
                        help='If true, the SQuAD examples contain some that do not have an answer.')
    parser.add_argument('--null_score_diff_threshold', type=float, default=0.0,
                        help="If null_score - best_non_null is greater than the threshold predict null.")
    parser.add_argument("--max_seq_length", default=384, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="When splitting up a long document into chunks, how much stride to take between chunks.")
    parser.add_argument("--max_query_length", default=64, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--n_best_size", default=20, type=int,
                        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Setup logging: only rank -1/0 logs at INFO, other ranks at WARN.
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    set_seed(args)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        from_tf=bool('.ckpt' in args.model_name_or_path),
                                        config=config,
                                        cache_dir=args.cache_dir if args.cache_dir else None)
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the trained model and the tokenizer
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)
    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce model loading logs
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            # Evaluate
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
            results.update(result)
    logger.info("Results: {}".format(results))
    return results
# Standard script entry point: run main() only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 31,570 | 54.001742 | 151 | py |
DeeBERT | DeeBERT-master/examples/run_highway_glue.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import time
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer)
from transformers.modeling_highway_bert import BertForSequenceClassification
from transformers.modeling_highway_roberta import RobertaForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
# All known pretrained-model shortcut names for the supported configs; used
# only to build the --model_name_or_path help text.
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
                                                                                RobertaConfig, DistilBertConfig)), ())
# Maps --model_type to its (config, model, tokenizer) classes. Note that for
# 'bert' and 'roberta' the model classes are the highway (early-exit) variants
# imported from transformers.modeling_highway_* above.
MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
}
def set_seed(args):
    """Seed the Python, NumPy and PyTorch RNGs (plus CUDA when GPUs are used)
    from args.seed so runs are reproducible."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def get_wanted_result(result):
    """Pick the single headline metric from a GLUE metrics dict.

    Checks, in priority order: spearmanr, f1, mcc, acc. If none is present,
    print the dict and exit with status 1 (same failure mode as before).
    """
    for metric in ("spearmanr", "f1", "mcc", "acc"):
        if metric in result:
            return result[metric]
    print(result)
    exit(1)
def train(args, train_dataset, model, tokenizer, train_highway=False):
    """Run the training loop and return (global_step, mean loss per step).

    Two-stage DeeBERT training is selected by `train_highway`: when True,
    only parameters whose name contains "highway" (the early-exit
    classifiers) are optimized; when False, only the non-highway backbone
    parameters are. `tokenizer` is only forwarded to evaluate() when
    --evaluate_during_training is set.
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimizer steps: fixed by --max_steps (epoch count recomputed to
    # match) or derived from epochs * optimizer-steps-per-epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    # Biases and LayerNorm weights are conventionally excluded from decay.
    no_decay = ['bias', 'LayerNorm.weight']
    if train_highway:
        # Optimize only the highway (early-exit) parameters.
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if
                        ("highway" in n) and (not any(nd in n for nd in no_decay))],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if
                        ("highway" in n) and (any(nd in n for nd in no_decay))],
             'weight_decay': 0.0}
        ]
    else:
        # Optimize only the backbone (non-highway) parameters.
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if
                        ("highway" not in n) and (not any(nd in n for nd in no_decay))],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if
                        ("highway" not in n) and (any(nd in n for nd in no_decay))],
             'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'labels': batch[3]}
            if args.model_type != 'distilbert':
                inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
            # Tell the highway model which stage we are in.
            inputs['train_highway'] = train_highway
            outputs = model(**inputs)
            loss = outputs[0] # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer/scheduler only advance once every accumulation window.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False):
    """Evaluate the model on the dev set(s) and return a metrics dict.

    output_layer >= 0 is forwarded to the model to request exit at a fixed
    layer; eval_highway additionally records which layer each batch exited at
    and, when args.early_exit_entropy >= 0, saves timing/cost statistics
    under args.plot_data_dir. MNLI is evaluated on both matched and
    mismatched dev sets; metrics from all runs are merged into one dict.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        # Histogram of exit layers: key = 1-based layer index, value = count.
        exit_layer_counter = {(i+1):0 for i in range(model.num_layers)}
        st = time.time()
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'labels': batch[3]}
                if args.model_type != 'distilbert':
                    inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
                if output_layer >= 0:
                    inputs['output_layer'] = output_layer
                outputs = model(**inputs)
                if eval_highway:
                    # The model's last output is (presumably) the exit layer
                    # index for this batch — keys match 1..num_layers above.
                    exit_layer_counter[outputs[-1]] += 1
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        eval_time = time.time() - st
        print("Eval time:", eval_time)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        if eval_highway:
            print("Exit layer counter", exit_layer_counter)
            # actual_cost = layers actually executed; full_cost assumes every
            # batch runs all num_layers layers.
            actual_cost = sum([l*c for l, c in exit_layer_counter.items()])
            full_cost = len(eval_dataloader) * model.num_layers
            print("Expected saving", actual_cost/full_cost)
            if args.early_exit_entropy>=0:
                # args.model_name_or_path[2:] strips a leading "./" — assumes
                # the checkpoint path is given in that form.
                save_fname = args.plot_data_dir + '/' +\
                             args.model_name_or_path[2:] +\
                             "/entropy_{}.npy".format(args.early_exit_entropy)
                if not os.path.exists(os.path.dirname(save_fname)):
                    os.makedirs(os.path.dirname(save_fname))
                print_result = get_wanted_result(result)
                np.save(save_fname,
                        np.array([exit_layer_counter,
                                  eval_time,
                                  actual_cost/full_cost,
                                  print_result]))
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Build a TensorDataset of GLUE features for `task`.

    Features are tokenized once per (split, model, max_seq_length, task) and
    cached inside args.data_dir; subsequent calls load the cache unless
    --overwrite_cache is set. `evaluate` selects the dev split instead of
    train. Under distributed training, barriers ensure only rank 0 builds
    the cache while other ranks wait and then read it.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(examples,
                                                tokenizer,
                                                label_list=label_list,
                                                max_length=args.max_seq_length,
                                                output_mode=output_mode,
                                                pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def main():
    """Entry point: parse CLI arguments, set up device/seed/logging, then run
    two-stage training (backbone, then highway classifiers) and/or evaluation,
    returning the merged metrics dict."""
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--plot_data_dir", default="./plotting/", type=str, required=False,
                        help="The directory to store data for plotting figures.")
    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Rul evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--eval_each_highway", action='store_true',
                        help="Set this flag to evaluate each highway.")
    parser.add_argument("--eval_after_first_stage", action='store_true',
                        help="Set this flag to evaluate after training only bert (not highway).")
    parser.add_argument("--eval_highway", action='store_true',
                        help="Set this flag if it's evaluating highway models")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--early_exit_entropy", default=-1, type=float,
                        help = "Entropy threshold for early exit.")
    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber an existing non-empty output dir unless asked to.
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          finetuning_task=args.task_name,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        from_tf=bool('.ckpt' in args.model_name_or_path),
                                        config=config,
                                        cache_dir=args.cache_dir if args.cache_dir else None)
    # Wire the early-exit entropy threshold into the encoder and initialize
    # the highway poolers (bert and roberta variants expose the same API).
    if args.model_type == "bert":
        model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
        model.bert.init_highway_pooler()
    else:
        model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
        model.roberta.init_highway_pooler()
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        # Stage 1: train the backbone (non-highway) parameters.
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
        if args.eval_after_first_stage:
            result = evaluate(args, model, tokenizer, prefix="")
            print_result = get_wanted_result(result)
        # Stage 2: train only the highway (early-exit) classifiers.
        train(args, train_dataset, model, tokenizer, train_highway=True)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            # Re-apply the entropy threshold to every reloaded checkpoint.
            if args.model_type=="bert":
                model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
            else:
                model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix,
                              eval_highway=args.eval_highway)
            print_result = get_wanted_result(result)
            print("Result: {}".format(print_result))
            if args.eval_each_highway:
                # Force an exit at each layer in turn to measure per-layer quality.
                last_layer_results = print_result
                each_layer_results = []
                for i in range(model.num_layers):
                    logger.info("\n")
                    _result = evaluate(args, model, tokenizer, prefix=prefix,
                                       output_layer=i, eval_highway=args.eval_highway)
                    if i+1 < model.num_layers:
                        each_layer_results.append(get_wanted_result(_result))
                each_layer_results.append(last_layer_results)
                save_fname = args.plot_data_dir + '/' + args.model_name_or_path[2:] + "/each_layer.npy"
                if not os.path.exists(os.path.dirname(save_fname)):
                    os.makedirs(os.path.dirname(save_fname))
                np.save(save_fname,
                        np.array(each_layer_results))
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
| 33,078 | 51.423138 | 158 | py |
DeeBERT | DeeBERT-master/examples/run_glue.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer)
# from transformers.modeling_highway_roberta import RobertaForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
# All known pretrained-model shortcut names for the supported configs; used
# only to build the --model_name_or_path help text.
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
                                                                                RobertaConfig, DistilBertConfig)), ())
# Maps --model_type to its (config, model, tokenizer) classes. Unlike the
# highway variant of this script, these are the stock transformers model
# classes (the highway import above is commented out).
MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
}
def set_seed(args):
    """Push args.seed into every random number generator in use
    (stdlib random, NumPy, torch, and torch.cuda when GPUs are present)."""
    seeders = [random.seed, np.random.seed, torch.manual_seed]
    if args.n_gpu > 0:
        seeders.append(torch.cuda.manual_seed_all)
    for apply_seed in seeders:
        apply_seed(args.seed)
def train(args, train_dataset, model, tokenizer):
    """Run the standard (non-highway) fine-tuning loop.

    Returns (global_step, mean training loss per optimizer step).
    `tokenizer` is only forwarded to evaluate() when
    --evaluate_during_training is set.
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimizer steps: fixed by --max_steps (epoch count recomputed to
    # match) or derived from epochs * optimizer-steps-per-epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    # Biases and LayerNorm weights are conventionally excluded from decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'labels': batch[3]}
            if args.model_type != 'distilbert':
                inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0] # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer/scheduler only advance once every accumulation window.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Build (or load from cache) the TensorDataset for `task`.

    In distributed runs only rank 0 materialises the feature cache; the
    remaining ranks wait on a barrier and then read the cached file.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # let the first process build the cache; the others reuse it
    processor = processors[task]()
    output_mode = output_modes[task]
    # Cache file name is keyed on split, model name, sequence length and task.
    split_name = 'dev' if evaluate else 'train'
    model_tag = list(filter(None, args.model_name_or_path.split('/'))).pop()
    cached_features_file = os.path.join(
        args.data_dir,
        'cached_{}_{}_{}_{}'.format(split_name, model_tag, str(args.max_seq_length), str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if evaluate:
            examples = processor.get_dev_examples(args.data_dir)
        else:
            examples = processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
            pad_on_left=bool(args.model_type in ['xlnet']),  # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # release the waiting ranks now that the cache exists
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    return TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
def main():
    """CLI entry point for GLUE fine-tuning/evaluation.

    Parses command-line arguments, configures device placement (CPU, single
    machine multi-GPU, or torch.distributed), loads the pretrained
    model/tokenizer, optionally trains (--do_train), saves the final model,
    and optionally evaluates one or more checkpoints (--do_eval).

    Returns:
        dict of evaluation metric results (empty when --do_eval is not set).
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Rul evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1  # in distributed mode each process drives exactly one GPU
    args.device = device
    # Setup logging: only ranks -1/0 log at INFO, other ranks at WARN.
    logging.basicConfig(format = '%(asctime)s -  %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          finetuning_task=args.task_name,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        from_tf=bool('.ckpt' in args.model_name_or_path),
                                        config=config,
                                        cache_dir=args.cache_dir if args.cache_dir else None)
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            # Discover every saved checkpoint dir by locating its weights file.
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            # Tag each metric with the checkpoint's global step so keys don't collide.
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Standard CLI entry point: run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 28,140 | 52.398482 | 158 | py |
DeeBERT | DeeBERT-master/examples/benchmarks.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Benchmarking the library on inference and training """
# If checking the tensors placement
# tf.debugging.set_log_device_placement(True)
from typing import List
import timeit
from transformers import is_tf_available, is_torch_available
from time import time
import argparse
import csv
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModel
if is_torch_available():
import torch
from transformers import AutoModel
from transformers import AutoConfig, AutoTokenizer
input_text = """Bent over their instruments, three hundred Fertilizers were plunged, as
the Director of Hatcheries and Conditioning entered the room, in the
scarcely breathing silence, the absent-minded, soliloquizing hum or
whistle, of absorbed concentration. A troop of newly arrived students,
very young, pink and callow, followed nervously, rather abjectly, at the
Director's heels. Each of them carried a notebook, in which, whenever
the great man spoke, he desperately scribbled. Straight from the
horse's mouth. It was a rare privilege. The D. H. C. for Central London
always made a point of personally conducting his new students round
the various departments.
"Just to give you a general idea," he would explain to them. For of
course some sort of general idea they must have, if they were to do
their work intelligently-though as little of one, if they were to be good
and happy members of society, as possible. For particulars, as every
one knows, make for virtue and happiness; generalities are intellectu-
ally necessary evils. Not philosophers but fret-sawyers and stamp col-
lectors compose the backbone of society.
"To-morrow," he would add, smiling at them with a slightly menacing
geniality, "you'll be settling down to serious work. You won't have time
for generalities. Meanwhile ..."
Meanwhile, it was a privilege. Straight from the horse's mouth into the
notebook. The boys scribbled like mad.
Tall and rather thin but upright, the Director advanced into the room.
He had a long chin and big rather prominent teeth, just covered, when
he was not talking, by his full, floridly curved lips. Old, young? Thirty?
Fifty? Fifty-five? It was hard to say. And anyhow the question didn't
arise; in this year of stability, A. F. 632, it didn't occur to you to ask it.
"I shall begin at the beginning," said the D.H.C. and the more zealous
students recorded his intention in their notebooks: Begin at the begin-
ning. "These," he waved his hand, "are the incubators." And opening
an insulated door he showed them racks upon racks of numbered test-
tubes. "The week's supply of ova. Kept," he explained, "at blood heat;
whereas the male gametes," and here he opened another door, "they
have to be kept at thirty-five instead of thirty-seven. Full blood heat
sterilizes." Rams wrapped in theremogene beget no lambs.
Still leaning against the incubators he gave them, while the pencils
scurried illegibly across the pages, a brief description of the modern
fertilizing process; spoke first, of course, of its surgical introduc-
tion-"the operation undergone voluntarily for the good of Society, not
to mention the fact that it carries a bonus amounting to six months'
salary"; continued with some account of the technique for preserving
the excised ovary alive and actively developing; passed on to a consid-
eration of optimum temperature, salinity, viscosity; referred to the liq-
uor in which the detached and ripened eggs were kept; and, leading
his charges to the work tables, actually showed them how this liquor
was drawn off from the test-tubes; how it was let out drop by drop
onto the specially warmed slides of the microscopes; how the eggs
which it contained were inspected for abnormalities, counted and
transferred to a porous receptacle; how (and he now took them to
watch the operation) this receptacle was immersed in a warm bouillon
containing free-swimming spermatozoa-at a minimum concentration
of one hundred thousand per cubic centimetre, he insisted; and how,
after ten minutes, the container was lifted out of the liquor and its
contents re-examined; how, if any of the eggs remained unfertilized, it
was again immersed, and, if necessary, yet again; how the fertilized
ova went back to the incubators; where the Alphas and Betas re-
mained until definitely bottled; while the Gammas, Deltas and Epsilons
were brought out again, after only thirty-six hours, to undergo Bo-
kanovsky's Process.
"Bokanovsky's Process," repeated the Director, and the students un-
derlined the words in their little notebooks.
One egg, one embryo, one adult-normality. But a bokanovskified egg
will bud, will proliferate, will divide. From eight to ninety-six buds, and
every bud will grow into a perfectly formed embryo, and every embryo
into a full-sized adult. Making ninety-six human beings grow where
only one grew before. Progress.
"Essentially," the D.H.C. concluded, "bokanovskification consists of a
series of arrests of development. We check the normal growth and,
paradoxically enough, the egg responds by budding."
Responds by budding. The pencils were busy.
He pointed. On a very slowly moving band a rack-full of test-tubes was
entering a large metal box, another, rack-full was emerging. Machinery
faintly purred. It took eight minutes for the tubes to go through, he
told them. Eight minutes of hard X-rays being about as much as an
egg can stand. A few died; of the rest, the least susceptible divided
into two; most put out four buds; some eight; all were returned to the
incubators, where the buds began to develop; then, after two days,
were suddenly chilled, chilled and checked. Two, four, eight, the buds
in their turn budded; and having budded were dosed almost to death
with alcohol; consequently burgeoned again and having budded-bud
out of bud out of bud-were thereafter-further arrest being generally
fatal-left to develop in peace. By which time the original egg was in a
fair way to becoming anything from eight to ninety-six embryos- a
prodigious improvement, you will agree, on nature. Identical twins-but
not in piddling twos and threes as in the old viviparous days, when an
egg would sometimes accidentally divide; actually by dozens, by
scores at a time.
"Scores," the Director repeated and flung out his arms, as though he
were distributing largesse. "Scores."
But one of the students was fool enough to ask where the advantage
lay.
"My good boy!" The Director wheeled sharply round on him. "Can't you
see? Can't you see?" He raised a hand; his expression was solemn.
"Bokanovsky's Process is one of the major instruments of social stabil-
ity!"
Major instruments of social stability.
Standard men and women; in uniform batches. The whole of a small
factory staffed with the products of a single bokanovskified egg.
"Ninety-six identical twins working ninety-six identical machines!" The
voice was almost tremulous with enthusiasm. "You really know where
you are. For the first time in history." He quoted the planetary motto.
"Community, Identity, Stability." Grand words. "If we could bo-
kanovskify indefinitely the whole problem would be solved."
Solved by standard Gammas, unvarying Deltas, uniform Epsilons. Mil-
lions of identical twins. The principle of mass production at last applied
to biology.
"But, alas," the Director shook his head, "we can't bokanovskify indefi-
nitely."
Ninety-six seemed to be the limit; seventy-two a good average. From
the same ovary and with gametes of the same male to manufacture as
many batches of identical twins as possible-that was the best (sadly a
second best) that they could do. And even that was difficult.
"For in nature it takes thirty years for two hundred eggs to reach ma-
turity. But our business is to stabilize the population at this moment,
here and now. Dribbling out twins over a quarter of a century-what
would be the use of that?"
Obviously, no use at all. But Podsnap's Technique had immensely ac-
celerated the process of ripening. They could make sure of at least a
hundred and fifty mature eggs within two years. Fertilize and bo-
kanovskify-in other words, multiply by seventy-two-and you get an
average of nearly eleven thousand brothers and sisters in a hundred
and fifty batches of identical twins, all within two years of the same
age.
"And in exceptional cases we can make one ovary yield us over fifteen
thousand adult individuals."
Beckoning to a fair-haired, ruddy young man who happened to be
passing at the moment. "Mr. Foster," he called. The ruddy young man
approached. "Can you tell us the record for a single ovary, Mr. Foster?"
"Sixteen thousand and twelve in this Centre," Mr. Foster replied with-
out hesitation. He spoke very quickly, had a vivacious blue eye, and
took an evident pleasure in quoting figures. "Sixteen thousand and
twelve; in one hundred and eighty-nine batches of identicals. But of
course they've done much better," he rattled on, "in some of the tropi-
cal Centres. Singapore has often produced over sixteen thousand five
hundred; and Mombasa has actually touched the seventeen thousand
mark. But then they have unfair advantages. You should see the way a
negro ovary responds to pituitary! It's quite astonishing, when you're
used to working with European material. Still," he added, with a laugh
(but the light of combat was in his eyes and the lift of his chin was
challenging), "still, we mean to beat them if we can. I'm working on a
wonderful Delta-Minus ovary at this moment. Only just eighteen
months old. Over twelve thousand seven hundred children already, ei-
ther decanted or in embryo. And still going strong. We'll beat them
yet."
"That's the spirit I like!" cried the Director, and clapped Mr. Foster on
the shoulder. "Come along with us, and give these boys the benefit of
your expert knowledge."
Mr. Foster smiled modestly. "With pleasure." They went.
In the Bottling Room all was harmonious bustle and ordered activity.
Flaps of fresh sow's peritoneum ready cut to the proper size came
shooting up in little lifts from the Organ Store in the sub-basement.
Whizz and then, click! the lift-hatches hew open; the bottle-liner had
only to reach out a hand, take the flap, insert, smooth-down, and be-
fore the lined bottle had had time to travel out of reach along the end-
less band, whizz, click! another flap of peritoneum had shot up from
the depths, ready to be slipped into yet another bottle, the next of that
slow interminable procession on the band.
Next to the Liners stood the Matriculators. The procession advanced;
one by one the eggs were transferred from their test-tubes to the
larger containers; deftly the peritoneal lining was slit, the morula
dropped into place, the saline solution poured in ... and already the
bottle had passed, and it was the turn of the labellers. Heredity, date
of fertilization, membership of Bokanovsky Group-details were trans-
ferred from test-tube to bottle. No longer anonymous, but named,
identified, the procession marched slowly on; on through an opening in
the wall, slowly on into the Social Predestination Room.
"Eighty-eight cubic metres of card-index," said Mr. Foster with relish,
as they entered."""
def create_setup_and_compute(model_names: List[str],
                             gpu: bool = True,
                             tensorflow: bool = False,
                             average_over: int = 3,
                             torchscript: bool = False,
                             xla: bool = False,
                             amp: bool = False,
                             fp16: bool = False,
                             save_to_csv: bool = False,
                             csv_filename: str = ""):
    """Benchmark inference time for each checkpoint in `model_names`.

    Args:
        model_names: checkpoint names understood by the Auto* classes.
        gpu: PyTorch only - run on CUDA when available.
        tensorflow: benchmark the TensorFlow implementation instead of PyTorch.
        average_over: number of timing repetitions per configuration.
        torchscript: PyTorch only - trace the model with torch.jit.
        xla: TensorFlow only - enable the XLA JIT.
        amp: TensorFlow only - enable automatic mixed precision.
        fp16: PyTorch only - run the model in half precision.
        save_to_csv: also write the timing table to `csv_filename`.
        csv_filename: target CSV path; empty means generate a fresh
            timestamped name at call time.
    """
    # BUG FIX: the original default was f"results_{round(time())}.csv",
    # which is evaluated once at import time, so every call in the same
    # process silently reused (and overwrote) a single file. Generate the
    # timestamped name lazily instead.
    if not csv_filename:
        csv_filename = f"results_{round(time())}.csv"
    if xla:
        tf.config.optimizer.set_jit(True)
    if amp:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    if tensorflow:
        dictionary = {model_name: {} for model_name in model_names}
        results = _compute_tensorflow(model_names, dictionary, average_over, amp)
    else:
        device = 'cuda' if (gpu and torch.cuda.is_available()) else 'cpu'
        dictionary = {model_name: {} for model_name in model_names}
        results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16)
    print("=========== RESULTS ===========")
    for model_name in model_names:
        print("\t" + f"======= MODEL CHECKPOINT: {model_name} =======")
        for batch_size in results[model_name]["bs"]:
            print("\t\t" + f"===== BATCH SIZE: {batch_size} =====")
            for slice_size in results[model_name]["ss"]:
                result = results[model_name]['results'][batch_size][slice_size]
                if isinstance(result, str):
                    # "N/A": sequence too long for the model, or OOM on device.
                    print(f"\t\t{model_name}/{batch_size}/{slice_size}: "
                          f"{result}")
                else:
                    print(f"\t\t{model_name}/{batch_size}/{slice_size}: "
                          f"{(round(1000 * result) / 1000)}"
                          f"s")
    if save_to_csv:
        with open(csv_filename, mode='w') as csv_file:
            fieldnames = ['model',
                          '1x8', '1x64', '1x128', '1x256', '1x512', '1x1024',
                          '2x8', '2x64', '2x128', '2x256', '2x512', '2x1024',
                          '4x8', '4x64', '4x128', '4x256', '4x512', '4x1024',
                          '8x8', '8x64', '8x128', '8x256', '8x512', '8x1024',
                          ]
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            writer.writeheader()
            for model_name in model_names:
                # Flatten the nested {batch: {slice: time}} table into "BxS" columns.
                model_results = {
                    f'{bs}x{ss}': results[model_name]['results'][bs][ss]
                    for bs in results[model_name]["results"]
                    for ss in results[model_name]['results'][bs]
                }
                writer.writerow({'model': model_name, **model_results})
def _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16):
    """Time PyTorch forward passes for every (model, batch, slice) combination.

    Fills `dictionary` in place with {"bs", "ss", "results"} per model and
    returns it. Configurations that exceed the model's max input size, or
    that OOM on the device, are recorded as the string "N/A".
    """
    total = len(model_names)
    for position, checkpoint in enumerate(model_names):
        print(f"{position + 1} / {total}")
        config = AutoConfig.from_pretrained(checkpoint, torchscript=torchscript)
        model = AutoModel.from_pretrained(checkpoint, config=config)
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        token_ids = tokenizer.encode(input_text, add_special_tokens=False)
        max_input_size = tokenizer.max_model_input_sizes[checkpoint]
        batch_sizes = [1, 2, 4, 8]
        slice_sizes = [8, 64, 128, 256, 512, 1024]
        per_model = {"bs": batch_sizes, "ss": slice_sizes,
                     "results": {bs: {} for bs in batch_sizes}}
        dictionary[checkpoint] = per_model
        for bs in batch_sizes:
            if fp16:
                model.half()
            model.to(device)
            model.eval()
            for ss in slice_sizes:
                if max_input_size is not None and ss > max_input_size:
                    # Sequence longer than the model supports.
                    per_model["results"][bs][ss] = "N/A"
                    continue
                sequence = torch.tensor(token_ids[:ss], device=device).repeat(bs, 1)
                try:
                    if torchscript:
                        print("Tracing model with sequence size", sequence.shape)
                        inference = torch.jit.trace(model, sequence)
                    else:
                        inference = model
                    inference(sequence)  # warm-up / trace-validation pass before timing
                    print("Going through model with sequence of shape", sequence.shape)
                    runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3)
                    # Each repeat runs the call `number=3` times, hence the /3.
                    per_model["results"][bs][ss] = sum(runtimes) / float(len(runtimes)) / 3.0
                except RuntimeError as exc:
                    print("Doesn't fit on GPU.", exc)
                    torch.cuda.empty_cache()
                    per_model["results"][bs][ss] = "N/A"
    return dictionary
def _compute_tensorflow(model_names, dictionary, average_over, amp):
    """Time TensorFlow forward passes for every (model, batch, slice) combination.

    Fills `dictionary` in place with {"bs", "ss", "results"} per model and
    returns it. Configurations that exceed the model's max input size, or
    that exhaust device memory, are recorded as the string "N/A".
    """
    for c, model_name in enumerate(model_names):
        print(f"{c + 1} / {len(model_names)}")
        config = AutoConfig.from_pretrained(model_name)
        model = TFAutoModel.from_pretrained(model_name, config=config)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False)
        max_input_size = tokenizer.max_model_input_sizes[model_name]
        batch_sizes = [1, 2, 4, 8]
        slice_sizes = [8, 64, 128, 256, 512, 1024]
        dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}}
        dictionary[model_name]["results"] = {i: {} for i in batch_sizes}
        print("Using model", model)

        # Compiled wrapper; rebuilt per model so the traced graph matches it.
        @tf.function
        def inference(inputs):
            return model(inputs)

        for batch_size in batch_sizes:
            for slice_size in slice_sizes:
                if max_input_size is not None and slice_size > max_input_size:
                    dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
                else:
                    sequence = tf.stack([tf.squeeze(tf.constant(tokenized_sequence[:slice_size])[None, :])] * batch_size)
                    try:
                        print("Going through model with sequence of shape", sequence.shape)
                        # To make sure that the model is traced + that the tensors are on the appropriate device
                        inference(sequence)
                        runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3)
                        # Each repeat runs the call `number=3` times, hence the /3.
                        average_time = sum(runtimes) / float(len(runtimes)) / 3.0
                        dictionary[model_name]["results"][batch_size][slice_size] = average_time
                    except tf.errors.ResourceExhaustedError as e:
                        print("Doesn't fit on GPU.", e)
                        # BUG FIX: the original called torch.cuda.empty_cache() here.
                        # In a TF-only environment `torch` is never imported (it is
                        # guarded by is_torch_available()), so the handler itself
                        # raised NameError; and freeing *PyTorch* CUDA cache is
                        # meaningless for a TensorFlow model anyway.
                        dictionary[model_name]["results"][batch_size][slice_size] = "N/A"
    return dictionary
def main():
    """Parse command-line options and launch the requested benchmark runs."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--models",
        required=False,
        type=str,
        default='all',
        help="Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version of all available model architectures.",
    )
    parser.add_argument(
        "--torch",
        required=False,
        action="store_true",
        help="Benchmark the Pytorch version of the models",
    )
    parser.add_argument(
        "--torch_cuda",
        required=False,
        action="store_true",
        help="Pytorch only: run on available cuda devices",
    )
    parser.add_argument(
        "--torchscript",
        required=False,
        action="store_true",
        help="Pytorch only: trace the models using torchscript",
    )
    parser.add_argument(
        "--tensorflow",
        required=False,
        action="store_true",
        help="Benchmark the TensorFlow version of the models. Will run on GPU if the correct dependencies are installed",
    )
    parser.add_argument("--xla", required=False, action="store_true", help="TensorFlow only: use XLA acceleration.")
    parser.add_argument("--amp", required=False, action="store_true", help="TensorFlow only: use automatic mixed precision acceleration.")
    parser.add_argument("--fp16", required=False, action="store_true", help="PyTorch only: use FP16 to accelerate inference.")
    parser.add_argument(
        "--keras_predict",
        required=False,
        action="store_true",
        help="Whether to use model.predict instead of model() to do a forward pass.",
    )
    parser.add_argument("--save_to_csv", required=False, action="store_true", help="Save to a CSV file.")
    parser.add_argument("--csv_filename", required=False, default=None, help="CSV filename used if saving results to csv.")
    parser.add_argument("--average_over", required=False, default=30, type=int, help="Times an experiment will be run.")
    args = parser.parse_args()

    # Expand the sentinel 'all' into the full list of base checkpoints,
    # otherwise treat the value as a whitespace-separated list.
    if args.models == 'all':
        args.models = [
            "gpt2",
            "bert-base-cased",
            "xlnet-base-cased",
            "xlm-mlm-en-2048",
            "transfo-xl-wt103",
            "openai-gpt",
            "distilbert-base-uncased",
            "distilgpt2",
            "roberta-base",
            "ctrl",
        ]
    else:
        args.models = args.models.split()

    print("Running with arguments", args)

    # PyTorch benchmarks first, then TensorFlow — guard clauses keep the
    # happy path unindented.
    if args.torch:
        if not is_torch_available():
            raise ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.")
        create_setup_and_compute(
            model_names=args.models,
            tensorflow=False,
            gpu=args.torch_cuda,
            torchscript=args.torchscript,
            fp16=args.fp16,
            save_to_csv=args.save_to_csv,
            csv_filename=args.csv_filename,
            average_over=args.average_over,
        )

    if args.tensorflow:
        if not is_tf_available():
            raise ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.")
        create_setup_and_compute(
            model_names=args.models,
            tensorflow=True,
            xla=args.xla,
            amp=args.amp,
            save_to_csv=args.save_to_csv,
            csv_filename=args.csv_filename,
            average_over=args.average_over,
        )


if __name__ == '__main__':
    main()
| 23,631 | 48.439331 | 138 | py |
DeeBERT | DeeBERT-master/examples/run_summarization_finetuning.py | # coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2019 The HuggingFace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning seq2seq models for sequence generation."""
import argparse
import functools
import logging
import os
import random
import sys
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
AutoTokenizer,
BertForMaskedLM,
BertConfig,
PreTrainedEncoderDecoder,
Model2Model,
)
from utils_summarization import (
CNNDailyMailDataset,
encode_for_summarization,
fit_to_block_size,
build_lm_labels,
build_mask,
compute_token_type_ids,
)
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def set_seed(args):
    """Seed the Python, NumPy and PyTorch RNGs from ``args.seed`` for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
# ------------
# Load dataset
# ------------
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the CNN/DailyMail dataset from ``args.data_dir``.

    Args:
        args: namespace providing ``data_dir``.
        tokenizer: tokenizer handed to the dataset for encoding.
        evaluate: accepted for API symmetry with the other example scripts
            (``evaluate()`` below calls this with ``evaluate=True``); the
            dataset itself does not distinguish splits here.

    Returns:
        A ``CNNDailyMailDataset`` instance.

    BUGFIX: the original signature was ``(args, tokenizer)`` while
    ``evaluate()`` passes ``evaluate=True`` — a guaranteed TypeError.
    """
    dataset = CNNDailyMailDataset(tokenizer, data_dir=args.data_dir)
    return dataset
def collate(data, tokenizer, block_size):
    """Turn (story, summary) pairs into padded tensors plus masks and LM labels.

    Pairs with an empty story or summary are dropped; both sides are encoded
    and padded/truncated to ``block_size`` using the tokenizer's pad token.
    """
    pad_id = tokenizer.pad_token_id

    # Keep only pairs where both the story and the summary are non-empty,
    # then encode and fit each side to the block size.
    kept = (pair for pair in data if len(pair[0]) > 0 and len(pair[1]) > 0)
    encoded = (
        encode_for_summarization(story, summary, tokenizer) for story, summary in kept
    )
    blocks = [
        (
            fit_to_block_size(story, block_size, pad_id),
            fit_to_block_size(summary, block_size, pad_id),
        )
        for story, summary in encoded
    ]

    stories = torch.tensor([story for story, _ in blocks])
    summaries = torch.tensor([summary for _, summary in blocks])

    encoder_token_type_ids = compute_token_type_ids(stories, tokenizer.cls_token_id)
    encoder_mask = build_mask(stories, pad_id)
    decoder_mask = build_mask(summaries, pad_id)
    lm_labels = build_lm_labels(summaries, pad_id)

    return (
        stories,
        summaries,
        encoder_token_type_ids,
        encoder_mask,
        decoder_mask,
        lm_labels,
    )
# ----------
# Optimizers
# ----------
class BertSumOptimizer(object):
    """ Specific optimizer for BertSum.

    As described in [1], the authors fine-tune BertSum for abstractive
    summarization using two Adam Optimizers with different warm-up steps and
    learning rate. They also use a custom learning rate scheduler.

    [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
        arXiv preprint arXiv:1908.08345 (2019).
    """

    def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
        """Create one Adam optimizer per stack.

        Args:
            model: object exposing ``encoder`` and ``decoder`` sub-modules.
            lr: dict with base learning rates, keys "encoder"/"decoder".
            warmup_steps: dict with warm-up step counts, same keys.
            beta_1, beta_2, eps: Adam hyper-parameters shared by both stacks.
        """
        self.encoder = model.encoder
        self.decoder = model.decoder
        self.lr = lr
        self.warmup_steps = warmup_steps
        self.optimizers = {
            "encoder": Adam(
                model.encoder.parameters(),
                lr=lr["encoder"],
                betas=(beta_1, beta_2),
                eps=eps,
            ),
            "decoder": Adam(
                model.decoder.parameters(),
                lr=lr["decoder"],
                betas=(beta_1, beta_2),
                eps=eps,
            ),
        }
        self._step = 0

    def _update_rate(self, stack):
        # Noam-style schedule: linear warm-up then inverse-sqrt decay.
        return self.lr[stack] * min(
            self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5)
        )

    def zero_grad(self):
        """Clear gradients of both stacks.

        BUGFIX: the original referenced ``self.optimizer_decoder`` and
        ``self.optimizer_encoder`` — attributes that were never created (the
        Adam instances live in ``self.optimizers``) — so every call raised
        AttributeError.
        """
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()

    def step(self):
        """Advance the schedule, update both stacks' learning rates, then step."""
        self._step += 1
        for stack, optimizer in self.optimizers.items():
            new_rate = self._update_rate(stack)
            for param_group in optimizer.param_groups:
                param_group["lr"] = new_rate
            optimizer.step()
# ------------
# Train
# ------------
def train(args, model, tokenizer):
    """ Fine-tune the pretrained model on the corpus.

    Runs the BertSum training loop with gradient accumulation, using the
    two-optimizer schedule from BertSumOptimizer.

    Args:
        args: namespace with per_gpu_train_batch_size, n_gpu, device,
            gradient_accumulation_steps, max_steps, num_train_epochs,
            max_grad_norm and seed.
        model: encoder-decoder model whose forward returns the loss first.
        tokenizer: tokenizer used to build batches via `collate`.

    Returns:
        (global_step, average training loss per step).
    """
    set_seed(args)
    # Load the data
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_dataset = load_and_cache_examples(args, tokenizer)
    train_sampler = RandomSampler(train_dataset)
    # Stories/summaries are padded or truncated to 512 tokens per side.
    model_collate_fn = functools.partial(collate, tokenizer=tokenizer, block_size=512)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
        collate_fn=model_collate_fn,
    )
    # Training schedule: a positive max_steps wins and the epoch count is
    # derived from it; otherwise the step budget is derived from the epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = t_total // (
            len(train_dataloader) // args.gradient_accumulation_steps + 1
        )
    else:
        t_total = (
            len(train_dataloader)
            // args.gradient_accumulation_steps
            * args.num_train_epochs
        )
    # Prepare the optimizer — per-stack learning rates and warm-ups as in the
    # BertSum paper (Liu & Lapata, 2019).
    lr = {"encoder": 0.002, "decoder": 0.2}
    warmup_steps = {"encoder": 20000, "decoder": 10000}
    optimizer = BertSumOptimizer(model, lr, warmup_steps)
    # Train
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(
        " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
    )
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps
        # * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    model.zero_grad()
    train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=True)
    global_step = 0
    tr_loss = 0.0
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
        for step, batch in enumerate(epoch_iterator):
            source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch
            source = source.to(args.device)
            target = target.to(args.device)
            encoder_token_type_ids = encoder_token_type_ids.to(args.device)
            encoder_mask = encoder_mask.to(args.device)
            decoder_mask = decoder_mask.to(args.device)
            lm_labels = lm_labels.to(args.device)
            model.train()
            outputs = model(
                source,
                target,
                encoder_token_type_ids=encoder_token_type_ids,
                encoder_attention_mask=encoder_mask,
                decoder_attention_mask=decoder_mask,
                decoder_lm_labels=lm_labels,
            )
            loss = outputs[0]
            # NOTE(review): debug print left in; consider logger.debug here.
            print(loss)
            # Scale the loss so the accumulated gradient matches a full batch.
            if args.gradient_accumulation_steps > 1:
                loss /= args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            # Only update weights every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                model.zero_grad()
                global_step += 1
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    return global_step, tr_loss / global_step
# ------------
# Train
# ------------
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate the model on the held-out set and report perplexity.

    Args:
        args: namespace with per_gpu_eval_batch_size, n_gpu, device,
            output_dir and seed.
        model: encoder-decoder model whose forward returns the LM loss first.
        tokenizer: tokenizer used to build the evaluation dataset.
        prefix: label included in the log output.

    Returns:
        dict with the computed "perplexity" (a 0-d tensor).
    """
    set_seed(args)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # BUGFIX: the original passed evaluate=True, a keyword that
    # load_and_cache_examples() does not accept -> TypeError at runtime.
    eval_dataset = load_and_cache_examples(args, tokenizer)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
    )
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch
        source = source.to(args.device)
        target = target.to(args.device)
        encoder_token_type_ids = encoder_token_type_ids.to(args.device)
        encoder_mask = encoder_mask.to(args.device)
        decoder_mask = decoder_mask.to(args.device)
        lm_labels = lm_labels.to(args.device)
        with torch.no_grad():
            outputs = model(
                source,
                target,
                encoder_token_type_ids=encoder_token_type_ids,
                encoder_attention_mask=encoder_mask,
                decoder_attention_mask=decoder_mask,
                decoder_lm_labels=lm_labels,
            )
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    # Perplexity is exp of the mean cross-entropy over the eval set.
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {"perplexity": perplexity}
    # Save the evaluation's results
    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return result
def main():
    """Entry point: parse CLI arguments, build the Model2Model, then train and/or evaluate."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input training data file (a text file).",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Optional parameters
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    # NOTE(review): type=bool is an argparse pitfall — any non-empty string
    # (including "False") parses as True. action="store_true" would be safer
    # but changes the CLI contract; flagged, not changed.
    parser.add_argument(
        "--do_evaluate",
        type=bool,
        default=False,
        help="Run model evaluation on out-of-sample data.",
    )
    parser.add_argument("--do_train", type=bool, default=False, help="Run training.")
    parser.add_argument(
        "--do_overwrite_output_dir",
        type=bool,
        default=False,
        help="Whether to overwrite the output dir.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default="bert-base-cased",
        type=str,
        help="The model checkpoint to initialize the encoder and decoder's weights with.",
    )
    parser.add_argument(
        "--model_type",
        default="bert",
        type=str,
        help="The decoder architecture to be fine-tuned.",
    )
    parser.add_argument(
        "--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--to_cpu", default=False, type=bool, help="Whether to force training on CPU."
    )
    parser.add_argument(
        "--num_train_epochs",
        default=10,
        type=int,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=4,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument("--seed", default=42, type=int)
    args = parser.parse_args()
    # Refuse to clobber an existing, non-empty output directory unless asked.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.do_overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format(
                args.output_dir
            )
        )
    # Set up training device
    if args.to_cpu or not torch.cuda.is_available():
        args.device = torch.device("cpu")
        args.n_gpu = 0
    else:
        args.device = torch.device("cuda")
        args.n_gpu = torch.cuda.device_count()
    # Load pretrained model and tokenizer. The decoder's weights are randomly initialized.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    config = BertConfig.from_pretrained(args.model_name_or_path)
    decoder_model = BertForMaskedLM(config)
    model = Model2Model.from_pretrained(
        args.model_name_or_path, decoder_model=decoder_model
    )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        0,
        args.device,
        args.n_gpu,
        False,
        False,
    )
    logger.info("Training/evaluation parameters %s", args)
    # Train the model
    model.to(args.device)
    if args.do_train:
        global_step, tr_loss = train(args, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        ) # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_arguments.bin"))
    # Evaluate the model
    results = {}
    if args.do_evaluate:
        # NOTE(review): the checkpoint list is empty, so this loop never runs;
        # evaluation is not actually wired up in this script yet.
        checkpoints = []
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            encoder_checkpoint = os.path.join(checkpoint, "encoder")
            decoder_checkpoint = os.path.join(checkpoint, "decoder")
            model = PreTrainedEncoderDecoder.from_pretrained(
                encoder_checkpoint, decoder_checkpoint
            )
            model.to(args.device)
            results = "placeholder"
    return results
if __name__ == "__main__":
    main()
| 15,727 | 30.902637 | 120 | py |
DeeBERT | DeeBERT-master/examples/run_bertology.py | #!/usr/bin/env python3
# Copyright 2018 CMU and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bertology: this script shows how you can explore the internals of the models in the library to:
- compute the entropy of the head attentions
- compute the importance of each head
- prune (remove) the low importance head.
Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
"""
import os
import argparse
import logging
from datetime import timedelta, datetime
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subset
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import (WEIGHTS_NAME,
BertConfig, BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer)
from run_glue import set_seed, load_and_cache_examples, ALL_MODELS, MODEL_CLASSES
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
def entropy(p):
    """Return the Shannon entropy of distribution ``p`` along its last axis."""
    # 0 * log(0) is taken as 0: zero out the NaN contributions from log(0).
    contrib = p * torch.log(p)
    contrib[p == 0] = 0
    return -contrib.sum(dim=-1)
def print_2d_tensor(tensor):
    """Log a 2D tensor row by row at INFO level (float or integer formatting)."""
    # Header: one 1-based column index per head.
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row_idx, row in enumerate(tensor):
        values = row.cpu().data
        if tensor.dtype == torch.long:
            logger.info(f"layer {row_idx + 1}:\t" + "\t".join(f"{x:d}" for x in values))
        else:
            logger.info(f"layer {row_idx + 1}:\t" + "\t".join(f"{x:.5f}" for x in values))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None):
    """Accumulate per-head attention entropy and importance over the eval set.

    Head importance follows Michel et al. (http://arxiv.org/abs/1905.10650):
    the absolute gradient of the loss w.r.t. a differentiable mask over heads.

    Args:
        args: namespace with device, local_rank, output_dir and the
            dont_normalize_* flags.
        model: BERT-style classifier exposing model.bert.config.
        eval_dataloader: yields (input_ids, input_mask, segment_ids, label_ids).
        compute_entropy: accumulate attention entropies if True.
        compute_importance: accumulate head-importance scores if True.
        head_mask: optional (layers, heads) mask; defaults to all-ones.

    Returns:
        (attn_entropy, head_importance, preds, labels) — per-head tensors plus
        the stacked logits and gold labels for later metric computation.
    """
    # Prepare our tensors
    n_layers, n_heads = model.bert.config.num_hidden_layers, model.bert.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    # The mask must require grad so loss.backward() populates head_mask.grad.
    head_mask.requires_grad_(requires_grad=True)
    preds = None
    labels = None
    tot_tokens = 0.0
    for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        batch = tuple(t.to(args.device) for t in batch)
        input_ids, input_mask, segment_ids, label_ids = batch
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, head_mask=head_mask)
        loss, logits, all_attentions = outputs[0], outputs[1], outputs[-1] # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        if compute_entropy:
            # Mask out padding positions before summing entropies per head.
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach()) * input_mask.float().unsqueeze(1)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        # Also store our logits/labels if we want to compute metrics afterwards
        if preds is None:
            preds = logits.detach().cpu().numpy()
            labels = label_ids.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            labels = np.append(labels, label_ids.detach().cpu().numpy(), axis=0)
        tot_tokens += input_mask.float().detach().sum().data
    # Normalize by the total number of (non-padding) tokens seen.
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization (L2 norm per layer).
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        # Min-max rescale all scores into [0, 1].
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print/save matrices
    np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy())
    np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy())
    logger.info("Attention entropies")
    print_2d_tensor(attn_entropy)
    logger.info("Head importance scores")
    print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    # Rank 0 = most important head; ranks laid out in the (layers, heads) grid.
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, preds, labels
def mask_heads(args, model, eval_dataloader):
    """ This method shows how to mask head (set some heads to zero), to test the effect on the network,
        based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)

    Iteratively zeroes the `masking_amount` least-important heads until the
    metric drops below `masking_threshold` * original score, then returns the
    last mask that still met the threshold.
    """
    _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
    original_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    # Number of heads zeroed per iteration (at least one).
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone() # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again with the tightened mask.
        _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
        current_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
        # NOTE(review): "remaning" typo lives in the log string; left as-is to
        # keep runtime output byte-identical.
        logger.info("Masking: current score: %f, remaning heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum()/new_head_mask.numel() * 100)
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """ This method shows how to prune head (remove heads weights) based on
        the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)

    Compares score and wall-clock time of the masked model against the model
    with the masked heads physically removed, and logs the speed/size ratio.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader,
                                                  compute_entropy=False, compute_importance=False, head_mask=head_mask)
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
    score_masking = compute_metrics(args.task_name, preds, labels)[args.metric_name]
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    # Map each layer to the list of head indices whose mask entry is 0.
    heads_to_prune = dict((layer, (1 - head_mask[layer].long()).nonzero().tolist()) for layer in range(len(head_mask)))
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    # Re-run evaluation without any mask: the heads are physically gone now.
    before_time = datetime.now()
    _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader,
                                                  compute_entropy=False, compute_importance=False, head_mask=None)
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
    score_pruning = compute_metrics(args.task_name, preds, labels)[args.metric_name]
    new_time = datetime.now() - before_time
    logger.info("Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params/original_num_params * 100)
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time/new_time * 100)
def main():
    """Entry point: set up the GLUE task and model, compute head importance,
    and optionally mask then prune the least-important attention heads."""
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
                            ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_subset", type=int, default=-1,
                        help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir", action='store_true',
                        help="Whether to overwrite data in output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--dont_normalize_importance_by_layer", action='store_true',
                        help="Don't normalize importance score by layers")
    parser.add_argument("--dont_normalize_global_importance", action='store_true',
                        help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument("--try_masking", action='store_true',
                        help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument("--masking_threshold", default=0.9, type=float,
                        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument("--masking_amount", default=0.1, type=float,
                        help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str,
                        help="Metric to use for head masking.")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, sequences shorter padded.")
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl') # Initializes the distributed backend
    # Setup logging (only the main process logs at INFO level)
    logging.basicConfig(level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    # Set seeds
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    # Infer the model type from the checkpoint name (first matching key wins).
    args.model_type = ""
    for key in MODEL_CLASSES:
        if key in args.model_name_or_path.lower():
            args.model_type = key # take the first match in model types
            break
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    # output_attentions=True is required: head entropy is computed from them.
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          finetuning_task=args.task_name,
                                          output_attentions=True,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    model = model_class.from_pretrained(args.model_name_or_path,
                                        from_tf=bool('.ckpt' in args.model_name_or_path),
                                        config=config,
                                        cache_dir=args.cache_dir if args.cache_dir else None)
    if args.local_rank == 0:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Print/save training arguments
    # NOTE(review): this save fails if output_dir does not exist yet — an
    # os.makedirs(args.output_dir, exist_ok=True) beforehand would be safer.
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset for the GLUE task
    eval_data = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True)
    if args.data_subset > 0:
        eval_data = Subset(eval_data, list(range(min(args.data_subset, len(eval_data)))))
    eval_sampler = SequentialSampler(eval_data) if args.local_rank == -1 else DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == '__main__':
    main()
| 18,901 | 51.798883 | 177 | py |
DeeBERT | DeeBERT-master/examples/utils_summarization_test.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from utils_summarization import (
compute_token_type_ids,
fit_to_block_size,
build_mask,
build_lm_labels,
process_story,
)
class SummarizationDataProcessingTest(unittest.TestCase):
    """Unit tests for the summarization data-processing helpers
    (fit_to_block_size, process_story, build_lm_labels, build_mask,
    compute_token_type_ids)."""

    def setUp(self):
        # All fit_to_block_size tests share the same target block length.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """A short sequence is right-padded with the pad symbol (0)."""
        seq = [1, 2, 3, 4]
        want = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(fit_to_block_size(seq, self.block_size, 0), want)

    def test_fit_to_block_sequence_fit_exactly(self):
        """A sequence of exactly block_size elements is returned unchanged."""
        seq = list(range(1, 11))
        self.assertEqual(fit_to_block_size(seq, self.block_size, 0), list(range(1, 11)))

    def test_fit_to_block_sequence_too_big(self):
        """An over-long sequence is truncated down to block_size elements."""
        seq = list(range(1, 14))
        want = list(range(1, 11))
        self.assertEqual(fit_to_block_size(seq, self.block_size, 0), want)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields an empty summary list."""
        story_text = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary = process_story(story_text)
        self.assertEqual(summary, [])

    def test_process_empty_story(self):
        """The empty string maps to empty story and summary collections."""
        story, summary = process_story("")
        self.assertEqual(story, [])
        self.assertEqual(summary, [])

    def test_process_story_with_missing_period(self):
        """Sentences lacking a final period get one appended during processing."""
        story_text = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story, summary = process_story(story_text)
        expected_story = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story, story)
        expected_summary = ["It was the best of times."]
        self.assertEqual(expected_summary, summary)

    def test_build_lm_labels_no_padding(self):
        """Without padding tokens, LM labels mirror the input sequence."""
        seq = torch.tensor([1, 2, 3, 4])
        np.testing.assert_array_equal(build_lm_labels(seq, 0).numpy(), seq.numpy())

    def test_build_lm_labels(self):
        """Padding positions (pad id 0) become -1 in the LM labels."""
        seq = torch.tensor([1, 2, 3, 4, 0, 0, 0])
        want = torch.tensor([1, 2, 3, 4, -1, -1, -1])
        np.testing.assert_array_equal(build_lm_labels(seq, 0).numpy(), want.numpy())

    def test_build_mask_no_padding(self):
        """With no padding present, the attention mask is all ones."""
        seq = torch.tensor([1, 2, 3, 4])
        np.testing.assert_array_equal(
            build_mask(seq, 0).numpy(), torch.ones_like(seq).numpy()
        )

    def test_build_mask(self):
        """Positions equal to the pad id (23) are zeroed out in the mask."""
        seq = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        want = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(seq, 23).numpy(), want.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        """The mask builder still works when the pad id is the common value 1."""
        seq = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        want = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(seq, 1).numpy(), want.numpy())

    def test_compute_token_type_ids(self):
        """Token type ids flip (0 -> 1 -> 0 ...) at each separator token, per row."""
        sep_id = 101
        batch = torch.tensor(
            [[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]
        )
        want = torch.tensor(
            [[0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1], [0, 1, 1, 1, 0, 0]]
        )
        np.testing.assert_array_equal(compute_token_type_ids(batch, sep_id), want)
# Allow running this test module directly with `python utils_summarization_test.py`.
if __name__ == "__main__":
    unittest.main()
| 5,178 | 36.80292 | 98 | py |
DeeBERT | DeeBERT-master/examples/run_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from transformers import XLNetLMHeadModel, XLNetTokenizer
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from transformers import CTRLLMHeadModel, CTRLTokenizer
from transformers import XLMWithLMHeadModel, XLMTokenizer
# Module-level logging: timestamped records at INFO level.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
# Safety cap on generation length used when the model exposes no positional limit.
MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
# Every pretrained shortcut name known to the supported config classes
# (used only to build the --model_name_or_path help string).
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig)), ())
# Maps the --model_type CLI value to its (LM-head model class, tokenizer class) pair.
MODEL_CLASSES = {
    'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
    'ctrl': (CTRLLMHeadModel, CTRLTokenizer),
    'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
    'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
    'xlm': (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
# Prepended to the user prompt for memory models (see main()); trailing special
# tokens (<eod> </s> <eos>) mark the end of the padding document.
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
    """Seed the numpy and torch RNGs from ``args.seed`` so sampling runs are
    reproducible; when ``args.n_gpu`` > 0, all CUDA devices are seeded too."""
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a (batch size x vocabulary size) logits tensor for top-k and/or
    nucleus (top-p) sampling.

    Tokens outside the k highest-scoring candidates, and tokens past the
    cumulative-probability threshold ``top_p``, have their logits overwritten
    in place with ``filter_value``. The (mutated) tensor is also returned.

    Nucleus filtering is described in Holtzman et al.
    (http://arxiv.org/abs/1904.09751); implementation adapted from
    https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317.
    """
    top_k = min(top_k, logits.size(-1))  # never request more candidates than the vocab holds
    if top_k > 0:
        # Per-row threshold is the k-th largest logit; everything strictly below it is filtered.
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p > 0.0:
        ranked_logits, ranked_idx = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(ranked_logits, dim=-1), dim=-1)
        # Drop tokens once the cumulative probability mass exceeds top_p ...
        drop = cum_probs > top_p
        # ... but shift right one slot so the first token crossing the threshold survives.
        drop[..., 1:] = drop[..., :-1].clone()
        drop[..., 0] = 0
        # Map the sorted-order mask back onto original vocabulary positions.
        unsorted_drop = drop.scatter(dim=1, index=ranked_idx, src=drop)
        logits[unsorted_drop] = filter_value
    return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0,
                    is_xlnet=False, is_xlm_mlm=False, xlm_mask_token=None, xlm_lang=None, device='cpu'):
    """Autoregressively sample ``length`` new tokens from ``model``, conditioned on ``context``.

    Args:
        model: a library LM-head model; called as model(**inputs) and expected to
            return logits of shape (batch, seq, vocab) as its first output.
        length: number of tokens to append.
        context: list of prompt token ids.
        num_samples: number of independent continuations drawn in parallel.
        temperature: softmax temperature; 0 switches to greedy argmax decoding.
        top_k / top_p: forwarded to top_k_top_p_filtering.
        repetition_penalty: divisor applied to logits of tokens already generated
            (CTRL-style, https://arxiv.org/abs/1909.05858).
        is_xlnet / is_xlm_mlm / xlm_mask_token / xlm_lang: model-specific input
            construction tweaks; see inline comments.
        device: device string for the tensors built here.

    Returns:
        LongTensor of shape (num_samples, len(context) + length): prompt followed
        by the sampled continuation.
    """
    context = torch.tensor(context, dtype=torch.long, device=device)
    # Replicate the prompt so each batch row is one independent sample.
    context = context.unsqueeze(0).repeat(num_samples, 1)
    generated = context
    with torch.no_grad():
        for _ in trange(length):
            inputs = {'input_ids': generated}
            if is_xlnet:
                # XLNet is a direct (predict same token, not next token) and bi-directional model by default
                # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
                input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
                perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
                perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
                target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
                target_mapping[0, 0, -1] = 1.0  # predict last token
                # NOTE(review): the XLNet branch builds batch-size-1 tensors, so it
                # presumably assumes num_samples == 1 -- confirm before batching.
                inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
            if is_xlm_mlm and xlm_mask_token:
                # XLM MLM models are direct models (predict same token, not next token)
                # => need one additional dummy token in the input (will be masked and guessed)
                input_ids = torch.cat((generated, torch.full((1, 1), xlm_mask_token, dtype=torch.long, device=device)), dim=1)
                inputs = {'input_ids': input_ids}
            if xlm_lang is not None:
                inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1)
            outputs = model(**inputs)  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)
            next_token_logits = outputs[0][:, -1, :] / (temperature if temperature > 0 else 1.)
            # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)
            for i in range(num_samples):
                for _ in set(generated[i].tolist()):
                    next_token_logits[i, _] /= repetition_penalty
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
            if temperature == 0: # greedy sampling:
                next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1)
            else:
                next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_token), dim=1)
    return generated
def main():
    """CLI entry point: parse arguments, load the selected pretrained model and
    tokenizer, then sample text conditioned on a prompt.

    With --prompt set, generation runs once and the function returns the last
    decoded sample; otherwise it loops forever, reading prompts from stdin.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--prompt", type=str, default="")
    parser.add_argument("--padding_text", type=str, default="")
    parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.")
    parser.add_argument("--length", type=int, default=20)
    parser.add_argument("--num_samples", type=int, default=1)
    parser.add_argument("--temperature", type=float, default=1.0,
                        help="temperature of 0 implies greedy sampling")
    parser.add_argument("--repetition_penalty", type=float, default=1.0,
                        help="primarily useful for CTRL model; in that case, use 1.2")
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--stop_token', type=str, default=None,
                        help="Token at which text generation is stopped")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    set_seed(args)
    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    model.to(args.device)
    model.eval()
    # Clamp the requested length to the model's positional capacity; a negative
    # --length means "as long as the model allows" (capped at MAX_LENGTH).
    if args.length < 0 and model.config.max_position_embeddings > 0:
        args.length = model.config.max_position_embeddings
    elif 0 < model.config.max_position_embeddings < args.length:
        args.length = model.config.max_position_embeddings  # No generation bigger than model size
    elif args.length < 0:
        args.length = MAX_LENGTH  # avoid infinite loop
    logger.info(args)
    if args.model_type in ["ctrl"]:
        if args.temperature > 0.7:
            logger.info('CTRL typically works better with lower temperatures (and lower top_k).')
    while True:
        xlm_lang = None
        # XLM Language usage detailed in the issues #1414
        if args.model_type in ["xlm"] and hasattr(tokenizer, 'lang2id') and hasattr(model.config, 'use_lang_emb') \
                and model.config.use_lang_emb:
            if args.xlm_lang:
                language = args.xlm_lang
            else:
                language = None
                # Keep prompting until the user names a language the tokenizer knows.
                while language not in tokenizer.lang2id.keys():
                    language = input("Using XLM. Select language in " + str(list(tokenizer.lang2id.keys())) + " >>> ")
            xlm_lang = tokenizer.lang2id[language]
        # XLM masked-language modeling (MLM) models need masked token (see details in sample_sequence)
        is_xlm_mlm = args.model_type in ["xlm"] and 'mlm' in args.model_name_or_path
        if is_xlm_mlm:
            xlm_mask_token = tokenizer.mask_token_id
        else:
            xlm_mask_token = None
        raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
        if args.model_type in ["transfo-xl", "xlnet"]:
            # Models with memory likes to have a long prompt for short inputs.
            raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text
        context_tokens = tokenizer.encode(raw_text, add_special_tokens=False)
        if args.model_type == "ctrl":
            if not any(context_tokens[0] == x for x in tokenizer.control_codes.values()):
                logger.info("WARNING! You are not starting your generation from a control code so you won't get good results")
        out = sample_sequence(
            model=model,
            context=context_tokens,
            num_samples=args.num_samples,
            length=args.length,
            temperature=args.temperature,
            top_k=args.top_k,
            top_p=args.top_p,
            repetition_penalty=args.repetition_penalty,
            is_xlnet=bool(args.model_type == "xlnet"),
            is_xlm_mlm=is_xlm_mlm,
            xlm_mask_token=xlm_mask_token,
            xlm_lang=xlm_lang,
            device=args.device,
        )
        # Strip the prompt: keep only the newly generated token ids.
        out = out[:, len(context_tokens):].tolist()
        for o in out:
            text = tokenizer.decode(o, clean_up_tokenization_spaces=True)
            # Truncate at the stop token if one was given (find == -1 keeps all text).
            text = text[: text.find(args.stop_token) if args.stop_token else None]
            print(text)
        if args.prompt:
            break
    # Only reached via the break above, i.e. when --prompt was supplied;
    # returns the last decoded sample.
    return text
| 13,112 | 49.241379 | 167 | py |
DeeBERT | DeeBERT-master/examples/run_ner.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer
from transformers import RobertaConfig, RobertaForTokenClassification, RobertaTokenizer
from transformers import DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer
from transformers import CamembertConfig, CamembertForTokenClassification, CamembertTokenizer
logger = logging.getLogger(__name__)
# All pretrained shortcut names understood by the supported configurations
# (used only for the --model_name_or_path help string).
# NOTE(review): CamembertConfig is missing from this tuple although camembert
# appears in MODEL_CLASSES below -- looks like an oversight; confirm.
ALL_MODELS = sum(
    (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig, DistilBertConfig)),
    ())
# Maps --model_type to its (config class, token-classification model class,
# tokenizer class) triple.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForTokenClassification, BertTokenizer),
    "roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer),
    "camembert": (CamembertConfig, CamembertForTokenClassification, CamembertTokenizer),
}
def set_seed(args):
    """Seed python's ``random``, numpy and torch RNGs from ``args.seed`` so
    training runs are reproducible; with GPUs in use, seed every CUDA device."""
    seed_value = args.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Run the fine-tuning loop for token classification.

    Handles gradient accumulation, optional fp16 (apex), multi-GPU and
    distributed training, TensorBoard logging every ``args.logging_steps`` and
    checkpointing every ``args.save_steps``.

    Returns:
        tuple: (global_step, average training loss per optimization step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total number of optimization steps: either fixed by --max_steps (which
    # then overrides the epoch count) or derived from epochs x steps-per-epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights are exempt from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "labels": batch[3]}
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids. BUGFIX: this line
                # previously read `inputs["token_type_ids"]: batch[2] ...` --
                # a bare annotation expression, not an assignment -- so
                # token_type_ids were silently never passed to the model.
                inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                # BUGFIX: the LR schedule must be stepped *after* the optimizer
                # (required since PyTorch 1.1); the original stepped it before.
                scheduler.step()
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Evaluate ``model`` on the ``mode`` data split (e.g. "dev" or "test").

    Aggregates the loss, converts per-token logits to label strings (skipping
    positions equal to ``pad_token_label_id``) and computes seqeval metrics.

    Returns:
        tuple: (metrics dict with loss/precision/recall/f1,
                list of predicted label sequences, one list per sentence).
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "labels": batch[3]}
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids. BUGFIX: this line
                # previously read `inputs["token_type_ids"]: batch[2] ...` --
                # a bare annotation expression, not an assignment -- so
                # token_type_ids were silently never passed to the model.
                inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu parallel evaluating
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across batches for a single argmax pass.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)
    label_map = {i: label for i, label in enumerate(labels)}
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    # Drop padding positions so seqeval only scores real tokens.
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list)
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    """Build (or load from cache) the TensorDataset for the given split.

    Args:
        mode: split name ("train", "dev", "test"); selects the input file and
            the cache-file name.

    Returns:
        TensorDataset of (input_ids, input_mask, segment_ids, label_ids).
    """
    # BUGFIX: the original guarded both barriers with `not evaluate`, but no
    # local `evaluate` variable exists here -- the name resolved to the
    # module-level evaluate() *function*, which is always truthy, so
    # `not evaluate` was always False and the barriers never ran. Only the
    # training split needs first-process-builds-the-cache synchronization
    # (mirroring run_glue's `not evaluate` intent).
    if args.local_rank not in [-1, 0] and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}".format(mode,
        list(filter(None, args.model_name_or_path.split("/"))).pop(),
        str(args.max_seq_length)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        examples = read_examples_from_file(args.data_dir, mode)
        features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
                                                cls_token_at_end=bool(args.model_type in ["xlnet"]),
                                                # xlnet has a cls token at the end
                                                cls_token=tokenizer.cls_token,
                                                cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
                                                sep_token=tokenizer.sep_token,
                                                sep_token_extra=bool(args.model_type in ["roberta"]),
                                                # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
                                                pad_on_left=bool(args.model_type in ["xlnet"]),
                                                # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                                                pad_token_label_id=pad_token_label_id
                                                )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action="store_true",
help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true",
help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action="store_true",
help="Whether to run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action="store_true",
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action="store_true",
help="Avoid using CUDA when available")
parser.add_argument("--overwrite_output_dir", action="store_true",
help="Overwrite the content of the output directory")
parser.add_argument("--overwrite_cache", action="store_true",
help="Overwrite the cached training and evaluation sets")
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
parser.add_argument("--fp16", action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument("--fp16_opt_level", type=str, default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.output_dir)
model.to(args.device)
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
# Save results
output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not predictions[example_id]:
example_id += 1
elif predictions[example_id]:
output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
return results
# Script entry point: run the full train/eval/predict pipeline only when this
# file is executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
| 28,786 | 53.009381 | 184 | py |
DeeBERT | DeeBERT-master/examples/utils_summarization.py | from collections import deque
import os
import torch
from torch.utils.data import Dataset
# ------------
# Data loading
# ------------
class CNNDailyMailDataset(Dataset):
    """ Abstracts the dataset used to train seq2seq models.

    CNN/Daily News:

    The CNN/Daily News raw datasets are downloaded from [1]. The stories are
    stored in different files; the summary appears at the end of the story as
    sentences that are prefixed by the special `@highlight` line. To process
    the data, untar both datasets in the same folder, and pass the path to this
    folder as the `data_dir` argument. The formatting code was inspired by [2].

    [1] https://cs.nyu.edu/~kcho/
    [2] https://github.com/abisee/cnn-dailymail/
    """

    def __init__(self, tokenizer, prefix="train", data_dir=""):
        # Validate with an explicit exception instead of `assert`:
        # assertions are silently stripped when Python runs with -O.
        if not os.path.isdir(data_dir):
            raise NotADirectoryError(
                "data_dir must be an existing directory, got {!r}".format(data_dir)
            )
        self.tokenizer = tokenizer

        # We initialize the class by listing all the files that contain
        # stories and summaries. Files are not read in memory given
        # the size of the corpus.
        self.stories_path = []
        datasets = ("cnn", "dailymail")
        for dataset in datasets:
            path_to_stories = os.path.join(data_dir, dataset, "stories")
            story_filenames_list = os.listdir(path_to_stories)
            for story_filename in story_filenames_list:
                path_to_story = os.path.join(path_to_stories, story_filename)
                # Skip anything that is not a regular file (e.g. nested dirs).
                if not os.path.isfile(path_to_story):
                    continue
                self.stories_path.append(path_to_story)

    def __len__(self):
        # Number of story files discovered at construction time.
        return len(self.stories_path)

    def __getitem__(self, idx):
        # Stories are read lazily, one file per item, to keep memory low.
        story_path = self.stories_path[idx]
        with open(story_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return story_lines, summary_lines
def process_story(raw_story):
    """ Split a story file's content into article lines and summary lines.

    Args:
        raw_story (str): content of the story file as an utf-8 encoded string.

    Returns:
        tuple(list[str], list[str]): the article lines, then the summary
        lines (the sentences introduced by "@highlight" markers). When the
        file contains no "@highlight" line the summary list is empty; this
        function never raises for empty or highlight-less input.
    """
    nonempty_lines = list(
        filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])
    )

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _add_missing_period(line):
END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"]
if line.startswith("@highlight"):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
# --------------------------
# Encoding and preprocessing
# --------------------------
def fit_to_block_size(sequence, block_size, pad_token):
    """ Adapt a token sequence's length to the block size.

    Sequences longer than `block_size` are truncated (a new list is
    returned); shorter sequences are extended *in place* with `pad_token`
    and the same list object is returned. The previous docstring
    incorrectly stated that -1 was used for padding.

    Args:
        sequence (list[int]): token ids to fit.
        block_size (int): target length.
        pad_token (int): id appended to short sequences.

    Returns:
        list[int]: a sequence of exactly `block_size` tokens.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    sequence.extend([pad_token] * (block_size - len(sequence)))
    return sequence
def build_lm_labels(sequence, pad_token):
    """ Return a copy of `sequence` where every padding token has been
    replaced by -1, so those positions are ignored by the loss computation.
    The input tensor is left unmodified. """
    return sequence.masked_fill(sequence == pad_token, -1)
def build_mask(sequence, pad_token):
    """ Build the attention mask for `sequence`: 1 at positions the attention
    mechanism may attend to, 0 at padding positions. The mask has the same
    shape, dtype and device as the input. """
    return (sequence != pad_token).to(dtype=sequence.dtype)
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """ Encode the story and summary lines, and join them
    as specified in [1] by using `[SEP] [CLS]` tokens to separate
    sentences.
    """

    def _encode_and_flatten(lines):
        # Encode each line with its special tokens, then flatten the
        # per-sentence id lists into one sequence of token ids.
        return [
            token_id
            for line in lines
            for token_id in tokenizer.add_special_tokens_single_sequence(
                tokenizer.encode(line)
            )
        ]

    return _encode_and_flatten(story_lines), _encode_and_flatten(summary_lines)
def compute_token_type_ids(batch, separator_token_id):
    """ Segment embeddings as described in [1]

    The values {0,1} were found in the repository [2]. Each separator token
    flips the segment parity, and the separator itself already carries the
    new parity.

    Attributes:
        batch: torch.Tensor, size [batch_size, block_size]
            Batch of input.
        separator_token_id: int
            The value of the token that separates the segments.

    [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
        arXiv preprint arXiv:1908.08345 (2019).
    [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217)
    """
    segment_ids_per_sequence = []
    for sequence in batch:
        parity = 0
        segment_ids = []
        for token_id in sequence:
            if token_id == separator_token_id:
                parity = 1 - parity
            segment_ids.append(parity)
        segment_ids_per_sequence.append(segment_ids)
    return torch.tensor(segment_ids_per_sequence)
| 6,022 | 31.556757 | 88 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.