text: string (lengths 1 to 93.6k characters)
(ite_step, ite_epoch+1, num_epoch, sum_improved_PSNR/num_patch_count))
|
file_object.flush()
|
if __name__ == '__main__':
    ### Settings
    # NOTE(review): the names defined below appear to be consumed by
    # main_train() through module-level globals (no arguments are passed
    # to it) — confirm against the full file before renaming anything.
    CHANNEL = 1 # use only Y
    ratio_small = 0.01  # NOTE(review): meaning not visible in this chunk — confirm
    lr_ori = 1e-4       # initial learning rate
    epoch_step1 = 20    # epoch milestone (presumably an LR-schedule boundary — confirm)
    epoch_step2 = 60    # second epoch milestone

    # Command-line arguments: frame geometry, batch size, GPU id, and QP
    # (the video-compression quantization parameter used to key all paths).
    parser = argparse.ArgumentParser()
    parser.add_argument('-hf', '--height', type=int, help="HEIGHT of frame")
    parser.add_argument('-wf', '--width', type=int, help="WIDTH of frame")
    parser.add_argument('-bs', '--batch_size', type=int)
    parser.add_argument('-gpu', '--gpu', type=str, help="GPU")
    parser.add_argument('-qp', '--qp', type=str, help="QP")
    args = parser.parse_args()
    BATCH_SIZE = args.batch_size
    QP = args.qp
    WIDTH = args.width
    HEIGHT = args.height

    # QP-keyed locations: stacked training data, model checkpoint directory,
    # and a plain-text training log opened for the whole run.
    dir_stack = "/home/x/SCI_1/MFQEv2.0/Database/Training_stack/QP" + QP
    dir_model = "./model_QP" + QP
    record_FileName = "./record_train_QP" + QP + ".txt"
    file_object = open(record_FileName, 'w')

    # TensorFlow / CUDA environment setup.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # only show error and warning
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    config = tf.ConfigProto(allow_soft_placement = True) # if GPU is not usable, then turn to CPU automatically

    # Run training, then record completion and close the log.
    main_train()
    print("##### Training completes! #####")
    file_object.write("##### Training completes! #####\n")
    file_object.close()
|
# <FILESEP>
|
import argparse
|
import pycls.core.config as config
|
import pycls.core.builders as builders
|
from pycls.datasets.transforms import create_test_transform
|
import os
|
import numpy as np
|
from tqdm import tqdm
|
from collections import defaultdict
|
import torch
|
from torch.utils.data import DataLoader
|
from torchvision.datasets import CIFAR100
|
from torchvision.transforms import Compose
|
def main():
    """Extract per-layer feature maps of a pycls model over the CIFAR-100 train set.

    Loads the model described by ``--cfg`` with the weights in ``--ckpt``,
    runs every CIFAR-100 training image through it, collects the
    intermediate activations the model exposes via ``model.features``,
    and saves them (one array per layer, keyed ``layer_<i>``) to
    ``temp/<cfg_name>.npz``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='configs/resnet/r-56_c100.yaml')
    parser.add_argument('--ckpt', type=str, default='work_dirs/r-56_c100/model.pyth')
    args = parser.parse_args()

    save_dir = 'temp'
    os.makedirs(save_dir, exist_ok=True)

    # Build the test-time preprocessing pipeline from the pycls config.
    config.load_cfg(args.cfg)
    transforms = create_test_transform()
    transform = Compose(transforms)
    dataset = CIFAR100(root='data', train=True, transform=transform, download=True)
    loader = DataLoader(dataset, batch_size=200)

    model = builders.build_model()
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu')['model_state'])
    model.cuda()
    # BUG FIX: switch BatchNorm/Dropout to inference behavior. Without this,
    # the extracted features depend on per-batch statistics and dropout
    # randomness instead of the trained running statistics.
    model.eval()

    # NOTE(review): assumes the (forked) pycls model populates
    # ``model.features`` with a list of per-layer activation tensors on each
    # forward pass — confirm against the model builder.
    feat_dict = defaultdict(list)
    for img, _ in tqdm(loader):
        img = img.cuda()
        with torch.no_grad():
            model(img)
        for i, feat in enumerate(model.features):
            # Move to CPU immediately so GPU memory is not held across batches.
            feat_dict[f'layer_{i}'].append(feat.cpu())

    # Concatenate along the batch dimension and convert to NumPy for np.savez.
    for k in feat_dict:
        feat_dict[k] = torch.cat(feat_dict[k], dim=0).numpy()

    cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
    np.savez(os.path.join(save_dir, f'{cfg_name}.npz'), **feat_dict)
|
if __name__ == '__main__':
    # BUG FIX: the guard had no body in this chunk (a syntax error as-is);
    # restore the conventional entry-point call.
    main()
Subsets and Splits
No community queries yet.
The top public SQL queries from the community will appear here once they become available.