index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
60,918 | funyoo/sinny | refs/heads/master | /module/video/sys_video_list.py | """
系统预设视频列表
@author: funyoo
"""
# Video directory (relative to this module)
ROOT = "../../resources/video/"
# Sleep video: [path, duration]
SLEEP_VIDEO = [ROOT + "sleep.mp4", 5]
# Wake-up video 1: [path, duration]
WAKE_UP_VIDEO = [ROOT + "wakeup.mp4", 4]
# Wake-up video 2: [path, duration]
WAKE_UP_VIDEO_2 = [ROOT + "wakeup2.mp4", 4]
# Busy video: [path, duration]
BUSY_VIDEO = [ROOT + "busy.mp4", 4] | {"/module/voice_module.py": ["/module/base_module.py"], "/module/rgb_module.py": ["/module/base_module.py"], "/main.py": ["/module_register.py", "/wake_up.py"], "/wake_up.py": ["/commander.py"], "/module/picture_module.py": ["/module/base_module.py"], "/commander.py": ["/reader.py", "/module_register.py"]} |
60,921 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /submit_sunnybrook_unetres_multi.py | #!/usr/bin/env python2.7
import re, sys, os
import shutil, cv2
import numpy as np
from train_sunnybrook_unetres_mul import read_contour, map_all_contours, export_all_contours, map_endo_contours
from helpers import reshape, get_SAX_SERIES, draw_result
from unet_res_multi_model import unet_res_multi_model, dice_coef_endo_each, dice_coef_myo_each
# SAX series lookup provided by helpers.get_SAX_SERIES().
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): hard-coded Windows path with unescaped backslashes; it works
# only because '\c' and '\S' are not escape sequences -- a raw string would
# be safer. Confirm before touching.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Validation split: manual contours, DICOM images, overlay output (Part2).
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database ContoursPart2',
                                'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database DICOMPart2',
                            'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database OverlayPart2',
                                'ValidationDataOverlay')
# Online split: same layout, Part1 of the database.
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                   'Sunnybrook Cardiac MR Database ContoursPart1',
                                   'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                               'Sunnybrook Cardiac MR Database DICOMPart1',
                               'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                   'Sunnybrook Cardiac MR Database OverlayPart1',
                                   'OnlineDataOverlay')
# Submission output directories (NOTE(review): __main__ below assigns its own
# save_dir strings instead of using these -- confirm which is intended).
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                             'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook_online_submission')
def create_submission(contours, data_path, output_path, contour_type = 'a'):
    """Run the trained multi-class U-Net over `contours`, evaluate it, and
    write a Sunnybrook-style submission tree (auto contours, copied DICOMs,
    overlays and evaluation reports).

    NOTE(review): writes under the module-level global `save_dir`, which is
    assigned in the __main__ block -- confirm before reusing elsewhere.
    NOTE(review): the original file's indentation was lost; nesting below is
    reconstructed -- verify against VCS.
    """
    # Only the 'a' (all-classes) model is supported here.
    if contour_type == 'a':
        weights = 'model_logs/sunnybrook_a_unetres_multi.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
    crop_size = 128
    input_shape = (crop_size, crop_size, 1)
    num_classes = 3
    images, masks = export_all_contours(contours, data_path, output_path, crop_size, num_classes=num_classes)
    model = unet_res_multi_model(input_shape, num_classes, weights=weights, contour_type=contour_type, transfer=True)
    pred_masks = model.predict(images, batch_size=32, verbose=1)
    print('\nEvaluating dev set ...')
    result = model.evaluate(images, masks, batch_size=32)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
    # NOTE(review): `num` is reported below but never incremented anywhere.
    num = 0
    # One submission pass per contour type: 'i' uses class channel 2,
    # 'm' uses class channel 1 of the prediction.
    for c_type in ['i', 'm']:
        for idx, ctr in enumerate(contours):
            img, mask = read_contour(ctr, data_path, num_classes)
            h, w, d = img.shape
            if c_type == 'i':
                tmp = pred_masks[idx,...,2]
            elif c_type == 'm':
                tmp = pred_masks[idx,...,1]
            # Resize the predicted map back to source size, binarize at 0.5.
            tmp = tmp[..., np.newaxis]
            tmp = reshape(tmp, to_shape=(h, w, d))
            tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
            # OpenCV 3.x three-value signature: (image, contours, hierarchy).
            tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection in case: {:s}; image: {:d}'.format(ctr.case, ctr.img_no))
                # Dummy single-point contour so an output file is still written.
                coords = np.ones((1, 1, 1, 2), dtype='int')
            # Derive output file names from the manual-contour file name.
            if c_type == 'i':
                man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\')+1:]
            elif c_type == 'm':
                man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\')+1:]
            auto_filename = man_filename.replace('manual', 'auto')
            img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
            man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
            auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
            img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
            dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
            #dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
            dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
            overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
            for dirpath in [man_full_path, auto_full_path, img_full_path, overlay_full_path]:
                if not os.path.exists(dirpath):
                    os.makedirs(dirpath)
                if 'DICOM' in dirpath:
                    # Copy the source DICOM next to the generated contours.
                    src = dcm_path
                    dst = os.path.join(dirpath, img_filename)
                    shutil.copyfile(src, dst)
                elif 'Overlay' in dirpath:
                    draw_result(ctr, data_path, overlay_full_path, c_type, coords)
                else:
                    # Both contour directories reach this branch; the auto
                    # contour coordinates are (re)written as text each time.
                    dst = os.path.join(auto_full_path, auto_filename)
                    if not os.path.exists(auto_full_path):
                        os.makedirs(auto_full_path)
                    with open(dst, 'wb') as f:
                        for cd in coords:
                            cd = np.squeeze(cd)
                            if cd.ndim == 1:
                                np.savetxt(f, cd, fmt='%d', delimiter=' ')
                            else:
                                for coord in cd:
                                    np.savetxt(f, coord, fmt='%d', delimiter=' ')
        print('\nNumber of multiple detections: {:d}'.format(num))
        # Per-type summary report.
        dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
        with open(dst_eval, 'wb') as f:
            f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))
            f.close()
        # Detailed evaluation:
        detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
        evalEndoArr = dice_coef_endo_each(masks, pred_masks)
        evalMyoArr = dice_coef_myo_each(masks, pred_masks)
        caseArr = [ctr.case for ctr in contours]
        imgArr = [ctr.img_no for ctr in contours]
        resArr = np.transpose([caseArr, imgArr, evalEndoArr, evalMyoArr])
        np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
        #np.savetxt(f, '\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
def create_endo_submission(endos, data_path, output_path, contour_type = 'a'):
    """Endocardium-only variant of create_submission: a single pass with
    c_type fixed to 'i' over the endo contour list.

    NOTE(review): consumes the module-level global `save_dir` set in the
    __main__ block -- confirm before reusing elsewhere.
    NOTE(review): indentation reconstructed (lost in the source dump).
    """
    if contour_type == 'a':
        weights = 'model_logs/sunnybrook_a_unetres_multi.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
    crop_size = 128
    input_shape = (crop_size, crop_size, 1)
    num_classes = 3
    images, masks = export_all_contours(endos, data_path, output_path, crop_size, num_classes=num_classes)
    model = unet_res_multi_model(input_shape, num_classes, weights=weights, contour_type=contour_type, transfer=True)
    pred_masks = model.predict(images, batch_size=32, verbose=1)
    print('\nEvaluating dev set ...')
    result = model.evaluate(images, masks, batch_size=32)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
    # NOTE(review): `num` is reported below but never incremented.
    num = 0
    c_type = 'i'  # endocardium: prediction class channel 2
    for idx, ctr in enumerate(endos):
        img, mask = read_contour(ctr, data_path, num_classes)
        h, w, d = img.shape
        if c_type == 'i':
            tmp = pred_masks[idx, ..., 2]
        elif c_type == 'm':
            tmp = pred_masks[idx, ..., 1]
        # Resize the predicted map back to source size, binarize at 0.5.
        tmp = tmp[..., np.newaxis]
        tmp = reshape(tmp, to_shape=(h, w, d))
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        # OpenCV 3.x three-value signature: (image, contours, hierarchy).
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in case: {:s}; image: {:d}'.format(ctr.case, ctr.img_no))
            # Dummy single-point contour so an output file is still written.
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if c_type == 'i':
            man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\') + 1:]
        elif c_type == 'm':
            man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\') + 1:]
        auto_filename = man_filename.replace('manual', 'auto')
        img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
        man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
        auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
        img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
        dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
        # dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
        dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
        overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
        for dirpath in [man_full_path, auto_full_path, img_full_path, overlay_full_path]:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if 'DICOM' in dirpath:
                # Copy the source DICOM next to the generated contours.
                src = dcm_path
                dst = os.path.join(dirpath, img_filename)
                shutil.copyfile(src, dst)
            elif 'Overlay' in dirpath:
                draw_result(ctr, data_path, overlay_full_path, c_type, coords)
            else:
                # Both contour directories reach this branch; the auto
                # contour coordinates are (re)written as text each time.
                dst = os.path.join(auto_full_path, auto_filename)
                if not os.path.exists(auto_full_path):
                    os.makedirs(auto_full_path)
                with open(dst, 'wb') as f:
                    for cd in coords:
                        cd = np.squeeze(cd)
                        if cd.ndim == 1:
                            np.savetxt(f, cd, fmt='%d', delimiter=' ')
                        else:
                            for coord in cd:
                                np.savetxt(f, coord, fmt='%d', delimiter=' ')
    print('\nNumber of multiple detections: {:d}'.format(num))
    # Summary report for the endo pass.
    dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
    with open(dst_eval, 'wb') as f:
        f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))
        f.close()
    # Detailed evaluation:
    detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
    evalEndoArr = dice_coef_endo_each(masks, pred_masks)
    evalMyoArr = dice_coef_myo_each(masks, pred_masks)
    caseArr = [ctr.case for ctr in endos]
    imgArr = [ctr.img_no for ctr in endos]
    resArr = np.transpose([caseArr, imgArr, evalEndoArr, evalMyoArr])
    np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
if __name__== '__main__':
    # Submission entry point: run the full and the endo-only submission
    # over the online split, then over the validation split.
    contour_type = 'a'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # NOTE(review): `save_dir` is a module-level global consumed by the two
    # create_*_submission functions above. Unescaped-backslash Windows path
    # ('\S', '\c' are literal here, but fragile -- prefer a raw string).
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission_unetres_multi'
    print('\nProcessing online '+contour_type+' contours...')
    online_ctrs = list(map_all_contours(ONLINE_CONTOUR_PATH))
    online_endos = list(map_endo_contours(ONLINE_CONTOUR_PATH))
    create_submission(online_ctrs, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, contour_type)
    create_endo_submission(online_endos, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, contour_type)
    # Redirect output to the validation submission tree and repeat.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_val_submission_unetres_multi'
    print('\nProcessing val '+contour_type+' contours...')
    val_ctrs = list(map_all_contours(VAL_CONTOUR_PATH))
    val_endos = list(map_endo_contours(VAL_CONTOUR_PATH))
    create_submission(val_ctrs, VAL_IMG_PATH, VAL_OVERLAY_PATH, contour_type)
    create_endo_submission(val_endos, VAL_IMG_PATH, VAL_OVERLAY_PATH, contour_type)
    print('\nAll done.')
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,922 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /train_sunnybrook_unet_time.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
from keras.callbacks import *
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
from itertools import zip_longest
from scipy.misc import imsave
from helpers import center_crop_3d, center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacTimeSeriesDataGenerator
from unet_model_time import unet_res_model_time
from unet_res_model_Inv import unet_res_model_Inv
from DataIOProc import DataIOProc
# Fixed RNG seed for reproducible runs (np is provided by the
# `keras.callbacks` star import above).
seed = 1234
np.random.seed(seed)
# SAX series lookup provided by helpers.get_SAX_SERIES().
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): hard-coded Windows path with unescaped backslashes; works
# only because '\c' and '\S' are not escape sequences -- prefer a raw string.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Scratch directory holding the pre-computed .bin tensors loaded in __main__.
TEMP_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                 'Sunnybrook Cardiac MR Database Temp',
                                 'Temp')
# Training split: manual contours, DICOM images, overlay and augmentation dirs.
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'TrainingOverlayImage')
TRAIN_AUG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database Augmentation')
# Small 'Debug' subsets with the same layout for quick experiments.
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'Debug')
class Contour(object):
    """One annotated cardiac MR image: the paths of its manual contour
    files plus the case name and image number parsed from the endo path."""

    def __init__(self, ctr_endo_path, ctr_epi_path, ctr_p1_path, ctr_p2_path, ctr_p3_path):
        self.ctr_endo_path = ctr_endo_path
        self.ctr_epi_path = ctr_epi_path
        self.ctr_p1_path = ctr_p1_path
        self.ctr_p2_path = ctr_p2_path
        self.ctr_p3_path = ctr_p3_path
        # The endocardial contour is always present, so parse the case id
        # and image number out of its (backslash-separated) path.
        parsed = re.search(
            r'\\([^\\]*)\\contours-manual\\IRCCI-expert\\IM-0001-(\d{4})-.*',
            ctr_endo_path)
        self.case = parsed.group(1)
        self.img_no = int(parsed.group(2))

    def __str__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)

    __repr__ = __str__
def find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation):
    """Collect num_phases temporally neighbouring frames centred on the
    contoured image, stepping phase_dilation image numbers per phase.

    All frames must lie on the same slice: the slice position (DICOM tag
    0020,1041) is compared against the centre frame, and the index is
    shifted by whole cycles (num_phases_in_cycle) to get back onto the
    slice. Raises AssertionError when no same-slice neighbour is found.

    NOTE(review): the source dump lost all indentation; the nesting of the
    fallback re-checks below is reconstructed -- verify against VCS.
    """
    center_index = contour.img_no
    center_file = 'IM-0001-%04d.dcm' % (contour.img_no)
    center_file_path = os.path.join(data_path, contour.case, 'DICOM', center_file)  # modified by C.Cong
    center = dicom.read_file(center_file_path)
    # Slice-location tag; every neighbour must match this value.
    center_slice_pos = center[0x20, 0x1041]
    center_img = center.pixel_array.astype('int')
    h, w = center_img.shape
    img_arr = np.zeros((num_phases, h, w), dtype="int")
    for i in range (num_phases):
        # Candidate frame number, centred on the contoured image.
        idx = int(center_index + (i - int(num_phases/2))*phase_dilation)
        filename = 'IM-0001-%04d.dcm' % (idx)
        full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        #If
        # Candidate missing on disk: wrap by one full cardiac cycle,
        # towards the centre frame.
        if os.path.isfile(full_path) == False:
            if idx < center_index:
                idx = idx + num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            else:
                idx = idx - num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        f = dicom.read_file(full_path)
        f_slice_pos = f[0x20, 0x1041]
        # Landed on a different slice: try one cycle forward, then two back.
        if(f_slice_pos.value != center_slice_pos.value):
            idx = idx + num_phases_in_cycle
            filename = 'IM-0001-%04d.dcm' % (idx)
            full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            if os.path.isfile(full_path) == True:
                f = dicom.read_file(full_path)
                f_slice_pos = f[0x20, 0x1041]
            if (f_slice_pos.value != center_slice_pos.value):
                idx = idx - num_phases_in_cycle - num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
                if os.path.isfile(full_path) == True:
                    f = dicom.read_file(full_path)
                    f_slice_pos = f[0x20, 0x1041]
                    if (f_slice_pos.value != center_slice_pos.value):
                        raise AssertionError('Cannot find neighbor files for: {:s}'.format(center_file_path))
        img_arr[i] = f.pixel_array.astype('int')
    return img_arr
def read_contour(contour, data_path, num_classes, num_phases, num_phases_in_cycle, phase_dilation):
    """Load the temporal phase stack for one contour plus its binary
    endocardial mask.

    Returns (img_arr, classify): the neighbour-frame stack with a trailing
    channel axis, and the rasterized endo mask shaped (1, h, w, 1).
    num_classes is accepted for signature compatibility but unused here.
    """
    dcm_name = 'IM-0001-%04d.dcm' % (contour.img_no)
    dataset = dicom.read_file(os.path.join(data_path, contour.case, 'DICOM', dcm_name))
    reference = dataset.pixel_array.astype('int')
    # Rasterize the manual endocardial polygon into a 0/1 mask.
    label = np.zeros_like(reference, dtype="uint8")
    polygon = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    cv2.fillPoly(label, [polygon], 1)
    # Gather the surrounding cardiac phases on the same slice.
    stack = find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation)
    if stack.ndim < 4:
        stack = stack[..., np.newaxis]
    if label.ndim < 4:
        label = label[np.newaxis, ..., np.newaxis]
    return stack, label
def map_all_contours(contour_path):
    """Walk contour_path, find every endocardial contour file, and derive
    its sibling epicardial / p1 / p2 / p3 annotation paths (the siblings
    may not exist on disk). Returns a lazy iterator of Contour objects."""
    endo_paths, epi_paths = [], []
    p1_paths, p2_paths, p3_paths = [], [], []
    for dirpath, _, files in os.walk(contour_path):
        for endo_f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt'):
            endo_paths.append(os.path.join(dirpath, endo_f))
            # The endocardial contour is always present; derive the other
            # file names from its image number.
            imgno = re.search(r'IM-0001-(\d{4})-icontour-manual.txt', endo_f).group(1)
            epi_paths.append(os.path.join(dirpath, 'IM-0001-' + imgno + '-ocontour-manual.txt'))
            p1_paths.append(os.path.join(dirpath, 'IM-0001-' + imgno + '-p1-manual.txt'))
            p2_paths.append(os.path.join(dirpath, 'IM-0001-' + imgno + '-p2-manual.txt'))
            p3_paths.append(os.path.join(dirpath, 'IM-0001-' + imgno + '-p3-manual.txt'))
    print('Number of examples: {:d}'.format(len(endo_paths)))
    return map(Contour, endo_paths, epi_paths, p1_paths, p2_paths, p3_paths)
def map_endo_contours(contour_path):
    """Discover every endocardial contour under contour_path and build the
    companion annotation paths. Identical discovery logic to
    map_all_contours; returns a lazy iterator of Contour objects."""
    columns = [[], [], [], [], []]  # endo, epi, p1, p2, p3
    tails = ('-ocontour-manual.txt', '-p1-manual.txt', '-p2-manual.txt', '-p3-manual.txt')
    for dirpath, dirnames, files in os.walk(contour_path):
        for endo_f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt'):
            columns[0].append(os.path.join(dirpath, endo_f))
            # Companion file names share the endo file's image number.
            imgno = re.search(r'IM-0001-(\d{4})-icontour-manual.txt', endo_f).group(1)
            for column, tail in zip(columns[1:], tails):
                column.append(os.path.join(dirpath, 'IM-0001-' + imgno + tail))
    print('Number of examples: {:d}'.format(len(columns[0])))
    return map(Contour, *columns)
def export_all_contours(contours, data_path, overlay_path, crop_size=100, num_classes=4, num_phases=5, phase_dilation=1):
    """Build the (images, masks) training tensors for a list of contours.

    images: (n, num_phases, crop, crop, 1); masks: (n, 1, crop, crop, C)
    where C collapses to a single channel for binary (num_classes == 2)
    segmentation. overlay_path is accepted for signature compatibility.
    """
    n = len(contours)
    print('\nProcessing {:d} images and labels ...\n'.format(n))
    # Binary segmentation stores its label in one channel.
    if num_classes == 2:
        num_classes = 1
    images = np.zeros((n, num_phases, crop_size, crop_size, 1))
    masks = np.zeros((n, 1, crop_size, crop_size, num_classes))
    for row, contour in enumerate(contours):
        stack, label = read_contour(contour, data_path, num_classes, num_phases, 20, phase_dilation)
        images[row] = center_crop_3d(stack, crop_size=crop_size)
        masks[row] = center_crop_3d(label, crop_size=crop_size)
    return images, masks
if __name__== '__main__':
    # Second-stage training of the temporal U-Net on pre-exported tensors.
    contour_type = 'a'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv.h5'  # NOTE(review): unused below
    shuffle = True  # NOTE(review): unused below
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    crop_size = 128
    num_phases = 5
    save_path = 'model_logs'
    phase_dilation = 1
    # Reads the binary tensors previously exported under TEMP_CONTOUR_PATH.
    data_proc = DataIOProc(TEMP_CONTOUR_PATH, 'p5_a4')
    num_classes = 2
    # Training tensor geometry: samples, phases, height, width, depth.
    s = 6800
    p = 5
    h = 128
    w = 128
    d = 1
    # Validation tensor geometry.
    s_val = 202
    p_val = 5
    h_val = 128
    w_val = 128
    d_val = 1
    print('\nPredict for 2nd training ...')
    # Load training dataset
    temp_mask_t = data_proc.load_data_4d('training_data.bin', s, p, h, w, d)
    mask_train = data_proc.load_data_4d('training_mask.bin', s, 1, h, w, d)
    # Load validation dataset
    print('\nTotal sample is {:d} for 2nd training.'.format(s))
    #print('\nPredict for 2nd evaluating ...')
    temp_mask_dev = data_proc.load_data_4d('eval_data.bin', s_val, p_val, h_val, w_val, d_val)
    mask_dev = data_proc.load_data_4d('eval_mask.bin', s_val, 1, h_val, w_val, d_val)
    dev_generator = (temp_mask_dev, mask_dev)
    input_shape = (num_phases, crop_size, crop_size, 1)
    epochs = 30
    model_t = unet_res_model_time(input_shape, num_classes, nb_filters=32, n_phases=num_phases, dilation=phase_dilation, transfer=True, weights=None)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unet_time'), histogram_freq=0, write_graph=False,
                                  write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    # Snapshot the full model every 2 epochs.
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'check_point_model.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False,
                                 period=2)  # .{epoch:d}
    callbacks.append(checkpoint)
    print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    mini_batch_size = 1
    steps_per_epoch = int(np.ceil(s / mini_batch_size))  # NOTE(review): unused below
    model_t.fit(temp_mask_t,
                mask_train,
                epochs=epochs,
                batch_size=4,
                validation_data=dev_generator,
                callbacks=callbacks,
                class_weight=None
                )
    # Persist the final weights next to the checkpoints.
    save_file = '_'.join(['sunnybrook', contour_type, 'unetres_inv_time']) + '.h5'
    save_file = os.path.join(save_path, save_file)
    model_t.save_weights(save_file)
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,923 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /train_sunnybrook_unet_3d.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
from keras.callbacks import *
from keras import backend as K
from itertools import zip_longest
from helpers import center_crop_3d, center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacVolumeDataGenerator
from unet_model_3d import unet_model_3d, resume_training
from unet_model_3d_Inv import unet_model_3d_Inv, resume_training
# Fixed RNG seed for reproducible shuffling/augmentation (np is provided
# by the `keras.callbacks` star import above).
seed = 1234
np.random.seed(seed)
# SAX series lookup provided by helpers.get_SAX_SERIES().
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): hard-coded Windows path with unescaped backslashes; works
# only because '\c' and '\S' are not escape sequences -- prefer a raw string.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Training split: manual contours, DICOM images, overlay and augmentation dirs.
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'TrainingOverlayImage')
TRAIN_AUG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database Augmentation')
# Small 'Debug' subsets with the same layout for quick experiments.
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'Debug')
class Contour(object):
    """Paths of the manual contour files for a single image, plus the case
    id and image number extracted from the endocardial path."""

    def __init__(self, ctr_endo_path, ctr_epi_path, ctr_p1_path, ctr_p2_path, ctr_p3_path):
        self.ctr_endo_path = ctr_endo_path
        self.ctr_epi_path = ctr_epi_path
        self.ctr_p1_path = ctr_p1_path
        self.ctr_p2_path = ctr_p2_path
        self.ctr_p3_path = ctr_p3_path
        # Every example has an endocardial contour, so it is the one
        # guaranteed place to read the case / image identifiers from.
        m = re.search(r'\\([^\\]*)\\contours-manual\\IRCCI-expert\\IM-0001-(\d{4})-.*',
                      ctr_endo_path)
        self.case = m.group(1)
        self.img_no = int(m.group(2))

    def __repr__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)

    __str__ = __repr__
def read_mask(contour, data_path, num_classes):
    """Build a one-hot (h, w, num_classes) label image for one contour.

    Channel 2 = endocardium, channel 1 = epicardium minus endocardium
    (myocardial ring), channel 0 = everything outside the epi contour.
    NOTE(review): indentation reconstructed from a flattened dump; the
    scope of the final two channel assignments is assumed to be outside
    the `if` -- verify against VCS.
    """
    filename = 'IM-0001-%04d.dcm' % (contour.img_no)
    full_path = os.path.join(data_path, contour.case, 'DICOM', filename)  # modified by C.Cong
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype('int')
    mask = np.zeros_like(img, dtype="uint8")
    h, w = img.shape
    classify = np.zeros((h, w, num_classes), dtype="uint8")
    # Rasterize the endocardial polygon into channel 2.
    coords = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    cv2.fillPoly(mask, [coords], 1)
    classify[...,2] = mask
    #classify[..., 2] = np.where(mask != 1, 1, 0)
    # The epicardial contour file may be missing for some images.
    if os.path.exists(contour.ctr_epi_path):
        mask = np.zeros_like(img, dtype="uint8")
        coords = np.loadtxt(contour.ctr_epi_path, delimiter=' ').astype('int')
        cv2.fillPoly(mask, [coords], 1)
        classify[..., 1] = mask
        #classify[..., 1] = np.where(mask_union != 1 , 1, 0)
    # NOTE(review): if the epi file is absent, channel 1 stays all-zero and
    # the uint8 subtraction below wraps to 255 inside the endo region --
    # confirm whether that case occurs in practice.
    classify[..., 0] = np.where(classify[..., 1] != 1 , 1, 0)
    classify[..., 1] = classify[..., 1] - classify[..., 2]
    return classify
def read_image(img_no, data_path, case):
    """Read one DICOM image as an int array with a trailing channel axis."""
    path = os.path.join(data_path, case, 'DICOM', 'IM-0001-%04d.dcm' % (img_no))
    pixels = dicom.read_file(path).pixel_array.astype('int')
    # Guarantee an (h, w, 1) channel-last layout for downstream stacking.
    if pixels.ndim < 3:
        pixels = pixels[..., np.newaxis]
    return pixels
def find_min_max_image(data_path, case):
    """Scan <data_path>/<case>/DICOM and return the smallest and largest
    image numbers appearing in IM-0001-NNNN.dcm file names.  When nothing
    matches, the (9999, 0) sentinels are returned unchanged."""
    lowest, highest = 9999, 0
    pattern = re.compile(r'IM-0001-(\d{4}).dcm')
    for _, _, names in os.walk(os.path.join(data_path, case, 'DICOM')):
        for name in names:
            hit = pattern.search(name)
            if hit is None:
                continue
            number = int(hit.group(1))
            lowest = min(lowest, number)
            highest = max(highest, number)
    return lowest, highest
def read_volume(center_ctr, volume_map, data_path, num_classes, num_slices, num_phases_in_cycle, crop_size, is_all_valid_slice):
    """Stack num_slices short-axis slices around center_ctr into a volume.

    Neighbouring slices are num_phases_in_cycle image numbers apart (same
    cardiac phase, adjacent slice). Returns (images, masks) shaped
    (crop, crop, num_slices, {1, num_classes}); returns ([], []) when
    is_all_valid_slice is set and any neighbour has no manual contour.
    """
    case = center_ctr.case
    center_no = center_ctr.img_no
    # Phase index within the cycle, mapped from 0 to the full cycle.
    # NOTE(review): img_index is computed but never used below.
    img_index = center_ctr.img_no % num_phases_in_cycle
    if img_index == 0:
        img_index = num_phases_in_cycle
    img_no_min, img_no_max = find_min_max_image(data_path, case)
    images = np.zeros((crop_size, crop_size, num_slices, 1))
    masks = np.zeros((crop_size, crop_size, num_slices, num_classes))
    # Background channel defaults to 1 until a slice mask overwrites it.
    masks_bg = np.ones((crop_size, crop_size, num_slices))
    masks[:,:,:,0] = masks_bg
    if is_all_valid_slice:
        # Reject the volume unless every slice has a manual contour.
        for slice_idx in range(num_slices):
            img_no = center_no + (slice_idx - int(num_slices / 2)) * num_phases_in_cycle
            if img_no not in volume_map[case]:
                return [], []
    for slice_idx in range(num_slices):
        img_no = center_no + (slice_idx - int(num_slices/2))*num_phases_in_cycle
        # Clamp to images that actually exist on disk.
        if img_no < img_no_min:
            img_no = img_no_min
        if img_no > img_no_max:
            img_no = img_no_max
        img = read_image(img_no, data_path, case)
        img = center_crop(img, crop_size)
        images[:,:,slice_idx,:] = img
        # Only contoured slices contribute a label; others keep background.
        if img_no in volume_map[case]:
            mask = read_mask(volume_map[case][img_no], data_path, num_classes)
            mask = center_crop(mask, crop_size)
            masks[:, :, slice_idx, :] = mask
    return images, masks
def map_all_contours(contour_path):
    """Collect every epicardial contour below contour_path, derive its
    sibling annotation paths, and additionally index the Contours per
    case and image number.

    Returns (contours, volume_map): a list of Contour objects and a dict
    mapping case id -> {image number -> Contour}.
    """
    endo_list, epi_list = [], []
    p1_list, p2_list, p3_list = [], [], []
    volume_map = {}
    for dirpath, dirnames, files in os.walk(contour_path):
        per_case = {}
        for epi_f in fnmatch.filter(files, 'IM-0001-*-ocontour-manual.txt'):
            epi_list.append(os.path.join(dirpath, epi_f))
            # Derive the companion file names from the epi image number.
            imgno = re.search(r'IM-0001-(\d{4})-ocontour-manual.txt', epi_f).group(1)
            endo_f = 'IM-0001-' + imgno + '-icontour-manual.txt'
            p1_f = 'IM-0001-' + imgno + '-p1-manual.txt'
            p2_f = 'IM-0001-' + imgno + '-p2-manual.txt'
            p3_f = 'IM-0001-' + imgno + '-p3-manual.txt'
            endo_list.append(os.path.join(dirpath, endo_f))
            p1_list.append(os.path.join(dirpath, p1_f))
            p2_list.append(os.path.join(dirpath, p2_f))
            p3_list.append(os.path.join(dirpath, p3_f))
            per_case[int(imgno)] = Contour(os.path.join(dirpath, endo_f),
                                           os.path.join(dirpath, epi_f),
                                           os.path.join(dirpath, p1_f),
                                           os.path.join(dirpath, p2_f),
                                           os.path.join(dirpath, p3_f))
        # The directory itself names the case; index its contours by image no.
        case_match = re.search(r'\\([^\\]*)\\contours-manual\\IRCCI-expert', dirpath)
        if case_match is not None:
            volume_map[case_match.group(1)] = per_case
    print('Number of examples: {:d}'.format(len(endo_list)))
    contours = list(map(Contour, endo_list, epi_list, p1_list, p2_list, p3_list))
    return contours, volume_map
def map_endo_contours(contour_path):
    """Find every endocardial contour under contour_path and derive the
    companion epi / p1 / p2 / p3 paths (which may not exist on disk).
    Returns a lazy iterator of Contour objects."""
    endo_col, epi_col = [], []
    p1_col, p2_col, p3_col = [], [], []
    derived = ((epi_col, '-ocontour-manual.txt'),
               (p1_col, '-p1-manual.txt'),
               (p2_col, '-p2-manual.txt'),
               (p3_col, '-p3-manual.txt'))
    for dirpath, dirnames, files in os.walk(contour_path):
        for endo_f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt'):
            endo_col.append(os.path.join(dirpath, endo_f))
            # Companion file names share the endo file's image number.
            imgno = re.search(r'IM-0001-(\d{4})-icontour-manual.txt', endo_f).group(1)
            for column, tail in derived:
                column.append(os.path.join(dirpath, 'IM-0001-' + imgno + tail))
    print('Number of examples: {:d}'.format(len(endo_col)))
    return map(Contour, endo_col, epi_col, p1_col, p2_col, p3_col)
def export_all_volumes(ctrs, volume_map, data_path, overlay_path, crop_size=100, num_classes=4, num_slices=5, num_phase_in_cycle=20, is_all_valid_slice=True):
    """Assemble the 3-D training volumes for a list of center contours.

    Contours whose volume cannot be built (read_volume returns empty) are
    skipped.  Returns (volumes, volume_masks, case, img_no); the four
    outputs are index-aligned over the kept contours.  overlay_path is
    accepted for signature compatibility but unused here.
    """
    print('\nProcessing {:d} volumes and labels ...\n'.format(len(ctrs)))
    volumes = np.zeros((len(ctrs), crop_size, crop_size, num_slices, 1))
    volume_masks = np.zeros((len(ctrs), crop_size, crop_size, num_slices, num_classes))
    idx = 0
    case = []
    img_no = []
    for i, contour in enumerate(ctrs):
        volume, volume_mask = read_volume(contour, volume_map, data_path, num_classes, num_slices, num_phase_in_cycle, crop_size, is_all_valid_slice=is_all_valid_slice)
        if len(volume) > 0:
            volumes[idx] = volume
            volume_masks[idx] = volume_mask
            case.append(contour.case)
            img_no.append(contour.img_no)
            idx = idx + 1
    # idx counts the kept samples, stored at rows 0..idx-1.  BUG FIX: the
    # previous slice [0:idx-1] dropped the last valid sample and left the
    # arrays one row shorter than the case/img_no lists.
    volumes = volumes[0:idx]
    volume_masks = volume_masks[0:idx]
    return volumes, volume_masks, case, img_no
# Training entry point: build train/dev 3-D volume datasets from the
# Sunnybrook contours, augment them, and fit the 3-D U-Net.
if __name__== '__main__':
# Run configuration -------------------------------------------------------
is_train = True
contour_type = 'a'
weight_path = None
#weight_path = '.\\model_logs\\sunnybrook_a_unet_3d_Inv_e135_a8_f8_775_d4_s5_allvalid_mvn.h5'
shuffle = False
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
crop_size = 128
num_slices = 5
num_phase_in_cycle = 20
save_path = 'model_logs'
verbosity = 1
# Class-weight presets (note: 'hight_weight' is a long-standing typo for
# 'high_weight'; kept as-is because it is referenced below).
standard_weight = 1.0
low_weight = 0.5
hight_weight = 2.0
patience = 10 # learning rate will be reduced after this many epochs if the validation loss is not improving
early_stop = 50 # training will be stopped after this many epochs without the validation loss improving
initial_learning_rate = 0.00001
learning_rate_drop = 0.5 # factor by which the learning rate will be reduced
# Dataset construction ----------------------------------------------------
print('Mapping ground truth contours to images in train...')
train_ctrs, volume_map = map_all_contours(TRAIN_CONTOUR_PATH)
if shuffle:
print('Shuffling data')
np.random.shuffle(train_ctrs)
print('Done mapping training set')
num_classes = 3
# First 10% of the contour list is held out as the dev split.
split = int(0.1*len(train_ctrs))
dev_ctrs = train_ctrs[0:split]
train_ctrs = train_ctrs[split:]
print('\nBuilding Train dataset ...')
img_train, mask_train, _, __ = export_all_volumes(train_ctrs,
volume_map,
TRAIN_IMG_PATH,
TRAIN_OVERLAY_PATH,
crop_size=crop_size,
num_classes=num_classes,
num_slices=num_slices,
num_phase_in_cycle=num_phase_in_cycle,
is_all_valid_slice=True)
print('\nBuilding Dev dataset ...')
img_dev, mask_dev, _, __ = export_all_volumes(dev_ctrs,
volume_map,
TRAIN_IMG_PATH,
TRAIN_OVERLAY_PATH,
crop_size=crop_size,
num_classes=num_classes,
num_slices=num_slices,
num_phase_in_cycle=num_phase_in_cycle,
is_all_valid_slice=True
)
# Augmentation ------------------------------------------------------------
input_shape = (crop_size, crop_size, num_slices, 1)
kwargs = dict(
rotation_range=90,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
fill_mode='constant',
)
# Identical generator configs + a shared seed keep image and mask
# augmentations in lockstep.
image_datagen = CardiacVolumeDataGenerator(**kwargs)
mask_datagen = CardiacVolumeDataGenerator(**kwargs)
aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")
aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")
# NOTE(review): 'seed' is not defined in this chunk — presumably a
# module-level constant earlier in the file; confirm.
img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=8, toDir=None)
mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=8, toDir=None)
epochs = 200
mini_batch_size = 1
image_generator = image_datagen.flow(img_train, shuffle=False,
batch_size=mini_batch_size, seed=seed)
mask_generator = mask_datagen.flow(mask_train, shuffle=False,
batch_size=mini_batch_size, seed=seed)
# Both flows are endless, so zip_longest behaves like zip here.
train_generator = zip_longest(image_generator, mask_generator)
dev_generator = (img_dev, mask_dev)
max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
curr_iter = 0
# Callbacks ---------------------------------------------------------------
callbacks = []
# ####################### tfboard ###########################
if K.backend() == 'tensorflow':
tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unet_3d_Inv'), histogram_freq=10, write_graph=False,
write_grads=False, write_images=False)
callbacks.append(tensorboard)
# ################### checkpoint saver#######################
callbacks.append(ReduceLROnPlateau(factor=learning_rate_drop, patience=patience,
verbose=verbosity))
callbacks.append(EarlyStopping(verbose=verbosity, patience=early_stop))
checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'check_point_model.hdf5'),
save_weights_only=False,
save_best_only=False,
period=20) # .{epoch:d}
callbacks.append(checkpoint)
# NOTE(review): this class_weight dict is built but fit_generator below is
# called with class_weight=None, so it is currently unused — confirm intent.
class_weight = dict([(i, low_weight) for i in range(num_classes)])
class_weight[1] = hight_weight
class_weight[2] = hight_weight
# Model construction / training -------------------------------------------
if(is_train):
if(weight_path == None):
model = unet_model_3d_Inv(input_shape, pool_size=(2, 2, 1), kernel=(7, 7, 5), n_labels=3, initial_learning_rate=0.00001,
deconvolution=False, depth=4, n_base_filters=4, include_label_wise_dice_coefficients=True, batch_normalization=True, weights=None)
else:
model = resume_training(weight_path)
else:
model = unet_model_3d_Inv(input_shape, pool_size=(2, 2, 1), kernel=(7, 7, 5), n_labels=3, initial_learning_rate=0.00001,
deconvolution=False, depth=4, n_base_filters=4, include_label_wise_dice_coefficients=True, batch_normalization=True, weights=weight_path)
model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
validation_data=dev_generator,
validation_steps=img_dev.__len__(),
epochs=epochs,
callbacks=callbacks,
workers=1,
class_weight=None
)
# Persist final weights as sunnybrook_<type>_unet_3d.h5 under save_path.
save_file = '_'.join(['sunnybrook', contour_type, 'unet', '3d']) + '.h5'
save_file = os.path.join(save_path, save_file)
model.save_weights(save_file)
# Legacy manual training loop kept for reference (superseded by
# fit_generator above).
# for e in range(epochs):
# print('\nMain Epoch {:d}\n'.format(e + 1))
# print('\nLearning rate: {:6f}\n'.format(lrate))
# train_result = []
# for iteration in range(int(len(img_train) * augment_scale / mini_batch_size)):
# img, mask = next(train_generator)
# res = model.train_on_batch(img, mask)
# curr_iter += 1
# lrate = lr_poly_decay(model, base_lr, curr_iter,
# max_iter, power=0.5)
# train_result.append(res)
# train_result = np.asarray(train_result)
# train_result = np.mean(train_result, axis=0).round(decimals=10)
# print('Train result {:s}:\n{:s}'.format(str(model.metrics_names), str(train_result)))
# print('\nEvaluating dev set ...')
# result = model.evaluate(img_dev, mask_dev, batch_size=32)
#
# result = np.round(result, decimals=10)
# print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
# save_file = '_'.join(['sunnybrook', contour_type,
# 'epoch', str(e + 1)]) + '.h5'
# if not os.path.exists('model_logs'):
# os.makedirs('model_logs')
# save_path = os.path.join(save_path, save_file)
# print('\nSaving model weights to {:s}'.format(save_path))
# model.save_weights(save_path)
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,924 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /fcn_model_resnet50.py | #!/usr/bin/env python2.7
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Lambda
from keras.layers import Input, average
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, AtrousConvolution2D
from keras.layers import ZeroPadding2D, Cropping2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import tensorflow as tf
import numpy as np
from keras.regularizers import l2
import pylab
import matplotlib.pyplot as plt
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each
from layer_common import mvn, crop
def fcn_model_resnet50(input_shape, num_classes, transfer=True, contour_type='i', weights=None):
''' "Skip" FCN built on a hand-unrolled ResNet-50-style backbone, similar to
Long et al., 2015 (https://arxiv.org/abs/1411.4038), with dilated (atrous)
convolutions replacing downsampling in stages 4-5 and skip fusions from
stages 1 and 2 back to full resolution.

Parameters:
    input_shape  -- (H, W, C) shape of the input image tensor.
    num_classes  -- number of output classes; 2 collapses to a single
                    sigmoid channel, otherwise softmax over num_classes.
    transfer     -- if True, loads `weights`, freezes the first 10 layers,
                    and picks the loss by `contour_type`.
    contour_type -- 'i' endo, 'o' myo, 'r' RV, 'a'/other -> combined loss.
    weights      -- optional path to pretrained weights (transfer mode).

Returns:
    A compiled keras.models.Model mapping image -> per-pixel class map.

NOTE(review): residual branches are merged with average() rather than the
canonical ResNet add() — presumably deliberate, but confirm.
NOTE(review): all BatchNormalization layers use axis=1, which is the
channel axis only for channels_first data; the demo input below is
(100, 100, 1), i.e. channels_last — confirm the intended axis (-1 for
channels_last).
'''
# Binary case: a single sigmoid output channel with plain Dice loss.
if num_classes == 2:
num_classes = 1
loss = dice_coef_loss
activation = 'sigmoid'
else:
# Multi-class: pick the structure-specific Dice loss in transfer mode.
if transfer == True:
if contour_type == 'i':
loss = dice_coef_loss_endo
elif contour_type == 'o':
loss = dice_coef_loss_myo
elif contour_type == 'r':
loss = dice_coef_loss_rv
elif contour_type == 'a':
loss = dice_coef_loss
else:
loss = dice_coef_loss
activation = 'softmax'
# Shared Conv2D kwargs: _a = 1x1, _b = 3x3, _c aliases _a (1x1 expand),
# _ds = 1x1 stride-2 downsample, _atrousN = 3x3 with dilation N.
kwargs_a = dict(
kernel_size=1,
strides=1,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_b = dict(
kernel_size=3,
strides=1,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_c = kwargs_a
kwargs_ds = dict(
kernel_size=1,
strides=2,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous = dict(
kernel_size=3,
strides=1,
dilation_rate=2,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous4 = dict(
kernel_size=3,
strides=1,
dilation_rate=4,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
# NOTE(review): kwargs_atrous6/12/18/24 are defined but never used below
# (an ASPP remnant?) — candidates for removal.
kwargs_atrous6 = dict(
kernel_size=3,
strides=1,
dilation_rate=6,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous12 = dict(
kernel_size=3,
strides=1,
dilation_rate=12,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous18 = dict(
kernel_size=3,
strides=1,
dilation_rate=18,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous24 = dict(
kernel_size=3,
strides=1,
dilation_rate=24,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
weight_decay = 1E-4
# Stem: per-image mean-variance normalization, 7x7/2 conv, BN+ReLU, 3x3/2
# max-pool (overall stride 4 entering stage 2).
data = Input(shape=input_shape, dtype='float', name='data')
mvn0 = Lambda(mvn, name='mvn0')(data)
conv1 = Conv2D(filters=64, name='conv1', kernel_size=7, strides=2, activation=None, padding='same',
use_bias=False, kernel_initializer='glorot_uniform')(mvn0)
mvn1 = Lambda(mvn, name='mvn1')(conv1)
bn1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True)(mvn1)
ac1 = Activation('relu')(bn1)
pool1 = MaxPooling2D(pool_size=3, strides=2,
padding='same', name='pool1')(ac1)
# Stage 2: three bottleneck blocks (64-64-256); 2a carries the 1x1
# projection shortcut, 2b/2c use identity shortcuts.
#2a
conv2a_1 = Conv2D(filters=256, name='conv2a_1', **kwargs_a)(pool1)
mvn2a_1 = Lambda(mvn, name='mvn2a_1')(conv2a_1)
bn2a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_1")(mvn2a_1)
conv2a_2a = Conv2D(filters=64, name='conv2a_2a', **kwargs_a)(pool1)
mvn2a_2a = Lambda(mvn, name='mvn2a_2a')(conv2a_2a)
bn2a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2a")(mvn2a_2a)
ac2a_2a = Activation('relu', name="ac2a_2a")(bn2a_2a)
conv2a_2b = Conv2D(filters=64, name='conv2a_2b', **kwargs_b)(ac2a_2a)
mvn2a_2b = Lambda(mvn, name='mvn2a_2b')(conv2a_2b)
bn2a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2b")(mvn2a_2b)
ac2a_2b = Activation('relu', name="ac2a_2b")(bn2a_2b)
conv2a_2c = Conv2D(filters=256, name='conv2a_2c', **kwargs_c)(ac2a_2b)
mvn2a_2c = Lambda(mvn, name='mvn2a_2c')(conv2a_2c)
bn2a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2c")(mvn2a_2c)
res2a = average([bn2a_1, bn2a_2c], name='res2a')
ac2a= Activation('relu', name="ac2a")(res2a)
# 2b
conv2b_2a = Conv2D(filters=64, name='conv2b_2a', **kwargs_a)(ac2a)
mvn2b_2a = Lambda(mvn, name='mvn2b_2a')(conv2b_2a)
bn2b_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2a")(mvn2b_2a)
ac2b_2a = Activation('relu', name="ac2b_2a")(bn2b_2a)
conv2b_2b = Conv2D(filters=64, name='conv2b_2b', **kwargs_b)(ac2b_2a)
mvn2b_2b = Lambda(mvn, name='mvn2b_2b')(conv2b_2b)
bn2b_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2b")(mvn2b_2b)
ac2b_2b = Activation('relu', name="ac2b_2b")(bn2b_2b)
conv2b_2c = Conv2D(filters=256, name='conv2b_2c', **kwargs_c)(ac2b_2b)
mvn2b_2c = Lambda(mvn, name='mvn2b_2c')(conv2b_2c)
bn2b_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2c")(mvn2b_2c)
res2b = average([ac2a, bn2b_2c], name='res2b')
ac2b= Activation('relu', name="ac2b")(res2b)
# 2c
conv2c_2a = Conv2D(filters=64, name='conv2c_2a', **kwargs_a)(ac2b)
mvn2c_2a = Lambda(mvn, name='mvn2c_2a')(conv2c_2a)
bn2c_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2a")(mvn2c_2a)
ac2c_2a = Activation('relu', name="ac2c_2a")(bn2c_2a)
conv2c_2b = Conv2D(filters=64, name='conv2c_2b', **kwargs_b)(ac2c_2a)
mvn2c_2b = Lambda(mvn, name='mvn2c_2b')(conv2c_2b)
bn2c_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2b")(mvn2c_2b)
ac2c_2b = Activation('relu', name="ac2c_2b")(bn2c_2b)
conv2c_2c = Conv2D(filters=256, name='conv2c_2c', **kwargs_c)(ac2c_2b)
mvn2c_2c = Lambda(mvn, name='mvn2c_2c')(conv2c_2c)
bn2c_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2c")(mvn2c_2c)
res2c = average([ac2b, bn2c_2c], name='res2c')
ac2c = Activation('relu', name="ac2c")(res2c)
# drop2c also feeds the fc2 skip branch further below.
drop2c = Dropout(rate=0.5, name='drop2c')(ac2c)
# Stage 3: four bottleneck blocks (128-128-512); 3a downsamples by 2 via
# strided 1x1 convs on both the shortcut and the residual path.
# 3a
conv3a_1 = Conv2D(filters=512, name='conv3a_1', **kwargs_ds)(drop2c)
mvn3a_1 = Lambda(mvn, name='mvn3a_1')(conv3a_1)
bn3a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_1")(mvn3a_1)
conv3a_2a = Conv2D(filters=128, name='conv3a_2a', **kwargs_ds)(drop2c)
mvn3a_2a = Lambda(mvn, name='mvn3a_2a')(conv3a_2a)
bn3a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2a")(mvn3a_2a)
ac3a_2a = Activation('relu', name="ac3a_2a")(bn3a_2a)
conv3a_2b = Conv2D(filters=128, name='conv3a_2b', **kwargs_b)(ac3a_2a)
mvn3a_2b = Lambda(mvn, name='mvn3a_2b')(conv3a_2b)
bn3a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2b")(mvn3a_2b)
ac3a_2b = Activation('relu', name="ac3a_2b")(bn3a_2b)
conv3a_2c = Conv2D(filters=512, name='conv3a_2c', **kwargs_c)(ac3a_2b)
mvn3a_2c = Lambda(mvn, name='mvn3a_2c')(conv3a_2c)
bn3a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2c")(mvn3a_2c)
res3a = average([bn3a_1, bn3a_2c], name='res3a')
ac3a = Activation('relu', name="ac3a")(res3a)
# 3b1 (note: unlike 2a-2c, the _2c conv here has no mvn before its BN)
conv3b1_2a = Conv2D(filters=128, name='conv3b1_2a', **kwargs_a)(ac3a)
mvn3b1_2a = Lambda(mvn, name='mvn3b1_2a')(conv3b1_2a)
bn3b1_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2a")(mvn3b1_2a)
ac3b1_2a = Activation('relu', name="ac3b1_2a")(bn3b1_2a)
conv3b1_2b = Conv2D(filters=128, name='conv3b1_2b', **kwargs_b)(ac3b1_2a)
mvn3b1_2b = Lambda(mvn, name='mvn3b1_2b')(conv3b1_2b)
bn3b1_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2b")(mvn3b1_2b)
ac3b1_2b = Activation('relu', name="ac3b1_2b")(bn3b1_2b)
conv3b1_2c = Conv2D(filters=512, name='conv3b1_2c', **kwargs_c)(ac3b1_2b)
bn3b1_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2c")(conv3b1_2c)
res3b1 = average([ac3a, bn3b1_2c], name='res3b1')
ac3b1 = Activation('relu', name="ac3b1")(res3b1)
# 3b2
conv3b2_2a = Conv2D(filters=128, name='conv3b2_2a', **kwargs_a)(ac3b1)
mvn3b2_2a = Lambda(mvn, name='mvn3b2_2a')(conv3b2_2a)
bn3b2_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2a")(mvn3b2_2a)
ac3b2_2a = Activation('relu', name="ac3b2_2a")(bn3b2_2a)
conv3b2_2b = Conv2D(filters=128, name='conv3b2_2b', **kwargs_b)(ac3b2_2a)
mvn3b2_2b = Lambda(mvn, name='mvn3b2_2b')(conv3b2_2b)
bn3b2_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2b")(mvn3b2_2b)
ac3b2_2b = Activation('relu', name="ac3b2_2b")(bn3b2_2b)
conv3b2_2c = Conv2D(filters=512, name='conv3b2_2c', **kwargs_c)(ac3b2_2b)
bn3b2_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2c")(conv3b2_2c)
res3b2 = average([ac3b1, bn3b2_2c], name='res3b2')
ac3b2 = Activation('relu', name="ac3b2")(res3b2)
# 3b3
conv3b3_2a = Conv2D(filters=128, name='conv3b3_2a', **kwargs_a)(ac3b2)
mvn3b3_2a = Lambda(mvn, name='mvn3b3_2a')(conv3b3_2a)
bn3b3_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2a")(mvn3b3_2a)
ac3b3_2a = Activation('relu', name="ac3b3_2a")(bn3b3_2a)
conv3b3_2b = Conv2D(filters=128, name='conv3b3_2b', **kwargs_b)(ac3b3_2a)
mvn3b3_2b = Lambda(mvn, name='mvn3b3_2b')(conv3b3_2b)
bn3b3_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2b")(mvn3b3_2b)
ac3b3_2b = Activation('relu', name="ac3b3_2b")(bn3b3_2b)
conv3b3_2c = Conv2D(filters=512, name='conv3b3_2c', **kwargs_c)(ac3b3_2b)
bn3b3_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2c")(conv3b3_2c)
res3b3 = average([ac3b2, bn3b3_2c], name='res3b3')
ac3b3 = Activation('relu', name="ac3b3")(res3b3)
# Stage 4: six bottleneck blocks (256-256-1024); spatial stride stays at
# stage-3 resolution — dilation-2 3x3 convs replace the downsampling.
# 4a
conv4a_1 = Conv2D(filters=1024, name='conv4a_1', **kwargs_a)(ac3b3) # not using down sampling, using atrous convolution layer instead
mvn4a_1 = Lambda(mvn, name='mvn4a_1')(conv4a_1)
bn4a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_1")(mvn4a_1)
conv4a_2a = Conv2D(filters=256, name='conv4a_2a', **kwargs_a)(ac3b3) # not using down sampling, using atrous convolution layer instead
mvn4a_2a = Lambda(mvn, name='mvn4a_2a')(conv4a_2a)
bn4a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2a")(mvn4a_2a)
ac4a_2a = Activation('relu', name="ac4a_2a")(bn4a_2a)
conv4a_2b = Conv2D(filters=256, name='conv4a_2b', **kwargs_atrous)(ac4a_2a)#atrous convolution layer
mvn4a_2b = Lambda(mvn, name='mvn4a_2b')(conv4a_2b)
bn4a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2b")(mvn4a_2b)
ac4a_2b = Activation('relu', name="ac4a_2b")(bn4a_2b)
conv4a_2c = Conv2D(filters=1024, name='conv4a_2c', **kwargs_c)(ac4a_2b)
mvn4a_2c = Lambda(mvn, name='mvn4a_2c')(conv4a_2c)
bn4a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2c")(mvn4a_2c)
res4a = average([bn4a_1, bn4a_2c], name='res4a')
ac4a = Activation('relu', name="ac4a")(res4a)
# 4b1
conv4b1_2a = Conv2D(filters=256, name='conv4b1_2a', **kwargs_a)(ac4a)
mvn4b1_2a = Lambda(mvn, name='mvn4b1_2a')(conv4b1_2a)
bn4b1_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2a")(mvn4b1_2a)
ac4b1_2a = Activation('relu', name="ac4b1_2a")(bn4b1_2a)
conv4b1_2b = Conv2D(filters=256, name='conv4b1_2b', **kwargs_atrous)(ac4b1_2a)#atrous convolution layer
mvn4b1_2b = Lambda(mvn, name='mvn4b1_2b')(conv4b1_2b)
bn4b1_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2b")(mvn4b1_2b)
ac4b1_2b = Activation('relu', name="ac4b1_2b")(bn4b1_2b)
conv4b1_2c = Conv2D(filters=1024, name='conv4b1_2c', **kwargs_c)(ac4b1_2b)
bn4b1_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2c")(conv4b1_2c)
res4b1 = average([ac4a, bn4b1_2c], name='res4b1')
ac4b1 = Activation('relu', name="ac4b1")(res4b1)
# 4b2
conv4b2_2a = Conv2D(filters=256, name='conv4b2_2a', **kwargs_a)(ac4b1)
mvn4b2_2a = Lambda(mvn, name='mvn4b2_2a')(conv4b2_2a)
bn4b2_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2a")(mvn4b2_2a)
ac4b2_2a = Activation('relu', name="ac4b2_2a")(bn4b2_2a)
conv4b2_2b = Conv2D(filters=256, name='conv4b2_2b', **kwargs_atrous)(ac4b2_2a)#atrous convolution layer
mvn4b2_2b = Lambda(mvn, name='mvn4b2_2b')(conv4b2_2b)
bn4b2_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2b")(mvn4b2_2b)
ac4b2_2b = Activation('relu', name="ac4b2_2b")(bn4b2_2b)
conv4b2_2c = Conv2D(filters=1024, name='conv4b2_2c', **kwargs_c)(ac4b2_2b)
bn4b2_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2c")(conv4b2_2c)
res4b2 = average([ac4b1, bn4b2_2c], name='res4b2')
ac4b2 = Activation('relu', name="ac4b2")(res4b2)
# 4b3
conv4b3_2a = Conv2D(filters=256, name='conv4b3_2a', **kwargs_a)(ac4b2)
mvn4b3_2a = Lambda(mvn, name='mvn4b3_2a')(conv4b3_2a)
bn4b3_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2a")(mvn4b3_2a)
ac4b3_2a = Activation('relu', name="ac4b3_2a")(bn4b3_2a)
conv4b3_2b = Conv2D(filters=256, name='conv4b3_2b', **kwargs_atrous)(ac4b3_2a)#atrous convolution layer
mvn4b3_2b = Lambda(mvn, name='mvn4b3_2b')(conv4b3_2b)
bn4b3_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2b")(mvn4b3_2b)
ac4b3_2b = Activation('relu', name="ac4b3_2b")(bn4b3_2b)
conv4b3_2c = Conv2D(filters=1024, name='conv4b3_2c', **kwargs_c)(ac4b3_2b)
bn4b3_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2c")(conv4b3_2c)
res4b3 = average([ac4b2, bn4b3_2c], name='res4b3')
ac4b3 = Activation('relu', name="ac4b3")(res4b3)
# 4b4
conv4b4_2a = Conv2D(filters=256, name='conv4b4_2a', **kwargs_a)(ac4b3)
mvn4b4_2a = Lambda(mvn, name='mvn4b4_2a')(conv4b4_2a)
bn4b4_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2a")(mvn4b4_2a)
ac4b4_2a = Activation('relu', name="ac4b4_2a")(bn4b4_2a)
conv4b4_2b = Conv2D(filters=256, name='conv4b4_2b', **kwargs_atrous)(ac4b4_2a)#atrous convolution layer
mvn4b4_2b = Lambda(mvn, name='mvn4b4_2b')(conv4b4_2b)
bn4b4_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2b")(mvn4b4_2b)
ac4b4_2b = Activation('relu', name="ac4b4_2b")(bn4b4_2b)
conv4b4_2c = Conv2D(filters=1024, name='conv4b4_2c', **kwargs_c)(ac4b4_2b)
bn4b4_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2c")(conv4b4_2c)
res4b4 = average([ac4b3, bn4b4_2c], name='res4b4')
ac4b4 = Activation('relu', name="ac4b4")(res4b4)
# 4b5
conv4b5_2a = Conv2D(filters=256, name='conv4b5_2a', **kwargs_a)(ac4b4)
mvn4b5_2a = Lambda(mvn, name='mvn4b5_2a')(conv4b5_2a)
bn4b5_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2a")(mvn4b5_2a)
ac4b5_2a = Activation('relu', name="ac4b5_2a")(bn4b5_2a)
conv4b5_2b = Conv2D(filters=256, name='conv4b5_2b', **kwargs_atrous)(ac4b5_2a)#atrous convolution layer
mvn4b5_2b = Lambda(mvn, name='mvn4b5_2b')(conv4b5_2b)
bn4b5_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2b")(mvn4b5_2b)
ac4b5_2b = Activation('relu', name="ac4b5_2b")(bn4b5_2b)
conv4b5_2c = Conv2D(filters=1024, name='conv4b5_2c', **kwargs_c)(ac4b5_2b)
bn4b5_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2c")(conv4b5_2c)
res4b5 = average([ac4b4, bn4b5_2c], name='res4b5')
ac4b5 = Activation('relu', name="ac4b5")(res4b5)
# Stage 5: three bottleneck blocks (512-512-2048); again no downsampling,
# dilation-4 3x3 convs widen the receptive field instead.
# 5a
conv5a_1 = Conv2D(filters=2048, name='conv5a_1', **kwargs_a)(ac4b5)#not downsampling, using atrous conv instead
mvn5a_1 = Lambda(mvn, name='mvn5a_1')(conv5a_1)
bn5a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_1")(mvn5a_1)
conv5a_2a = Conv2D(filters=512, name='conv5a_2a', **kwargs_a)(ac4b5)
mvn5a_2a = Lambda(mvn, name='mvn5a_2a')(conv5a_2a)
bn5a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2a")(mvn5a_2a)
ac5a_2a = Activation('relu', name="ac5a_2a")(bn5a_2a)
conv5a_2b = Conv2D(filters=512, name='conv5a_2b', **kwargs_atrous4)(ac5a_2a)#atrous conv
mvn5a_2b = Lambda(mvn, name='mvn5a_2b')(conv5a_2b)
bn5a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2b")(mvn5a_2b)
ac5a_2b = Activation('relu', name="ac5a_2b")(bn5a_2b)
conv5a_2c = Conv2D(filters=2048, name='conv5a_2c', **kwargs_c)(ac5a_2b)
mvn5a_2c = Lambda(mvn, name='mvn5a_2c')(conv5a_2c)
bn5a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2c")(mvn5a_2c)
res5a = average([bn5a_1, bn5a_2c], name='res5a')
ac5a = Activation('relu', name="ac5a")(res5a)
# 5b
conv5b_2a = Conv2D(filters=512, name='conv5b_2a', **kwargs_a)(ac5a)
mvn5b_2a = Lambda(mvn, name='mvn5b_2a')(conv5b_2a)
bn5b_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2a")(mvn5b_2a)
ac5b_2a = Activation('relu', name="ac5b_2a")(bn5b_2a)
conv5b_2b = Conv2D(filters=512, name='conv5b_2b', **kwargs_atrous4)(ac5b_2a)#atrous conv
mvn5b_2b = Lambda(mvn, name='mvn5b_2b')(conv5b_2b)
bn5b_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2b")(mvn5b_2b)
ac5b_2b = Activation('relu', name="ac5b_2b")(bn5b_2b)
conv5b_2c = Conv2D(filters=2048, name='conv5b_2c', **kwargs_c)(ac5b_2b)
bn5b_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2c")(conv5b_2c)
res5b = average([ac5a, bn5b_2c], name='res5b')
ac5b = Activation('relu', name="ac5b")(res5b)
# 5c
conv5c_2a = Conv2D(filters=512, name='conv5c_2a', **kwargs_a)(ac5b)
mvn5c_2a = Lambda(mvn, name='mvn5c_2a')(conv5c_2a)
bn5c_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2a")(mvn5c_2a)
ac5c_2a = Activation('relu', name="ac5c_2a")(bn5c_2a)
conv5c_2b = Conv2D(filters=512, name='conv5c_2b', **kwargs_atrous4)(ac5c_2a)#atrous conv
mvn5c_2b = Lambda(mvn, name='mvn5c_2b')(conv5c_2b)
bn5c_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2b")(mvn5c_2b)
ac5c_2b = Activation('relu', name="ac5c_2b")(bn5c_2b)
conv5c_2c = Conv2D(filters=2048, name='conv5c_2c', **kwargs_c)(ac5c_2b)
bn5c_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2c")(conv5c_2c)
res5c = average([ac5b, bn5c_2c], name='res5c')
ac5c = Activation('relu', name="ac5c")(res5c)
drop5c = Dropout(rate=0.5, name='drop5c')(ac5c)
# FCN head: class-score maps from stage 5 (fc1), stage 2 (fc2) and the
# stem (fc3), each as an average of dilation-2 and dilation-4 convs,
# fused via transpose-conv upsampling + crop back to input resolution.
fc1_c0 = Conv2D(filters=num_classes, name='fc1_c0', **kwargs_atrous)(drop5c) # atrous conv
fc1_c1 = Conv2D(filters=num_classes, name='fc1_c1', **kwargs_atrous4)(drop5c) # atrous conv
fc1 = average([fc1_c0, fc1_c1], name='fc1')
us1 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us1')(fc1)
fc2_c0 = Conv2D(filters=num_classes, name='fc2_c0', **kwargs_atrous)(drop2c) # atrous conv
fc2_c1 = Conv2D(filters=num_classes, name='fc2_c1', **kwargs_atrous4)(drop2c) # atrous conv
fc2 = average([fc2_c0, fc2_c1], name='fc2')
crop1 = Lambda(crop, name='crop1')([fc2, us1])
fuse1 = average([crop1, fc2], name='fuse1')
us2 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us2')(fuse1)
# NOTE(review): crop2 is computed but never referenced afterwards (the
# fc3 path below reuses us2 directly) — dead layer; confirm and remove.
crop2 = Lambda(crop, name='crop2')([data, us2])
fc3_c0 = Conv2D(filters=num_classes, name='fc3_c0', **kwargs_atrous)(ac1) # atrous conv
fc3_c1 = Conv2D(filters=num_classes, name='fc3_c1', **kwargs_atrous4)(ac1) # atrous conv
fc3 = average([fc3_c0, fc3_c1], name='fc3')
crop3 = Lambda(crop, name='crop3')([fc3, us2])
fuse2 = average([crop3, fc3], name='fuse2')
us3 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us3')(fuse2)
crop4 = Lambda(crop, name='crop4')([data, us3])
# Final 1x1 classifier producing the per-pixel class probabilities.
predictions = Conv2D(filters=num_classes, kernel_size=1,
strides=1, activation=activation, padding='valid',
kernel_initializer='glorot_uniform', use_bias=True,
name='predictions')(crop4)
model = Model(inputs=data, outputs=predictions)
if transfer == True:
# Transfer learning: load pretrained weights (if given) and freeze the
# first 10 layers so only deeper layers are fine-tuned.
if weights is not None:
model.load_weights(weights)
for layer in model.layers[:10]:
layer.trainable = False
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=loss,
metrics=['accuracy', dice_coef_endo])
else:
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=loss,
metrics=['accuracy', dice_coef_endo, dice_coef_myo, dice_coef_rv])
return model
if __name__ == '__main__':
    # Smoke-test the architecture: build the 4-class transfer model on
    # 100x100 single-channel inputs, render the layer graph to PNG, and
    # print the textual summary.
    demo_model = fcn_model_resnet50((100, 100, 1), 4, transfer=True, weights=None)
    plot_model(demo_model, show_shapes=True, to_file='fcn_model_resnet50.png')
    demo_model.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,925 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /train_sunnybrook_segnet.py | import os
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import tfmodel
import numpy as np
# --- dataset layout: <cwd>/Data/{Training,Testing,Online} -------------------
DATA_NAME = 'Data'
TRAIN_SOURCE = "Training"
TEST_SOURCE = 'Testing'
ONLINE_SOURCE = 'Online'
# Run identifier; summaries and checkpoints for this run live under
# <cwd>/Output/SELU_Run03/.
RUN_NAME = "SELU_Run03"
OUTPUT_NAME = 'Output'
CHECKPOINT_FN = 'model.ckpt'
WORKING_DIR = os.getcwd()
TRAIN_DATA_DIR = os.path.join(WORKING_DIR, DATA_NAME, TRAIN_SOURCE)
TEST_DATA_DIR = os.path.join(WORKING_DIR, DATA_NAME, TEST_SOURCE)
ONLINE_DATA_DIR = os.path.join(WORKING_DIR, DATA_NAME, ONLINE_SOURCE)
ROOT_LOG_DIR = os.path.join(WORKING_DIR, OUTPUT_NAME)
LOG_DIR = os.path.join(ROOT_LOG_DIR, RUN_NAME)
CHECKPOINT_FL = os.path.join(LOG_DIR, CHECKPOINT_FN)
# Separate event-file directories so TensorBoard shows train/test curves
# side by side.
TRAIN_WRITER_DIR = os.path.join(LOG_DIR, TRAIN_SOURCE)
TEST_WRITER_DIR = os.path.join(LOG_DIR, TEST_SOURCE)
# Training schedule.
# NOTE(review): NUM_EPOCHS is defined but the loop in main() is bounded by
# MAX_STEP only — confirm whether NUM_EPOCHS is still meant to be used.
NUM_EPOCHS = 10
MAX_STEP = 5000
BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
LEARNING_RATE = 1e-04
SAVE_RESULTS_INTERVAL = 5      # steps between summary/test-batch logging
SAVE_CHECKPOINT_INTERVAL = 100 # steps between checkpoint saves
CROP_SIZE = 128                # spatial size fed to the network
def main():
    """Train the SegNet-style model on Sunnybrook data, then score the online set.

    Builds the TF graph (inference, dice loss, dice metrics), restores or
    initializes a session from ``LOG_DIR``, trains until ``global_step``
    reaches ``MAX_STEP`` (periodically logging train/test summaries and
    saving checkpoints), and finally runs the online dataset through the
    network, saving predicted overlays and a per-image dice-score CSV.
    """
    # NOTE(review): assumes GetData.next_batch yields
    # (images, labels, filenames) triples — confirm against tfmodel.
    train_data = tfmodel.GetData(TRAIN_DATA_DIR)
    test_data = tfmodel.GetData(TEST_DATA_DIR)
    online_data = tfmodel.GetData(ONLINE_DATA_DIR)

    # Create output directories up front; exist_ok avoids the race-prone
    # check-then-create pattern of the original os.path.exists guards.
    for directory in (ROOT_LOG_DIR, LOG_DIR, TRAIN_WRITER_DIR, TEST_WRITER_DIR):
        os.makedirs(directory, exist_ok=True)

    g = tf.Graph()
    with g.as_default():
        images, labels = tfmodel.placeholder_inputs(batch_size=BATCH_SIZE)
        # Keep the raw logits tensor under a distinct name; only the softmax
        # output is consumed below.
        raw_logits, softmax_logits = tfmodel.inference(images, class_inc_bg=2, crop_size=CROP_SIZE)
        tfmodel.add_output_images(images=images, logits=softmax_logits, labels=labels)
        loss = tfmodel.loss_dice(logits=softmax_logits, labels=labels, crop_size=CROP_SIZE)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        # Use the module-level LEARNING_RATE constant instead of repeating
        # the literal 1e-04 (same value; keeps the config in one place).
        train_op = tfmodel.training(loss=loss, learning_rate=LEARNING_RATE, global_step=global_step)
        accuracy = tfmodel.eval_dice(logits=softmax_logits, labels=labels, crop_size=CROP_SIZE, smooth=1.0)
        accuracy_array = tfmodel.eval_dice_array(logits=softmax_logits, labels=labels, crop_size=CROP_SIZE, smooth=1.0)
        summary = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(tf.global_variables())

        # SessionManager restores the newest checkpoint in LOG_DIR when one
        # exists, otherwise runs ``init`` — training resumes transparently.
        sm = tf.train.SessionManager(graph=g)
        with sm.prepare_session("", init_op=init, saver=saver, checkpoint_dir=LOG_DIR) as sess:
            sess.run(tf.local_variables_initializer())
            train_writer = tf.summary.FileWriter(TRAIN_WRITER_DIR, sess.graph)
            test_writer = tf.summary.FileWriter(TEST_WRITER_DIR)

            global_step_value, = sess.run([global_step])
            print("Last trained iteration was: ", global_step_value)

            # ---- training loop ------------------------------------------
            while True:
                if global_step_value >= MAX_STEP:
                    print(f"Reached MAX_STEP: {MAX_STEP} at step: {global_step_value}")
                    break
                images_batch, labels_batch, _ = train_data.next_batch(BATCH_SIZE)
                feed_dict = {images: images_batch, labels: labels_batch}
                if (global_step_value + 1) % SAVE_RESULTS_INTERVAL == 0:
                    # Train step with summaries, then one held-out test
                    # batch logged to the test writer at the same step.
                    _, loss_value, accuracy_value, global_step_value, summary_str = sess.run(
                        [train_op, loss, accuracy, global_step, summary], feed_dict=feed_dict)
                    train_writer.add_summary(summary_str, global_step=global_step_value)
                    print(f"TRAIN Step: {global_step_value}\tLoss: {loss_value}\tAccuracy: {accuracy_value}")

                    images_batch, labels_batch, _ = test_data.next_batch(TEST_BATCH_SIZE)
                    feed_dict = {images: images_batch, labels: labels_batch}
                    loss_value, accuracy_value, global_step_value, summary_str = sess.run(
                        [loss, accuracy, global_step, summary], feed_dict=feed_dict)
                    test_writer.add_summary(summary_str, global_step=global_step_value)
                    print(f"TEST Step: {global_step_value}\tLoss: {loss_value}\tAccuracy: {accuracy_value}")
                else:
                    _, loss_value, accuracy_value, global_step_value = sess.run(
                        [train_op, loss, accuracy, global_step], feed_dict=feed_dict)
                    print(f"TRAIN Step: {global_step_value}\tLoss: {loss_value}\tAccuracy: {accuracy_value}")

                if global_step_value % SAVE_CHECKPOINT_INTERVAL == 0:
                    saver.save(sess, CHECKPOINT_FL, global_step=global_step_value)
                    print("Checkpoint Saved")

            # ---- online-set evaluation ----------------------------------
            evalArr = []
            fileArr = []
            # NOTE(review): the original skips the last 5 batches of the
            # online set (the ``- 5``); intent unclear — confirm before changing.
            for index in range(int(online_data.examples / TEST_BATCH_SIZE - 5)):
                images_batch, labels_batch, files_batch = online_data.next_batch(TEST_BATCH_SIZE)
                feed_dict = {images: images_batch, labels: labels_batch}
                # Bind the fetched numpy arrays to fresh names: the original
                # rebound ``logits``/``accuracy`` here, clobbering the graph
                # tensors of the same name.
                logits_out, loss_value, acc_batch = sess.run(
                    [softmax_logits, loss, accuracy_array], feed_dict=feed_dict)
                tfmodel.save_output_images(images=images_batch, logits=logits_out, image_names=files_batch,
                                           contour_type='i')
                evalArr = np.append(evalArr, list(acc_batch))
                fileArr = np.append(fileArr, list(files_batch))

            # Raw string: same characters as the original literal, but without
            # the deprecated invalid-escape-sequence warnings for ``\c``/``\S``.
            save_dir = r'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission'
            detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format('i'))
            resArr = np.transpose([fileArr, evalArr])
            np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')

            train_writer.flush()
            test_writer.flush()
            saver.save(sess, CHECKPOINT_FL, global_step=global_step_value)
            print("Checkpoint Saved")
    print("Stopping")


if __name__ == '__main__':
    main()
60,926 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /fcn_model_resnet.py | #!/usr/bin/env python2.7
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Lambda
from keras.layers import Input, average
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, AtrousConvolution2D
from keras.layers import ZeroPadding2D, Cropping2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import tensorflow as tf
import numpy as np
from keras.regularizers import l2
import pylab
import matplotlib.pyplot as plt
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each
from layer_common import mvn, crop
def fcn_model_resnet(input_shape, num_classes, transfer=True, contour_type='i', weights=None):
''' "Skip" FCN architecture similar to Long et al., 2015
https://arxiv.org/abs/1411.4038
'''
if num_classes == 2:
num_classes = 1
loss = dice_coef_loss
activation = 'sigmoid'
else:
if transfer == True:
if contour_type == 'i':
loss = dice_coef_loss_endo
elif contour_type == 'o':
loss = dice_coef_loss_myo
elif contour_type == 'r':
loss = dice_coef_loss_rv
elif contour_type == 'a':
loss = dice_coef_loss
else:
loss = dice_coef_loss
activation = 'softmax'
kwargs_a = dict(
kernel_size=1,
strides=1,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_b = dict(
kernel_size=3,
strides=1,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_c = kwargs_a
kwargs_ds = dict(
kernel_size=1,
strides=2,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous = dict(
kernel_size=3,
strides=1,
dilation_rate=2,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous4 = dict(
kernel_size=3,
strides=1,
dilation_rate=4,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous6 = dict(
kernel_size=3,
strides=1,
dilation_rate=6,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous12 = dict(
kernel_size=3,
strides=1,
dilation_rate=12,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous18 = dict(
kernel_size=3,
strides=1,
dilation_rate=18,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
kwargs_atrous24 = dict(
kernel_size=3,
strides=1,
dilation_rate=24,
activation=None,
padding='same',
use_bias=False,
kernel_initializer='glorot_uniform',
activity_regularizer=None,
kernel_constraint=None,
trainable=True,
)
weight_decay = 1E-4
data = Input(shape=input_shape, dtype='float', name='data')
mvn0 = Lambda(mvn, name='mvn0')(data)
conv1 = Conv2D(filters=64, name='conv1', kernel_size=7, strides=2, activation=None, padding='same',
use_bias=False, kernel_initializer='glorot_uniform')(mvn0)
mvn1 = Lambda(mvn, name='mvn1')(conv1)
bn1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True)(mvn1)
ac1 = Activation('relu')(bn1)
pool1 = MaxPooling2D(pool_size=3, strides=2,
padding='same', name='pool1')(ac1)
#2a
conv2a_1 = Conv2D(filters=256, name='conv2a_1', **kwargs_a)(pool1)
mvn2a_1 = Lambda(mvn, name='mvn2a_1')(conv2a_1)
bn2a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_1")(mvn2a_1)
conv2a_2a = Conv2D(filters=64, name='conv2a_2a', **kwargs_a)(pool1)
mvn2a_2a = Lambda(mvn, name='mvn2a_2a')(conv2a_2a)
bn2a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2a")(mvn2a_2a)
ac2a_2a = Activation('relu', name="ac2a_2a")(bn2a_2a)
conv2a_2b = Conv2D(filters=64, name='conv2a_2b', **kwargs_b)(ac2a_2a)
mvn2a_2b = Lambda(mvn, name='mvn2a_2b')(conv2a_2b)
bn2a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2b")(mvn2a_2b)
ac2a_2b = Activation('relu', name="ac2a_2b")(bn2a_2b)
conv2a_2c = Conv2D(filters=256, name='conv2a_2c', **kwargs_c)(ac2a_2b)
mvn2a_2c = Lambda(mvn, name='mvn2a_2c')(conv2a_2c)
bn2a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2a_2c")(mvn2a_2c)
res2a = average([bn2a_1, bn2a_2c], name='res2a')
ac2a= Activation('relu', name="ac2a")(res2a)
# 2b
conv2b_2a = Conv2D(filters=64, name='conv2b_2a', **kwargs_a)(ac2a)
mvn2b_2a = Lambda(mvn, name='mvn2b_2a')(conv2b_2a)
bn2b_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2a")(mvn2b_2a)
ac2b_2a = Activation('relu', name="ac2b_2a")(bn2b_2a)
conv2b_2b = Conv2D(filters=64, name='conv2b_2b', **kwargs_b)(ac2b_2a)
mvn2b_2b = Lambda(mvn, name='mvn2b_2b')(conv2b_2b)
bn2b_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2b")(mvn2b_2b)
ac2b_2b = Activation('relu', name="ac2b_2b")(bn2b_2b)
conv2b_2c = Conv2D(filters=256, name='conv2b_2c', **kwargs_c)(ac2b_2b)
mvn2b_2c = Lambda(mvn, name='mvn2b_2c')(conv2b_2c)
bn2b_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay), trainable=True, name="bn2b_2c")(mvn2b_2c)
res2b = average([ac2a, bn2b_2c], name='res2b')
ac2b= Activation('relu', name="ac2b")(res2b)
# 2c
conv2c_2a = Conv2D(filters=64, name='conv2c_2a', **kwargs_a)(ac2b)
mvn2c_2a = Lambda(mvn, name='mvn2c_2a')(conv2c_2a)
bn2c_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2a")(mvn2c_2a)
ac2c_2a = Activation('relu', name="ac2c_2a")(bn2c_2a)
conv2c_2b = Conv2D(filters=64, name='conv2c_2b', **kwargs_b)(ac2c_2a)
mvn2c_2b = Lambda(mvn, name='mvn2c_2b')(conv2c_2b)
bn2c_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2b")(mvn2c_2b)
ac2c_2b = Activation('relu', name="ac2c_2b")(bn2c_2b)
conv2c_2c = Conv2D(filters=256, name='conv2c_2c', **kwargs_c)(ac2c_2b)
mvn2c_2c = Lambda(mvn, name='mvn2c_2c')(conv2c_2c)
bn2c_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn2c_2c")(mvn2c_2c)
res2c = average([ac2b, bn2c_2c], name='res2c')
ac2c = Activation('relu', name="ac2c")(res2c)
drop2c = Dropout(rate=0.5, name='drop2c')(ac2c)
# 3a
conv3a_1 = Conv2D(filters=512, name='conv3a_1', **kwargs_ds)(drop2c)
mvn3a_1 = Lambda(mvn, name='mvn3a_1')(conv3a_1)
bn3a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_1")(mvn3a_1)
conv3a_2a = Conv2D(filters=128, name='conv3a_2a', **kwargs_ds)(drop2c)
mvn3a_2a = Lambda(mvn, name='mvn3a_2a')(conv3a_2a)
bn3a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2a")(mvn3a_2a)
ac3a_2a = Activation('relu', name="ac3a_2a")(bn3a_2a)
conv3a_2b = Conv2D(filters=128, name='conv3a_2b', **kwargs_b)(ac3a_2a)
mvn3a_2b = Lambda(mvn, name='mvn3a_2b')(conv3a_2b)
bn3a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2b")(mvn3a_2b)
ac3a_2b = Activation('relu', name="ac3a_2b")(bn3a_2b)
conv3a_2c = Conv2D(filters=512, name='conv3a_2c', **kwargs_c)(ac3a_2b)
mvn3a_2c = Lambda(mvn, name='mvn3a_2c')(conv3a_2c)
bn3a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3a_2c")(mvn3a_2c)
res3a = average([bn3a_1, bn3a_2c], name='res3a')
ac3a = Activation('relu', name="ac3a")(res3a)
# 3b1
conv3b1_2a = Conv2D(filters=128, name='conv3b1_2a', **kwargs_a)(ac3a)
mvn3b1_2a = Lambda(mvn, name='mvn3b1_2a')(conv3b1_2a)
bn3b1_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2a")(mvn3b1_2a)
ac3b1_2a = Activation('relu', name="ac3b1_2a")(bn3b1_2a)
conv3b1_2b = Conv2D(filters=128, name='conv3b1_2b', **kwargs_b)(ac3b1_2a)
mvn3b1_2b = Lambda(mvn, name='mvn3b1_2b')(conv3b1_2b)
bn3b1_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2b")(mvn3b1_2b)
ac3b1_2b = Activation('relu', name="ac3b1_2b")(bn3b1_2b)
conv3b1_2c = Conv2D(filters=512, name='conv3b1_2c', **kwargs_c)(ac3b1_2b)
bn3b1_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b1_2c")(conv3b1_2c)
res3b1 = average([ac3a, bn3b1_2c], name='res3b1')
ac3b1 = Activation('relu', name="ac3b1")(res3b1)
# 3b2
conv3b2_2a = Conv2D(filters=128, name='conv3b2_2a', **kwargs_a)(ac3b1)
mvn3b2_2a = Lambda(mvn, name='mvn3b2_2a')(conv3b2_2a)
bn3b2_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2a")(mvn3b2_2a)
ac3b2_2a = Activation('relu', name="ac3b2_2a")(bn3b2_2a)
conv3b2_2b = Conv2D(filters=128, name='conv3b2_2b', **kwargs_b)(ac3b2_2a)
mvn3b2_2b = Lambda(mvn, name='mvn3b2_2b')(conv3b2_2b)
bn3b2_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2b")(mvn3b2_2b)
ac3b2_2b = Activation('relu', name="ac3b2_2b")(bn3b2_2b)
conv3b2_2c = Conv2D(filters=512, name='conv3b2_2c', **kwargs_c)(ac3b2_2b)
bn3b2_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b2_2c")(conv3b2_2c)
res3b2 = average([ac3b1, bn3b2_2c], name='res3b2')
ac3b2 = Activation('relu', name="ac3b2")(res3b2)
# 3b3
conv3b3_2a = Conv2D(filters=128, name='conv3b3_2a', **kwargs_a)(ac3b2)
mvn3b3_2a = Lambda(mvn, name='mvn3b3_2a')(conv3b3_2a)
bn3b3_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2a")(mvn3b3_2a)
ac3b3_2a = Activation('relu', name="ac3b3_2a")(bn3b3_2a)
conv3b3_2b = Conv2D(filters=128, name='conv3b3_2b', **kwargs_b)(ac3b3_2a)
mvn3b3_2b = Lambda(mvn, name='mvn3b3_2b')(conv3b3_2b)
bn3b3_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2b")(mvn3b3_2b)
ac3b3_2b = Activation('relu', name="ac3b3_2b")(bn3b3_2b)
conv3b3_2c = Conv2D(filters=512, name='conv3b3_2c', **kwargs_c)(ac3b3_2b)
bn3b3_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn3b3_2c")(conv3b3_2c)
res3b3 = average([ac3b2, bn3b3_2c], name='res3b3')
ac3b3 = Activation('relu', name="ac3b3")(res3b3)
# 4a
conv4a_1 = Conv2D(filters=1024, name='conv4a_1', **kwargs_a)(ac3b3) # not using down sampling, using atrous convolution layer instead
mvn4a_1 = Lambda(mvn, name='mvn4a_1')(conv4a_1)
bn4a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_1")(mvn4a_1)
conv4a_2a = Conv2D(filters=256, name='conv4a_2a', **kwargs_a)(ac3b3) # not using down sampling, using atrous convolution layer instead
mvn4a_2a = Lambda(mvn, name='mvn4a_2a')(conv4a_2a)
bn4a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2a")(mvn4a_2a)
ac4a_2a = Activation('relu', name="ac4a_2a")(bn4a_2a)
conv4a_2b = Conv2D(filters=256, name='conv4a_2b', **kwargs_atrous)(ac4a_2a)#atrous convolution layer
mvn4a_2b = Lambda(mvn, name='mvn4a_2b')(conv4a_2b)
bn4a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2b")(mvn4a_2b)
ac4a_2b = Activation('relu', name="ac4a_2b")(bn4a_2b)
conv4a_2c = Conv2D(filters=1024, name='conv4a_2c', **kwargs_c)(ac4a_2b)
mvn4a_2c = Lambda(mvn, name='mvn4a_2c')(conv4a_2c)
bn4a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4a_2c")(mvn4a_2c)
res4a = average([bn4a_1, bn4a_2c], name='res4a')
ac4a = Activation('relu', name="ac4a")(res4a)
# 4b1
conv4b1_2a = Conv2D(filters=256, name='conv4b1_2a', **kwargs_a)(ac4a)
mvn4b1_2a = Lambda(mvn, name='mvn4b1_2a')(conv4b1_2a)
bn4b1_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2a")(mvn4b1_2a)
ac4b1_2a = Activation('relu', name="ac4b1_2a")(bn4b1_2a)
conv4b1_2b = Conv2D(filters=256, name='conv4b1_2b', **kwargs_atrous)(ac4b1_2a)#atrous convolution layer
mvn4b1_2b = Lambda(mvn, name='mvn4b1_2b')(conv4b1_2b)
bn4b1_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2b")(mvn4b1_2b)
ac4b1_2b = Activation('relu', name="ac4b1_2b")(bn4b1_2b)
conv4b1_2c = Conv2D(filters=1024, name='conv4b1_2c', **kwargs_c)(ac4b1_2b)
bn4b1_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b1_2c")(conv4b1_2c)
res4b1 = average([ac4a, bn4b1_2c], name='res4b1')
ac4b1 = Activation('relu', name="ac4b1")(res4b1)
# 4b2
conv4b2_2a = Conv2D(filters=256, name='conv4b2_2a', **kwargs_a)(ac4b1)
mvn4b2_2a = Lambda(mvn, name='mvn4b2_2a')(conv4b2_2a)
bn4b2_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2a")(mvn4b2_2a)
ac4b2_2a = Activation('relu', name="ac4b2_2a")(bn4b2_2a)
conv4b2_2b = Conv2D(filters=256, name='conv4b2_2b', **kwargs_atrous)(ac4b2_2a)#atrous convolution layer
mvn4b2_2b = Lambda(mvn, name='mvn4b2_2b')(conv4b2_2b)
bn4b2_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2b")(mvn4b2_2b)
ac4b2_2b = Activation('relu', name="ac4b2_2b")(bn4b2_2b)
conv4b2_2c = Conv2D(filters=1024, name='conv4b2_2c', **kwargs_c)(ac4b2_2b)
bn4b2_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b2_2c")(conv4b2_2c)
res4b2 = average([ac4b1, bn4b2_2c], name='res4b2')
ac4b2 = Activation('relu', name="ac4b2")(res4b2)
# 4b3
conv4b3_2a = Conv2D(filters=256, name='conv4b3_2a', **kwargs_a)(ac4b2)
mvn4b3_2a = Lambda(mvn, name='mvn4b3_2a')(conv4b3_2a)
bn4b3_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2a")(mvn4b3_2a)
ac4b3_2a = Activation('relu', name="ac4b3_2a")(bn4b3_2a)
conv4b3_2b = Conv2D(filters=256, name='conv4b3_2b', **kwargs_atrous)(ac4b3_2a)#atrous convolution layer
mvn4b3_2b = Lambda(mvn, name='mvn4b3_2b')(conv4b3_2b)
bn4b3_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2b")(mvn4b3_2b)
ac4b3_2b = Activation('relu', name="ac4b3_2b")(bn4b3_2b)
conv4b3_2c = Conv2D(filters=1024, name='conv4b3_2c', **kwargs_c)(ac4b3_2b)
bn4b3_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b3_2c")(conv4b3_2c)
res4b3 = average([ac4b2, bn4b3_2c], name='res4b3')
ac4b3 = Activation('relu', name="ac4b3")(res4b3)
# 4b4
conv4b4_2a = Conv2D(filters=256, name='conv4b4_2a', **kwargs_a)(ac4b3)
mvn4b4_2a = Lambda(mvn, name='mvn4b4_2a')(conv4b4_2a)
bn4b4_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2a")(mvn4b4_2a)
ac4b4_2a = Activation('relu', name="ac4b4_2a")(bn4b4_2a)
conv4b4_2b = Conv2D(filters=256, name='conv4b4_2b', **kwargs_atrous)(ac4b4_2a)#atrous convolution layer
mvn4b4_2b = Lambda(mvn, name='mvn4b4_2b')(conv4b4_2b)
bn4b4_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2b")(mvn4b4_2b)
ac4b4_2b = Activation('relu', name="ac4b4_2b")(bn4b4_2b)
conv4b4_2c = Conv2D(filters=1024, name='conv4b4_2c', **kwargs_c)(ac4b4_2b)
bn4b4_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b4_2c")(conv4b4_2c)
res4b4 = average([ac4b3, bn4b4_2c], name='res4b4')
ac4b4 = Activation('relu', name="ac4b4")(res4b4)
# 4b5
conv4b5_2a = Conv2D(filters=256, name='conv4b5_2a', **kwargs_a)(ac4b4)
mvn4b5_2a = Lambda(mvn, name='mvn4b5_2a')(conv4b5_2a)
bn4b5_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2a")(mvn4b5_2a)
ac4b5_2a = Activation('relu', name="ac4b5_2a")(bn4b5_2a)
conv4b5_2b = Conv2D(filters=256, name='conv4b5_2b', **kwargs_atrous)(ac4b5_2a)#atrous convolution layer
mvn4b5_2b = Lambda(mvn, name='mvn4b5_2b')(conv4b5_2b)
bn4b5_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2b")(mvn4b5_2b)
ac4b5_2b = Activation('relu', name="ac4b5_2b")(bn4b5_2b)
conv4b5_2c = Conv2D(filters=1024, name='conv4b5_2c', **kwargs_c)(ac4b5_2b)
bn4b5_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b5_2c")(conv4b5_2c)
res4b5 = average([ac4b4, bn4b5_2c], name='res4b5')
ac4b5 = Activation('relu', name="ac4b5")(res4b5)
# 4b6
conv4b6_2a = Conv2D(filters=256, name='conv4b6_2a', **kwargs_a)(ac4b5)
mvn4b6_2a = Lambda(mvn, name='mvn4b6_2a')(conv4b6_2a)
bn4b6_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b6_2a")(mvn4b6_2a)
ac4b6_2a = Activation('relu', name="ac4b6_2a")(bn4b6_2a)
conv4b6_2b = Conv2D(filters=256, name='conv4b6_2b', **kwargs_atrous)(ac4b6_2a)#atrous convolution layer
mvn4b6_2b = Lambda(mvn, name='mvn4b6_2b')(conv4b6_2b)
bn4b6_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b6_2b")(mvn4b6_2b)
ac4b6_2b = Activation('relu', name="ac4b6_2b")(bn4b6_2b)
conv4b6_2c = Conv2D(filters=1024, name='conv4b6_2c', **kwargs_c)(ac4b6_2b)
bn4b6_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b6_2c")(conv4b6_2c)
res4b6 = average([ac4b5, bn4b6_2c], name='res4b6')
ac4b6 = Activation('relu', name="ac4b6")(res4b6)
# 4b7
conv4b7_2a = Conv2D(filters=256, name='conv4b7_2a', **kwargs_a)(ac4b6)
mvn4b7_2a = Lambda(mvn, name='mvn4b7_2a')(conv4b7_2a)
bn4b7_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b7_2a")(mvn4b7_2a)
ac4b7_2a = Activation('relu', name="ac4b7_2a")(bn4b7_2a)
conv4b7_2b = Conv2D(filters=256, name='conv4b7_2b', **kwargs_atrous)(ac4b7_2a)#atrous convolution layer
mvn4b7_2b = Lambda(mvn, name='mvn4b7_2b')(conv4b7_2b)
bn4b7_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b7_2b")(mvn4b7_2b)
ac4b7_2b = Activation('relu', name="ac4b7_2b")(bn4b7_2b)
conv4b7_2c = Conv2D(filters=1024, name='conv4b7_2c', **kwargs_c)(ac4b7_2b)
bn4b7_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b7_2c")(conv4b7_2c)
res4b7 = average([ac4b6, bn4b7_2c], name='res4b7')
ac4b7 = Activation('relu', name="ac4b7")(res4b7)
# 4b8
conv4b8_2a = Conv2D(filters=256, name='conv4b8_2a', **kwargs_a)(ac4b7)
mvn4b8_2a = Lambda(mvn, name='mvn4b8_2a')(conv4b8_2a)
bn4b8_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b8_2a")(mvn4b8_2a)
ac4b8_2a = Activation('relu', name="ac4b8_2a")(bn4b8_2a)
conv4b8_2b = Conv2D(filters=256, name='conv4b8_2b', **kwargs_atrous)(ac4b8_2a)#atrous convolution layer
mvn4b8_2b = Lambda(mvn, name='mvn4b8_2b')(conv4b8_2b)
bn4b8_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b8_2b")(mvn4b8_2b)
ac4b8_2b = Activation('relu', name="ac4b8_2b")(bn4b8_2b)
conv4b8_2c = Conv2D(filters=1024, name='conv4b8_2c', **kwargs_c)(ac4b8_2b)
bn4b8_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b8_2c")(conv4b8_2c)
res4b8 = average([ac4b7, bn4b8_2c], name='res4b8')
ac4b8 = Activation('relu', name="ac4b8")(res4b8)
# 4b9
conv4b9_2a = Conv2D(filters=256, name='conv4b9_2a', **kwargs_a)(ac4b8)
mvn4b9_2a = Lambda(mvn, name='mvn4b9_2a')(conv4b9_2a)
bn4b9_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b9_2a")(mvn4b9_2a)
ac4b9_2a = Activation('relu', name="ac4b9_2a")(bn4b9_2a)
conv4b9_2b = Conv2D(filters=256, name='conv4b9_2b', **kwargs_atrous)(ac4b9_2a)#atrous convolution layer
mvn4b9_2b = Lambda(mvn, name='mvn4b9_2b')(conv4b9_2b)
bn4b9_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b9_2b")(mvn4b9_2b)
ac4b9_2b = Activation('relu', name="ac4b9_2b")(bn4b9_2b)
conv4b9_2c = Conv2D(filters=1024, name='conv4b9_2c', **kwargs_c)(ac4b9_2b)
bn4b9_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b9_2c")(conv4b9_2c)
res4b9 = average([ac4b8, bn4b9_2c], name='res4b9')
ac4b9 = Activation('relu', name="ac4b9")(res4b9)
# 4b10
conv4b10_2a = Conv2D(filters=256, name='conv4b10_2a', **kwargs_a)(ac4b9)
mvn4b10_2a = Lambda(mvn, name='mvn4b10_2a')(conv4b10_2a)
bn4b10_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b10_2a")(mvn4b10_2a)
ac4b10_2a = Activation('relu', name="ac4b10_2a")(bn4b10_2a)
conv4b10_2b = Conv2D(filters=256, name='conv4b10_2b', **kwargs_atrous)(ac4b10_2a)#atrous convolution layer
mvn4b10_2b = Lambda(mvn, name='mvn4b10_2b')(conv4b10_2b)
bn4b10_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b10_2b")(mvn4b10_2b)
ac4b10_2b = Activation('relu', name="ac4b10_2b")(bn4b10_2b)
conv4b10_2c = Conv2D(filters=1024, name='conv4b10_2c', **kwargs_c)(ac4b10_2b)
bn4b10_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b10_2c")(conv4b10_2c)
res4b10 = average([ac4b9, bn4b10_2c], name='res4b10')
ac4b10 = Activation('relu', name="ac4b10")(res4b10)
# 4b11
conv4b11_2a = Conv2D(filters=256, name='conv4b11_2a', **kwargs_a)(ac4b10)
mvn4b11_2a = Lambda(mvn, name='mvn4b11_2a')(conv4b11_2a)
bn4b11_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b11_2a")(mvn4b11_2a)
ac4b11_2a = Activation('relu', name="ac4b11_2a")(bn4b11_2a)
conv4b11_2b = Conv2D(filters=256, name='conv4b11_2b', **kwargs_atrous)(ac4b11_2a)#atrous convolution layer
mvn4b11_2b = Lambda(mvn, name='mvn4b11_2b')(conv4b11_2b)
bn4b11_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b11_2b")(mvn4b11_2b)
ac4b11_2b = Activation('relu', name="ac4b11_2b")(bn4b11_2b)
conv4b11_2c = Conv2D(filters=1024, name='conv4b11_2c', **kwargs_c)(ac4b11_2b)
bn4b11_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b11_2c")(conv4b11_2c)
res4b11 = average([ac4b10, bn4b11_2c], name='res4b11')
ac4b11 = Activation('relu', name="ac4b11")(res4b11)
# 4b12
conv4b12_2a = Conv2D(filters=256, name='conv4b12_2a', **kwargs_a)(ac4b11)
mvn4b12_2a = Lambda(mvn, name='mvn4b12_2a')(conv4b12_2a)
bn4b12_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b12_2a")(mvn4b12_2a)
ac4b12_2a = Activation('relu', name="ac4b12_2a")(bn4b12_2a)
conv4b12_2b = Conv2D(filters=256, name='conv4b12_2b', **kwargs_atrous)(ac4b12_2a)#atrous convolution layer
mvn4b12_2b = Lambda(mvn, name='mvn4b12_2b')(conv4b12_2b)
bn4b12_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b12_2b")(mvn4b12_2b)
ac4b12_2b = Activation('relu', name="ac4b12_2b")(bn4b12_2b)
conv4b12_2c = Conv2D(filters=1024, name='conv4b12_2c', **kwargs_c)(ac4b12_2b)
bn4b12_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b12_2c")(conv4b12_2c)
res4b12 = average([ac4b11, bn4b12_2c], name='res4b12')
ac4b12 = Activation('relu', name="ac4b12")(res4b12)
# 4b13
conv4b13_2a = Conv2D(filters=256, name='conv4b13_2a', **kwargs_a)(ac4b12)
mvn4b13_2a = Lambda(mvn, name='mvn4b13_2a')(conv4b13_2a)
bn4b13_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b13_2a")(mvn4b13_2a)
ac4b13_2a = Activation('relu', name="ac4b13_2a")(bn4b13_2a)
conv4b13_2b = Conv2D(filters=256, name='conv4b13_2b', **kwargs_atrous)(ac4b13_2a)#atrous convolution layer
mvn4b13_2b = Lambda(mvn, name='mvn4b13_2b')(conv4b13_2b)
bn4b13_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b13_2b")(mvn4b13_2b)
ac4b13_2b = Activation('relu', name="ac4b13_2b")(bn4b13_2b)
conv4b13_2c = Conv2D(filters=1024, name='conv4b13_2c', **kwargs_c)(ac4b13_2b)
bn4b13_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b13_2c")(conv4b13_2c)
res4b13 = average([ac4b12, bn4b13_2c], name='res4b13')
ac4b13 = Activation('relu', name="ac4b13")(res4b13)
# 4b14
conv4b14_2a = Conv2D(filters=256, name='conv4b14_2a', **kwargs_a)(ac4b13)
mvn4b14_2a = Lambda(mvn, name='mvn4b14_2a')(conv4b14_2a)
bn4b14_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b14_2a")(mvn4b14_2a)
ac4b14_2a = Activation('relu', name="ac4b14_2a")(bn4b14_2a)
conv4b14_2b = Conv2D(filters=256, name='conv4b14_2b', **kwargs_atrous)(ac4b14_2a)#atrous convolution layer
mvn4b14_2b = Lambda(mvn, name='mvn4b14_2b')(conv4b14_2b)
bn4b14_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b14_2b")(mvn4b14_2b)
ac4b14_2b = Activation('relu', name="ac4b14_2b")(bn4b14_2b)
conv4b14_2c = Conv2D(filters=1024, name='conv4b14_2c', **kwargs_c)(ac4b14_2b)
bn4b14_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b14_2c")(conv4b14_2c)
res4b14 = average([ac4b13, bn4b14_2c], name='res4b14')
ac4b14 = Activation('relu', name="ac4b14")(res4b14)
# 4b15
conv4b15_2a = Conv2D(filters=256, name='conv4b15_2a', **kwargs_a)(ac4b14)
mvn4b15_2a = Lambda(mvn, name='mvn4b15_2a')(conv4b15_2a)
bn4b15_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b15_2a")(mvn4b15_2a)
ac4b15_2a = Activation('relu', name="ac4b15_2a")(bn4b15_2a)
conv4b15_2b = Conv2D(filters=256, name='conv4b15_2b', **kwargs_atrous)(ac4b15_2a)#atrous convolution layer
mvn4b15_2b = Lambda(mvn, name='mvn4b15_2b')(conv4b15_2b)
bn4b15_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b15_2b")(mvn4b15_2b)
ac4b15_2b = Activation('relu', name="ac4b15_2b")(bn4b15_2b)
conv4b15_2c = Conv2D(filters=1024, name='conv4b15_2c', **kwargs_c)(ac4b15_2b)
bn4b15_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b15_2c")(conv4b15_2c)
res4b15 = average([ac4b14, bn4b15_2c], name='res4b15')
ac4b15 = Activation('relu', name="ac4b15")(res4b15)
# 4b16
conv4b16_2a = Conv2D(filters=256, name='conv4b16_2a', **kwargs_a)(ac4b15)
mvn4b16_2a = Lambda(mvn, name='mvn4b16_2a')(conv4b16_2a)
bn4b16_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b16_2a")(mvn4b16_2a)
ac4b16_2a = Activation('relu', name="ac4b16_2a")(bn4b16_2a)
conv4b16_2b = Conv2D(filters=256, name='conv4b16_2b', **kwargs_atrous)(ac4b16_2a)#atrous convolution layer
mvn4b16_2b = Lambda(mvn, name='mvn4b16_2b')(conv4b16_2b)
bn4b16_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b16_2b")(mvn4b16_2b)
ac4b16_2b = Activation('relu', name="ac4b16_2b")(bn4b16_2b)
conv4b16_2c = Conv2D(filters=1024, name='conv4b16_2c', **kwargs_c)(ac4b16_2b)
bn4b16_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b16_2c")(conv4b16_2c)
res4b16 = average([ac4b15, bn4b16_2c], name='res4b16')
ac4b16 = Activation('relu', name="ac4b16")(res4b16)
# 4b17
conv4b17_2a = Conv2D(filters=256, name='conv4b17_2a', **kwargs_a)(ac4b16)
mvn4b17_2a = Lambda(mvn, name='mvn4b17_2a')(conv4b17_2a)
bn4b17_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b17_2a")(mvn4b17_2a)
ac4b17_2a = Activation('relu', name="ac4b17_2a")(bn4b17_2a)
conv4b17_2b = Conv2D(filters=256, name='conv4b17_2b', **kwargs_atrous)(ac4b17_2a)#atrous convolution layer
mvn4b17_2b = Lambda(mvn, name='mvn4b17_2b')(conv4b17_2b)
bn4b17_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b17_2b")(mvn4b17_2b)
ac4b17_2b = Activation('relu', name="ac4b17_2b")(bn4b17_2b)
conv4b17_2c = Conv2D(filters=1024, name='conv4b17_2c', **kwargs_c)(ac4b17_2b)
bn4b17_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b17_2c")(conv4b17_2c)
res4b17 = average([ac4b16, bn4b17_2c], name='res4b17')
ac4b17 = Activation('relu', name="ac4b17")(res4b17)
# 4b18
conv4b18_2a = Conv2D(filters=256, name='conv4b18_2a', **kwargs_a)(ac4b17)
mvn4b18_2a = Lambda(mvn, name='mvn4b18_2a')(conv4b18_2a)
bn4b18_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b18_2a")(mvn4b18_2a)
ac4b18_2a = Activation('relu', name="ac4b18_2a")(bn4b18_2a)
conv4b18_2b = Conv2D(filters=256, name='conv4b18_2b', **kwargs_atrous)(ac4b18_2a)#atrous convolution layer
mvn4b18_2b = Lambda(mvn, name='mvn4b18_2b')(conv4b18_2b)
bn4b18_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b18_2b")(mvn4b18_2b)
ac4b18_2b = Activation('relu', name="ac4b18_2b")(bn4b18_2b)
conv4b18_2c = Conv2D(filters=1024, name='conv4b18_2c', **kwargs_c)(ac4b18_2b)
bn4b18_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b18_2c")(conv4b18_2c)
res4b18 = average([ac4b17, bn4b18_2c], name='res4b18')
ac4b18 = Activation('relu', name="ac4b18")(res4b18)
# 4b19
conv4b19_2a = Conv2D(filters=256, name='conv4b19_2a', **kwargs_a)(ac4b18)
mvn4b19_2a = Lambda(mvn, name='mvn4b19_2a')(conv4b19_2a)
bn4b19_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b19_2a")(mvn4b19_2a)
ac4b19_2a = Activation('relu', name="ac4b19_2a")(bn4b19_2a)
conv4b19_2b = Conv2D(filters=256, name='conv4b19_2b', **kwargs_atrous)(ac4b19_2a)#atrous convolution layer
mvn4b19_2b = Lambda(mvn, name='mvn4b19_2b')(conv4b19_2b)
bn4b19_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b19_2b")(mvn4b19_2b)
ac4b19_2b = Activation('relu', name="ac4b19_2b")(bn4b19_2b)
conv4b19_2c = Conv2D(filters=1024, name='conv4b19_2c', **kwargs_c)(ac4b19_2b)
bn4b19_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b19_2c")(conv4b19_2c)
res4b19 = average([ac4b18, bn4b19_2c], name='res4b19')
ac4b19 = Activation('relu', name="ac4b19")(res4b19)
# 4b20
conv4b20_2a = Conv2D(filters=256, name='conv4b20_2a', **kwargs_a)(ac4b19)
mvn4b20_2a = Lambda(mvn, name='mvn4b20_2a')(conv4b20_2a)
bn4b20_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b20_2a")(mvn4b20_2a)
ac4b20_2a = Activation('relu', name="ac4b20_2a")(bn4b20_2a)
conv4b20_2b = Conv2D(filters=256, name='conv4b20_2b', **kwargs_atrous)(ac4b20_2a)#atrous convolution layer
mvn4b20_2b = Lambda(mvn, name='mvn4b20_2b')(conv4b20_2b)
bn4b20_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b20_2b")(mvn4b20_2b)
ac4b20_2b = Activation('relu', name="ac4b20_2b")(bn4b20_2b)
conv4b20_2c = Conv2D(filters=1024, name='conv4b20_2c', **kwargs_c)(ac4b20_2b)
bn4b20_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b20_2c")(conv4b20_2c)
res4b20 = average([ac4b19, bn4b20_2c], name='res4b20')
ac4b20 = Activation('relu', name="ac4b20")(res4b20)
# 4b21
conv4b21_2a = Conv2D(filters=256, name='conv4b21_2a', **kwargs_a)(ac4b20)
mvn4b21_2a = Lambda(mvn, name='mvn4b21_2a')(conv4b21_2a)
bn4b21_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b21_2a")(mvn4b21_2a)
ac4b21_2a = Activation('relu', name="ac4b21_2a")(bn4b21_2a)
conv4b21_2b = Conv2D(filters=256, name='conv4b21_2b', **kwargs_atrous)(ac4b21_2a)#atrous convolution layer
mvn4b21_2b = Lambda(mvn, name='mvn4b21_2b')(conv4b21_2b)
bn4b21_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b21_2b")(mvn4b21_2b)
ac4b21_2b = Activation('relu', name="ac4b21_2b")(bn4b21_2b)
conv4b21_2c = Conv2D(filters=1024, name='conv4b21_2c', **kwargs_c)(ac4b21_2b)
bn4b21_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b21_2c")(conv4b21_2c)
res4b21 = average([ac4b20, bn4b21_2c], name='res4b21')
ac4b21 = Activation('relu', name="ac4b21")(res4b21)
# 4b22
conv4b22_2a = Conv2D(filters=256, name='conv4b22_2a', **kwargs_a)(ac4b21)
mvn4b22_2a = Lambda(mvn, name='mvn4b22_2a')(conv4b22_2a)
bn4b22_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b22_2a")(mvn4b22_2a)
ac4b22_2a = Activation('relu', name="ac4b22_2a")(bn4b22_2a)
conv4b22_2b = Conv2D(filters=256, name='conv4b22_2b', **kwargs_atrous)(ac4b22_2a)#atrous convolution layer
mvn4b22_2b = Lambda(mvn, name='mvn4b22_2b')(conv4b22_2b)
bn4b22_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b22_2b")(mvn4b22_2b)
ac4b22_2b = Activation('relu', name="ac4b22_2b")(bn4b22_2b)
conv4b22_2c = Conv2D(filters=1024, name='conv4b22_2c', **kwargs_c)(ac4b22_2b)
bn4b22_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn4b22_2c")(conv4b22_2c)
res4b22 = average([ac4b21, bn4b22_2c], name='res4b22')
ac4b22 = Activation('relu', name="ac4b22")(res4b22)
# 5a
conv5a_1 = Conv2D(filters=2048, name='conv5a_1', **kwargs_a)(ac4b22)#not downsampling, using atrous conv instead
mvn5a_1 = Lambda(mvn, name='mvn5a_1')(conv5a_1)
bn5a_1 = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_1")(mvn5a_1)
conv5a_2a = Conv2D(filters=512, name='conv5a_2a', **kwargs_a)(ac4b22)
mvn5a_2a = Lambda(mvn, name='mvn5a_2a')(conv5a_2a)
bn5a_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2a")(mvn5a_2a)
ac5a_2a = Activation('relu', name="ac5a_2a")(bn5a_2a)
conv5a_2b = Conv2D(filters=512, name='conv5a_2b', **kwargs_atrous4)(ac5a_2a)#atrous conv
mvn5a_2b = Lambda(mvn, name='mvn5a_2b')(conv5a_2b)
bn5a_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2b")(mvn5a_2b)
ac5a_2b = Activation('relu', name="ac5a_2b")(bn5a_2b)
conv5a_2c = Conv2D(filters=2048, name='conv5a_2c', **kwargs_c)(ac5a_2b)
mvn5a_2c = Lambda(mvn, name='mvn5a_2c')(conv5a_2c)
bn5a_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay),
trainable=True, name="bn5a_2c")(mvn5a_2c)
res5a = average([bn5a_1, bn5a_2c], name='res5a')
ac5a = Activation('relu', name="ac5a")(res5a)
# 5b
conv5b_2a = Conv2D(filters=512, name='conv5b_2a', **kwargs_a)(ac5a)
mvn5b_2a = Lambda(mvn, name='mvn5b_2a')(conv5b_2a)
bn5b_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2a")(mvn5b_2a)
ac5b_2a = Activation('relu', name="ac5b_2a")(bn5b_2a)
conv5b_2b = Conv2D(filters=512, name='conv5b_2b', **kwargs_atrous4)(ac5b_2a)#atrous conv
mvn5b_2b = Lambda(mvn, name='mvn5b_2b')(conv5b_2b)
bn5b_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2b")(mvn5b_2b)
ac5b_2b = Activation('relu', name="ac5b_2b")(bn5b_2b)
conv5b_2c = Conv2D(filters=2048, name='conv5b_2c', **kwargs_c)(ac5b_2b)
bn5b_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5b_2c")(conv5b_2c)
res5b = average([ac5a, bn5b_2c], name='res5b')
ac5b = Activation('relu', name="ac5b")(res5b)
# 5c
conv5c_2a = Conv2D(filters=512, name='conv5c_2a', **kwargs_a)(ac5b)
mvn5c_2a = Lambda(mvn, name='mvn5c_2a')(conv5c_2a)
bn5c_2a = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2a")(mvn5c_2a)
ac5c_2a = Activation('relu', name="ac5c_2a")(bn5c_2a)
conv5c_2b = Conv2D(filters=512, name='conv5c_2b', **kwargs_atrous4)(ac5c_2a)#atrous conv
mvn5c_2b = Lambda(mvn, name='mvn5c_2b')(conv5c_2b)
bn5c_2b = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2b")(mvn5c_2b)
ac5c_2b = Activation('relu', name="ac5c_2b")(bn5c_2b)
conv5c_2c = Conv2D(filters=2048, name='conv5c_2c', **kwargs_c)(ac5c_2b)
bn5c_2c = BatchNormalization(axis=1, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay),
trainable=True, name="bn5c_2c")(conv5c_2c)
res5c = average([ac5b, bn5c_2c], name='res5c')
ac5c = Activation('relu', name="ac5c")(res5c)
drop5c = Dropout(rate=0.5, name='drop5c')(ac5c)
fc1_c0 = Conv2D(filters=num_classes, name='fc1_c0', **kwargs_atrous)(drop5c) # atrous conv
fc1_c1 = Conv2D(filters=num_classes, name='fc1_c1', **kwargs_atrous4)(drop5c) # atrous conv
fc1 = average([fc1_c0, fc1_c1], name='fc1')
us1 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us1')(fc1)
fc2_c0 = Conv2D(filters=num_classes, name='fc2_c0', **kwargs_atrous)(drop2c) # atrous conv
fc2_c1 = Conv2D(filters=num_classes, name='fc2_c1', **kwargs_atrous4)(drop2c) # atrous conv
fc2 = average([fc2_c0, fc2_c1], name='fc2')
crop1 = Lambda(crop, name='crop1')([fc2, us1])
fuse1 = average([crop1, fc2], name='fuse1')
us2 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us2')(fuse1)
fc3_c0 = Conv2D(filters=num_classes, name='fc3_c0', **kwargs_atrous)(ac1) # atrous conv
fc3_c1 = Conv2D(filters=num_classes, name='fc3_c1', **kwargs_atrous4)(ac1) # atrous conv
fc3 = average([fc3_c0, fc3_c1], name='fc3')
crop2 = Lambda(crop, name='crop2')([fc3, us2])
fuse2 = average([crop2, fc3], name='fuse2')
us3 = Conv2DTranspose(filters=num_classes, kernel_size=3,
strides=2, activation=None, padding='same',
kernel_initializer='glorot_uniform', use_bias=False,
name='us3')(fuse2)
crop3 = Lambda(crop, name='crop3')([data, us3])
predictions = Conv2D(filters=num_classes, kernel_size=1,
strides=1, activation=activation, padding='valid',
kernel_initializer='glorot_uniform', use_bias=True,
name='predictions')(crop3)
model = Model(inputs=data, outputs=predictions)
if transfer == True:
if weights is not None:
model.load_weights(weights)
for layer in model.layers[:10]:
layer.trainable = False
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=loss,
metrics=['accuracy', dice_coef_endo])
else:
sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=loss,
metrics=['accuracy', dice_coef_endo, dice_coef_myo, dice_coef_rv])
return model
if __name__ == '__main__':
    # Smoke-test: build the network for 100x100 single-channel input with 4
    # output classes, render an architecture diagram, and print the layer table.
    net = fcn_model_resnet((100, 100, 1), 4, weights=None)
    plot_model(net, show_shapes=True, to_file='fcn_model_resnet.png')
    net.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,927 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /LearningRate_Batch.py | from keras.callbacks import *
class LearningRateBatchScheduler(Callback):
    """Learning rate scheduler applied at batch granularity.

    # Arguments
        schedule: a function that takes the current epoch index
            (integer, indexed from 0) and the current batch index
            within that epoch, and returns a new learning rate (float).
    """
    current_epoch = 0
    # NOTE(review): never updated anywhere in this class; kept only so that
    # external code reading this attribute keeps working -- confirm it is unused.
    current_iter = 0

    def __init__(self, schedule):
        super(LearningRateBatchScheduler, self).__init__()
        self.schedule = schedule

    def on_batch_end(self, curr_iter, logs=None):
        # Recompute the learning rate after every batch so the schedule can
        # vary within an epoch, not only between epochs.
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(self.current_epoch, curr_iter)
        # np.floating accepts any NumPy float width (16/32/64), a strict
        # superset of the original (float, np.float32, np.float64) check.
        if not isinstance(lr, (float, np.floating)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr)

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        self.current_epoch = epoch
60,928 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /pre_train_sunnybrook.py | import os
import shutil
def copyFiles2(srcPath, dstPath):
    """Recursively copy every file under srcPath into the flat dstPath directory.

    Sub-directories are traversed, but their structure is NOT preserved:
    all files end up directly inside dstPath. Files whose name contains
    "DS_Store" (macOS Finder metadata) are skipped.
    """
    if not os.path.exists(srcPath):
        print("src path not exist!")
        return  # nothing to walk; make the no-op explicit instead of falling through
    if not os.path.exists(dstPath):
        os.makedirs(dstPath)
    # os.walk yields one (dir, subdirs, files) triple per directory in the tree.
    for root, dirs, files in os.walk(srcPath):
        for eachfile in files:
            # Membership test instead of find() > 0: the original missed names
            # where "DS_Store" starts at index 0 (find returns 0, not > 0).
            if "DS_Store" in eachfile:
                continue
            shutil.copy(os.path.join(root, eachfile), dstPath)
            print(eachfile + " copy succeeded")
# NOTE(review): source and destination are the SAME directory -- this flattens
# files from nested sub-folders of TrainingDataContours into its root, in place.
# Confirm this self-copy flattening is intentional and not a pasted-path mistake.
copyFiles2("D:\\cardiac_data\\Sunnybrook\\Sunnybrook Cardiac MR Database ContoursPart3\\TrainingDataContours",
           "D:\\cardiac_data\\Sunnybrook\\Sunnybrook Cardiac MR Database ContoursPart3\\TrainingDataContours")
60,929 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /metrics_common.py |
from keras import backend as K
import tensorflow as tf
import numpy as np
def dice_coef(y_true, y_pred, smooth=0.0):
    """Batch-mean dice averaged over the endocardium, myocardium and RV classes."""
    endo = dice_coef_endo(y_true, y_pred, smooth)
    myo = dice_coef_myo(y_true, y_pred, smooth)
    rv = dice_coef_rv(y_true, y_pred, smooth)
    return (endo + myo + rv) / 3.0
def dice_coef_loss(y_true, y_pred):
    """Loss complement of the three-class average dice, smoothed by 10.0."""
    score = dice_coef(y_true, y_pred, smooth=10.0)
    return 1.0 - score
def dice_coef_endo(y_true, y_pred, smooth=0.0):
    """Batch-mean dice for the endocardium class (channel 3).

    Uses the squared-denominator form; `smooth` stabilises empty masks.
    """
    spatial_axes = (1, 2)
    gt = y_true[:, :, :, 3]
    pr = y_pred[:, :, :, 3]
    overlap = K.sum(gt * pr, axis=spatial_axes)
    denom = K.sum(gt * gt, axis=spatial_axes) + K.sum(pr * pr, axis=spatial_axes)
    return K.mean((2.0 * overlap + smooth) / (denom + smooth), axis=0)
def dice_coef_endo_each(y_true, y_pred, smooth=0.0):
    """Per-sample dice for the endocardium class (channel 3).

    Predictions are binarised at 0.5 first; returns one dice value per
    batch element (no averaging over the batch).
    """
    spatial_axes = (1, 2)
    gt = y_true[:, :, :, 3].astype('float32')
    pr = np.where(y_pred[:, :, :, 3] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pr, axis=spatial_axes)
    denom = np.sum(gt * gt, axis=spatial_axes) + np.sum(pr * pr, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (denom + smooth)
def dice_coef_loss_endo(y_true, y_pred):
    """Loss complement of the (unsmoothed) endocardium dice."""
    endo_score = dice_coef_endo(y_true, y_pred, smooth=0.0)
    return 1.0 - endo_score
def dice_coef_myo(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for the myocardium class (channel 2) per batch.

    Squared-denominator dice, averaged over the batch; `smooth` is added
    to both numerator and denominator for stability on empty masks.
    '''
    axes = (1, 2)
    y_true_myo = y_true[:, :, :, 2]
    y_pred_myo = y_pred[:, :, :, 2]
    # Dropped the original's dead `summation_true` local: it was computed
    # from y_true_myo but never used anywhere in the function.
    intersection = K.sum(y_true_myo * y_pred_myo, axis=axes)
    summation = K.sum(y_true_myo * y_true_myo, axis=axes) + K.sum(y_pred_myo * y_pred_myo, axis=axes)
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def dice_coef_myo_each(y_true, y_pred, smooth=0.0):
    '''Per-sample dice coefficient for the myocardium class (channel 2).

    (The original docstring said "endocardium", but the function slices
    channel 2, the myocardium.) Predictions are binarised at 0.5 before
    scoring; returns one dice value per batch element, no batch averaging.
    '''
    axes = (1, 2)
    y_true_myo = y_true[:, :, :, 2].astype('float32')
    y_pred_myo = np.where(y_pred[:, :, :, 2] > 0.5, 1.0, 0.0).astype('float32')
    intersection = np.sum(y_true_myo * y_pred_myo, axis=axes)
    summation = np.sum(y_true_myo * y_true_myo, axis=axes) + np.sum(y_pred_myo * y_pred_myo, axis=axes)
    return (2.0 * intersection + smooth) / (summation + smooth)
def dice_coef_epi(y_true, y_pred, smooth=0.0):
    '''Average dice for the epicardium, taken as the union of the
    myocardium (channel 2) and endocardium (channel 3) masks, per batch.'''
    axes = (1, 2)
    # Channel layout: 2 = myocardium, 3 = endocardium (as in the sibling metrics).
    y_true_myo = y_true[:, :, :, 2]
    y_pred_myo = y_pred[:, :, :, 2]
    y_true_endo = y_true[:, :, :, 3]
    y_pred_endo = y_pred[:, :, :, 3]
    # Epicardial mask = logical OR of the two masks (any nonzero value is True).
    y_true_epi = tf.cast(tf.logical_or(tf.cast(y_true_myo, tf.bool), tf.cast(y_true_endo, tf.bool)), tf.float32)
    y_pred_epi = tf.cast(tf.logical_or(tf.cast(y_pred_myo, tf.bool), tf.cast(y_pred_endo, tf.bool)), tf.float32)
    # NOTE(review): TensorBoard image summaries embedded inside a metric, and
    # merge_all()'s return value below is discarded -- these ops may never be
    # fetched at run time. Verify they are still wanted, or move them out.
    tf.summary.image("y_true_myo", y_true_myo[...,None], max_outputs=1)
    tf.summary.image("y_true_endo", y_true_endo[...,None], max_outputs=1)
    tf.summary.image("y_pred_myo", y_pred_myo[...,None], max_outputs=1)
    tf.summary.image("y_pred_endo", y_pred_endo[..., None], max_outputs=1)
    tf.summary.image("y_pred_epi", y_pred_epi[...,None], max_outputs=1)
    tf.summary.image("y_true_epi", y_true_epi[...,None], max_outputs=1)
    intersection = K.sum(y_true_epi * y_pred_epi, axis=axes)
    summation = K.sum(y_true_epi * y_true_epi, axis=axes) + K.sum(y_pred_epi * y_pred_epi, axis=axes)
    tf.summary.merge_all()
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def summation_myo(y_true, y_pred, smooth=0.0):
    """Per-sample pixel sum of the ground-truth myocardium mask (channel 2).

    `y_pred` and `smooth` are unused; they are kept so the signature
    matches the other metric functions.
    """
    gt_myo = y_true[:, :, :, 2]
    return K.sum(gt_myo, axis=(1, 2))
def dice_coef_loss_myo(y_true, y_pred):
    """Loss = 1 - min(myocardium dice, endocardium dice), each smoothed by 1.0."""
    myo_score = dice_coef_myo(y_true, y_pred, smooth=1.0)
    endo_score = dice_coef_endo(y_true, y_pred, smooth=1.0)
    return 1.0 - K.minimum(myo_score, endo_score)
def dice_coef_rv(y_true, y_pred, smooth=0.0):
    """Batch-mean dice for the right ventricle class (channel 1).

    Unlike the endo/myo metrics, the denominator uses plain (non-squared)
    sums; the two forms coincide on binary masks.
    """
    spatial_axes = (1, 2)
    gt = y_true[:, :, :, 1]
    pr = y_pred[:, :, :, 1]
    overlap = K.sum(gt * pr, axis=spatial_axes)
    denom = K.sum(gt, axis=spatial_axes) + K.sum(pr, axis=spatial_axes)
    return K.mean((2.0 * overlap + smooth) / (denom + smooth), axis=0)
def dice_coef_loss_rv(y_true, y_pred):
    """Loss complement of the right-ventricle dice, smoothed by 10.0."""
    rv_score = dice_coef_rv(y_true, y_pred, smooth=10.0)
    return 1.0 - rv_score
def jaccard_coef(y_true, y_pred, smooth=0.0):
    """Batch-mean Jaccard (IoU) over all channels and spatial positions."""
    reduce_axes = (1, 2, 3)
    overlap = K.sum(y_true * y_pred, axis=reduce_axes)
    union = K.sum(y_true, axis=reduce_axes) + K.sum(y_pred, axis=reduce_axes) - overlap
    return K.mean((overlap + smooth) / (union + smooth), axis=0)
def dice_coef_each(y_true, y_pred, smooth=0.0):
    """Per-sample dice over the whole mask (no channel slicing).

    Predictions are binarised at 0.5; returns one dice value per batch
    element using the squared-denominator form.
    """
    spatial_axes = (1, 2)
    gt = y_true.astype('float32')
    pr = np.where(y_pred > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pr, axis=spatial_axes)
    denom = np.sum(gt * gt, axis=spatial_axes) + np.sum(pr * pr, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (denom + smooth)
60,930 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /prepare_sunnybrook_data.py | import os
import sys
import numpy as np
from scipy.misc import imsave
import scipy.ndimage
import dicom as pydicom
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart3',
'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart3',
'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart3',
'TrainingOverlayImage')
TEST_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart2',
'ValidationDataContours')
TEST_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart2',
'ValidationDataDICOM')
TEST_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart2',
'ValidationDataOverlay')
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart1',
'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart1',
'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart1',
'OnlineDataOverlay')
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook_online_submission')
# Map each split's (labels, DICOM, image output, mask output) directories
# together so one conversion loop handles training/testing/online alike.
training_dicom_dir = TRAIN_IMG_PATH
training_labels_dir = TRAIN_CONTOUR_PATH
training_png_dir = "./Data/Training/Images/Sunnybrook_Part3"
training_png_labels_dir = "./Data/Training/Labels/Sunnybrook_Part3"
testing_dicom_dir = TEST_IMG_PATH
testing_labels_dir = TEST_CONTOUR_PATH
testing_png_dir = "./Data/Testing/Images/Sunnybrook_Part2"
testing_png_labels_dir = "./Data/Testing/Labels/Sunnybrook_Part2"
online_dicom_dir = ONLINE_IMG_PATH
online_labels_dir = ONLINE_CONTOUR_PATH
online_png_dir = "./Data/Online/Images/Sunnybrook_Part1"
online_png_labels_dir = "./Data/Online/Labels/Sunnybrook_Part1"
# Create every output directory up front.
for _out_dir in (training_png_dir, training_png_labels_dir,
                 testing_png_dir, testing_png_labels_dir,
                 online_png_dir, online_png_labels_dir):
    if not os.path.exists(_out_dir):
        os.makedirs(_out_dir)
# Convert every annotated slice: DICOM -> 3-channel PNG image, endo contour
# text file -> filled binary PNG mask.
for labels_dir, dicom_dir, png_dir, png_labels_dir in [[training_labels_dir, training_dicom_dir, training_png_dir, training_png_labels_dir],
                                                       [testing_labels_dir, testing_dicom_dir, testing_png_dir, testing_png_labels_dir],
                                                       [online_labels_dir, online_dicom_dir, online_png_dir, online_png_labels_dir]]:
    for root, dirs, files in os.walk(labels_dir):
        for file in files:
            if file.endswith("-icontour-manual.txt"):
                try:
                    # contour files live at <case>/contours-manual/IRCCI-expert/,
                    # so the case (patient) name is two directory levels up
                    prefix, _ = os.path.split(root)
                    prefix, _ = os.path.split(prefix)
                    _, patient = os.path.split(prefix)
                    # FIX: str.strip() removes a *character set* from both ends,
                    # not a literal suffix — slice the suffix off instead.
                    file_fn = file[:-len("-icontour-manual.txt")] + ".dcm"
                    print(file_fn)
                    print(patient)
                    dcm = pydicom.read_file(os.path.join(dicom_dir, patient, "DICOM", file_fn))
                    print(dcm.pixel_array.shape)
                    # replicate the grayscale slice into 3 channels for PNG export
                    img = np.concatenate((dcm.pixel_array[..., None], dcm.pixel_array[..., None], dcm.pixel_array[..., None]), axis=2)
                    labels = np.zeros_like(dcm.pixel_array)
                    print(img.shape)
                    print(labels.shape)
                    # rasterize the contour outline, then fill it to a solid mask
                    with open(os.path.join(root, file)) as labels_f:
                        for line in labels_f:
                            x, y = line.split(" ")
                            labels[int(float(y)), int(float(x))] = 128
                    labels = scipy.ndimage.binary_fill_holes(labels)
                    img_labels = np.concatenate((labels[..., None], labels[..., None], labels[..., None]), axis=2)
                    imsave(os.path.join(png_dir, patient + "-" + file_fn + ".png"), img)
                    imsave(os.path.join(png_labels_dir, patient + "-" + file_fn + ".png"), img_labels)
                except Exception as e:
                    # best-effort conversion: log the failure and keep going
                    print(e)
60,931 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /tfmodel/evaluation.py | import tensorflow as tf
def loss_calc(logits, labels):
    """Class-weighted softmax cross-entropy for 2-class (bg/fg) segmentation.

    logits: raw per-pixel network output with 2 class channels.
    labels: integer ground truth; the trailing channel axis is dropped here
            (assumes shape (..., 1) — TODO confirm against callers).
    Returns the scalar mean weighted loss; also logs it to a TF summary.
    """
    class_inc_bg = 2  # number of classes, background included
    labels = labels[...,0]
    # per-class weights: background 10/90 (~0.11), foreground 10 —
    # counters the heavy background/foreground pixel imbalance
    class_weights = tf.constant([[10.0/90, 10.0]])
    onehot_labels = tf.one_hot(labels, class_inc_bg)
    # each pixel gets the weight of its true class
    weights = tf.reduce_sum(class_weights * onehot_labels, axis=-1)
    unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=logits)
    weighted_losses = unweighted_losses * weights
    loss = tf.reduce_mean(weighted_losses)
    tf.summary.scalar('loss', loss)
    return loss
def evaluation(logits, labels):
    """Mean per-pixel classification accuracy, logged to a TF summary."""
    gt = labels[..., 0]
    # predicted class = argmax over the channel axis
    hits = tf.equal(tf.argmax(logits, 3), gt)
    accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    return accuracy
def eval_dice(logits, labels, crop_size, smooth):
    """Batch-mean soft dice between the foreground channel of `logits`
    and the (center-cropped/padded) ground-truth labels."""
    gt = tf.image.resize_image_with_crop_or_pad(labels, crop_size, crop_size)
    y_true = tf.cast(gt[..., 0], tf.float32)
    y_pred = tf.cast(logits[..., 1], tf.float32)
    spatial_axes = (1, 2)
    overlap = tf.reduce_sum(y_true * y_pred, axis=spatial_axes)
    denom = tf.reduce_sum(y_true * y_true, axis=spatial_axes) + tf.reduce_sum(y_pred * y_pred, axis=spatial_axes)
    return tf.reduce_mean((2.0 * overlap + smooth) / (denom + smooth), axis=0)
def eval_dice_array(logits, labels, crop_size, smooth):
    """Per-sample soft dice (no batch averaging) between the foreground
    channel of `logits` and the cropped/padded labels."""
    gt = tf.image.resize_image_with_crop_or_pad(labels, crop_size, crop_size)
    y_true = tf.cast(gt[..., 0], tf.float32)
    y_pred = tf.cast(logits[..., 1], tf.float32)
    spatial_axes = (1, 2)
    overlap = tf.reduce_sum(y_true * y_pred, axis=spatial_axes)
    denom = tf.reduce_sum(y_true * y_true, axis=spatial_axes) + tf.reduce_sum(y_pred * y_pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (denom + smooth)
def loss_dice(logits, labels, crop_size):
    """Dice loss: 1 - batch-mean soft dice (smooth term fixed at 1.0)."""
    dice = eval_dice(logits=logits, labels=labels, crop_size=crop_size, smooth=1.0)
    return 1.0 - dice
60,932 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /train_sunnybrook_unetres.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
import numpy as np
import tensorflow as tf
from keras.callbacks import *
from keras import backend as K
from itertools import zip_longest
from helpers import center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator
from unet_res_model_Inv import unet_res_model_Inv
seed = 1234
np.random.seed(seed)
SAX_SERIES = get_SAX_SERIES()
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart3',
'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart3',
'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart3',
'TrainingOverlayImage')
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart3',
'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database DICOMPart3',
'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database OverlayPart3',
'Debug')
TRAIN_AUG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database Augmentation')
class Contour(object):
    """Bundle of manual-contour file paths for one cine-MR slice.

    The case name and image number are parsed from the endocardium contour
    path, which is guaranteed to exist for every annotated slice.
    """

    def __init__(self, ctr_endo_path, ctr_epi_path, ctr_p1_path, ctr_p2_path, ctr_p3_path):
        self.ctr_endo_path = ctr_endo_path
        self.ctr_epi_path = ctr_epi_path
        self.ctr_p1_path = ctr_p1_path
        self.ctr_p2_path = ctr_p2_path
        self.ctr_p3_path = ctr_p3_path
        # parse "...\<case>\contours-manual\IRCCI-expert\IM-0001-<nnnn>-..."
        parsed = re.search(r'\\([^\\]*)\\contours-manual\\IRCCI-expert\\IM-0001-(\d{4})-.*', ctr_endo_path) #it always has endo
        self.case = parsed.group(1)
        self.img_no = int(parsed.group(2))

    def __str__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)

    __repr__ = __str__
def read_contour(contour, data_path, num_classes):
    """Load the DICOM image for `contour` and rasterize its endo contour.

    Returns (img, mask), each with a trailing singleton channel axis; the
    mask holds 1 inside the filled endocardium polygon and 0 elsewhere.
    `num_classes` is kept for interface compatibility but is not used here.
    """
    #filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
    filename = 'IM-0001-%04d.dcm' % (contour.img_no)
    full_path = os.path.join(data_path, contour.case, 'DICOM', filename) #modified by C.Cong
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype('int')
    mask = np.zeros_like(img, dtype="uint8")
    coords = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    cv2.fillPoly(mask, [coords], 1)
    # fix: removed a dead np.zeros((h, w, num_classes)) allocation that was
    # immediately overwritten by the line below
    classify = mask
    if img.ndim < 3:
        img = img[..., np.newaxis]
    if classify.ndim < 3:
        classify = classify[..., np.newaxis]
    return img, classify
def read_all_contour(case, data_path, num_classes):
    """Load every DICOM slice of one case plus empty placeholder masks.

    Returns (images, masks, file_names). Note: `images` is converted to an
    ndarray but `masks` remains a plain list of zero arrays, and
    `num_classes` is currently unused.
    """
    images = []
    masks = []
    file_names = []
    contour_path = os.path.join(data_path, case)
    for dirpath, dirnames, files in os.walk(contour_path):
        for file_name in fnmatch.filter(files, '*.dcm'):
            # NOTE(review): the path is built from contour_path/'DICOM', not
            # dirpath — assumes all .dcm files sit directly in the case's
            # DICOM folder; confirm against the on-disk layout.
            full_path = os.path.join(contour_path, 'DICOM', file_name)
            f = dicom.read_file(full_path)
            img = f.pixel_array.astype('int')
            mask = np.zeros_like(img, dtype="uint8")
            if img.ndim < 3:
                img = img[..., np.newaxis]
            if mask.ndim < 3:
                mask = mask[..., np.newaxis]
            images.append(img)
            masks.append(mask)
            file_names.append(file_name)
    images = np.array(images, dtype=int)
    return images, masks, file_names
def draw_contour(contour, data_path, out_path, contour_type='i'):
    """Overlay every available contour of a slice on its DICOM image and
    save the figure as a PNG under out_path/<case>/.

    Endo is drawn in red, epi in blue, the three papillary/extra contours
    (p1-p3) in yellow. Missing contour files are simply skipped.
    """
    # filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
    filename = 'IM-0001-%04d.dcm' % (contour.img_no)
    outname = 'IM-0001-%s-%04d.png' % (contour_type, contour.img_no)
    full_path = os.path.join(data_path, contour.case, 'DICOM', filename) # modified by C.Cong
    out_full_path = os.path.join(out_path, contour.case) # modified by C.Cong
    out_full_name = os.path.join(out_full_path, outname)
    if not os.path.exists(out_full_path):
        os.makedirs(out_full_path)
    f = dicom.read_file(full_path)
    img = f.pixel_array
    img_size = img.shape
    plt.cla()
    pylab.imshow(img, cmap=pylab.cm.bone)
    # endocardium always exists; coords may be a single point (1-D) or a list
    coords = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    if coords.ndim == 1:
        x, y = coords
    else:
        x, y = zip(*coords)
    plt.plot(x, y, 'r.')
    if os.path.exists(contour.ctr_epi_path):
        coords = np.loadtxt(contour.ctr_epi_path, delimiter=' ').astype('int')
        if coords.ndim == 1:
            x, y = coords
        else:
            x, y = zip(*coords)
        plt.plot(x, y, 'b.')
    if os.path.exists(contour.ctr_p1_path):
        coords = np.loadtxt(contour.ctr_p1_path, delimiter=' ').astype('int')
        if coords.ndim == 1:
            x, y = coords
        else:
            x, y = zip(*coords)
        plt.plot(x, y, 'y.')
    if os.path.exists(contour.ctr_p2_path):
        coords = np.loadtxt(contour.ctr_p2_path, delimiter=' ').astype('int')
        if coords.ndim == 1:
            x, y = coords
        else:
            x, y = zip(*coords)
        plt.plot(x, y, 'y.')
    if os.path.exists(contour.ctr_p3_path):
        coords = np.loadtxt(contour.ctr_p3_path, delimiter=' ').astype('int')
        if coords.ndim == 1:
            x, y = coords
        else:
            x, y = zip(*coords)
        plt.plot(x, y, 'y.')
    # zoom in by 50 px on each border to focus on the heart region
    plt.xlim(50, img_size[0]-50)
    plt.ylim(50, img_size[1]-50)
    pylab.savefig(out_full_name,bbox_inches='tight',dpi=200)
    #pylab.show()
    return
def map_all_contours(contour_path, contour_type='i'):
    """Walk `contour_path` and build one Contour per annotated slice.

    contour_type 'i' anchors the search on endocardium ('icontour') files,
    'm' on epicardium ('ocontour') files; any other value yields no matches.
    All five sibling contour paths are derived from the matched image
    number, so the two branches share one code path (fix: the original
    duplicated the whole loop body per contour type).

    Returns a lazy map of Contour objects (consume it once, e.g. list()).
    """
    endo = []
    epi = []
    p1 = []
    p2 = []
    p3 = []
    # suffix of the file kind that must exist for a slice to be included
    anchors = {'i': '-icontour-manual.txt', 'm': '-ocontour-manual.txt'}
    for dirpath, dirnames, files in os.walk(contour_path):
        anchor = anchors.get(contour_type)
        if anchor is None:
            continue
        for anchor_f in fnmatch.filter(files, 'IM-0001-*' + anchor):
            match = re.search(r'IM-0001-(\d{4})-', anchor_f)
            imgno = match.group(1)
            stem = os.path.join(dirpath, 'IM-0001-' + imgno)
            endo.append(stem + '-icontour-manual.txt')
            epi.append(stem + '-ocontour-manual.txt')
            p1.append(stem + '-p1-manual.txt')
            p2.append(stem + '-p2-manual.txt')
            p3.append(stem + '-p3-manual.txt')
    print('Number of examples: {:d}'.format(len(endo)))
    contours = map(Contour, endo, epi, p1, p2, p3)
    return contours
def map_all_cases(contour_path):
    """Return the names of all case directories (SC-*) under contour_path."""
    cases = [entry for entry in os.listdir(contour_path)
             if fnmatch.fnmatch(entry, 'SC-*')
             and os.path.isdir(os.path.join(contour_path, entry))]
    print('Number of examples: {:d}'.format(len(cases)))
    return cases
def export_all_contours(contours, data_path, overlay_path, crop_size=100, num_classes=4 ):
    """Load every contour as a center-cropped (image, mask) pair.

    Returns (images, masks) stacked into float arrays of shape
    (n, crop_size, crop_size, 1) and (n, crop_size, crop_size, channels).
    """
    print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
    # binary problems are stored with a single mask channel
    if num_classes == 2:
        num_classes = 1
    n = len(contours)
    images = np.zeros((n, crop_size, crop_size, 1))
    masks = np.zeros((n, crop_size, crop_size, num_classes))
    for i, ctr in enumerate(contours):
        image, mask = read_contour(ctr, data_path, num_classes)
        #draw_contour(ctr, data_path, overlay_path)
        images[i] = center_crop(image, crop_size=crop_size)
        masks[i] = center_crop(mask, crop_size=crop_size)
    return images, masks
# ###############learning rate scheduler####################
def lr_scheduler(curr_epoch, curr_iter):
    """Polynomial learning-rate decay keyed on the global iteration count.

    Relies on module globals set by the training script:
    steps_per_epoch, model, base_lr, max_iter.
    """
    iters_done = curr_epoch*steps_per_epoch + curr_iter
    new_lr = lr_poly_decay(model, base_lr, iters_done, max_iter, power=0.5)
    print(' - lr: %f' % new_lr)
    return new_lr
if __name__== '__main__':
    # ---- experiment configuration ----
    contour_type = 'i'
    # transfer-learning starting point (pre-trained on ACDC)
    weight_path = 'model_logs/acdc_i_unetres_epoch100.hdf5'
    shuffle = True
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    crop_size = 128
    save_path = 'model_logs'
    print('Mapping ground truth contours to images in train...')
    train_ctrs = list(map_all_contours(TRAIN_CONTOUR_PATH, contour_type))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(train_ctrs)
    print('Done mapping training set')
    num_classes = 2
    # hold out the first 10% of the shuffled contours as a dev split
    split = int(0.1*len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding Train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_IMG_PATH,
                                                TRAIN_OVERLAY_PATH,
                                                crop_size=crop_size,
                                                num_classes=num_classes)
    print('\nBuilding Dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_IMG_PATH,
                                            TRAIN_OVERLAY_PATH,
                                            crop_size=crop_size,
                                            num_classes=num_classes)
    input_shape = (crop_size, crop_size, 1)
    model = unet_res_model_Inv(input_shape, num_classes, nb_filters=8, transfer=True, contour_type=contour_type, weights=weight_path)
    # ---- augmentation settings, shared by image and mask generators ----
    kwargs = dict(
        rotation_range=180,
        zoom_range=0.2,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        data_format="channels_last",
    )
    image_datagen = CardiacImageDataGenerator(**kwargs)
    mask_datagen = CardiacImageDataGenerator(**kwargs)
    aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")
    aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")
    # pre-expand the training set 8x in memory (toDir=None: nothing written to disk)
    img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=8, toDir=None)
    mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=8, toDir=None)
    epochs = 200
    mini_batch_size = 4
    # identical seed + shuffle=False keeps image and mask streams in lockstep
    image_generator = image_datagen.flow(img_train, shuffle=False,
                                    batch_size=mini_batch_size, seed=seed)
    mask_generator = mask_datagen.flow(mask_train, shuffle=False,
                                    batch_size=mini_batch_size, seed=seed)
    train_generator = zip_longest(image_generator, mask_generator)
    dev_generator = (img_dev, mask_dev)
    max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
    steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
    curr_iter = 0
    base_lr = K.eval(model.optimizer.lr)
    # initialize the polynomial learning-rate decay (power 0.5)
    lrate = lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unetres_inv_drop_acdc'), histogram_freq=10, write_graph=False,
                           write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'temp_weights.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False)  # .{epoch:d}
    callbacks.append(checkpoint)
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=dev_generator,
                        validation_steps=img_dev.__len__(),
                        epochs=epochs,
                        callbacks=callbacks,
                        workers=1,
                        class_weight=None
                        )
    # final weights saved alongside the per-epoch checkpoints
    save_file = '_'.join(['sunnybrook', contour_type, 'unetres_inv_drop_acdc']) + '.h5'
    save_file = os.path.join(save_path, save_file)
    model.save_weights(save_file)
# for e in range(epochs):
# print('\nMain Epoch {:d}\n'.format(e + 1))
# print('\nLearning rate: {:6f}\n'.format(lrate))
# train_result = []
# for iteration in range(int(len(img_train) * augment_scale / mini_batch_size)):
# img, mask = next(train_generator)
# res = model.train_on_batch(img, mask)
# curr_iter += 1
# lrate = lr_poly_decay(model, base_lr, curr_iter,
# max_iter, power=0.5)
# train_result.append(res)
# train_result = np.asarray(train_result)
# train_result = np.mean(train_result, axis=0).round(decimals=10)
# print('Train result {:s}:\n{:s}'.format(str(model.metrics_names), str(train_result)))
# print('\nEvaluating dev set ...')
# result = model.evaluate(img_dev, mask_dev, batch_size=32)
#
# result = np.round(result, decimals=10)
# print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
# save_file = '_'.join(['sunnybrook', contour_type,
# 'epoch', str(e + 1)]) + '.h5'
# if not os.path.exists('model_logs'):
# os.makedirs('model_logs')
# save_path = os.path.join(save_path, save_file)
# print('\nSaving model weights to {:s}'.format(save_path))
# model.save_weights(save_path)
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,933 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_model_3d_Inv.py | import numpy as np
from keras import backend as K
from keras.engine import Input, Model
from keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization, PReLU, Lambda, Dropout
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
from functools import partial
from layer_common import mvn3d, mvn
from keras.models import load_model
try:
from keras.engine import merge
except ImportError:
from keras.layers.merge import concatenate
def dice_coef_endo(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for endocardium class per batch.'''
    sum_axes = (1, 2, 3)
    gt = y_true[..., 2]
    pr = y_pred[..., 2]
    overlap = K.sum(gt * pr, axis=sum_axes)
    denom = K.sum(gt, axis=sum_axes) + K.sum(pr, axis=sum_axes)
    return K.mean((2.0 * overlap + smooth) / (denom + smooth), axis=0)
def dice_coef_myo(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for myocardium class per batch.'''
    sum_axes = (1, 2, 3)
    gt = y_true[..., 1]
    pr = y_pred[..., 1]
    overlap = K.sum(gt * pr, axis=sum_axes)
    denom = K.sum(gt, axis=sum_axes) + K.sum(pr, axis=sum_axes)
    return K.mean((2.0 * overlap + smooth) / (denom + smooth), axis=0)
def dice_coef_endo_each(y_true, y_pred, smooth=0.0):
    '''Per-sample dice coefficient for the endocardium channel (numpy).'''
    spatial_axes = (1, 2)
    gt = y_true[..., 2].astype('float32')
    # binarize the soft prediction at 0.5 before computing the overlap
    pred = np.where(y_pred[..., 2] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pred, axis=spatial_axes)
    total = np.sum(gt, axis=spatial_axes) + np.sum(pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (total + smooth)
def dice_coef_myo_each(y_true, y_pred, smooth=0.0):
    '''Per-sample dice coefficient for the myocardium channel (numpy).'''
    spatial_axes = (1, 2)
    gt = y_true[..., 1].astype('float32')
    # binarize the soft prediction at 0.5 before computing the overlap
    pred = np.where(y_pred[..., 1] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pred, axis=spatial_axes)
    total = np.sum(gt, axis=spatial_axes) + np.sum(pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (total + smooth)
def dice_coef(y_true, y_pred, smooth=1.):
    """Global soft dice over all pixels and samples (Keras backend)."""
    t = K.flatten(y_true)
    p = K.flatten(y_pred)
    overlap = K.sum(t * p)
    return (2. * overlap + smooth) / (K.sum(t) + K.sum(p) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Negated dice coefficient, suitable as a Keras loss."""
    score = dice_coef(y_true, y_pred)
    return -score
def dice_coef_cardiac_loss(y_true, y_pred):
    """Combined loss: 2 minus the sum of endo and myo dice scores."""
    endo = dice_coef_endo(y_true, y_pred)
    myo = dice_coef_myo(y_true, y_pred)
    return 2 - (endo + myo)
def label_wise_dice_coefficient(y_true, y_pred, label_index):
    """Dice coefficient restricted to one label channel."""
    gt = y_true[..., label_index]
    pr = y_pred[..., label_index]
    return dice_coef(gt, pr)
def get_label_dice_coefficient_function(label_index):
    """Build a named per-label dice metric for Keras (name shows in logs)."""
    fn = partial(label_wise_dice_coefficient, label_index=label_index)
    fn.__name__ = 'label_{0}_dice_coef'.format(label_index)
    return fn
def unet_model_3d_Inv(input_shape, pool_size=(2, 2, 2), n_labels=1, kernel=(3, 3, 3), initial_learning_rate=0.00001, deconvolution=False,
                      depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False, metrics=dice_coef,
                      batch_normalization=False, weights=None):
    """
    Builds the 3D UNet Keras model.
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. This
    increases the amount memory required during training.
    :param weights: optional path to a weights file loaded into the compiled model.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()
    # add levels with max pooling
    for layer_depth in range(depth):
        # NOTE(review): filter count n_base_filters*(2**(depth-layer_depth)) SHRINKS
        # as the network goes deeper — the inverse of a standard U-Net schedule;
        # presumably the '_Inv' in the model name refers to this. Confirm intent.
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**(depth-layer_depth)),
                                          batch_normalization=batch_normalization, kernel=kernel)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**(depth-layer_depth)),
                                          batch_normalization=batch_normalization, kernel=kernel)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            # bottom of the U: no pooling after the last encoder level
            current_layer = layer2
            levels.append([layer1, layer2])
    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution, depth=layer_depth,
                                            n_filters=current_layer._keras_shape[1],
                                            image_shape=input_shape[-3:])(current_layer)
        # skip connection: concatenate with the matching encoder level's output
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)
        current_layer = create_convolution_block(n_filters=levels[depth-layer_depth-1][1]._keras_shape[4],
                                                 input_layer=concat, batch_normalization=batch_normalization, kernel=kernel)
        current_layer = create_convolution_block(n_filters=levels[depth-layer_depth-1][1]._keras_shape[4],
                                                 input_layer=current_layer, batch_normalization=batch_normalization, kernel=kernel)
    # 1x1x1 convolution maps features to per-voxel class scores
    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('softmax')(final_convolution)
    model = Model(inputs=inputs, outputs=act)
    if not isinstance(metrics, list):
        metrics = [metrics]
    if include_label_wise_dice_coefficients and n_labels > 1:
        if metrics:
            metrics = metrics + [dice_coef_endo, dice_coef_myo]
        else:
            metrics = [dice_coef_endo, dice_coef_myo]
    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coef_cardiac_loss, metrics=metrics)
    # optionally resume from saved weights
    if(weights != None):
        model.load_weights(weights)
    return model
def create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3, 3), activation=None,
                             padding='same'):
    """
    Conv3D block: mvn normalization -> conv -> (optional BN) -> dropout -> activation.

    :param input_layer: tensor feeding the block
    :param n_filters: number of Conv3D filters
    :param batch_normalization: insert a BatchNormalization layer after the conv
    :param kernel: Conv3D kernel size
    :param activation: Keras activation layer class to use. (default is 'relu')
    :param padding: Conv3D padding mode
    :return: output tensor of the block
    """
    normalized = Lambda(mvn)(input_layer)
    x = Conv3D(n_filters, kernel, padding=padding)(normalized)
    if batch_normalization:
        x = BatchNormalization(axis=4)(x)
    # dropout is applied whether or not batch normalization is enabled
    x = Dropout(rate=0.2)(x)
    if activation is None:
        return Activation('relu')(x)
    return activation()(x)
def compute_level_output_shape(n_filters, depth, pool_size, image_shape):
    """
    Output shape of a level after `depth` max-pooling operations.

    :param image_shape: shape of the 3d image.
    :param pool_size: the pool_size parameter used in the max pooling operation.
    :param n_filters: Number of filters used by the last node in a given level.
    :param depth: The number of levels down in the U-shaped model a given node is.
    :return: 5D vector of the shape of the output node (batch axis is None)
    """
    shrink_factor = np.power(pool_size, depth)
    spatial = np.asarray(np.divide(image_shape, shrink_factor), dtype=np.int32).tolist()
    return tuple([None, n_filters] + spatial)
def get_up_convolution(depth, n_filters, pool_size, image_shape, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                       deconvolution=False):
    """Return the upsampling layer for a decoder level: plain UpSampling3D by
    default, or a learned Deconvolution3D when `deconvolution` is True
    (requires keras_contrib)."""
    if not deconvolution:
        return UpSampling3D(size=pool_size)
    try:
        from keras_contrib.layers import Deconvolution3D
    except ImportError:
        raise ImportError("Install keras_contrib in order to use deconvolution. Otherwise set deconvolution=False."
                          "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
    shape = compute_level_output_shape(n_filters=n_filters, depth=depth,
                                       pool_size=pool_size, image_shape=image_shape)
    return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,
                           output_shape=shape, strides=strides, input_shape=shape)
def resume_training(model_file):
    """Reload a saved 3D U-Net model for continued training.

    Registers the project's custom loss/metrics so Keras can deserialize
    the compiled model.
    """
    print("Resume training and load model")
    custom_objects = {'dice_coef_cardiac_loss': dice_coef_cardiac_loss,
                      'dice_coef': dice_coef,
                      'dice_coef_endo': dice_coef_endo,
                      # fix: was incorrectly mapped to dice_coef_endo, which
                      # would restore the myo metric with the endo function
                      'dice_coef_myo': dice_coef_myo}
    return load_model(model_file, custom_objects=custom_objects)
if __name__ == '__main__':
    # smoke test: build a small demo network and render its architecture diagram
    model = unet_model_3d_Inv((128, 128, 5, 1), pool_size=(2, 2, 1), kernel=(7, 7, 5), n_labels=3, initial_learning_rate=0.00001,
                              deconvolution=False, depth=4, n_base_filters=8, include_label_wise_dice_coefficients=True, batch_normalization=True)
    plot_model(model, show_shapes=True, to_file='unet_model_3d_Inv.png')
    model.summary()
60,934 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /pred_sunnybrook_unetres_time.py | #!/usr/bin/env python2.7
import re, sys, os
import shutil, cv2
import numpy as np
from train_sunnybrook_unetres import read_contour, map_all_cases, export_all_contours, read_all_contour
from helpers import reshape, get_SAX_SERIES, draw_result, draw_image_overlay, center_crop, center_crop_3d
from unet_res_model_Inv import unet_res_model_Inv
from unet_model_time import unet_res_model_time, dice_coef, dice_coef_each
# Patient-ID -> short-axis series-number lookup for the Sunnybrook data.
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): the Windows path literals below contain unescaped backslashes;
# they only work because \c, \S, etc. are not recognized escape sequences —
# raw strings (r'...') would be safer.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Validation split: expert contours, DICOM images, and overlay output.
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database ContoursPart2',
                            'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database DICOMPart2',
                        'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database OverlayPart2',
                        'ValidationDataOverlay')
# Online (test) split: same layout as validation, different part numbers.
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database ContoursPart1',
                   'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database DICOMPart1',
                   'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database OverlayPart1',
                   'OnlineDataOverlay')
# Destination directories for submission output.
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook_online_submission')
def create_submission(cases, data_path, output_path, contour_type='i'):
    """Predict contours for every case with the single-phase model and write
    overlay images.

    Args:
        cases: iterable of case directory names.
        data_path: root directory containing one sub-directory per case.
        output_path: root directory where overlay images are written.
        contour_type: contour code passed to the models and the overlay
            drawing helper ('i' = endocardium).
    """
    weight_t = 'model_logs/sunnybrook_a_unetres_inv_time.h5'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv.h5'
    crop_size = 128
    num_phases = 3
    num_classes = 2
    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    # Bug fix: the single-phase model consumes 2-D slices, so it must be built
    # with the (H, W, 1) shape; previously the temporal (phases, H, W, 1)
    # shape was passed and input_shape_s was never used.
    model_s = unet_res_model_Inv(input_shape_s, num_classes, nb_filters=16, transfer=True, contour_type=contour_type, weights=weight_s)
    # NOTE(review): the temporal model is built but never used below; kept to
    # preserve side effects (weight loading/validation) — confirm it can go.
    model_t = unet_res_model_time(input_shape, num_classes, nb_filters=16, n_phases=num_phases, transfer=True, contour_type=contour_type, weights=weight_t)
    for case_idx, case in enumerate(cases):
        print('\nPredict image sequence {:d}'.format(case_idx))
        images, _, file_names = read_all_contour(case, data_path, num_classes)
        images_crop = center_crop_3d(images, crop_size=crop_size)
        pred_masks = model_s.predict(images_crop, batch_size=32, verbose=1)
        p, h, w, d = images.shape
        # Renamed the inner index so it no longer shadows the case index, and
        # the regex no longer shadows the slice count `p`.
        for i in range(p):
            image = images[i, ...]
            tmp = pred_masks[i, :]
            out_file = file_names[i]
            tmp = reshape(tmp, to_shape=(h, w, d))
            tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
            tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection in case: {:s}; image: {:s}'.format(case, out_file))
                coords = np.ones((1, 1, 1, 2), dtype='int')
            output_full_path = os.path.join(output_path, case)
            dcm_pattern = re.compile("dcm")
            out_file = dcm_pattern.sub('jpg', out_file)
            draw_image_overlay(image, out_file, output_full_path, contour_type, coords)
if __name__== '__main__':
    # Contour type to predict ('i' = endocardium) and GPU selection.
    contour_type = 'i'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Validation split: predict every case and write overlays.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_val_submission_unetres_inv_time'
    print('\nProcessing val ' + contour_type + ' contours...')
    val_cases = list(map_all_cases(VAL_CONTOUR_PATH))
    create_submission(val_cases, VAL_IMG_PATH, save_dir, contour_type)
    # Online split: same pipeline, different input/output directories.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission_unetres_inv_time'
    print('\nProcessing online '+contour_type+' contours...')
    online_cases = list(map_all_cases(ONLINE_CONTOUR_PATH))
    create_submission(online_cases, ONLINE_IMG_PATH, save_dir, contour_type)
    print('\nAll done.')
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,935 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /tfmodel/layers.py | import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import ZeroPadding2D, Cropping2D
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.framework import ops
def unpool_with_argmax(pool, ind, name=None, ksize=(1, 2, 2, 1)):
    """
    Unpooling layer after max_pool_with_argmax.
       Args:
           pool: max-pooled output tensor.
           ind: argmax indices produced by tf.nn.max_pool_with_argmax.
           name: variable-scope name for the op.
           ksize: same kernel size that was used for the pooling op.
               (Tuple default instead of a list — mutable default arguments
               are a Python footgun; the value is only ever indexed.)
       Return:
           unpool: tensor with spatial dims scaled up by ksize[1]/ksize[2].
    """
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        # Build (batch, flat_index) coordinate pairs so scatter_nd can place
        # every pooled value back at the position recorded by its argmax.
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
def mvn(tensor):
    '''Performs per-channel spatial mean-variance normalization,
    after scaling the input by its per-channel spatial maximum.'''
    epsilon = 1e-6
    # Bug fix: reduce_max must keep the reduced dims so the division below
    # broadcasts (B, 1, 1, C) against the (B, H, W, C) input; without
    # keep_dims the shapes are incompatible.
    max_v = tf.reduce_max(tensor, axis=(1, 2), keep_dims=True)
    tensor = tensor / max_v
    mean = tf.reduce_mean(tensor, axis=(1, 2), keep_dims=True)
    std = tf.sqrt(var(tensor, axis=(1, 2), keepdims=True))
    mvn = (tensor - mean) / (std + epsilon)
    return mvn
def crop(tensors):
    '''
    List of 2 tensors, the second tensor having larger spatial dimensions.
    Center-crops the second tensor to the spatial size of the first.
    '''
    h_dims, w_dims = [], []
    for t in tensors:
        # Bug fix: tf.shape() returns a Tensor, which cannot be tuple-unpacked
        # in graph mode; use the static shape (as unpool_with_argmax does).
        b, h, w, d = t.get_shape().as_list()
        h_dims.append(h)
        w_dims.append(w)
    crop_h, crop_w = (h_dims[1] - h_dims[0]), (w_dims[1] - w_dims[0])
    rem_h = crop_h % 2
    rem_w = crop_w % 2
    # Odd excess: the extra row/column is cropped from the bottom/right.
    crop_h_dims = (int(crop_h / 2), int(crop_h / 2 + rem_h))
    crop_w_dims = (int(crop_w / 2), int(crop_w / 2 + rem_w))
    cropped = Cropping2D(cropping=(crop_h_dims, crop_w_dims))(tensors[1])
    return cropped
def var(x, axis=None, keepdims=False):
    """Variance of a tensor along the specified axis.

    # Arguments
        x: tensor or variable; booleans are cast to float32 first.
        axis: integer axis to reduce over (None reduces all axes).
        keepdims: whether the reduced dimension is retained with length 1.

    # Returns
        A tensor holding the variance of the elements of `x`.
    """
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, 'float32')
    mean = tf.reduce_mean(x, axis=axis, keep_dims=True)
    squared_deviations = tf.square(x - mean)
    return tf.reduce_mean(squared_deviations, axis=axis, keep_dims=keepdims)
# Register a gradient for MaxPoolWithArgmax so unpooling can back-propagate.
# TF >= 1.4 already ships this gradient, in which case registration raises
# and is skipped with a notice.
try:
    @ops.RegisterGradient("MaxPoolWithArgmax")
    def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
        # Delegate to the private C++ op that computes the pooling gradient
        # from the stored argmax indices.
        return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0],
                                                 grad,
                                                 op.outputs[1],
                                                 op.get_attr("ksize"),
                                                 op.get_attr("strides"),
                                                 padding=op.get_attr("padding"))
except Exception as e:
    print(f"Could not add gradient for MaxPoolWithArgMax, Likely installed already (tf 1.4)")
    print(e)
60,936 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /layer_common.py | from keras import backend as K
from keras.layers import ZeroPadding2D, Cropping2D
def mvn(tensor):
    """Per-channel spatial (H, W) mean-variance normalization."""
    eps = 1e-6
    spatial_mean = K.mean(tensor, axis=(1, 2), keepdims=True)
    spatial_std = K.std(tensor, axis=(1, 2), keepdims=True)
    return (tensor - spatial_mean) / (spatial_std + eps)
def mvn3d(tensor):
    """Per-channel mean-variance normalization over the three spatial axes."""
    eps = 1e-6
    volume_mean = K.mean(tensor, axis=(1, 2, 3), keepdims=True)
    volume_std = K.std(tensor, axis=(1, 2, 3), keepdims=True)
    return (tensor - volume_mean) / (volume_std + eps)
def crop(tensors):
    """Center-crop the second tensor to the spatial size of the first.

    `tensors` is a list of two 4-D tensors; the second must have spatial
    dimensions at least as large as the first's.
    """
    heights, widths = [], []
    for tensor in tensors:
        _, height, width, _ = K.get_variable_shape(tensor)
        heights.append(height)
        widths.append(width)
    excess_h = heights[1] - heights[0]
    excess_w = widths[1] - widths[0]
    # Odd excess: crop the extra row/column from the bottom/right.
    top_bottom = (int(excess_h / 2), int(excess_h / 2 + excess_h % 2))
    left_right = (int(excess_w / 2), int(excess_w / 2 + excess_w % 2))
    return Cropping2D(cropping=(top_bottom, left_right))(tensors[1])
60,937 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /submit_sunnybrook_unet_3d.py | #!/usr/bin/env python2.7
import re, sys, os
import shutil, cv2
import numpy as np
from train_sunnybrook_unet_3d import read_volume, map_all_contours, export_all_volumes, map_endo_contours
from helpers import reshape, get_SAX_SERIES, draw_result, draw_image_overlay
from unet_model_3d import unet_model_3d, dice_coef_endo_each, dice_coef_myo_each, resume_training
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacVolumeDataGenerator
from unet_model_3d_Inv import unet_model_3d_Inv, resume_training
# Patient-ID -> short-axis series-number lookup for the Sunnybrook data.
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): Windows paths with unescaped backslashes; raw strings would
# be safer although \c, \S are not recognized escapes.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Validation split: contours, DICOM images, overlay output.
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database ContoursPart2',
                            'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database DICOMPart2',
                        'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database OverlayPart2',
                        'ValidationDataOverlay')
# Online (test) split.
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database ContoursPart1',
                   'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database DICOMPart1',
                   'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database OverlayPart1',
                   'OnlineDataOverlay')
# Training split (used by the debug path of create_submission).
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database ContoursPart3',
                        'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                    'Sunnybrook Cardiac MR Database DICOMPart3',
                    'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook Cardiac MR Database OverlayPart3',
                        'TrainingOverlayImage')
# Destination directories for submission output.
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'Sunnybrook_online_submission')
def create_submission(contours, volume_map, data_path, output_path, num_slices, num_phase_in_cycle, contour_type='a', debug=False):
    """Evaluate the 3-D U-Net over all volumes and write overlays + reports.

    Args:
        contours: contour descriptors from map_all_contours.
        volume_map: mapping from case to its volume files.
        data_path: DICOM root directory for this split.
        output_path: overlay root passed to export_all_volumes.
        num_slices: number of short-axis slices per input volume.
        num_phase_in_cycle: phases per cardiac cycle in the source data.
        contour_type: only 'a' (all classes) is supported; anything else exits.
        debug: when True, evaluate on augmented data only (no overlays).

    NOTE(review): results are written under the module-level global `save_dir`
    set in __main__, not under `output_path` — confirm this is intended.
    """
    if contour_type == 'a':
        weights = 'model_logs/sunnybrook_a_unet_3d.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 128
    input_shape = (crop_size, crop_size, num_slices, 1)
    num_classes = 3
    # Load every (cropped) volume and its mask for this split.
    volumes, vol_masks, cases, img_nos = export_all_volumes(contours,
                                                    volume_map,
                                                    data_path,
                                                    output_path,
                                                    crop_size,
                                                    num_classes=num_classes,
                                                    num_slices=num_slices,
                                                    num_phase_in_cycle=num_phase_in_cycle,
                                                    is_all_valid_slice=True)
    model = unet_model_3d_Inv(input_shape, pool_size=(2, 2, 1), kernel=(7, 7, 5), n_labels=3, initial_learning_rate=0.00001,
                          deconvolution=False, depth=4, n_base_filters=4, include_label_wise_dice_coefficients=True, batch_normalization=True, weights=weights)
    if debug:
        # Debug path: evaluate on an 8x augmented copy of the data.
        kwargs = dict(
            rotation_range=90,
            zoom_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            vertical_flip=True,
            data_format="channels_last",
            fill_mode='constant',
        )
        seed = 1234
        np.random.seed(seed)
        image_datagen = CardiacVolumeDataGenerator(**kwargs)
        mask_datagen = CardiacVolumeDataGenerator(**kwargs)
        # Identical seeds keep image and mask augmentation in lock-step.
        volumes = image_datagen.fit(volumes, augment=True, seed=seed, rounds=8, toDir=None)
        vol_masks = mask_datagen.fit(vol_masks, augment=True, seed=seed, rounds=8, toDir=None)
        result = model.evaluate(volumes, vol_masks, batch_size=8)
        result = np.round(result, decimals=10)
        print('\nResult {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
    else:
        pred_masks = model.predict(volumes, batch_size=8, verbose=1)
        print('\nEvaluating ...')
        result = model.evaluate(volumes, vol_masks, batch_size=8)
        result = np.round(result, decimals=10)
        print('\nResult {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
        # NOTE(review): `num` is printed below but never incremented, so the
        # "multiple detections" count is always 0.
        num = 0
        # Write one overlay per slice for endo ('i', channel 2) and
        # myo ('m', channel 1).
        for c_type in ['i', 'm']:
            for idx in range(len(volumes)):
                volume = volumes[idx]
                h, w, s, d = volume.shape
                for s_i in range(s):
                    img = volume[...,s_i, 0]
                    if c_type == 'i':
                        tmp = pred_masks[idx, ..., s_i, 2]
                    elif c_type == 'm':
                        tmp = pred_masks[idx, ..., s_i, 1]
                    tmp = tmp[..., np.newaxis]
                    tmp = reshape(tmp, to_shape=(h, w, d))
                    # Threshold soft predictions into a binary 8-bit mask.
                    tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
                    tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
                    if not coords:
                        print('\nNo detection in case: {:s}; image: {:d}'.format(cases[idx], img_nos[idx]))
                        coords = np.ones((1, 1, 1, 2), dtype='int')
                    overlay_full_path = os.path.join(save_dir, cases[idx], 'Overlay')
                    if not os.path.exists(overlay_full_path):
                        os.makedirs(overlay_full_path)
                    if 'Overlay' in overlay_full_path:
                        out_file = 'IM-0001-%s-%04d-%01d.png' % (c_type, img_nos[idx], s_i)
                        draw_image_overlay(img, out_file, overlay_full_path, c_type, coords)
            print('\nNumber of multiple detections: {:d}'.format(num))
            dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
            with open(dst_eval, 'wb') as f:
                f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))
                f.close()
            # Detailed per-slice Dice scores written as CSV.
            # NOTE(review): `s` here leaks from the last iteration of the loop
            # above — correct only if every volume has the same slice count.
            detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
            evalEndoArr = []
            evalMyoArr = []
            resArr = [cases, img_nos]
            for s_i in range(s):
                resArr.append(list(dice_coef_endo_each(vol_masks[...,s_i,:], pred_masks[...,s_i,:])))
            for s_i in range(s):
                resArr.append(list(dice_coef_myo_each(vol_masks[..., s_i, :], pred_masks[..., s_i, :])))
            resArr = np.transpose(resArr)
            np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
            # np.savetxt(f, '\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
if __name__ == '__main__':
    # Evaluate the all-class ('a') 3-D model; set _debug=True to score on
    # augmented training data instead of generating submissions.
    contour_type = 'a'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    num_slices = 5
    num_phase_in_cycle = 20
    _debug = False
    if _debug:
        # Debug run on the training split (no overlays written).
        save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_debug_submission_unet_3d_Inv'
        print('\nProcessing online ' + contour_type + ' contours...')
        online_ctrs, volume_map = map_all_contours(TRAIN_CONTOUR_PATH)
        create_submission(online_ctrs, volume_map, TRAIN_IMG_PATH, TRAIN_OVERLAY_PATH, num_slices, num_phase_in_cycle,
                          contour_type, _debug)
    # Online split. NOTE(review): create_submission reads the global
    # `save_dir` set here for its output locations.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission_unet_3d_Inv'
    print('\nProcessing online ' + contour_type + ' contours...')
    online_ctrs, volume_map = map_all_contours(ONLINE_CONTOUR_PATH)
    create_submission(online_ctrs, volume_map, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, num_slices, num_phase_in_cycle, contour_type, _debug)
    #create_endo_submission(online_ctrs, ONLINE_IMG_PATH, ONLINE_OVERLAY_PATH, contour_type)
    # Validation split.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_val_submission_unet_3d_e135_a8_f8_775_d4_s5_allvalid_mvn'
    print('\nProcessing val ' + contour_type + ' contours...')
    val_ctrs, volume_map = map_all_contours(VAL_CONTOUR_PATH)
    create_submission(val_ctrs, volume_map, VAL_IMG_PATH, VAL_OVERLAY_PATH, num_slices, num_phase_in_cycle, contour_type, _debug)
    #create_endo_submission(val_endos, VAL_IMG_PATH, VAL_OVERLAY_PATH, contour_type)
    print('\nAll done.')
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,938 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_model.py | from __future__ import print_function
from keras import optimizers
from keras.models import Model
from keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout
from keras.optimizers import Adam
from keras.layers.merge import concatenate
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each
from layer_common import mvn, crop
from keras.layers import Dropout, Lambda
def unet_model(input_shape, num_classes, transfer=True, contour_type='i', weights=None):
    """Build and compile a 2-D U-Net for cardiac MR segmentation.

    Args:
        input_shape: (H, W, C) shape of the input images.
        num_classes: number of segmentation classes. A value of 2 is
            collapsed to a single sigmoid output channel; any other value
            produces a `num_classes`-way softmax.
        transfer: when True, pick the loss matching `contour_type`.
        contour_type: 'i' (endocardium), 'o' (myocardium), 'r' (right
            ventricle) or 'a' (all classes).
        weights: optional path to an HDF5 file with pretrained weights.

    Returns:
        A compiled keras Model mapping images to per-pixel class maps.
    """
    if num_classes == 2:
        num_classes = 1
        loss = dice_coef_loss
        activation = 'sigmoid'
    else:
        if transfer:
            if contour_type == 'i':
                loss = dice_coef_loss_endo
            elif contour_type == 'o':
                loss = dice_coef_loss_myo
            elif contour_type == 'r':
                loss = dice_coef_loss_rv
            else:
                # 'a' (all classes) and any unrecognized type: joint Dice loss.
                loss = dice_coef_loss
        else:
            loss = dice_coef_loss
        activation = 'softmax'

    # Shared settings for every 3x3 convolution in the network.
    kwargs = dict(
        kernel_size=3,
        strides=1,
        activation='relu',
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
    )

    data = Input(shape=input_shape, dtype='float', name='data')
    # Contracting path: conv-conv-pool stages with mean-variance
    # normalization applied to the input and each pooled feature map.
    mvn1 = Lambda(mvn, name='mvn1')(data)
    conv1 = Conv2D(filters=32, **kwargs)(mvn1)
    conv1 = Conv2D(filters=32, **kwargs)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = Lambda(mvn)(pool1)

    conv2 = Conv2D(filters=64, **kwargs)(pool1)
    conv2 = Conv2D(filters=64, **kwargs)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = Lambda(mvn)(pool2)

    conv3 = Conv2D(filters=128, **kwargs)(pool2)
    conv3 = Conv2D(filters=128, **kwargs)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = Lambda(mvn)(pool3)

    conv4 = Conv2D(filters=256, **kwargs)(pool3)
    conv4 = Conv2D(filters=256, **kwargs)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    pool4 = Lambda(mvn)(pool4)

    # Bottleneck.
    conv5 = Conv2D(filters=512, **kwargs)(pool4)
    conv5 = Conv2D(filters=512, **kwargs)(conv5)

    # Expanding path: upsample, concatenate the skip connection, conv-conv.
    up6 = concatenate([Conv2D(filters=256, **kwargs)(UpSampling2D(size=(2, 2))(conv5)), conv4], axis=3)
    conv6 = Conv2D(filters=256, **kwargs)(up6)
    conv6 = Conv2D(filters=256, **kwargs)(conv6)

    up7 = concatenate([Conv2D(filters=128, **kwargs)(UpSampling2D(size=(2, 2))(conv6)), conv3], axis=3)
    conv7 = Conv2D(filters=128, **kwargs)(up7)
    conv7 = Conv2D(filters=128, **kwargs)(conv7)

    up8 = concatenate([Conv2D(filters=64, **kwargs)(UpSampling2D(size=(2, 2))(conv7)), conv2], axis=3)
    conv8 = Conv2D(filters=64, **kwargs)(up8)
    conv8 = Conv2D(filters=64, **kwargs)(conv8)

    up9 = concatenate([Conv2D(filters=32, **kwargs)(UpSampling2D(size=(2, 2))(conv8)), conv1], axis=3)
    conv9 = Conv2D(filters=32, **kwargs)(up9)
    conv9 = Conv2D(filters=32, **kwargs)(conv9)

    # 1x1 convolution producing the per-pixel class scores.
    conv10 = Conv2D(filters=num_classes, kernel_size=1,
                    strides=1, activation=activation, padding='valid',
                    kernel_initializer='glorot_uniform', use_bias=True, name="prediction")(conv9)

    model = Model(inputs=data, outputs=conv10)
    if weights is not None:
        model.load_weights(weights)
    # Bug fix: the loss selected above was previously discarded and the model
    # was always compiled with dice_coef_loss_endo regardless of contour_type.
    sgd = optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=loss,
                  metrics=[dice_coef_endo])
    return model
if __name__ == '__main__':
    # Smoke test: build the 4-class model, dump an architecture diagram,
    # and print the layer summary.
    model = unet_model((128, 128, 1), 4, transfer=True, weights=None)
    plot_model(model, show_shapes=True, to_file='unet_model.png')
    model.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,939 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /pre_train_acdc_unet_time.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
from keras.callbacks import *
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
from itertools import zip_longest
from scipy.misc import imsave
from helpers import center_crop_3d, center_crop, lr_poly_decay, get_SAX_SERIES
from metrics_acdc import load_nii
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacTimeSeriesDataGenerator
from unet_model_time import unet_res_model_time
from unet_res_model_Inv import unet_res_model_Inv
from DataIOProc import DataIOProc
# Fix the NumPy RNG so augmentation/shuffling is reproducible across runs.
seed = 1234
np.random.seed(seed)
# Patient-ID -> short-axis series-number lookup (shared helper).
SAX_SERIES = get_SAX_SERIES()
# Root of the ACDC dataset and derived input/output directories.
# NOTE(review): unescaped backslashes — raw strings would be safer.
ACDC_ROOT_PATH = 'D:\cardiac_data\ACDC'
TRAIN_AUG_PATH = os.path.join(ACDC_ROOT_PATH,
                        'Augmentation')
TRAIN_PATH = os.path.join(ACDC_ROOT_PATH, 'training')
DEBUG_PATH = os.path.join(ACDC_ROOT_PATH, 'debug')
TRAIN_OVERLAY_PATH = os.path.join(ACDC_ROOT_PATH, 'overlay')
TEMP_CONTOUR_PATH = os.path.join(ACDC_ROOT_PATH,
                          'ACDC Cardiac MR Database Temp',
                          'Temp')
class VolumeCtr(object):
    """Handle on one ACDC ground-truth volume.

    Parses the patient and frame numbers out of the contour path and records
    how many short-axis slices the ground-truth volume contains.
    """

    def __init__(self, ctr_path):
        self.ctr_path = ctr_path
        parsed = re.search(r'patient(\d{03})_frame(\d{02})*', ctr_path)
        self.patient_no = parsed.group(1)
        self.img_no = parsed.group(2)
        volume_gt, _, header = load_nii(ctr_path)
        # Third axis of the NIfTI ground truth is the slice dimension.
        self.total_number = volume_gt.shape[2]
def read_contour(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation, contour_type='i', return_mask=True):
    """Load the temporal neighbourhood of a contoured ACDC frame and its mask.

    Args:
        contour: VolumeCtr describing the annotated frame.
        data_path: root directory with one 'patientXXX' folder per patient.
        num_phases: number of temporal phases to stack around the frame.
        num_phases_in_cycle: nominal phases per cardiac cycle.
        phase_dilation: stride (in nominal phases) between sampled phases.
        contour_type: 'i' endo, 'o' epi/myo, 'r' RV, 'a' all labels.
        return_mask: when False, the mask slot of the result is None.

    Returns:
        (volume_arr, volume_gt): 5-D image stack and 5-D binary (or label)
        mask, or (volume_arr, None) when return_mask is False.
    """
    img_path = os.path.join(data_path, 'patient{:s}'.format(contour.patient_no))
    gt_name = 'patient{:s}_frame{:s}_gt.nii.gz'.format(contour.patient_no, contour.img_no)
    full_gt_path = os.path.join(img_path, gt_name)
    # Bug fix: the single-frame image volume was previously loaded here and
    # never used (the image data comes from find_neighbor_volumes below);
    # that dead read of patient..._frame....nii.gz has been removed.
    volume_gt, _, header = load_nii(full_gt_path)
    # Collapse the ACDC label map to a binary mask for the requested class.
    if contour_type == "i":
        volume_gt = np.where(volume_gt == 3, 1, 0).astype('uint8')
    elif contour_type == "o":
        volume_gt = np.where(volume_gt >= 2, 1, 0).astype('uint8')
    elif contour_type == "r":
        volume_gt = np.where(volume_gt == 1, 1, 0).astype('uint8')
    elif contour_type == "a":
        volume_gt = volume_gt.astype('uint8')
    volume_arr = find_neighbor_volumes(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation)
    # Append a trailing channel axis / leading phase axis as needed so both
    # outputs are 5-D.
    if volume_arr.ndim < 5:
        volume_arr = volume_arr[..., np.newaxis]
    if volume_gt.ndim < 4:
        volume_gt = volume_gt[np.newaxis, :, :, :, np.newaxis]
    if not return_mask:
        return volume_arr, None
    return volume_arr, volume_gt
def find_neighbor_volumes(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation):
    """Sample num_phases temporally-neighbouring 3-D volumes around a frame.

    Phases are taken from the patient's 4-D cine volume at a stride of
    phase_dilation (rescaled to this patient's actual cycle length), centred
    on the contoured frame and wrapping around the cardiac cycle.
    """
    patient_dir = os.path.join(data_path, 'patient{:s}'.format(contour.patient_no))
    full_volume_path = os.path.join(patient_dir, 'patient{:s}_4d.nii.gz'.format(contour.patient_no))
    centre = float(contour.img_no)
    volume, _, header = load_nii(full_volume_path)
    volume = volume.astype('int')
    height, width, slices, cycle_len = volume.shape
    # Rescale the requested stride to this patient's cycle length.
    stride = phase_dilation * cycle_len / num_phases_in_cycle
    half = int(num_phases / 2)
    phases = np.zeros((num_phases, height, width, slices), dtype="int")
    for phase_i in range(num_phases):
        frame_idx = int(centre + (phase_i - half) * stride) % cycle_len
        phases[phase_i, ...] = volume[..., frame_idx]
    return phases
def draw_contour(contour, data_path, out_path, type="i", coords = []):
    """Render per-slice PNG overlays of the ground-truth contours on the images.

    For type 'i'/'o'/'r' one contour is drawn (red/blue/green dots); for 'a'
    all three are drawn on the same figure. The largest connected contour is
    kept when several are detected.

    NOTE(review): `out_path` is accepted but never used — overlays are written
    next to the images under `img_path`; confirm this is intentional.
    NOTE(review): `coords` is a mutable default argument that is immediately
    overwritten inside the loop; it appears to be vestigial.
    """
    img_path = os.path.join(data_path, 'patient{:s}'.format(contour.patient_no))
    image_name = 'patient{:s}_frame{:s}.nii.gz'.format(contour.patient_no, contour.img_no)
    gt_name = 'patient{:s}_frame{:s}_gt.nii.gz'.format(contour.patient_no, contour.img_no)
    full_image_path = os.path.join(img_path, image_name)
    full_gt_path = os.path.join(img_path, gt_name)
    volume, _, header = load_nii(full_image_path)
    volume_gt, _, header = load_nii(full_gt_path)
    img_size = volume.shape
    # One overlay PNG per short-axis slice.
    for i in range(0, img_size[2]):
        overlay_name = 'patient{:s}_frame{:s}_{:2d}_{:s}.png'.format(contour.patient_no, contour.img_no, i, type)
        full_overlay_path = os.path.join(img_path, overlay_name)
        if type != "a":
            img = volume[:, :, i]
            mask = volume_gt[:, :, i]
            # Transpose so matplotlib displays the slice with the expected orientation.
            img = np.swapaxes(img, 0, 1)
            mask = np.swapaxes(mask, 0, 1)
            # Label values: 1 = RV, 2 = myocardium, 3 = endocardium.
            if type == "i":
                mask = np.where(mask == 3, 255, 0).astype('uint8')
            elif type == "o":
                mask = np.where(mask >= 2, 255, 0).astype('uint8')
            elif type == "r":
                mask = np.where(mask == 1, 255, 0).astype('uint8')
            img = img.astype('int')
            tmp2, coords, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection: {:s}, {:2d}'.format(contour.ctr_path, i))
                # Dummy 1-pixel contour so plotting below still works.
                coords = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords) > 1:
                print('\nMultiple detections: {:s}, {:2d}'.format(contour.ctr_path, i))
                # Keep only the contour with the most points.
                lengths = []
                for coord in coords:
                    lengths.append(len(coord))
                coords = [coords[np.argmax(lengths)]]
            coords = np.squeeze(coords)
            if coords.ndim == 1:
                x, y = coords
            else:
                x, y = zip(*coords)
            plt.cla()
            pylab.imshow(img, cmap=pylab.cm.bone)
            if type == "i":
                plt.plot(x, y, 'r.')
            elif type == "o":
                plt.plot(x, y, 'b.')
            elif type == "r":
                plt.plot(x, y, 'g.')
        elif type == "a":
            # Draw all three structures on the same figure.
            img = volume[:, :, i]
            img = np.swapaxes(img, 0, 1)
            mask_i = volume_gt[:, :, i]
            mask_o = volume_gt[:, :, i]
            mask_r = volume_gt[:, :, i]
            mask_i = np.swapaxes(mask_i, 0, 1)
            mask_o = np.swapaxes(mask_o, 0, 1)
            mask_r = np.swapaxes(mask_r, 0, 1)
            mask_i = np.where(mask_i == 3, 255, 0).astype('uint8')
            mask_o = np.where(mask_o >= 2, 255, 0).astype('uint8')
            mask_r = np.where(mask_r == 1, 255, 0).astype('uint8')
            img = img.astype('int')
            tmp2, coords_i, hierarchy = cv2.findContours(mask_i.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            tmp2, coords_o, hierarchy = cv2.findContours(mask_o.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            tmp2, coords_r, hierarchy = cv2.findContours(mask_r.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords_i:
                print('\nNo detection endo: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_i = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_i) > 1:
                print('\nMultiple detections endo: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_i:
                    lengths.append(len(coord))
                coords_i = [coords_i[np.argmax(lengths)]]
            coords_i = np.squeeze(coords_i)
            if not coords_o:
                print('\nNo detection epi: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_o = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_o) > 1:
                print('\nMultiple detections epi: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_o:
                    lengths.append(len(coord))
                coords_o = [coords_o[np.argmax(lengths)]]
            coords_o = np.squeeze(coords_o)
            if not coords_r:
                print('\nNo detection right ventricle: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_r = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_r) > 1:
                print('\nMultiple detections right ventricle: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_r:
                    lengths.append(len(coord))
                coords_r = [coords_r[np.argmax(lengths)]]
            coords_r = np.squeeze(coords_r)
            if coords_i.ndim == 1:
                x, y = coords_i
            else:
                x, y = zip(*coords_i)
            plt.cla()
            pylab.imshow(img, cmap=pylab.cm.bone)
            plt.plot(x, y, 'r.')
            if coords_o.ndim == 1:
                x, y = coords_o
            else:
                x, y = zip(*coords_o)
            plt.plot(x, y, 'b.')
            if coords_r.ndim == 1:
                x, y = coords_r
            else:
                x, y = zip(*coords_r)
            plt.plot(x, y, 'g.')
        # Crop 25px borders off the displayed area, then save the figure.
        plt.xlim(25, img.shape[1]-25)
        plt.ylim(25, img.shape[0]-25)
        pylab.savefig(full_overlay_path,bbox_inches='tight',dpi=200)
        #pylab.show()
    return
def map_all_contours(data_path, contour_type, shuffle=True):
    """Find every ground-truth NIfTI under data_path and wrap each in a VolumeCtr.

    Returns a lazy ``map`` over the (optionally shuffled) list of
    ``patient*_frame*_gt.*`` file paths.
    """
    gt_files = []
    for dirpath, dirnames, files in os.walk(data_path):
        for f in fnmatch.filter(files, 'patient*' + '_frame*_gt.*'):
            gt_files.append(os.path.join(dirpath, f))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(gt_files)
    print('Number of examples: {:d}'.format(len(gt_files)))
    return map(VolumeCtr, gt_files)
def export_all_contours(contours, data_path, overlay_path, crop_size, contour_type, num_classes=4, num_phases=5, phase_dilation=1, num_phases_in_cycle=30):
    """Build the (images, masks) training arrays for a list of contours.

    Every slice of every contoured volume contributes one sample of shape
    (num_phases, crop_size, crop_size, 1) with a matching single-phase mask.
    A binary problem (num_classes == 2) is stored as one mask channel.
    """
    print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
    if num_classes == 2:
        num_classes = 1

    # Pre-size the output arrays from the per-volume slice counts.
    total_number = sum(volume_ctr.total_number for volume_ctr in contours)
    images = np.zeros((total_number, num_phases, crop_size, crop_size, 1))
    masks = np.zeros((total_number, 1, crop_size, crop_size, num_classes))

    idx = 0
    for contour in contours:
        vol, vol_mask = read_contour(contour, data_path, num_phases,
                                     num_phases_in_cycle, phase_dilation,
                                     contour_type=contour_type, return_mask=True)
        #draw_contour(contour, data_path, overlay_path, type=contour_type)
        p, w, h, s, d = vol.shape
        for slice_idx in range(s):
            # Extract one slice across all phases, transpose to (phase, h, w, c)
            # and centre-crop to the network input size.
            img = np.swapaxes(vol[:, :, :, slice_idx, :], 1, 2)
            mask = np.swapaxes(vol_mask[:, :, :, slice_idx, :], 1, 2)
            images[idx] = center_crop_3d(img, crop_size=crop_size)
            masks[idx] = center_crop_3d(mask, crop_size=crop_size)
            idx = idx + 1
    return images, masks
if __name__== '__main__':
    # ---- experiment configuration ----
    contour_type = 'i'
    # Pre-trained stage-1 (single-image) weights.
    weight_s = 'model_logs/sunnybrook_i_unetres_inv_drop_acdc.h5'
    shuffle = False
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    crop_size = 128
    num_phases = 5
    save_path = 'model_logs'
    phase_dilation = 4
    num_phases_in_cycle = 30
    data_proc = DataIOProc(TEMP_CONTOUR_PATH, 'p5_a4')
    print('Mapping ground truth contours to images in train...')
    train_ctrs = list(map_all_contours(TRAIN_PATH, contour_type, shuffle=False))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(train_ctrs)
    print('Done mapping training set')
    num_classes = 2
    #No dev
    # Hold out the first 10% of contours as the dev split.
    split = int(0.1*len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding Train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_PATH,
                                                TRAIN_OVERLAY_PATH,
                                                contour_type = contour_type,
                                                crop_size=crop_size,
                                                num_classes=num_classes,
                                                num_phases=num_phases,
                                                phase_dilation=phase_dilation,
                                                num_phases_in_cycle=num_phases_in_cycle)
    print('\nBuilding Dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_PATH,
                                            TRAIN_OVERLAY_PATH,
                                            contour_type=contour_type,
                                            crop_size=crop_size,
                                            num_classes=num_classes,
                                            num_phases=num_phases,
                                            phase_dilation=phase_dilation,
                                            num_phases_in_cycle = num_phases_in_cycle)
    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    # Stage-1 spatial model, initialised from the ACDC pre-trained weights.
    model_s = unet_res_model_Inv(input_shape_s, num_classes, nb_filters=8, transfer=True, contour_type=contour_type, weights=weight_s)
    # Shared augmentation settings for image and mask generators (they must be
    # identical so both streams stay aligned).
    kwargs = dict(
        rotation_range=90,
        zoom_range=0.1,
        width_shift_range=0.05,
        height_shift_range=0.05,
        horizontal_flip=True,
        vertical_flip=True,
        data_format="channels_last",
    )
    image_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    mask_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")
    aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")
    # NOTE(review): assumes this custom generator's fit() returns the augmented
    # arrays (Keras' stock ImageDataGenerator.fit returns None) — confirm.
    img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=4, toDir=None)
    mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=4, toDir=None)
    epochs = 200
    mini_batch_size = 4
    s, p, h, w, d = img_train.shape
    s_val, p_val, h_val, w_val, d_val = img_dev.shape
    max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
    steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
    curr_iter = 0
    base_lr = K.eval(model_s.optimizer.lr)
    # Polynomial learning-rate schedule; the returned rate is not used further
    # in this script (the stage-2 fit below is commented out).
    lrate = lr_poly_decay(model_s, base_lr, curr_iter, max_iter, power=0.5)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unet_time'), histogram_freq=1, write_graph=False,
                                  write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'check_point_model.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False,
                                 period=2) # .{epoch:d}
    callbacks.append(checkpoint)
    print('\nPredict for 2nd training ...')
    #img_train_s = img_train[:,4,...]
    #mask_train_s = mask_train[:,0,...]
    #result = model_s.evaluate(img_train_s, mask_train_s)
    #result = np.round(result, decimals=10)
    #print('\nDev set result {:s}:\n{:s}'.format(str(model_s.metrics_names), str(result)))
    if not os.path.exists(TEMP_CONTOUR_PATH):
        os.makedirs(TEMP_CONTOUR_PATH)
    # Create training dataset
    # Run stage-1 inference over every (sample, phase) image and cache the
    # predicted masks as stage-2 training input.
    temp_image_t = np.reshape(img_train, (s*p, h, w, d))
    temp_mask_t = model_s.predict(temp_image_t, batch_size=32, verbose=1)
    temp_mask_t = np.reshape(temp_mask_t, (s, p, h, w, d))
    data_proc.save_image_4d(temp_mask_t, 'training')
    data_proc.save_image_4d(mask_train, 'training_mask')
    data_proc.save_data_4d(temp_mask_t.astype('float32'), 'training_data.bin')
    data_proc.save_data_4d(mask_train.astype('float32'), 'training_mask.bin')
    # train_mask_p = np.zeros((s, p, w, h, 1), dtype=K.floatx())
    # for idx_s in range(s):
    #     img_train_p = img_train[idx_s,...]
    #     train_mask_p[idx_s] = model_s.predict(img_train_p)
    #
    #     for idx_p in range(p):
    #         mask = train_mask_p[idx_s, idx_p, ...]
    #         img = img_train[idx_s, idx_p, ...]
    #         img = np.squeeze(img*mask)
    #         img_name = '{:d}-{:d}'.format(idx_s, idx_p)
    #         imsave(os.path.join(TEMP_CONTOUR_PATH, img_name + ".png"), img)
    # Create validation dataset
    print('\nTotal sample is {:d} for 2nd training.'.format(s))
    print('\nPredict for 2nd evaluating ...')
    temp_image_dev = np.reshape(img_dev, (s_val*p_val, w_val, h_val, d_val))
    temp_mask_dev = model_s.predict(temp_image_dev, batch_size=16, verbose=1)
    temp_mask_dev = np.reshape(temp_mask_dev, (s_val, p_val, w_val, h_val, d_val))
    data_proc.save_image_4d(temp_mask_dev, 'evaluation')
    data_proc.save_image_4d(mask_dev, 'evaluation_mask')
    data_proc.save_data_4d(temp_mask_dev.astype('float32'), 'eval_data.bin')
    data_proc.save_data_4d(mask_dev.astype('float32'), 'eval_mask.bin')
    #print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # val_mask_p = np.zeros((s_val, p_val, w_val, h_val, 1), dtype=K.floatx())
    # for idx_s in range(s_val):
    #     img_val_p = img_dev[idx_s,...]
    #     val_mask_p[idx_s] = model_s.predict(img_val_p)
    # dev_generator = (temp_mask_dev, mask_dev)
    # print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # model_t = unet_res_model_time(input_shape, num_classes, nb_filters=64, n_phases=num_phases, dilation=phase_dilation, transfer=True, weights=None)
    # model_t.fit(temp_mask_t,
    #             mask_train,
    #             epochs=epochs,
    #             batch_size=1,
    #             validation_data=dev_generator,
    #             callbacks=callbacks,
    #             class_weight=None
    #             )
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,940 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_multi_model.py | from __future__ import print_function
import numpy as np
from keras import backend as K
import tensorflow as tf
from keras import optimizers
from keras.models import Model
from keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout
from keras.optimizers import Adam
from keras.layers.merge import concatenate
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each
from layer_common import mvn, crop
from keras.layers import Dropout, Lambda
def dice_coef_endo(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for endocardium class per batch.'''
    # Soft dice on label channel 2, reduced over the spatial axes and
    # averaged over the batch axis.
    spatial_axes = (1, 2)
    truth = y_true[:, :, :, 2]
    pred = y_pred[:, :, :, 2]
    overlap = K.sum(truth * pred, axis=spatial_axes)
    norm = K.sum(truth * truth, axis=spatial_axes) + K.sum(pred * pred, axis=spatial_axes)
    return K.mean((2.0 * overlap + smooth) / (norm + smooth), axis=0)
def dice_coef_endo_each(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for endocardium class per batch.'''
    # Per-sample (not batch-averaged) dice on channel 2; the prediction is
    # hard-thresholded at 0.5 before comparison.
    spatial_axes = (1, 2)
    gt = y_true[:, :, :, 2].astype('float32')
    pr = np.where(y_pred[:, :, :, 2] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pr, axis=spatial_axes)
    norm = np.sum(gt * gt, axis=spatial_axes) + np.sum(pr * pr, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (norm + smooth)
def dice_coef_myo(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for myocardium class per batch.'''
    # Soft dice on label channel 1, averaged over the batch axis.
    # (Removed a dead local `summation_true = K.sum(y_true_myo, ...)` that
    # added an unused op to the graph.)
    axes = (1, 2)
    y_true_myo = y_true[:, :, :, 1]
    y_pred_myo = y_pred[:, :, :, 1]
    intersection = K.sum(y_true_myo * y_pred_myo, axis=axes)
    summation = K.sum(y_true_myo * y_true_myo, axis=axes) + K.sum(y_pred_myo * y_pred_myo, axis=axes)
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def dice_coef_myo_each(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for endocardium class per batch.'''
    # Per-sample dice on the myocardium channel (1); the prediction is
    # hard-thresholded at 0.5 before comparison.
    spatial_axes = (1, 2)
    gt = y_true[:, :, :, 1].astype('float32')
    pr = np.where(y_pred[:, :, :, 1] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pr, axis=spatial_axes)
    norm = np.sum(gt * gt, axis=spatial_axes) + np.sum(pr * pr, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (norm + smooth)
def dice_coef_epi(y_true, y_pred, smooth=0.0):
    '''Average dice coefficient for the epicardial region (myo OR endo union) per batch.'''
    axes = (1, 2)
    y_true_myo = y_true[:, :, :, 1]
    y_pred_myo = y_pred[:, :, :, 1]
    y_true_endo = y_true[:, :, :, 2]
    y_pred_endo = y_pred[:, :, :, 2]
    # The epicardial region is the logical OR of the myocardium and
    # endocardium masks (any non-zero activation counts as True).
    y_true_epi = tf.cast(tf.logical_or(tf.cast(y_true_myo, tf.bool), tf.cast(y_true_endo, tf.bool)), tf.float32)
    y_pred_epi = tf.cast(tf.logical_or(tf.cast(y_pred_myo, tf.bool), tf.cast(y_pred_endo, tf.bool)), tf.float32)
    # Side effect: registers TensorBoard image summaries each time this metric
    # graph is built/evaluated.
    tf.summary.image("y_true_myo", y_true_myo[...,None], max_outputs=1)
    tf.summary.image("y_true_endo", y_true_endo[...,None], max_outputs=1)
    tf.summary.image("y_pred_myo", y_pred_myo[...,None], max_outputs=1)
    tf.summary.image("y_pred_endo", y_pred_endo[..., None], max_outputs=1)
    tf.summary.image("y_pred_epi", y_pred_epi[...,None], max_outputs=1)
    tf.summary.image("y_true_epi", y_true_epi[...,None], max_outputs=1)
    intersection = K.sum(y_true_epi * y_pred_epi, axis=axes)
    summation = K.sum(y_true_epi * y_true_epi, axis=axes) + K.sum(y_pred_epi * y_pred_epi, axis=axes)
    tf.summary.merge_all()
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def unet_multi_model(input_shape, num_classes, transfer=True, contour_type='a', weights=None):
    """Build a multi-class 2D U-Net with per-scale mean-variance normalisation.

    Args:
        input_shape: (height, width, channels) of the input image.
        num_classes: number of softmax output channels.
        transfer: unused here; kept for signature parity with sibling builders.
        contour_type: only 'a' (all classes) compiles the model with the dice
            metrics; other values leave the model uncompiled.
        weights: optional path to an HDF5 weights file to load.

    Returns:
        The Keras Model (softmax over num_classes channels at full resolution).
    """
    loss = 'categorical_crossentropy'
    activation = 'softmax'
    # Shared settings for every 3x3 same-padded ReLU convolution below.
    kwargs = dict(
        kernel_size=3,
        strides=1,
        activation='relu',
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
    )
    data = Input(shape=input_shape, dtype='float', name='data')
    # Mean-variance normalise the input (and each pooled feature map below).
    mvn1 = Lambda(mvn, name='mvn1')(data)
    # ---- encoder ----
    conv1 = Conv2D(filters=32, **kwargs)(mvn1)
    conv1 = Conv2D(filters=32, **kwargs)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #pool1 = Dropout(rate=0.5)(pool1)
    pool1 = Lambda(mvn)(pool1)
    conv2 = Conv2D(filters=64, **kwargs)(pool1)
    conv2 = Conv2D(filters=64, **kwargs)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #pool2 = Dropout(rate=0.3)(pool2)
    pool2 = Lambda(mvn)(pool2)
    conv3 = Conv2D(filters=128, **kwargs)(pool2)
    conv3 = Conv2D(filters=128, **kwargs)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #pool3 = Dropout(rate=0.5)(pool3)
    pool3 = Lambda(mvn)(pool3)
    conv4 = Conv2D(filters=256, **kwargs)(pool3)
    conv4 = Conv2D(filters=256, **kwargs)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #pool4 = Dropout(rate=0.3)(pool4)
    pool4 = Lambda(mvn)(pool4)
    # ---- bottleneck ----
    conv5 = Conv2D(filters=512, **kwargs)(pool4)
    conv5 = Conv2D(filters=512, **kwargs)(conv5)
    # ---- decoder with skip connections ----
    up6 = concatenate([Conv2D(filters=256, **kwargs)(UpSampling2D(size=(2, 2))(conv5)), conv4], axis=3)
    conv6 = Conv2D(filters=256, **kwargs)(up6)
    conv6 = Conv2D(filters=256, **kwargs)(conv6)
    up7 = concatenate([Conv2D(filters=128, **kwargs)(UpSampling2D(size=(2, 2))(conv6)), conv3], axis=3)
    conv7 = Conv2D(filters=128, **kwargs)(up7)
    conv7 = Conv2D(filters=128, **kwargs)(conv7)
    up8 = concatenate([Conv2D(filters=64, **kwargs)(UpSampling2D(size=(2, 2))(conv7)), conv2], axis=3)
    conv8 = Conv2D(filters=64, **kwargs)(up8)
    conv8 = Conv2D(filters=64, **kwargs)(conv8)
    up9 = concatenate([Conv2D(filters=32, **kwargs)(UpSampling2D(size=(2, 2))(conv8)), conv1], axis=3)
    conv9 = Conv2D(filters=32, **kwargs)(up9)
    conv9 = Conv2D(filters=32, **kwargs)(conv9)
    # Per-pixel softmax classification head.
    conv10 = Conv2D(filters=num_classes, kernel_size=1,
                    strides=1, activation=activation, padding='valid',
                    kernel_initializer='glorot_uniform', use_bias=True, name="prediction")(conv9)
    model = Model(inputs=data, outputs=conv10)
    if weights is not None:
        model.load_weights(weights)
    if contour_type == 'a':
        model.compile(optimizer=Adam(lr=1e-5), loss=loss, metrics=[dice_coef_endo, dice_coef_myo, dice_coef_epi])
    return model
if __name__ == '__main__':
    # Smoke test: build the network, render its architecture diagram to a PNG,
    # and print the layer summary.
    model = unet_multi_model((128, 128, 1), 3, transfer=True, weights=None)
    plot_model(model, show_shapes=True, to_file='unet_model_multi.png')
    model.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,941 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /submit_sunnybrook_unetres_time.py | #!/usr/bin/env python2.7
import re, sys, os
import shutil, cv2
import numpy as np
from keras import backend as K
from train_sunnybrook_unet_time import read_contour, export_all_contours, map_all_contours
from helpers import reshape, get_SAX_SERIES, draw_result, draw_image_overlay, center_crop, center_crop_3d
from scipy.misc import imsave
from unet_res_model_Inv import unet_res_model_Inv
from unet_model_time import unet_res_model_time, dice_coef
from metrics_common import dice_coef_each
SAX_SERIES = get_SAX_SERIES()

# Local Sunnybrook dataset layout: for each split (validation / online /
# debug) there is a contour dir, a DICOM dir and an overlay output dir.
# Temp caches intermediate stage-1 masks.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'

TEMP_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                 'Sunnybrook Cardiac MR Database Temp',
                                 'Temp')
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database ContoursPart2',
                                'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database DICOMPart2',
                            'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database OverlayPart2',
                                'ValidationDataOverlay')
ONLINE_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                   'Sunnybrook Cardiac MR Database ContoursPart1',
                                   'OnlineDataContours')
ONLINE_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                               'Sunnybrook Cardiac MR Database DICOMPart1',
                               'OnlineDataDICOM')
ONLINE_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                   'Sunnybrook Cardiac MR Database OverlayPart1',
                                   'OnlineDataOverlay')
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                             'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook_online_submission')
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'Debug')
def create_submission(contours, data_path, output_path, contour_type='i'):
    """Two-stage prediction over a Sunnybrook split, with overlay/metric export.

    Stage 1 (model_s) segments every phase image independently; stage 2
    (model_t) refines the per-phase masks using temporal context. Contour
    overlays, an aggregate evaluation text file and a per-image dice CSV are
    written under ``output_path``.

    Args:
        contours: sequence of contour descriptors (must expose case / img_no).
        data_path: root directory with the DICOM images for this split.
        output_path: directory receiving overlays and evaluation files.
        contour_type: contour code ('i' = endocardium).
    """
    weight_t = 'model_logs/sunnybrook_a_unetres_inv_time.h5'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv_drop_acdc.h5'
    crop_size = 128
    num_phases = 5
    num_classes = 2
    phase_dilation = 4
    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    model_s = unet_res_model_Inv(input_shape_s, num_classes, nb_filters=8, transfer=True, contour_type=contour_type, weights=weight_s)
    model_t = unet_res_model_time(input_shape, num_classes, nb_filters=32, n_phases=num_phases, dilation=1, transfer=True, contour_type=contour_type, weights=weight_t)

    images, masks = export_all_contours(contours,
                                        data_path,
                                        output_path,
                                        crop_size=crop_size,
                                        num_classes=num_classes,
                                        num_phases=num_phases,
                                        phase_dilation=phase_dilation)
    s, p, h, w, d = images.shape
    print('\nFirst step predict set ...')
    # Stage 1: flatten (sample, phase) so each image is segmented independently.
    temp_image_t = np.reshape(images, (s*p, h, w, d))
    temp_mask_t = model_s.predict(temp_image_t, batch_size=4, verbose=1)
    temp_mask_t = np.reshape(temp_mask_t, (s, p, h, w, d))
    print('\nTotal sample is {:d} for 2nd evaluation.'.format(s))
    print('\nSecond step predict set ...')
    pred_masks = model_t.predict(temp_mask_t, batch_size=4, verbose=1)
    print('\nEvaluating dev set ...')
    result = model_t.evaluate(temp_mask_t, masks, batch_size=4)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model_t.metrics_names), str(result)))

    for idx, ctr in enumerate(contours):
        print('\nPredict image sequence {:d}'.format(idx))
        img, mask = read_contour(ctr, data_path, num_classes, num_phases, num_phases_in_cycle=20, phase_dilation=phase_dilation)
        p, h, w, d = img.shape
        tmp = np.squeeze(pred_masks[idx, :])
        if tmp.ndim == 2:
            tmp = tmp[:, :, np.newaxis]
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in case: {:s}; image: {:d}'.format(ctr.case, ctr.img_no))
            # Dummy 1-pixel contour so drawing still works.
            coords = np.ones((1, 1, 1, 2), dtype='int')
        # Bug fix: overlays previously went under the module-level `save_dir`
        # global, silently ignoring the `output_path` argument.
        overlay_full_path = os.path.join(output_path, ctr.case, 'Overlay')
        draw_result(ctr, data_path, overlay_full_path, contour_type, coords)

    # Aggregate evaluation (file handle closed by the `with` block).
    dst_eval = os.path.join(output_path, 'evaluation_{:s}.txt'.format(contour_type))
    with open(dst_eval, 'wb') as f:
        f.write(('Dev set result {:s}:\n{:s}'.format(str(model_t.metrics_names), str(result))).encode('utf-8'))

    # Detailed evaluation: per-image dice coefficient next to case/image ids.
    masks = np.squeeze(masks)
    pred_masks = np.squeeze(pred_masks)
    detail_eval = os.path.join(output_path, 'evaluation_detail_{:s}.csv'.format(contour_type))
    evalArr = dice_coef_each(masks, pred_masks)
    caseArr = [ctr.case for ctr in contours]
    imgArr = [ctr.img_no for ctr in contours]
    resArr = [caseArr, imgArr]
    resArr.append(list(evalArr))
    resArr = np.transpose(resArr)
    np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
if __name__== '__main__':
    contour_type = 'i'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # NOTE(review): these Windows paths are non-raw strings containing
    # backslashes; the escapes happen to pass through literally here, but
    # raw strings would be safer.
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_val_submission_unetres_time_acdc_p5_a4_e30'
    print('\nProcessing val ' + contour_type + ' contours...')
    val_ctrs = list(map_all_contours(VAL_CONTOUR_PATH))
    create_submission(val_ctrs, VAL_IMG_PATH, save_dir, contour_type)
    save_dir = 'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission_unetres_time_acdc_p5_a4_e30'
    print('\nProcessing online '+contour_type+' contours...')
    online_ctrs = list(map_all_contours(ONLINE_CONTOUR_PATH))
    create_submission(online_ctrs, ONLINE_IMG_PATH, save_dir, contour_type)
    print('\nAll done.')
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,942 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /pre_train_sunnybrook_unet_time.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
from keras.callbacks import *
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
from itertools import zip_longest
from scipy.misc import imsave
from helpers import center_crop_3d, center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacTimeSeriesDataGenerator
from unet_model_time import unet_res_model_time
from unet_res_model_Inv import unet_res_model_Inv
from DataIOProc import DataIOProc
# Fix the RNG so shuffles/augmentation are reproducible across runs.
seed = 1234
np.random.seed(seed)

SAX_SERIES = get_SAX_SERIES()

# Local Sunnybrook dataset layout: per split (training / validation / debug)
# a contour dir, a DICOM dir and an overlay output dir; Temp and Augmentation
# hold intermediate masks and augmented samples.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'

TEMP_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                 'Sunnybrook Cardiac MR Database Temp',
                                 'Temp')
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'TrainingOverlayImage')
TRAIN_AUG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database Augmentation')
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database ContoursPart3',
                                  'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                              'Sunnybrook Cardiac MR Database DICOMPart3',
                              'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                  'Sunnybrook Cardiac MR Database OverlayPart3',
                                  'Debug')
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database ContoursPart2',
                                'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database DICOMPart2',
                            'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook Cardiac MR Database OverlayPart2',
                                'ValidationDataOverlay')
class Contour(object):
    """One annotated Sunnybrook image, identified by its contour files.

    The case name and image number are parsed from the endocardium contour
    path, which is always present; the other four paths are stored verbatim.
    """

    def __init__(self, ctr_endo_path, ctr_epi_path, ctr_p1_path, ctr_p2_path, ctr_p3_path):
        self.ctr_endo_path = ctr_endo_path
        self.ctr_epi_path = ctr_epi_path
        self.ctr_p1_path = ctr_p1_path
        self.ctr_p2_path = ctr_p2_path
        self.ctr_p3_path = ctr_p3_path
        # Windows-style layout: ...\<case>\contours-manual\IRCCI-expert\IM-0001-<nnnn>-...
        m = re.search(r'\\([^\\]*)\\contours-manual\\IRCCI-expert\\IM-0001-(\d{4})-.*', ctr_endo_path) #it always has endo
        self.case = m.group(1)
        self.img_no = int(m.group(2))

    def __str__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)

    __repr__ = __str__
def find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation):
    """Stack num_phases temporally-neighbouring DICOM frames around a contoured image.

    Neighbours are sampled at a stride of `phase_dilation` around
    `contour.img_no`. When a candidate frame is missing or belongs to a
    different short-axis slice, the index is wrapped by +/- one (then two)
    cycles of `num_phases_in_cycle` until a frame at the same slice position
    is found.

    Raises:
        AssertionError: if no frame at the centre slice position can be found.
    """
    center_index = contour.img_no
    center_file = 'IM-0001-%04d.dcm' % (contour.img_no)
    center_file_path = os.path.join(data_path, contour.case, 'DICOM', center_file) #modified by C.Cong
    center = dicom.read_file(center_file_path)
    # DICOM tag (0020,1041) Slice Location: used to verify that a neighbour
    # frame belongs to the same slice as the centre frame.
    center_slice_pos = center[0x20, 0x1041]
    center_img = center.pixel_array.astype('int')
    h, w = center_img.shape
    img_arr = np.zeros((num_phases, h, w), dtype="int")
    for i in range (num_phases):
        # Candidate frame index, centred on the contoured image.
        idx = int(center_index + (i - int(num_phases/2))*phase_dilation)
        filename = 'IM-0001-%04d.dcm' % (idx)
        full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        #If
        # Missing file: wrap earlier neighbours forward and later neighbours
        # backward by one full cardiac cycle.
        if os.path.isfile(full_path) == False:
            if idx < center_index:
                idx = idx + num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            else:
                idx = idx - num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        f = dicom.read_file(full_path)
        f_slice_pos = f[0x20, 0x1041]
        # Frame exists but belongs to a different slice: try one cycle forward,
        # then two cycles back, before giving up.
        if(f_slice_pos.value != center_slice_pos.value):
            idx = idx + num_phases_in_cycle
            filename = 'IM-0001-%04d.dcm' % (idx)
            full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            if os.path.isfile(full_path) == True:
                f = dicom.read_file(full_path)
                f_slice_pos = f[0x20, 0x1041]
                if (f_slice_pos.value != center_slice_pos.value):
                    idx = idx - num_phases_in_cycle - num_phases_in_cycle
                    filename = 'IM-0001-%04d.dcm' % (idx)
                    full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
                    if os.path.isfile(full_path) == True:
                        f = dicom.read_file(full_path)
                        f_slice_pos = f[0x20, 0x1041]
                        if (f_slice_pos.value != center_slice_pos.value):
                            raise AssertionError('Cannot find neighbor files for: {:s}'.format(center_file_path))
        img_arr[i] = f.pixel_array.astype('int')
    return img_arr
def read_contour(contour, data_path, num_classes, num_phases, num_phases_in_cycle, phase_dilation):
    """Load the temporal image stack and the binary endocardium mask for one contour.

    Returns (img_arr, classify): img_arr has shape (num_phases, h, w, 1) and
    classify has shape (1, h, w, 1), filled from the manual contour polygon.
    NOTE(review): `num_classes` is accepted but never used here, and this
    definition shadows the `read_contour` imported at the top of the file
    from train_sunnybrook_unetres_mul.
    """
    #filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
    filename = 'IM-0001-%04d.dcm' % (contour.img_no)
    full_path = os.path.join(data_path, contour.case, 'DICOM', filename) #modified by C.Cong
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype('int')
    mask = np.zeros_like(img, dtype="uint8")
    # Rasterise the manual endocardium polygon into a 0/1 mask.
    coords = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    cv2.fillPoly(mask, [coords], 1)
    classify = mask
    img_arr = find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation)
    if img_arr.ndim < 4:
        img_arr = img_arr[..., np.newaxis]  # append channel axis
    if classify.ndim < 4:
        classify = classify[np.newaxis, ..., np.newaxis]  # prepend time, append channel
    return img_arr, classify
def map_all_contours(contour_path):
    """Walk `contour_path` and group the five per-image contour file paths.

    Every endocardium file ('-icontour-') found anchors one group; the
    epi/p1/p2/p3 paths are derived by filename substitution and are NOT
    checked for existence.  Returns a lazy `map` of Contour objects --
    callers that iterate more than once must wrap it in list().
    """
    endo = []
    epi = []
    p1 = []
    p2 = []
    p3 = []
    for dirpath, dirnames, files in os.walk(contour_path):
        for endo_f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt'):
            endo.append(os.path.join(dirpath, endo_f))
            match = re.search(r'IM-0001-(\d{4})-icontour-manual.txt', endo_f)  # endo file always exists
            imgno = match.group(1)
            # Derive sibling contour filenames from the image number.
            epi_f = 'IM-0001-' + imgno + '-ocontour-manual.txt'
            p1_f = 'IM-0001-' + imgno + '-p1-manual.txt'
            p2_f = 'IM-0001-' + imgno + '-p2-manual.txt'
            p3_f = 'IM-0001-' + imgno + '-p3-manual.txt'
            epi.append(os.path.join(dirpath, epi_f))
            p1.append(os.path.join(dirpath, p1_f))
            p2.append(os.path.join(dirpath, p2_f))
            p3.append(os.path.join(dirpath, p3_f))
    print('Number of examples: {:d}'.format(len(endo)))
    contours = map(Contour, endo, epi, p1, p2, p3)
    return contours
def map_endo_contours(contour_path):
    """Collect contour file groups under `contour_path`.

    Fix: this function's body was a byte-for-byte copy of
    `map_all_contours`; it now delegates to it so the traversal logic is
    maintained in one place.  Behaviour is unchanged: returns the same lazy
    `map` of Contour objects (wrap in list() for repeated iteration).
    """
    return map_all_contours(contour_path)
def export_all_contours(contours, data_path, overlay_path, crop_size=100, num_classes=4, num_phases=5, phase_dilation=1):
    """Build (images, masks) arrays for a list of Contour objects.

    images: (N, num_phases, crop, crop, 1); masks: (N, 1, crop, crop, C).
    A binary problem (num_classes == 2) is collapsed to a single channel.
    NOTE(review): `overlay_path` is unused (the draw_contour call is
    commented out); `center_crop_3d` is not visible in this file's imports
    -- confirm where it comes from; this definition also shadows the
    `export_all_contours` imported at the top of the file.
    """
    print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
    if num_classes == 2:
        num_classes = 1
    images = np.zeros((len(contours), num_phases, crop_size, crop_size, 1))
    masks = np.zeros((len(contours), 1, crop_size, crop_size, num_classes))
    for idx, contour in enumerate(contours):
        # 20 is the hard-coded number of phases per cardiac cycle.
        img, mask = read_contour(contour, data_path, num_classes, num_phases, 20, phase_dilation)
        #draw_contour(contour, data_path, overlay_path)
        img = center_crop_3d(img, crop_size=crop_size)
        mask = center_crop_3d(mask, crop_size=crop_size)
        images[idx] = img
        masks[idx] = mask
    return images, masks
# ############### learning-rate scheduler ####################
def lr_scheduler(curr_epoch, curr_iter):
    """Polynomial-decay learning-rate hook, called once per iteration.

    Reads the module-level globals `steps_per_epoch`, `model_s`, `base_lr`
    and `max_iter` that the __main__ block assigns before training.
    """
    iters_done = curr_epoch * steps_per_epoch + curr_iter
    new_rate = lr_poly_decay(model_s, base_lr, iters_done, max_iter, power=0.5)
    print(' - lr: %f' % new_rate)
    return new_rate
if __name__== '__main__':
    # ----- Configuration -----
    contour_type = 'a'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv_drop_acdc.h5'  # pretrained 2-D model weights
    shuffle = False
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin to GPU 0
    crop_size = 128
    num_phases = 5        # temporal frames per sample
    save_path = 'model_logs'
    phase_dilation = 4    # spacing (in frames) between temporal samples
    # NOTE(review): DataIOProc, TEMP_CONTOUR_PATH, TRAIN_CONTOUR_PATH,
    # unet_res_model_Inv, CardiacTimeSeriesDataGenerator, seed, K,
    # TensorBoard, ModelCheckpoint and lr_poly_decay are not imported in the
    # visible header of this file -- confirm they are in scope.
    data_proc = DataIOProc(TEMP_CONTOUR_PATH, 'p5_a4')
    print('Mapping ground truth contours to images in train...')
    train_ctrs = list(map_all_contours(TRAIN_CONTOUR_PATH))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(train_ctrs)
    print('Done mapping training set')
    num_classes = 2
    # Hold out 10% of the training contours as a dev split.
    split = int(0.1*len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding Train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_IMG_PATH,
                                                TRAIN_OVERLAY_PATH,
                                                crop_size=crop_size,
                                                num_classes=num_classes,
                                                num_phases=num_phases,
                                                phase_dilation=phase_dilation)
    print('\nBuilding Dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_IMG_PATH,
                                            TRAIN_OVERLAY_PATH,
                                            crop_size=crop_size,
                                            num_classes=num_classes,
                                            num_phases=num_phases,
                                            phase_dilation=phase_dilation)
    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    # 2-D per-frame model used to generate masks for the 2nd-stage training.
    model_s = unet_res_model_Inv(input_shape_s, num_classes, nb_filters=8, transfer=True, contour_type=contour_type, weights=weight_s)
    # ----- Data augmentation (shared settings for images and masks) -----
    kwargs = dict(
        rotation_range=90,
        zoom_range=0.1,
        width_shift_range=0.05,
        height_shift_range=0.05,
        horizontal_flip=True,
        vertical_flip=True,
        data_format="channels_last",
    )
    image_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    mask_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")   # NOTE(review): unused below
    aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")   # NOTE(review): unused below
    # Same seed for both generators keeps image/mask pairs aligned.
    img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=8, toDir=None)
    mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=8, toDir=None)
    epochs = 200
    mini_batch_size = 4
    s, p, h, w, d = img_train.shape
    s_val, p_val, h_val, w_val, d_val = img_dev.shape
    max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
    steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
    curr_iter = 0
    base_lr = K.eval(model_s.optimizer.lr)
    lrate = lr_poly_decay(model_s, base_lr, curr_iter, max_iter, power=0.5)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unet_time'), histogram_freq=1, write_graph=False,
                                  write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'check_point_model.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False,
                                 period=2)  # save every 2 epochs
    callbacks.append(checkpoint)
    print('\nPredict for 2nd training ...')
    #img_train_s = img_train[:,4,...]
    #mask_train_s = mask_train[:,0,...]
    #result = model_s.evaluate(img_train_s, mask_train_s)
    #result = np.round(result, decimals=10)
    #print('\nDev set result {:s}:\n{:s}'.format(str(model_s.metrics_names), str(result)))
    if not os.path.exists(TEMP_CONTOUR_PATH):
        os.makedirs(TEMP_CONTOUR_PATH)
    # Create training dataset: run every frame through the 2-D model and
    # persist the predicted masks as input for the 2nd-stage training.
    temp_image_t = np.reshape(img_train, (s*p, h, w, d))
    temp_mask_t = model_s.predict(temp_image_t, batch_size=32, verbose=1)
    temp_mask_t = np.reshape(temp_mask_t, (s, p, h, w, d))
    data_proc.save_image_4d(temp_mask_t, 'training')
    data_proc.save_image_4d(mask_train, 'training_mask')
    data_proc.save_data_4d(temp_mask_t.astype('float32'), 'training_data.bin')
    data_proc.save_data_4d(mask_train.astype('float32'), 'training_mask.bin')
    # train_mask_p = np.zeros((s, p, w, h, 1), dtype=K.floatx())
    # for idx_s in range(s):
    #     img_train_p = img_train[idx_s,...]
    #     train_mask_p[idx_s] = model_s.predict(img_train_p)
    #
    #     for idx_p in range(p):
    #         mask = train_mask_p[idx_s, idx_p, ...]
    #         img = img_train[idx_s, idx_p, ...]
    #         img = np.squeeze(img*mask)
    #         img_name = '{:d}-{:d}'.format(idx_s, idx_p)
    #         imsave(os.path.join(TEMP_CONTOUR_PATH, img_name + ".png"), img)
    # Create validation dataset
    print('\nTotal sample is {:d} for 2nd training.'.format(s))
    print('\nPredict for 2nd evaluating ...')
    # NOTE(review): width/height are swapped here ((w, h) vs the (h, w) used
    # for the training reshape above); harmless only while the crop is square.
    temp_image_dev = np.reshape(img_dev, (s_val*p_val, w_val, h_val, d_val))
    temp_mask_dev = model_s.predict(temp_image_dev, batch_size=16, verbose=1)
    temp_mask_dev = np.reshape(temp_mask_dev, (s_val, p_val, w_val, h_val, d_val))
    data_proc.save_image_4d(temp_mask_dev, 'evaluation')
    data_proc.save_image_4d(mask_dev, 'evaluation_mask')
    data_proc.save_data_4d(temp_mask_dev.astype('float32'), 'eval_data.bin')
    data_proc.save_data_4d(mask_dev.astype('float32'), 'eval_mask.bin')
    #print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # val_mask_p = np.zeros((s_val, p_val, w_val, h_val, 1), dtype=K.floatx())
    # for idx_s in range(s_val):
    #     img_val_p = img_dev[idx_s,...]
    #     val_mask_p[idx_s] = model_s.predict(img_val_p)
    # dev_generator = (temp_mask_dev, mask_dev)
    # print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # model_t = unet_res_model_time(input_shape, num_classes, nb_filters=64, n_phases=num_phases, dilation=phase_dilation, transfer=True, weights=None)
    # model_t.fit(temp_mask_t,
    #             mask_train,
    #             epochs=epochs,
    #             batch_size=1,
    #             validation_data=dev_generator,
    #             callbacks=callbacks,
    #             class_weight=None
    #             )
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,943 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_lstm_multi_model.py | from __future__ import print_function
import numpy as np
from keras import backend as K
import tensorflow as tf
from keras import optimizers
from keras.models import Model
from keras.layers import Input, merge, Conv2D, MaxPooling3D, UpSampling3D, Dropout
from keras.optimizers import Adam
from keras.layers.merge import concatenate
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each
from layer_common import mvn3d, crop
from keras.layers import Dropout, Lambda
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
def dice_coef_endo(y_true, y_pred, smooth=0.0):
    '''Average soft Dice coefficient for the endocardium class per batch.

    y_true / y_pred: 5-D tensors whose channel 2 holds the endocardium map;
    the singleton axis at index 1 is sliced away before the spatial sums.
    NOTE(review): this redefinition shadows the `dice_coef_endo` imported
    from metrics_common at the top of the file.
    '''
    axes = (1, 2)  # spatial (H, W) axes of the sliced (batch, H, W) maps
    y_true_endo = y_true[:,0, ..., 2]
    y_pred_endo = y_pred[:,0, ..., 2]
    intersection = K.sum(y_true_endo * y_pred_endo, axis=axes)
    summation = K.sum(y_true_endo * y_true_endo, axis=axes) + K.sum(y_pred_endo * y_pred_endo, axis=axes)
    # Soft Dice 2|A.B| / (|A|^2 + |B|^2), averaged over the batch.
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def dice_coef_myo(y_true, y_pred, smooth=0.0):
    '''Average soft Dice coefficient for the myocardium class (channel 1) per batch.

    y_true / y_pred: 5-D tensors; the singleton axis at index 1 is sliced
    away and Dice is computed over the spatial axes, then averaged.
    Fix: removed the unused `summation_true` intermediate.
    NOTE(review): shadows the `dice_coef_myo` imported from metrics_common.
    '''
    axes = (1, 2)  # spatial (H, W) axes of the sliced (batch, H, W) maps
    y_true_myo = y_true[:,0, ..., 1]
    y_pred_myo = y_pred[:,0, ..., 1]
    intersection = K.sum(y_true_myo * y_pred_myo, axis=axes)
    summation = K.sum(y_true_myo * y_true_myo, axis=axes) + K.sum(y_pred_myo * y_pred_myo, axis=axes)
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def dice_coef_endo_each(y_true, y_pred, smooth=0.0):
    '''Per-sample (hard) Dice for the endocardium channel (index 2).

    Predictions are binarised at 0.5; returns one Dice value per batch
    element as a numpy array (no averaging).
    '''
    spatial_axes = (1, 2)
    gt = y_true[:, 0, ..., 2].astype('float32')
    pred = np.where(y_pred[:, 0, ..., 2] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pred, axis=spatial_axes)
    denom = np.sum(gt * gt, axis=spatial_axes) + np.sum(pred * pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (denom + smooth)
def dice_coef_myo_each(y_true, y_pred, smooth=0.0):
    '''Per-sample (hard) Dice for the myocardium channel (index 1).

    Predictions are binarised at 0.5; returns one Dice value per batch
    element as a numpy array (no averaging).
    '''
    spatial_axes = (1, 2)
    gt = y_true[:, 0, ..., 1].astype('float32')
    pred = np.where(y_pred[:, 0, ..., 1] > 0.5, 1.0, 0.0).astype('float32')
    overlap = np.sum(gt * pred, axis=spatial_axes)
    denom = np.sum(gt * gt, axis=spatial_axes) + np.sum(pred * pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (denom + smooth)
def dice_coef_epi(y_true, y_pred, smooth=0.0):
    '''Average soft Dice coefficient for the epicardial region per batch.

    The epicardial region is the logical union (OR) of the myocardium
    (channel 1) and endocardium (channel 2) maps.  As a side effect the
    intermediate maps are attached as TensorBoard image summaries.
    '''
    axes = (1, 2)  # spatial (H, W) axes of the sliced (batch, H, W) maps
    y_true_myo = y_true[:, 0, ..., 1]
    y_pred_myo = y_pred[:, 0, ..., 1]
    y_true_endo = y_true[:, 0, ..., 2]
    y_pred_endo = y_pred[:, 0, ..., 2]
    # Union of myocardium and cavity gives the full epicardial area.
    y_true_epi = tf.cast(tf.logical_or(tf.cast(y_true_myo, tf.bool), tf.cast(y_true_endo, tf.bool)), tf.float32)
    y_pred_epi = tf.cast(tf.logical_or(tf.cast(y_pred_myo, tf.bool), tf.cast(y_pred_endo, tf.bool)), tf.float32)
    # Debug image summaries for TensorBoard inspection.
    tf.summary.image("y_true_myo", y_true_myo[...,None], max_outputs=1)
    tf.summary.image("y_true_endo", y_true_endo[...,None], max_outputs=1)
    tf.summary.image("y_pred_myo", y_pred_myo[...,None], max_outputs=1)
    tf.summary.image("y_pred_endo", y_pred_endo[..., None], max_outputs=1)
    tf.summary.image("y_pred_epi", y_pred_epi[...,None], max_outputs=1)
    tf.summary.image("y_true_epi", y_true_epi[...,None], max_outputs=1)
    intersection = K.sum(y_true_epi * y_pred_epi, axis=axes)
    summation = K.sum(y_true_epi * y_true_epi, axis=axes) + K.sum(y_pred_epi * y_pred_epi, axis=axes)
    tf.summary.merge_all()  # NOTE(review): return value discarded -- confirm intent
    return K.mean((2.0 * intersection + smooth) / (summation + smooth), axis=0)
def unet_lstm_multi_model(input_shape, num_classes, transfer=True, contour_type='a', weights=None):
    """Build a U-Net-shaped ConvLSTM2D model over a temporal image stack.

    input_shape: (time, H, W, channels).  Encoder: three ConvLSTM2D stages
    with spatial 2x2 max-pooling (time axis untouched) and mean-variance
    normalisation between stages; decoder: mirrored upsampling with skip
    concatenations.  The final Conv3D with kernel (5, 1, 1) and 'valid'
    padding collapses the 5-frame time axis to 1 and emits per-class
    softmax maps.  Compiled (adadelta, categorical cross-entropy, dice
    metrics) only when contour_type == 'a'; otherwise the model is returned
    uncompiled.  `transfer` is currently unused; `weights`, when given, is
    a path of weights to load.
    """
    loss = 'categorical_crossentropy'
    activation = 'softmax'
    # Shared settings for every ConvLSTM2D layer.
    kwargs = dict(
        kernel_size=(3, 3),
        strides=1,
        padding='same',
        use_bias=True,
        return_sequences = True,
        trainable=True,
    )
    data = Input(shape=input_shape, dtype='float', name='data')
    mvn1 = Lambda(mvn3d, name='mvn1')(data)
    # ----- Encoder -----
    conv1 = ConvLSTM2D(filters=32, **kwargs)(mvn1)
    #conv1 = ConvLSTM2D(filters=32, **kwargs)(conv1)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2))(conv1)
    #pool1 = Dropout(rate=0.5)(pool1)
    pool1 = Lambda(mvn3d)(pool1)
    conv2 = ConvLSTM2D(filters=64, **kwargs)(pool1)
    #conv2 = ConvLSTM2D(filters=64, **kwargs)(conv2)
    pool2 = MaxPooling3D(pool_size=(1, 2, 2))(conv2)
    #pool2 = Dropout(rate=0.3)(pool2)
    pool2 = Lambda(mvn3d)(pool2)
    conv3 = ConvLSTM2D(filters=128, **kwargs)(pool2)
    #conv3 = ConvLSTM2D(filters=128, **kwargs)(conv3)
    pool3 = MaxPooling3D(pool_size=(1, 2, 2))(conv3)
    #pool3 = Dropout(rate=0.5)(pool3)
    pool3 = Lambda(mvn3d)(pool3)
    conv4 = ConvLSTM2D(filters=256, **kwargs)(pool3)
    #conv4 = ConvLSTM2D(filters=256, **kwargs)(conv4)
    '''
    pool4 = MaxPooling3D(pool_size=(1, 2, 2))(conv4)
    #pool4 = Dropout(rate=0.3)(pool4)
    pool4 = Lambda(mvn3d)(pool4)
    conv5 = ConvLSTM2D(filters=512, **kwargs)(pool4)
    conv5 = ConvLSTM2D(filters=512, **kwargs)(conv5)
    up6 = concatenate([ConvLSTM2D(filters=256, **kwargs)(UpSampling3D(size=(1, 2, 2))(conv5)), conv4], axis=4)
    conv6 = ConvLSTM2D(filters=256, **kwargs)(up6)
    conv6 = ConvLSTM2D(filters=256, **kwargs)(conv6)
    '''
    # ----- Decoder with skip connections (channel axis = 4) -----
    up7 = concatenate([ConvLSTM2D(filters=128, **kwargs)(UpSampling3D(size=(1, 2, 2))(conv4)), conv3], axis=4)
    conv7 = ConvLSTM2D(filters=128, **kwargs)(up7)
    #conv7 = ConvLSTM2D(filters=128, **kwargs)(conv7)
    up8 = concatenate([ConvLSTM2D(filters=64, **kwargs)(UpSampling3D(size=(1, 2, 2))(conv7)), conv2], axis=4)
    conv8 = ConvLSTM2D(filters=64, **kwargs)(up8)
    #conv8 = ConvLSTM2D(filters=64, **kwargs)(conv8)
    up9 = concatenate([ConvLSTM2D(filters=32, **kwargs)(UpSampling3D(size=(1, 2, 2))(conv8)), conv1], axis=4)
    conv9 = ConvLSTM2D(filters=32, **kwargs)(up9)
    #conv9 = ConvLSTM2D(filters=32, **kwargs)(conv9)
    # Collapse the 5-frame time axis and predict per-class probabilities.
    conv10 = Conv3D(filters=num_classes, kernel_size=(5, 1, 1),
                    strides=1, activation=activation, padding='valid',
                    kernel_initializer='glorot_uniform', use_bias=True, name="prediction")(conv9)
    model = Model(inputs=data, outputs=conv10)
    if weights is not None:
        model.load_weights(weights)
    if contour_type == 'a':
        model.compile(optimizer='adadelta', loss=loss, metrics=[dice_coef_endo, dice_coef_myo, dice_coef_epi])
    return model
if __name__ == '__main__':
    # Smoke test: build the (time=5, 128x128, 1-channel) 3-class model,
    # write its architecture diagram to a PNG, and print the layer summary.
    model = unet_lstm_multi_model((5, 128, 128, 1), 3, transfer=True, weights=None)
    plot_model(model, show_shapes=True, to_file='unet_lstm_multi_model.png')
    model.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,944 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /test1.py | class Num123(object):
    def __init__(self, i):
        # Wrap a single value so that map() yields distinct objects.
        self.i = i
# Wrap each sample value in a Num123 instance and show the resulting objects.
# Fix: renamed the list from `input` to `values` -- `input` shadowed the builtin.
values = [1, 2, 3, 4]
nums = map(Num123, values)  # Num123 is already a 1-arg callable; the lambda wrapper was redundant
print(list(nums))
def f(x):
    """Return the square of x."""
    squared = x * x
    return squared
# Square the numbers 1..9 and print the resulting list.
f_map = [f(v) for v in range(1, 10)]
print(f_map)
60,945 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /train_acdc_unetres_II.py | #!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
import numpy as np
import tensorflow as tf
from keras.callbacks import *
from keras import backend as K
from itertools import zip_longest
from metrics_acdc import load_nii
from helpers import center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator
from unet_res_model_Inv import unet_res_model_Inv
# Fixed RNG seed so shuffles/augmentation are reproducible across runs.
seed = 1234
np.random.seed(seed)
# NOTE(review): '\c' and '\A' are not escape sequences so this literal works
# as-is, but a raw string (r'D:\cardiac_data\ACDC') would be safer.
ACDC_ROOT_PATH = 'D:\cardiac_data\ACDC'
TRAIN_AUG_PATH = os.path.join(ACDC_ROOT_PATH,
                        'Augmentation')
TRAIN_PATH = os.path.join(ACDC_ROOT_PATH, 'training')
DEBUG_PATH = os.path.join(ACDC_ROOT_PATH, 'debug')
TRAIN_OVERLAY_PATH = os.path.join(ACDC_ROOT_PATH, 'overlay')
class VolumeCtr(object):
    """One ACDC ground-truth volume: its path, patient/frame ids and slice count."""
    def __init__(self, ctr_path):
        self.ctr_path = ctr_path
        # Parse patient number (3 digits) and frame number (2 digits) from the
        # filename.  NOTE(review): the trailing '*' makes the frame group
        # repeatable/optional -- works for standard names but worth tightening.
        match = re.search(r'patient(\d{03})_frame(\d{02})*', ctr_path)
        self.patient_no = match.group(1)
        self.img_no = match.group(2)
        # Load the volume once just to record its number of short-axis slices
        # (I/O in the constructor -- each VolumeCtr creation reads the file).
        gt, _, header = load_nii(ctr_path)
        self.total_number = gt.shape[2]
# ###############learning rate scheduler####################
def lr_scheduler(curr_epoch, curr_iter):
    """Polynomial learning-rate decay hook (called per iteration).

    Depends on the module-level globals `steps_per_epoch`, `model`,
    `base_lr` and `max_iter` assigned in the __main__ block below.
    """
    total_iter = curr_epoch*steps_per_epoch + curr_iter
    lrate = lr_poly_decay(model, base_lr, total_iter, max_iter, power=0.5)
    print(' - lr: %f' % lrate)
    return lrate
def read_contour(contour, data_path, return_mask=True, type="i"):
    """Load an ACDC image volume and a binarised ground-truth volume.

    `type` selects the mask: "i" -> label 3 only, "o" -> labels >= 2,
    "r" -> label 1 only, "a" -> raw labels unchanged.  (These presumably
    correspond to LV cavity / epicardium / RV per the ACDC label convention
    -- confirm.)  Returns (volume, mask) as int / uint8 arrays; mask is
    None when return_mask is False.  Note: `type` shadows the builtin.
    """
    img_path = os.path.join(data_path, 'patient{:s}'.format(contour.patient_no))
    image_name = 'patient{:s}_frame{:s}.nii.gz'.format(contour.patient_no, contour.img_no)
    gt_name = 'patient{:s}_frame{:s}_gt.nii.gz'.format(contour.patient_no, contour.img_no)
    full_image_path = os.path.join(img_path, image_name)
    full_gt_path = os.path.join(img_path, gt_name)
    volume, _, header = load_nii(full_image_path)
    volume_gt, _, header = load_nii(full_gt_path)
    volume = volume.astype('int')
    if type == "i":
        volume_gt = np.where(volume_gt == 3, 1, 0).astype('uint8')
    elif type == "o":
        volume_gt = np.where(volume_gt >= 2, 1, 0).astype('uint8')
    elif type == "r":
        volume_gt = np.where(volume_gt == 1, 1, 0).astype('uint8')
    elif type == "a":
        volume_gt = volume_gt.astype('uint8')
    if not return_mask:
        return volume, None
    return volume, volume_gt
def draw_contour(contour, data_path, out_path, type="i", coords = []):
    """Render per-slice overlay PNGs of the ground-truth contours.

    For each short-axis slice, extracts the requested contour(s) from the
    label volume with cv2.findContours, keeps only the longest contour when
    several are detected, and saves a matplotlib overlay (red=endo,
    blue=epi, green=RV for type "a").
    NOTE(review): the `out_path` parameter is never used -- overlays are
    written next to the input images (img_path).  `coords=[]` is a mutable
    default (only reassigned, never mutated, so currently harmless), and
    `type` shadows the builtin.
    """
    img_path = os.path.join(data_path, 'patient{:s}'.format(contour.patient_no))
    image_name = 'patient{:s}_frame{:s}.nii.gz'.format(contour.patient_no, contour.img_no)
    gt_name = 'patient{:s}_frame{:s}_gt.nii.gz'.format(contour.patient_no, contour.img_no)
    full_image_path = os.path.join(img_path, image_name)
    full_gt_path = os.path.join(img_path, gt_name)
    volume, _, header = load_nii(full_image_path)
    volume_gt, _, header = load_nii(full_gt_path)
    img_size = volume.shape
    for i in range(0, img_size[2]):
        overlay_name = 'patient{:s}_frame{:s}_{:2d}_{:s}.png'.format(contour.patient_no, contour.img_no, i, type)
        full_overlay_path = os.path.join(img_path, overlay_name)
        if type != "a":
            # Single-structure overlay (endo / epi / RV).
            img = volume[:, :, i]
            mask = volume_gt[:, :, i]
            img = np.swapaxes(img, 0, 1)
            mask = np.swapaxes(mask, 0, 1)
            if type == "i":
                mask = np.where(mask == 3, 255, 0).astype('uint8')
            elif type == "o":
                mask = np.where(mask >= 2, 255, 0).astype('uint8')
            elif type == "r":
                mask = np.where(mask == 1, 255, 0).astype('uint8')
            img = img.astype('int')
            tmp2, coords, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords) > 1:
                # Keep only the longest contour.
                print('\nMultiple detections: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords:
                    lengths.append(len(coord))
                coords = [coords[np.argmax(lengths)]]
            coords = np.squeeze(coords)
            if coords.ndim == 1:
                x, y = coords
            else:
                x, y = zip(*coords)
            plt.cla()
            pylab.imshow(img, cmap=pylab.cm.bone)
            if type == "i":
                plt.plot(x, y, 'r.')
            elif type == "o":
                plt.plot(x, y, 'b.')
            elif type == "r":
                plt.plot(x, y, 'g.')
        elif type == "a":
            # All three structures in one overlay.
            img = volume[:, :, i]
            img = np.swapaxes(img, 0, 1)
            mask_i = volume_gt[:, :, i]
            mask_o = volume_gt[:, :, i]
            mask_r = volume_gt[:, :, i]
            mask_i = np.swapaxes(mask_i, 0, 1)
            mask_o = np.swapaxes(mask_o, 0, 1)
            mask_r = np.swapaxes(mask_r, 0, 1)
            mask_i = np.where(mask_i == 3, 255, 0).astype('uint8')
            mask_o = np.where(mask_o >= 2, 255, 0).astype('uint8')
            mask_r = np.where(mask_r == 1, 255, 0).astype('uint8')
            img = img.astype('int')
            tmp2, coords_i, hierarchy = cv2.findContours(mask_i.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            tmp2, coords_o, hierarchy = cv2.findContours(mask_o.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            tmp2, coords_r, hierarchy = cv2.findContours(mask_r.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if not coords_i:
                print('\nNo detection endo: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_i = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_i) > 1:
                print('\nMultiple detections endo: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_i:
                    lengths.append(len(coord))
                coords_i = [coords_i[np.argmax(lengths)]]
            coords_i = np.squeeze(coords_i)
            if not coords_o:
                print('\nNo detection epi: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_o = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_o) > 1:
                print('\nMultiple detections epi: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_o:
                    lengths.append(len(coord))
                coords_o = [coords_o[np.argmax(lengths)]]
            coords_o = np.squeeze(coords_o)
            if not coords_r:
                print('\nNo detection right ventricle: {:s}, {:2d}'.format(contour.ctr_path, i))
                coords_r = np.ones((1, 1, 1, 2), dtype='int')
            if len(coords_r) > 1:
                print('\nMultiple detections right ventricle: {:s}, {:2d}'.format(contour.ctr_path, i))
                lengths = []
                for coord in coords_r:
                    lengths.append(len(coord))
                coords_r = [coords_r[np.argmax(lengths)]]
            coords_r = np.squeeze(coords_r)
            if coords_i.ndim == 1:
                x, y = coords_i
            else:
                x, y = zip(*coords_i)
            plt.cla()
            pylab.imshow(img, cmap=pylab.cm.bone)
            plt.plot(x, y, 'r.')
            if coords_o.ndim == 1:
                x, y = coords_o
            else:
                x, y = zip(*coords_o)
            plt.plot(x, y, 'b.')
            if coords_r.ndim == 1:
                x, y = coords_r
            else:
                x, y = zip(*coords_r)
            plt.plot(x, y, 'g.')
        # Trim a 25-pixel border from the view and save the overlay.
        plt.xlim(25, img.shape[1]-25)
        plt.ylim(25, img.shape[0]-25)
        pylab.savefig(full_overlay_path,bbox_inches='tight',dpi=200)
        #pylab.show()
    return
def map_all_contours(data_path, contour_type, shuffle=True):
    """Find every ACDC ground-truth file ('patient*_frame*_gt.*') under data_path.

    Optionally shuffles the path list, then wraps each path in a VolumeCtr.
    Returns a lazy `map` object -- wrap in list() to iterate more than once.
    (`contour_type` is accepted but not used here.)
    """
    contours = [os.path.join(dirpath, f)
                for dirpath, dirnames, files in os.walk(data_path)
                for f in fnmatch.filter(files,
                                        'patient*'+ '_frame*_gt.*')]
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(contours)
    print('Number of examples: {:d}'.format(len(contours)))
    contours = map(VolumeCtr, contours)
    return contours
def export_all_contours(contours, data_path, overlay_path, crop_size, contour_type):
    """Convert VolumeCtr entries into (images, masks) training arrays.

    Every slice of every volume becomes one (crop, crop, 1) image; masks are
    single-channel for the i/o/r contour types, or one-hot encoded with
    `num_classes` channels for type "a".
    NOTE(review): relies on the module-level global `num_classes` (set in
    the __main__ block) instead of taking it as a parameter; `overlay_path`
    is unused (draw_contour call commented out); assumes `contours` is a
    re-iterable sequence (a list), not the lazy map from map_all_contours.
    """
    print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
    # First pass: count total slices so the output arrays can be pre-sized.
    total_number = 0
    for volume_ctr in contours:
        total_number += volume_ctr.total_number
    images = np.zeros((total_number, crop_size, crop_size, 1))
    masks = np.zeros((total_number, crop_size, crop_size, num_classes))
    idx = 0
    for contour in contours:
        vol, vol_mask = read_contour(contour, data_path, return_mask=True, type=contour_type)
        #draw_contour(contour, data_path, overlay_path, type=contour_type)
        for i in range(0, vol.shape[2]):
            img = vol[:,:,i]
            mask = vol_mask[:,:,i]
            img = np.swapaxes(img, 0, 1)   # transpose to (row, col) orientation
            mask = np.swapaxes(mask, 0, 1)
            if img.ndim < 3:
                img = img[..., np.newaxis]
            if mask.ndim < 3:
                if contour_type != "a":
                    mask = mask[..., np.newaxis]
                elif contour_type == "a":
                    # One-hot encode: channel c mirrors gt label c (0 = background).
                    h, w = mask.shape
                    classify = np.zeros((h, w, num_classes), dtype="uint8")
                    classify[..., 1] = np.where(mask == 1, 1, 0)
                    classify[..., 2] = np.where(mask == 2, 1, 0)
                    classify[..., 3] = np.where(mask == 3, 1, 0)
                    classify[..., 0] = np.where(mask == 0, 1, 0)
                    mask = classify
            img = center_crop(img, crop_size=crop_size)
            mask = center_crop(mask, crop_size=crop_size)
            images[idx] = img
            masks[idx] = mask
            idx = idx + 1
    return images, masks
if __name__== '__main__':
    # ----- Configuration -----
    crop_size = 128
    # weight_path = 'C:\\Users\\congchao\\PycharmProjects\\cardiac-segmentation-master\\model_logs\\acdc_weights.hdf5'
    contour_type = 'i'     # endocardium only
    weight_path = None     # train from scratch
    save_path = 'model_logs'
    num_classes = 2        # also read as a global by export_all_contours
    print('Mapping ground truth ' + contour_type + ' contours to images in train...')
    train_ctrs = list(map_all_contours(DEBUG_PATH, contour_type, shuffle=False))
    print('Done mapping training set')
    # Hold out 10% of the contours as a dev split.
    split = int(0.1 * len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding train dataset ...')
    # NOTE(review): `global` statements at module level are no-ops.
    global img_train
    global mask_train
    img_train, mask_train = export_all_contours(train_ctrs,
                                                DEBUG_PATH,
                                                TRAIN_OVERLAY_PATH,
                                                crop_size=crop_size,
                                                contour_type=contour_type)
    print('\nBuilding dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            DEBUG_PATH,
                                            TRAIN_OVERLAY_PATH,
                                            crop_size=crop_size,
                                            contour_type=contour_type)
    input_shape = (crop_size, crop_size, 1)
    model = unet_res_model_Inv(input_shape, num_classes, nb_filters=16, transfer=True, contour_type=contour_type, weights=weight_path)
    # ----- Data augmentation (shared settings for images and masks) -----
    kwargs = dict(
        rotation_range=180,
        zoom_range=0.2,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        vertical_flip=True,
        data_format="channels_last",
    )
    image_datagen = CardiacImageDataGenerator(**kwargs)
    mask_datagen = CardiacImageDataGenerator(**kwargs)
    aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")   # NOTE(review): unused below
    aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")   # NOTE(review): unused below
    # Same seed for both generators keeps image/mask pairs aligned.
    img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=16, toDir=None)
    mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=16, toDir=None)
    epochs = 200
    mini_batch_size = 2
    image_generator = image_datagen.flow(img_train, shuffle=False,
                                         batch_size=mini_batch_size, seed=seed)
    mask_generator = mask_datagen.flow(mask_train, shuffle=False,
                                       batch_size=mini_batch_size, seed=seed)
    train_generator = zip_longest(image_generator, mask_generator)
    dev_generator = (img_dev, mask_dev)
    max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
    steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
    curr_iter = 0
    base_lr = K.eval(model.optimizer.lr)
    lrate = lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_acdc_unetres_inv_drop'), histogram_freq=10, write_graph=False,
                                  write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'temp_weights.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False)  # .{epoch:d}
    callbacks.append(checkpoint)
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=dev_generator,
                        validation_steps=img_dev.__len__(),
                        epochs=epochs,
                        callbacks=callbacks,
                        workers=1,
                        class_weight=None
                        )
    save_file = '_'.join(['sunnybrook', contour_type, 'acdc_unetres_inv_drop']) + '.h5'
    save_file = os.path.join(save_path, save_file)
    model.save_weights(save_file)
    # for e in range(epochs):
    #     print('\nMain Epoch {:d}\n'.format(e + 1))
    #     print('\nLearning rate: {:6f}\n'.format(lrate))
    #     train_result = []
    #     for iteration in range(int(len(img_train) * augment_scale / mini_batch_size)):
    #         img, mask = next(train_generator)
    #         res = model.train_on_batch(img, mask)
    #         curr_iter += 1
    #         lrate = lr_poly_decay(model, base_lr, curr_iter,
    #                               max_iter, power=0.5)
    #         train_result.append(res)
    #     train_result = np.asarray(train_result)
    #     train_result = np.mean(train_result, axis=0).round(decimals=10)
    #     print('Train result {:s}:\n{:s}'.format(str(model.metrics_names), str(train_result)))
    #     print('\nEvaluating dev set ...')
    #     result = model.evaluate(img_dev, mask_dev, batch_size=32)
    #
    #     result = np.round(result, decimals=10)
    #     print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
    #     save_file = '_'.join(['sunnybrook', contour_type,
    #                           'epoch', str(e + 1)]) + '.h5'
    #     if not os.path.exists('model_logs'):
    #         os.makedirs('model_logs')
    #     save_path = os.path.join(save_path, save_file)
    #     print('\nSaving model weights to {:s}'.format(save_path))
    #     model.save_weights(save_path)
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,946 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /tfmodel/__init__.py | from .inference import inference
from .training import training
from .inputs import placeholder_inputs
from .helpers import add_output_images, save_output_images, save_output_eval
from .evaluation import evaluation, loss_calc, loss_dice, eval_dice, eval_dice_array
from .GetData import GetData | {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,947 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_model_time.py | from __future__ import print_function
import numpy as np
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Activation
from keras.optimizers import Adam
from keras.layers.merge import concatenate, add
from keras.utils.vis_utils import plot_model
from layer_common import mvn, crop
from keras.layers import Dropout, Lambda, Conv3D, merge
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import tensorflow as tf
def dice_coef(y_true, y_pred, smooth=0.0):
    """Average dice coefficient for the endocardium class per batch.

    Overlap and self-products are summed over the spatial axes (2, 3);
    the resulting per-sample scores are averaged over the batch axis.
    With smooth=0.0 an all-empty pair yields 0/0 (NaN) — callers pass a
    positive `smooth` to avoid that.
    """
    spatial_axes = (2, 3)
    overlap = K.sum(y_true * y_pred, axis=spatial_axes)
    self_sums = K.sum(y_true * y_true, axis=spatial_axes) + K.sum(y_pred * y_pred, axis=spatial_axes)
    per_sample = (2.0 * overlap + smooth) / (self_sums + smooth)
    return K.mean(per_sample, axis=0)
def dice_coef_loss(y_true, y_pred):
    """Dice loss: one minus the unsmoothed mean dice coefficient."""
    return 1.0 - dice_coef(y_true, y_pred)
def unet_res_model_time(input_shape, num_classes, nb_filters = 32, n_phases=9, dilation=1, transfer=True, contour_type='i', weights=None):
    """Build and compile a stack of Conv3D layers over the cardiac-phase axis.

    # Arguments
        input_shape: shape of one sample, e.g. (n_phases, h, w, channels).
        num_classes: number of output classes; 2 is collapsed to a single
            sigmoid channel (binary segmentation).
        nb_filters: number of filters per Conv3D stage.
        n_phases: temporal kernel extent of every Conv3D.
        dilation: spatial dilation rate of every Conv3D.
        transfer, contour_type: kept for interface compatibility; every
            combination in the original selection chain resolved to the
            same dice loss, so they no longer influence the model.
        weights: optional path to pre-trained weights to load.

    # Returns
        A compiled keras `Model` (Adam, lr=1e-6, dice loss, dice metric).
    """
    if num_classes == 2:
        # Binary segmentation: one sigmoid output channel instead of two.
        num_classes = 1
        activation = 'sigmoid'
    else:
        activation = 'softmax'
    # The original transfer/contour_type if/elif chain assigned
    # dice_coef_loss on every path; a single assignment is equivalent.
    loss = dice_coef_loss

    data = Input(shape=input_shape, dtype='float', name='data')
    # Ten identical Conv3D + ReLU stages (replaces the copy-pasted blocks).
    x = data
    for _ in range(10):
        x = Conv3D(nb_filters, kernel_size=(n_phases, 3, 3), dilation_rate=dilation, padding='same')(x)
        x = Activation('relu')(x)
    # 1x1 spatial projection to class scores, then the output nonlinearity.
    final_convolution = Conv3D(num_classes, kernel_size=(n_phases, 1, 1))(x)
    act = Activation(activation)(final_convolution)

    model = Model(inputs=data, outputs=act)
    if weights is not None:
        model.load_weights(weights)
    # Use the selected loss explicitly (the original computed `loss` but
    # then hard-coded dice_coef_loss in compile; the value is identical).
    model.compile(optimizer=Adam(lr=1e-6), loss=loss, metrics=[dice_coef])
    return model
if __name__ == '__main__':
    # Smoke test: build the 9-phase binary model, render the architecture
    # diagram to a PNG, and print the layer-by-layer summary.
    model = unet_res_model_time((9, 128, 128, 1), 2, nb_filters=64, n_phases=9, dilation=1, transfer=True, weights=None)
    plot_model(model, show_shapes=True, to_file='unet_res_model_time.png')
    model.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,948 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /CardiacImageDataGenerator.py | from keras.preprocessing.image import ImageDataGenerator, transform_matrix_offset_center, apply_transform, random_channel_shift, flip_axis
from keras.preprocessing.image import Iterator as Iterator
from keras import backend as K
import numpy as np
import warnings
import os
from scipy import linalg
import pylab
import matplotlib.pyplot as plt
class ImageArrayIterator(Iterator):
    """Iterator yielding data from image array.
    # Arguments
        x: Numpy array of input data.
        y: Numpy array of targets data.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
    """
    def __init__(self, x, y, image_data_generator,
                 batch_size=32, shuffle=False, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png'):
        if y is not None and len(x) != len(y):
            raise ValueError('X (images tensor) and y (labels) '
                             'should have the same length. '
                             'Found: X.shape = %s, y.shape = %s' %
                             (np.asarray(x).shape, np.asarray(y).shape))
        if data_format is None:
            data_format = K.image_data_format()
        self.x = np.asarray(x, dtype=K.floatx())
        # Unlike keras' NumpyArrayIterator (rank 4), this iterator expects an
        # extra leading phase/slice dimension, hence rank-5 input.
        if self.x.ndim != 5:
            raise ValueError('Input data in `NumpyArrayIterator` '
                             'should have rank 5. You passed an array '
                             'with shape', self.x.shape)
        # Channels on axis 4 for channels_last, or axis 2 for channels_first
        # (i.e. right after the extra leading dimension).
        channels_axis = 4 if data_format == 'channels_last' else 2
        if self.x.shape[channels_axis] not in {1, 3, 4}:
            warnings.warn('NumpyArrayIterator is set to use the '
                          'data format convention "' + data_format + '" '
                          '(channels on axis ' + str(channels_axis) + '), i.e. expected '
                          'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
                          'However, it was passed an array with shape ' + str(self.x.shape) +
                          ' (' + str(self.x.shape[channels_axis]) + ' channels).')
        if y is not None:
            self.y = np.asarray(y)
        else:
            self.y = None
        self.image_data_generator = image_data_generator
        self.data_format = data_format
        # save_to_dir/save_prefix/save_format are stored but not used by
        # next() below (no debug images are written for 5D batches).
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        super(ImageArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)

    def next(self):
        """For python 2.x.
        # Returns
            The next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        # NOTE(review): the 3-tuple unpacking of index_generator matches the
        # keras 2.0-era Iterator API — confirm against the pinned keras version.
        with self.lock:
            index_array, current_index, current_batch_size = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        batch_x = np.zeros(tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
        for i, j in enumerate(index_array):
            x = self.x[j]
            # Per-batch augmentation/standardization is intentionally disabled
            # here; samples are yielded exactly as stored in self.x.
            #x = self.image_data_generator.random_transform_array(x.astype(K.floatx()))
            #x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        if self.y is None:
            return batch_x
        batch_y = self.y[index_array]
        return batch_x, batch_y
class CardiacImageDataGenerator(ImageDataGenerator):
    # Customized data augmentation: extends keras' ImageDataGenerator with
    # fit() variants that RETURN the (optionally augmented) array — unlike the
    # keras base fit(), which returns None — plus rank-5 (phase-stacked)
    # support via fit_3d/random_transform_array/flow3d.
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None,
            toDir=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        # Arguments
            x: Numpy array, the data to fit on. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Whether to fit on randomly augmented samples
            rounds: If `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
            toDir: optional directory; when set, each augmented sample is
                also dumped as a PNG for visual inspection.
        # Raises
            ValueError: in case of invalid input `x`.
        """
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' + self.data_format + '" '
                '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' + str(x.shape) +
                ' (' + str(x.shape[self.channel_axis]) + ' channels).')
        if seed is not None:
            np.random.seed(seed)
        if toDir != None:
            if not os.path.exists(toDir):
                os.makedirs(toDir)
        x = np.copy(x)
        if augment:
            # `rounds` full passes: each input sample yields `rounds`
            # independently augmented copies.
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
                    if toDir != None:
                        filename = 'img-%d.png' % (i + r * x.shape[0])
                        out_full_name = os.path.join(toDir, filename)
                        shape = ax.shape
                        if shape[3] == 1:
                            # Single-channel: render as bone-colormap grayscale.
                            img = ax[i, ..., 0]
                            plt.cla()
                            pylab.imshow(img, cmap=pylab.cm.bone)
                            pylab.savefig(out_full_name, bbox_inches='tight')
                        elif shape[3] == 4:
                            # 4-channel: drop channel 0, show channels 1..3 as RGB.
                            img = ax[i, ..., 1:4]
                            plt.cla()
                            pylab.imshow(img)
                            pylab.savefig(out_full_name, bbox_inches='tight')
            x = ax
        # Featurewise statistics are stored on the generator AND applied to
        # the returned array in place.
        if self.featurewise_center:
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + K.epsilon())
        if self.zca_whitening:
            flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
        return x

    def fit_3d(self, x,
               augment=False,
               rounds=1,
               seed=None,
               toDir=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        # Arguments
            x: Numpy array, the data to fit on. Should have rank 5
                (samples, phases, rows, cols, channels for channels_last).
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Whether to fit on randomly augmented samples
            rounds: If `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
            toDir: optional directory for per-phase debug PNGs.
        # Raises
            ValueError: in case of invalid input `x`.
        """
        # Axis bookkeeping for rank-5 batches; random_transform_array below
        # relies on these attributes having been set here.
        data_format = self.data_format
        if data_format == 'channels_first':
            self.channel_axis = 2
            self.phase_axis = 1
            self.row_axis = 3
            self.col_axis = 4
        if data_format == 'channels_last':
            self.channel_axis = 4
            self.phase_axis = 1
            self.row_axis = 2
            self.col_axis = 3
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 5:
            raise ValueError('Input to `.fit()` should have rank 5. '
                             'Got array with shape: ' + str(x.shape))
        if seed is not None:
            np.random.seed(seed)
        if toDir != None:
            if not os.path.exists(toDir):
                os.makedirs(toDir)
        x = np.copy(x)
        if augment:
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    # One affine transform is drawn per sample and applied to
                    # every phase consistently.
                    ax[i + r * x.shape[0]] = self.random_transform_array(x[i])
                    if toDir != None:
                        for j in range(x.shape[1]):
                            filename = 'img-%d-%d.png' % (i + r * x.shape[0], j)
                            out_full_name = os.path.join(toDir, filename)
                            shape = ax.shape
                            if shape[4] == 1:
                                img = ax[i, j, ..., 0]
                                plt.cla()
                                pylab.imshow(img, cmap=pylab.cm.bone)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 4:
                                img = ax[i, j, ..., 1:4]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 3:
                                img = ax[i, j, ..., :]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
            x = ax
        # Note: unlike fit(), no featurewise statistics are computed here.
        return x

    def fit_to_directory(self, x,
                         augment=False,
                         rounds=1,
                         seed=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        Identical to fit() but without the debug-PNG dump (the name is
        historical — nothing is written to a directory here).
        # Arguments
            x: Numpy array, the data to fit on. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Whether to fit on randomly augmented samples
            rounds: If `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
        # Raises
            ValueError: in case of invalid input `x`.
        """
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' + self.data_format + '" '
                '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' + str(x.shape) +
                ' (' + str(x.shape[self.channel_axis]) + ' channels).')
        if seed is not None:
            np.random.seed(seed)
        x = np.copy(x)
        if augment:
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + K.epsilon())
        if self.zca_whitening:
            flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
        return x

    def random_transform_array(self, x, seed=None):
        """Randomly augment a image array tensor.
        # Arguments
            x: rank-4 tensor, a single phase-stacked sample
                (phases, rows, cols, channels for channels_last).
            seed: random seed.
        # Returns
            A randomly transformed version of the input (same shape),
            modified in place. The same transform is applied to all phases.
        """
        # x is a single sample, so it doesn't have the batch dim at index 0;
        # self.*_axis were set for the rank-5 batch by fit_3d, hence the -1.
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_phase_axis = self.phase_axis - 1
        img_channel_axis = self.channel_axis - 1
        if seed is not None:
            np.random.seed(seed)
        # use composition of homographies
        # to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
        # Compose rotation, shift, shear and zoom in that fixed order; the
        # order matters, so it must not be rearranged.
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix
        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
        p, h, w = x.shape[img_phase_axis], x.shape[img_row_axis], x.shape[img_col_axis]
        if transform_matrix is not None:
            transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
            for i in range(p):
                # x[i] is rank 3, so the channel axis shifts down by one.
                x[i] = apply_transform(x[i], transform_matrix, img_channel_axis-1,
                                       fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            for i in range(p):
                # NOTE(review): x[i] is rank 3 but img_channel_axis was computed
                # for the rank-4 sample — apply_transform above uses
                # img_channel_axis-1; this looks off by one. Confirm.
                x[i] = random_channel_shift(x[i],
                                            self.channel_shift_range,
                                            img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                for i in range(p):
                    # NOTE(review): same off-by-one suspicion — for rank-3 x[i]
                    # the column axis is img_col_axis-1; with single-channel data
                    # this flip would be a silent no-op. Confirm.
                    x[i] = flip_axis(x[i], img_col_axis)
        if self.vertical_flip:
            if np.random.random() < 0.5:
                for i in range(p):
                    # NOTE(review): see horizontal_flip note above.
                    x[i] = flip_axis(x[i], img_row_axis)
        return x

    def flow3d(self, x, y=None, batch_size=32, shuffle=True, seed=None,
               save_to_dir=None, save_prefix='', save_format='png'):
        # Rank-5 counterpart of ImageDataGenerator.flow; yields batches via
        # ImageArrayIterator (which currently performs no augmentation).
        return ImageArrayIterator(
            x, y, self,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
class CardiacVolumeDataGenerator(ImageDataGenerator):
    # Customized data augmentation for volumetric (slice-stacked) samples:
    # rank-5 batches laid out (samples, rows, cols, slices, channels) for
    # channels_last. fit() RETURNS the augmented array (keras' returns None).
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None,
            toDir=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        # Arguments
            x: Numpy array, the data to fit on. Should have rank 5.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Whether to fit on randomly augmented samples
            rounds: If `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
            toDir: optional directory for per-slice debug PNGs.
        # Raises
            ValueError: in case of invalid input `x`.
        """
        # Axis bookkeeping; random_transform_array relies on these attributes.
        data_format = self.data_format
        if data_format == 'channels_first':
            self.channel_axis = 1
            self.row_axis = 2
            self.col_axis = 3
            self.slice_axis = 4
        if data_format == 'channels_last':
            self.channel_axis = 4
            self.row_axis = 1
            self.col_axis = 2
            self.slice_axis = 3
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 5:
            raise ValueError('Input to `.fit()` should have rank 5. '
                             'Got array with shape: ' + str(x.shape))
        if seed is not None:
            np.random.seed(seed)
        if toDir != None:
            if not os.path.exists(toDir):
                os.makedirs(toDir)
        x = np.copy(x)
        if augment:
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    # One transform per sample, applied to every slice.
                    ax[i + r * x.shape[0]] = self.random_transform_array(x[i])
                    if toDir != None:
                        for j in range(x.shape[self.slice_axis]):
                            filename = 'img-%d-%d.png' % (i + r * x.shape[0], j)
                            out_full_name = os.path.join(toDir, filename)
                            shape = ax.shape
                            if shape[self.channel_axis] == 1:
                                img = ax[i, ..., j, 0]
                                plt.cla()
                                pylab.imshow(img, cmap=pylab.cm.bone)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 4:
                                img = ax[i, ..., j, 1:4]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 3:
                                img = ax[i, ..., j, :]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
            x = ax
        # No featurewise statistics are computed in this variant.
        return x

    def random_transform_array(self, x, seed=None):
        """Randomly augment a image array tensor.
        # Arguments
            x: rank-4 tensor, a single slice-stacked sample
                (rows, cols, slices, channels for channels_last).
            seed: random seed.
        # Returns
            A randomly transformed version of the input (same shape),
            modified in place; the same transform hits every slice.
        """
        # x is a single sample, so it doesn't have the batch dim at index 0;
        # the -1 converts the batch-relative axes set in fit().
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_slice_axis = self.slice_axis - 1
        img_channel_axis = self.channel_axis - 1
        if seed is not None:
            np.random.seed(seed)
        # use composition of homographies
        # to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
        # Fixed composition order: rotation, shift, shear, zoom.
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix
        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
        s, h, w = x.shape[img_slice_axis], x.shape[img_row_axis], x.shape[img_col_axis]
        if transform_matrix is not None:
            transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
            for i in range(s):
                # x[..., i, :] drops the slice axis; channels shift down by one.
                x[...,i,:] = apply_transform(x[...,i,:], transform_matrix, img_channel_axis-1,
                                             fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            for i in range(s):
                # NOTE(review): x[..., i, :] is rank 3 but img_channel_axis was
                # computed for the rank-4 sample (apply_transform above uses
                # img_channel_axis-1) — looks off by one. Confirm.
                x[...,i,:] = random_channel_shift(x[...,i,:],
                                                 self.channel_shift_range,
                                                 img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                for i in range(s):
                    x[...,i,:] = flip_axis(x[...,i,:], img_col_axis)
        if self.vertical_flip:
            if np.random.random() < 0.5:
                for i in range(s):
                    x[...,i,:] = flip_axis(x[...,i,:], img_row_axis)
        return x

    def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
             save_to_dir=None, save_prefix='', save_format='png'):
        # Yields rank-5 batches via ImageArrayIterator (no augmentation there).
        return ImageArrayIterator(
            x, y, self,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
class CardiacTimeSeriesDataGenerator(ImageDataGenerator):
    # Customized data augmentation for time-series (phase-first) samples:
    # rank-5 batches laid out (samples, phases, rows, cols, channels) for
    # channels_last. Largely mirrors CardiacImageDataGenerator.fit_3d.
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None,
            toDir=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        # Arguments
            x: Numpy array, the data to fit on. Should have rank 5.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Whether to fit on randomly augmented samples
            rounds: If `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
            toDir: optional directory for per-phase debug PNGs.
        # Raises
            ValueError: in case of invalid input `x`.
        """
        # Axis bookkeeping; random_transform_array relies on these attributes.
        data_format = self.data_format
        if data_format == 'channels_first':
            self.channel_axis = 2
            self.row_axis = 3
            self.col_axis = 4
            self.phase_axis = 1
        if data_format == 'channels_last':
            self.channel_axis = 4
            self.row_axis = 2
            self.col_axis = 3
            self.phase_axis = 1
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 5:
            raise ValueError('Input to `.fit()` should have rank 5. '
                             'Got array with shape: ' + str(x.shape))
        if seed is not None:
            np.random.seed(seed)
        if toDir != None:
            if not os.path.exists(toDir):
                os.makedirs(toDir)
        x = np.copy(x)
        if augment:
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    # One transform per sample, applied to every phase.
                    ax[i + r * x.shape[0]] = self.random_transform_array(x[i])
                    if toDir != None:
                        for j in range(x.shape[self.phase_axis]):
                            filename = 'img-%d-%d.png' % (i + r * x.shape[0], j)
                            out_full_name = os.path.join(toDir, filename)
                            shape = ax.shape
                            if shape[self.channel_axis] == 1:
                                img = ax[i, j, ..., 0]
                                plt.cla()
                                pylab.imshow(img, cmap=pylab.cm.bone)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 4:
                                img = ax[i, j, ..., 1:4]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
                            elif shape[4] == 3:
                                img = ax[i, j, ..., :]
                                plt.cla()
                                pylab.imshow(img)
                                pylab.savefig(out_full_name, bbox_inches='tight')
            x = ax
        # No featurewise statistics are computed in this variant.
        return x

    def random_transform_array(self, x, seed=None):
        """Randomly augment a image array tensor.
        # Arguments
            x: rank-4 tensor, a single phase-stacked sample
                (phases, rows, cols, channels for channels_last).
            seed: random seed.
        # Returns
            A randomly transformed version of the input (same shape),
            modified in place; the same transform hits every phase.
        """
        # x is a single sample, so it doesn't have the batch dim at index 0;
        # the -1 converts the batch-relative axes set in fit().
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_phase_axis = self.phase_axis - 1
        img_channel_axis = self.channel_axis - 1
        if seed is not None:
            np.random.seed(seed)
        # use composition of homographies
        # to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
        # Fixed composition order: rotation, shift, shear, zoom.
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix
        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
        p, h, w = x.shape[img_phase_axis], x.shape[img_row_axis], x.shape[img_col_axis]
        if transform_matrix is not None:
            transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
            for i in range(p):
                # x[i, ...] is rank 3, so the channel axis shifts down by one.
                x[i,...] = apply_transform(x[i,...], transform_matrix, img_channel_axis-1,
                                           fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            for i in range(p):
                # NOTE(review): x[i, ...] is rank 3 but img_channel_axis was
                # computed for the rank-4 sample (apply_transform above uses
                # img_channel_axis-1) — looks off by one. Confirm.
                x[i,...] = random_channel_shift(x[i,...],
                                                self.channel_shift_range,
                                                img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                for i in range(p):
                    # NOTE(review): for rank-3 x[i, ...] the column axis is
                    # img_col_axis-1; with single-channel data this flip would
                    # be a silent no-op. Confirm intended axis.
                    x[i,...] = flip_axis(x[i,...], img_col_axis)
        if self.vertical_flip:
            if np.random.random() < 0.5:
                for i in range(p):
                    # NOTE(review): see horizontal_flip note above.
                    x[i,...] = flip_axis(x[i,...], img_row_axis)
        return x

    def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
             save_to_dir=None, save_prefix='', save_format='png'):
        # Yields rank-5 batches via ImageArrayIterator (no augmentation there).
        return ImageArrayIterator(
            x, y, self,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,949 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /DataIOProc.py | import os, re
import random
import numpy as np
from scipy.misc import imsave
import scipy.misc
from keras import backend as K
class DataIOProc():
def __init__(self, data_dir, study_case):
self.data_dir = data_dir
self.study_case = study_case
def save_image_4d(self, data_4d, sub_dir):
save_path = os.path.join(self.data_dir, self.study_case, sub_dir)
if not os.path.exists(save_path):
os.makedirs(save_path)
s, p, h, w, d = data_4d.shape
if d != 1:
print("The last dimension of data should be 1!")
return
for idx_s in range(s):
for idx_p in range(p):
img = data_4d[idx_s, idx_p, ...]
img = np.squeeze(img)
img_name = '{:d}-{:d}'.format(idx_s, idx_p)
imsave(os.path.join(save_path, img_name + ".png"), img)
def load_image_4d(self, sub_dir, s, p, h, w, d):
save_path = os.path.join(self.data_dir, self.study_case, sub_dir)
if not os.path.exists(save_path):
print("No data!")
return
if d != 1:
print("The last dimension of data should be 1!")
return
data_4d = np.zeros((s, p, w, h, d), dtype=K.floatx())
for label_root, dir, files in os.walk(save_path):
for file in files:
if not file.endswith((".png")):
continue
try:
image = scipy.misc.imread(os.path.join(save_path, file))
image = image.astype('float32')/255.0
image = image[..., np.newaxis]
match = re.search(r'(\d)-(\d).*', file)
s = int(match.group(1))
p = int(match.group(2))
data_4d[s, p, ...] = image
except Exception as e:
print(e)
return data_4d
def save_data_4d(self, data_4d, save_name):
save_path = os.path.join(self.data_dir, self.study_case)
if not os.path.exists(save_path):
os.makedirs(save_path)
save_file = os.path.join(save_path,save_name)
data_4d.tofile(save_file)
def load_data_4d(self, load_name, s, p, h, w, d):
save_path = os.path.join(self.data_dir, self.study_case)
if not os.path.exists(save_path):
print("No data!")
return
save_file = os.path.join(save_path, load_name)
data_4d = np.fromfile(save_file, dtype='float32')
data_4d = np.reshape(data_4d, [s, p, h, w, d])
return data_4d | {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,950 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_res_model.py | from __future__ import print_function
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Activation
from keras.optimizers import Adam
from keras.layers.merge import concatenate, add
from keras.utils.vis_utils import plot_model
from metrics_common import dice_coef, dice_coef_endo, dice_coef_myo, dice_coef_rv, dice_coef_loss, dice_coef_loss_endo, dice_coef_loss_myo, dice_coef_loss_rv, dice_coef_endo_each, dice_coef_epi
from layer_common import mvn, crop
from keras.layers import Dropout, Lambda
from keras.layers.normalization import BatchNormalization
# Keyword defaults shared by every Conv2D in this module; no activation here
# because the helper blocks apply BatchNorm + ReLU themselves.
kwargs = {
    'activation': None,
    'padding': 'same',
    'use_bias': True,
    'kernel_initializer': 'glorot_uniform',
    'bias_initializer': 'zeros',
    'bias_regularizer': None,
    'activity_regularizer': None,
    'kernel_constraint': None,
    'bias_constraint': None,
    'trainable': True,
}
# Helper to build a conv -> BN -> relu block
def _conv_bn_relu(nb_filter, kernel_size, strides=1):
    """Return a closure applying Conv2D -> BatchNormalization -> ReLU."""
    def f(input):
        conv = Conv2D(filters=nb_filter, kernel_size=kernel_size, strides=strides, **kwargs)(input)
        # BUG FIX: this model is channels-last (skip connections concatenate
        # on axis=3 and channels live in _keras_shape[3]), so batch norm must
        # normalize the last axis; axis=1 normalized over image rows instead.
        # NOTE: checkpoints saved with the old axis will no longer load.
        norm = BatchNormalization(axis=-1)(conv)
        return Activation("relu")(norm)
    return f
# Helper to build a BN -> relu -> conv block
# This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
def _bn_relu_conv(nb_filter, kernel_size, strides=1):
    """Return a closure applying BatchNormalization -> ReLU -> Conv2D
    (pre-activation ordering)."""
    def f(input):
        # BUG FIX: normalize the channel axis (-1) for channels-last tensors;
        # axis=1 normalized over image rows.  Old checkpoints will not load.
        norm = BatchNormalization(axis=-1)(input)
        activation = Activation("relu")(norm)
        return Conv2D(filters=nb_filter, kernel_size=kernel_size, strides=strides, **kwargs)(activation)
    return f
# Bottleneck architecture for > 34 layer resnet.
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
# Returns a final conv layer of nb_filters * 4
def _bottleneck(nb_filters, strides=1):
    """1x1 reduce -> 3x3 spatial -> 1x1 expand (x4) residual bottleneck."""
    def f(input):
        reduced = _bn_relu_conv(nb_filters, 1, strides=strides)(input)
        spatial = _bn_relu_conv(nb_filters, 3)(reduced)
        expanded = _bn_relu_conv(nb_filters * 4, 1)(spatial)
        return _shortcut(input, expanded)
    return f
# Basic 3 X 3 convolution blocks.
# Use for resnet with layers <= 34
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
def _basic_block(nb_filters, strides=1):
    """Two stacked 3x3 pre-activation convolutions with an additive shortcut."""
    def f(input):
        first = _bn_relu_conv(nb_filters, 3, strides=strides)(input)
        second = _bn_relu_conv(nb_filters, 3)(first)
        return _shortcut(input, second)
    return f
# Adds a shortcut between input and residual block and merges them with "sum"
def _shortcut(input, residual):
    """Add *input* to *residual*, projecting with a strided 1x1 conv when the
    spatial size or channel count differs."""
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # BUG FIX: use integer floor division -- under Python 3 the old `/`
    # produced a float stride that then had to be cast back with int().
    # (The ratio is expected to be integral for a correctly configured net.)
    strides = input._keras_shape[2] // residual._keras_shape[2]
    equal_channels = residual._keras_shape[3] == input._keras_shape[3]
    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if strides > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual._keras_shape[3], kernel_size=1, strides=strides, **kwargs)(input)
    return add([shortcut, residual])
# Builds a residual block with repeating bottleneck blocks.
def _residual_block(block_function, nb_filters, repetations, is_first_layer=False):
def f(input):
for i in range(repetations):
init_subsample = 1
if i == 0 and not is_first_layer:
init_subsample = 2
input = block_function(nb_filters=nb_filters, strides=init_subsample)(input)
return input
return f
def _up_block(block, mrge, nb_filters):
    """Upsample *block* 2x, fuse it with the skip tensor *mrge* along the
    channel axis, then refine with two 3x3 ReLU convolutions."""
    upsampled = UpSampling2D(size=(2, 2))(block)
    projected = Conv2D(filters=2 * nb_filters, kernel_size=2, padding='same')(upsampled)
    fused = concatenate([projected, mrge], axis=3)
    refined = Conv2D(filters=nb_filters, kernel_size=3, activation='relu', padding='same')(fused)
    refined = Conv2D(filters=nb_filters, kernel_size=3, activation='relu', padding='same')(refined)
    return refined
def unet_res_model(input_shape, num_classes, transfer=True, contour_type='i', weights=None):
    """Build a U-Net with a ResNet-bottleneck encoder.

    input_shape  -- (H, W, C) of the input tensor (channels-last).
    num_classes  -- output channels; 2 collapses to a single sigmoid map,
                    otherwise softmax over num_classes channels.
    contour_type -- 'i' compiles with the endocardium Dice loss, 'm' with the
                    myocardium Dice loss.
    weights      -- optional weights file loaded before compiling.

    NOTE(review): the `loss`/`activation` selection below only feeds
    `activation` into the final layer -- `loss` is never used because
    compile() hard-codes the endo/myo losses.  For contour types other than
    'i'/'m' the returned model is NOT compiled.  Confirm that is intended.
    """
    if num_classes == 2:
        num_classes = 1
        loss = dice_coef_loss
        activation = 'sigmoid'
    else:
        if transfer == True:
            if contour_type == 'i':
                loss = dice_coef_loss_endo
            elif contour_type == 'm':
                loss = dice_coef_loss_myo
            elif contour_type == 'r':
                loss = dice_coef_loss_rv
            elif contour_type == 'a':
                loss = dice_coef_loss
        else:
            loss = dice_coef_loss
        activation = 'softmax'
    data = Input(shape=input_shape, dtype='float', name='data')
    mvn1 = Lambda(mvn)(data)
    nb_filters = 32  # 5
    # Stem: strided 7x7 conv + 3x3 max-pool (each halves the resolution).
    conv1 = _conv_bn_relu(nb_filter=2 * nb_filters, kernel_size=7, strides=2)(mvn1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
    # Build residual blocks.. (ResNet-50-style 3/4/6/3 bottleneck stages,
    # each stage separated by an mvn normalization Lambda).
    block_fn = _bottleneck
    pool1 = Lambda(mvn)(pool1)
    block1 = _residual_block(block_fn, nb_filters=2 * nb_filters, repetations=3, is_first_layer=True)(pool1)
    block1 = Lambda(mvn)(block1)
    block2 = _residual_block(block_fn, nb_filters=2 ** 2 * nb_filters, repetations=4)(block1)
    block2 = Lambda(mvn)(block2)
    block3 = _residual_block(block_fn, nb_filters=2 ** 3 * nb_filters, repetations=6)(block2)
    block3 = Lambda(mvn)(block3)
    block4 = _residual_block(block_fn, nb_filters=2 ** 4 * nb_filters, repetations=3)(block3)
    block4 = Lambda(mvn)(block4)
    # Decoder: upsample + fuse with the matching encoder stage.
    up5 = _up_block(block4, block3, 2 ** 3 * nb_filters)
    up6 = _up_block(up5, block2, 2 ** 2 * nb_filters)
    up7 = _up_block(up6, block1, 2 * nb_filters)
    up8 = _up_block(up7, conv1, nb_filters)
    up9 = UpSampling2D(size=(2, 2))(up8)
    # Final 1x1 projection to per-pixel class scores.
    conv10 = Conv2D(filters=num_classes, kernel_size=1,
                    strides=1, activation=activation, padding='valid',
                    kernel_initializer='glorot_uniform', use_bias=True, name="prediction")(up9)
    model = Model(inputs=data, outputs=conv10)
    if weights is not None:
        model.load_weights(weights)
    if contour_type == 'i':
        model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss_endo, metrics=[dice_coef_endo])
    elif contour_type == 'm':
        model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss_myo, metrics=[dice_coef_myo, dice_coef_epi, dice_coef_endo])
    # sgd = optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)
    # model.compile(optimizer=sgd, loss=dice_coef_loss_endo,
    #               metrics=[dice_coef_endo])
    return model
if __name__ == '__main__':
    # Smoke test: build a 4-class model on 128x128 inputs, export its diagram
    # and print the layer summary.
    demo = unet_res_model((128, 128, 1), 4, transfer=True, weights=None)
    plot_model(demo, show_shapes=True, to_file='unet_res_model.png')
    demo.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,951 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /tfmodel/helpers.py | import tensorflow as tf
import re, sys, os
import shutil, cv2
import numpy as np
import pylab
import matplotlib.pyplot as plt
from helpers import reshape
# Dataset root on the local machine.  IDIOM: raw string so the backslash
# separators can never be misread as escape sequences.
SUNNYBROOK_ROOT_PATH = r'D:\cardiac_data\Sunnybrook'
# Output folders for validation / online submission results.
SAVE_VAL_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                             'Sunnybrook_val_submission')
SAVE_ONLINE_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                                'Sunnybrook_online_submission')
def draw_contour(image, image_name, out_path, contour_type='i', coords=None):
    """Render *image* in bone colormap, overlay optional contour points and
    save the figure as out_path/image_name (created on demand)."""
    target_file = os.path.join(out_path, image_name)
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Drop the trailing channel axis; the frame is shown as grayscale.
    image = image[..., 0]
    img_size = image.shape
    plt.cla()
    pylab.imshow(image, cmap=pylab.cm.bone)
    if isinstance(coords, np.ndarray):
        # A 1-D array holds a single (x, y) point; otherwise unzip the pairs.
        if coords.ndim == 1:
            x, y = coords
        else:
            x, y = zip(*coords)
        # 'i' -> red dots, 'o' -> blue dots; anything else is not drawn.
        marker = {'i': 'r.', 'o': 'b.'}.get(contour_type)
        if marker is not None:
            plt.plot(x, y, marker)
    # Trim a 50-pixel border when framing the view.
    plt.xlim(50, img_size[0] - 50)
    plt.ylim(50, img_size[1] - 50)
    pylab.savefig(target_file, bbox_inches='tight', dpi=200)
    return
def add_output_images(images, logits, labels, max_outputs=3):
    """Attach TensorBoard image summaries: the raw input, the input blended
    with its ground-truth mask, and the input blended with the prediction.

    The mask/prediction is painted into the red channel of an RGB composite
    (green/blue carry the grayscale input).
    """
    tf.summary.image('input', images, max_outputs=max_outputs)
    output_image_bw = images[..., 0]
    # Ground-truth channel 0 blended over the grayscale input.
    labels1 = tf.cast(labels[...,0], tf.float32)
    input_labels_image_r = labels1 + (output_image_bw * (1-labels1))
    input_labels_image = tf.stack([input_labels_image_r, output_image_bw, output_image_bw], axis=3)
    tf.summary.image('input_labels_mixed', input_labels_image, max_outputs=3)
    # Prediction channel 1, padded/cropped back to the input frame size.
    img_shape = tf.shape(images)
    classification1 = tf.image.resize_image_with_crop_or_pad(logits, img_shape[1], img_shape[2])[...,1]
    output_labels_image_r = classification1 + (output_image_bw * (1-classification1))
    output_labels_image = tf.stack([output_labels_image_r, output_image_bw, output_image_bw], axis=3)
    tf.summary.image('output_labels_mixed', output_labels_image, max_outputs=3)
    return
def save_output_images(images, logits, image_names, contour_type):
    """Threshold each image's class-1 logits at 0.5, keep the longest
    detected contour, and save an overlay drawing per image."""
    # CONSISTENCY FIX: reuse the module-level constant instead of a second,
    # independently hard-coded copy of the same path.
    save_dir = SAVE_ONLINE_PATH
    overlay_full_path = os.path.join(save_dir, 'Overlay')
    img_shape = images.shape
    for idx in range(img_shape[0]):
        image = images[idx, ...]
        image_name = image_names[idx]
        # Probability map for class 1, reshaped back to the input frame size
        # and binarized to a 0/255 uint8 mask.
        logit = logits[idx, ..., 1]
        logit = logit[..., np.newaxis]
        logit = reshape(logit, to_shape=(img_shape[1], img_shape[2], img_shape[3]))
        logit = np.where(logit > 0.5, 255, 0).astype('uint8')
        # NOTE(review): the 3-value return is the OpenCV 3.x findContours
        # signature; OpenCV 4.x returns (contours, hierarchy).
        tmp2, coords, hierarchy = cv2.findContours(logit.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in image: {:s}'.format(image_name))
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('\nMultiple detections in image: {:s}'.format(image_name))
            # cv2.imwrite(data_path + '\\multiple_dets\\'+contour_type+'{:04d}.png'.format(idx), tmp)
            # Keep only the contour with the most points.
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
        coords = np.squeeze(coords)
        draw_contour(image, image_name, overlay_full_path, contour_type, coords)
def save_output_eval(accuracy, image_names, contour_type):
    """Pair each image name with its accuracy value.

    Returns a flat list with one (name, score) row per image.

    BUG FIX: the original Matlab-style `resArr = [resArr, row]` nested the
    accumulator one level deeper on every iteration instead of appending;
    it also bound the builtin name `eval`.
    """
    img_shape = image_names.shape
    resArr = []
    for idx in range(img_shape[0]):
        score = accuracy[idx]
        img = image_names[idx]
        resArr.append(np.transpose([img, score]))
    return resArr
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,952 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /unet_model_inv.py | from __future__ import print_function
import numpy as np
from keras import optimizers
from keras.models import Model
from keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout
from keras.optimizers import Adam
from keras.layers.merge import concatenate
from keras.utils.vis_utils import plot_model
from layer_common import mvn, crop
from keras.layers import Dropout, Lambda
from keras import backend as K
def dice_coef(y_true, y_pred, smooth=0.0):
    '''Mean soft Dice coefficient over the batch, reduced over axes (1, 2).'''
    spatial_axes = (1, 2)
    truth = y_true[:, :, :]
    pred = y_pred[:, :, :]
    overlap = K.sum(truth * pred, axis=spatial_axes)
    magnitude = K.sum(truth * truth, axis=spatial_axes) + K.sum(pred * pred, axis=spatial_axes)
    return K.mean((2.0 * overlap + smooth) / (magnitude + smooth), axis=0)
def dice_coef_each(y_true, y_pred, smooth=0.0):
    '''Per-sample Dice coefficients (numpy); predictions are hard-thresholded
    at 0.5 before comparison.  Returns one value per batch element.'''
    spatial_axes = (1, 2)
    truth = y_true[:, :, :].astype('float32')
    hard_pred = (y_pred[:, :, :] > 0.5).astype('float32')
    overlap = np.sum(truth * hard_pred, axis=spatial_axes)
    magnitude = np.sum(truth * truth, axis=spatial_axes) + np.sum(hard_pred * hard_pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (magnitude + smooth)
def dice_coef_loss(y_true, y_pred):
    """Dice loss: 1 - soft Dice (no smoothing)."""
    similarity = dice_coef(y_true, y_pred, smooth=0.0)
    return 1.0 - similarity
def unet_model_inv(input_shape, num_classes, num_filters=32, transfer=True, contour_type='i', weights=None):
    """Build a U-Net whose filter counts SHRINK with depth (hence "_inv").

    The encoder starts at 2**4*num_filters and halves per level down to
    num_filters at the bottleneck; the decoder mirrors it back up.
    Compiled with Adam(1e-5) and the soft Dice loss.

    input_shape -- (H, W, C) of the input tensor.
    num_classes -- output channels; 2 collapses to a single sigmoid map,
                   otherwise softmax over num_classes channels.
    weights     -- optional path of weights to load before compiling.

    NOTE(review): every transfer/contour_type branch assigns the same
    dice_coef_loss and the local `loss` is never used -- compile() below
    hard-codes dice_coef_loss regardless.
    """
    if num_classes == 2:
        num_classes = 1
        loss = dice_coef_loss
        activation = 'sigmoid'
    else:
        if transfer == True:
            if contour_type == 'i':
                loss = dice_coef_loss
            elif contour_type == 'o':
                loss = dice_coef_loss
            elif contour_type == 'r':
                loss = dice_coef_loss
            elif contour_type == 'a':
                loss = dice_coef_loss
        else:
            loss = dice_coef_loss
        activation = 'softmax'
    # Settings shared by every 3x3 ReLU convolution below.
    kwargs = dict(
        kernel_size=3,
        strides=1,
        activation='relu',
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
    )
    # ---- encoder: mvn-normalized conv pairs + 2x2 max-pooling ----
    data = Input(shape=input_shape, dtype='float', name='data')
    mvn1 = Lambda(mvn, name='mvn1')(data)
    conv1 = Conv2D(filters=2**4*num_filters, **kwargs)(mvn1)
    conv1 = Conv2D(filters=2**4*num_filters, **kwargs)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #pool1 = Dropout(rate=0.5)(pool1)
    pool1 = Lambda(mvn)(pool1)
    conv2 = Conv2D(filters=2**3*num_filters, **kwargs)(pool1)
    conv2 = Conv2D(filters=2**3*num_filters, **kwargs)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #pool2 = Dropout(rate=0.3)(pool2)
    pool2 = Lambda(mvn)(pool2)
    conv3 = Conv2D(filters=2**2*num_filters, **kwargs)(pool2)
    conv3 = Conv2D(filters=2**2*num_filters, **kwargs)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #pool3 = Dropout(rate=0.5)(pool3)
    pool3 = Lambda(mvn)(pool3)
    conv4 = Conv2D(filters=2**1*num_filters, **kwargs)(pool3)
    conv4 = Conv2D(filters=2**1*num_filters, **kwargs)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #pool4 = Dropout(rate=0.3)(pool4)
    pool4 = Lambda(mvn)(pool4)
    # Bottleneck at the narrowest filter count.
    conv5 = Conv2D(filters=2**0*num_filters, **kwargs)(pool4)
    conv5 = Conv2D(filters=2**0*num_filters, **kwargs)(conv5)
    # pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)
    # convdeep = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(pool5)
    # convdeep = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(convdeep)
    # upmid = merge([Convolution2D(512, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(convdeep)), conv5], mode='concat', concat_axis=1)
    # convmid = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(upmid)
    # convmid = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(convmid)
    # ---- decoder: upsample, concatenate encoder skip, conv pair ----
    #up6 = merge(
    #    [Conv2D(filters=256, **kwargs)(UpSampling2D(size=(2, 2))(conv5)), conv4],
    #    mode='concat', concat_axis=3)
    up6 = concatenate([Conv2D(filters=2**1*num_filters, **kwargs)(UpSampling2D(size=(2, 2))(conv5)), conv4], axis=3)
    #up6 = Lambda(mvn)(up6)
    conv6 = Conv2D(filters=2**1*num_filters, **kwargs)(up6)
    conv6 = Conv2D(filters=2**1*num_filters, **kwargs)(conv6)
    #conv6 = Dropout(rate=0.5)(conv6)
    #conv6 = Lambda(mvn)(conv6)
    #up7 = merge(
    #    [Conv2D(filters=128, **kwargs)(UpSampling2D(size=(2, 2))(conv6)), conv3],
    #    mode='concat', concat_axis=3)
    up7 = concatenate([Conv2D(filters=2**2*num_filters, **kwargs)(UpSampling2D(size=(2, 2))(conv6)), conv3], axis=3)
    #up7 = Lambda(mvn)(up7)
    conv7 = Conv2D(filters=2**2*num_filters, **kwargs)(up7)
    conv7 = Conv2D(filters=2**2*num_filters, **kwargs)(conv7)
    #conv7 = Dropout(rate=0.5)(conv7)
    #conv7 = Lambda(mvn)(conv7)
    #up8 = merge(
    #    [Conv2D(filters=64, **kwargs)(UpSampling2D(size=(2, 2))(conv7)), conv2],
    #    mode='concat', concat_axis=3)
    up8 = concatenate([Conv2D(filters=2**3*num_filters, **kwargs)(UpSampling2D(size=(2, 2))(conv7)), conv2], axis=3)
    #up8 = Lambda(mvn)(up8)
    conv8 = Conv2D(filters=2**3*num_filters, **kwargs)(up8)
    conv8 = Conv2D(filters=2**3*num_filters, **kwargs)(conv8)
    #conv8 = Dropout(rate=0.5)(conv8)
    #conv8 = Lambda(mvn)(conv8)
    #up9 = merge(
    #    [Conv2D(filters=32, **kwargs)(UpSampling2D(size=(2, 2))(conv8)), conv1],
    #    mode='concat', concat_axis=3)
    up9 = concatenate([Conv2D(filters=2**4*num_filters, **kwargs)(UpSampling2D(size=(2, 2))(conv8)), conv1], axis=3)
    conv9 = Conv2D(filters=2**4*num_filters, **kwargs)(up9)
    conv9 = Conv2D(filters=2**4*num_filters, **kwargs)(conv9)
    # conv9 = Dropout(rate=0.5)(conv9)
    #conv9 = Lambda(mvn)(conv9)
    # Final 1x1 projection to per-pixel class scores.
    conv10 = Conv2D(filters=num_classes, kernel_size=1,
                    strides=1, activation=activation, padding='valid',
                    kernel_initializer='glorot_uniform', use_bias=True, name="prediction")(conv9)
    model = Model(inputs=data, outputs=conv10)
    if weights is not None:
        model.load_weights(weights)
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)
    #model.compile(optimizer=sgd, loss=dice_coef_loss_endo,
    #              metrics=[dice_coef_endo])
    return model
if __name__ == '__main__':
    # Smoke test: build a 4-class, 32-filter model, export its diagram and
    # print the layer summary.
    net = unet_model_inv((128, 128, 1), 4, 32, transfer=True, weights=None)
    plot_model(net, show_shapes=True, to_file='unet_model_inv.png')
    net.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,953 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /keras_visualization_test.py | from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm
from vis.input_modifiers import Jitter
from vis.optimizer import Optimizer
from vis.callbacks import GifGenerator
from vis.utils.vggnet import VGG16
# Build the VGG16 network with ImageNet weights (classifier head included).
model = VGG16(weights='imagenet', include_top=True)
print('Model loaded.')
# The name of the layer we want to visualize
# (see model definition in vggnet.py)
layer_name = 'predictions'
# IDIOM: dict comprehension instead of dict([(k, v) for ...]) (lint C404).
layer_dict = {layer.name: layer for layer in model.layers[1:]}
# Class index (20) whose activation will be maximized.
output_class = [20]
# Weighted objective terms: activation maximization regularized by an
# Lp-norm and total variation on the input image.
losses = [
    (ActivationMaximization(layer_dict[layer_name], output_class), 2),
    (LPNorm(model.input), 10),
    (TotalVariation(model.input), 10)
]
opt = Optimizer(model.input, losses)
opt.minimize(max_iter=500, verbose=True, image_modifiers=[Jitter()], callbacks=[GifGenerator('opt_progress')]) | {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,954 | alexliyang/cardiac-segmentation-cc | refs/heads/master | /fcn_model_inv.py | #!/usr/bin/env python2.7
import numpy as np
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Lambda
from keras.layers import Input, average
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose
from keras.layers import ZeroPadding2D, Cropping2D
from keras import backend as K
from layer_common import mvn, crop
from keras.utils.vis_utils import plot_model
def dice_coef(y_true, y_pred, smooth=0.0):
    '''Mean soft Dice coefficient over the batch, reduced over axes (1, 2, 3).'''
    reduce_axes = (1, 2, 3)
    overlap = K.sum(y_true * y_pred, axis=reduce_axes)
    total_mass = K.sum(y_true, axis=reduce_axes) + K.sum(y_pred, axis=reduce_axes)
    return K.mean((2.0 * overlap + smooth) / (total_mass + smooth), axis=0)
def dice_coef_loss(y_true, y_pred):
    '''Dice loss: 1 - Dice computed with smooth=10.0.'''
    similarity = dice_coef(y_true, y_pred, smooth=10.0)
    return 1.0 - similarity
def jaccard_coef(y_true, y_pred, smooth=0.0):
    '''Mean Jaccard (intersection-over-union) coefficient over the batch.'''
    reduce_axes = (1, 2, 3)
    overlap = K.sum(y_true * y_pred, axis=reduce_axes)
    union = K.sum(y_true, axis=reduce_axes) + K.sum(y_pred, axis=reduce_axes) - overlap
    return K.mean((overlap + smooth) / (union + smooth), axis=0)
def dice_coef_each(y_true, y_pred, smooth=0.0):
    '''Per-sample Dice coefficients (numpy); predictions are hard-thresholded
    at 0.5 before comparison.  Returns one value per batch element.'''
    spatial_axes = (1, 2)
    truth = y_true[:, :, :].astype('float32')
    hard_pred = (y_pred[:, :, :] > 0.5).astype('float32')
    overlap = np.sum(truth * hard_pred, axis=spatial_axes)
    magnitude = np.sum(truth * truth, axis=spatial_axes) + np.sum(hard_pred * hard_pred, axis=spatial_axes)
    return (2.0 * overlap + smooth) / (magnitude + smooth)
def fcn_model_inv(input_shape, num_classes, num_filter=64, weights=None):
    ''' "Skip" FCN architecture similar to Long et al., 2015
    https://arxiv.org/abs/1411.4038

    Filter counts SHRINK with depth, from 2**3*num_filter down to num_filter
    (hence "_inv").  Binary problems (num_classes == 2) collapse to a single
    sigmoid channel trained with Dice loss; otherwise softmax + categorical
    cross-entropy.  Compiled with SGD(lr=0.001, momentum=0.9, nesterov).
    weights, if given, is loaded by layer name before compiling.
    '''
    if num_classes == 2:
        num_classes = 1
        loss = dice_coef_loss
        activation = 'sigmoid'
    else:
        loss = 'categorical_crossentropy'
        activation = 'softmax'
    # Settings shared by every 3x3 ReLU convolution below.
    kwargs = dict(
        kernel_size=3,
        strides=1,
        activation='relu',
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
    )
    # ---- encoder: mvn-normalized conv stacks with strided pooling ----
    data = Input(shape=input_shape, dtype='float', name='data')
    mvn0 = Lambda(mvn, name='mvn0')(data)
    conv1 = Conv2D(filters=2**3*num_filter, name='conv1', **kwargs)(mvn0)
    mvn1 = Lambda(mvn, name='mvn1')(conv1)
    conv2 = Conv2D(filters=2**3*num_filter, name='conv2', **kwargs)(mvn1)
    mvn2 = Lambda(mvn, name='mvn2')(conv2)
    conv3 = Conv2D(filters=2**3*num_filter, name='conv3', **kwargs)(mvn2)
    mvn3 = Lambda(mvn, name='mvn3')(conv3)
    drop3 = Dropout(rate=0.5, name='drop1')(mvn3)
    pool1 = MaxPooling2D(pool_size=3, strides=2,
                         padding='same', name='pool1')(drop3)
    conv4 = Conv2D(filters=2**2*num_filter, name='conv4', **kwargs)(pool1)
    mvn4 = Lambda(mvn, name='mvn4')(conv4)
    conv5 = Conv2D(filters=2**2*num_filter, name='conv5', **kwargs)(mvn4)
    mvn5 = Lambda(mvn, name='mvn5')(conv5)
    conv6 = Conv2D(filters=2**2*num_filter, name='conv6', **kwargs)(mvn5)
    mvn6 = Lambda(mvn, name='mvn6')(conv6)
    conv7 = Conv2D(filters=2**2*num_filter, name='conv7', **kwargs)(mvn6)
    mvn7 = Lambda(mvn, name='mvn7')(conv7)
    drop7 = Dropout(rate=0.5, name='drop2')(mvn7)
    pool2 = MaxPooling2D(pool_size=3, strides=2,
                         padding='same', name='pool2')(drop7)
    conv8 = Conv2D(filters=2**1*num_filter, name='conv8', **kwargs)(pool2)
    mvn8 = Lambda(mvn, name='mvn8')(conv8)
    conv9 = Conv2D(filters=2**1*num_filter, name='conv9', **kwargs)(mvn8)
    mvn9 = Lambda(mvn, name='mvn9')(conv9)
    conv10 = Conv2D(filters=2**1*num_filter, name='conv10', **kwargs)(mvn9)
    mvn10 = Lambda(mvn, name='mvn10')(conv10)
    conv11 = Conv2D(filters=2**1*num_filter, name='conv11', **kwargs)(mvn10)
    mvn11 = Lambda(mvn, name='mvn11')(conv11)
    pool3 = MaxPooling2D(pool_size=3, strides=2,
                         padding='same', name='pool3')(mvn11)
    conv12 = Conv2D(filters=2**0*num_filter, name='conv12', **kwargs)(pool3)
    mvn12 = Lambda(mvn, name='mvn12')(conv12)
    conv13 = Conv2D(filters=2**0*num_filter, name='conv13', **kwargs)(mvn12)
    mvn13 = Lambda(mvn, name='mvn13')(conv13)
    conv14 = Conv2D(filters=2**0*num_filter, name='conv14', **kwargs)(mvn13)
    mvn14 = Lambda(mvn, name='mvn14')(conv14)
    conv15 = Conv2D(filters=2**0*num_filter, name='conv15', **kwargs)(mvn14)
    mvn15 = Lambda(mvn, name='mvn15')(conv15)
    # ---- decoder: per-scale class scores, transposed-conv upsampling and
    # skip fusion by averaging ----
    score_conv15 = Conv2D(filters=num_classes, kernel_size=1,
                          strides=1, activation=None, padding='same',
                          kernel_initializer='glorot_uniform', use_bias=True,
                          name='score_conv15')(mvn15)
    upsample1 = Conv2DTranspose(filters=num_classes, kernel_size=3,
                                strides=2, activation=None, padding='same',
                                kernel_initializer='glorot_uniform', use_bias=False,
                                name='upsample1')(score_conv15)
    score_conv11 = Conv2D(filters=num_classes, kernel_size=1,
                          strides=1, activation=None, padding='same',
                          kernel_initializer='glorot_uniform', use_bias=True,
                          name='score_conv11')(mvn11)
    crop1 = Lambda(crop, name='crop1')([upsample1, score_conv11])
    # NOTE(review): fuse_scores1/2 average the cropped tensor with upsample1/2
    # rather than with the skip scores (contrast fuse_scores3, which averages
    # score_conv3 with upsample3) -- confirm this asymmetry is intended.
    fuse_scores1 = average([crop1, upsample1], name='fuse_scores1')
    upsample2 = Conv2DTranspose(filters=num_classes, kernel_size=3,
                                strides=2, activation=None, padding='same',
                                kernel_initializer='glorot_uniform', use_bias=False,
                                name='upsample2')(fuse_scores1)
    score_conv7 = Conv2D(filters=num_classes, kernel_size=1,
                         strides=1, activation=None, padding='same',
                         kernel_initializer='glorot_uniform', use_bias=True,
                         name='score_conv7')(drop7)
    crop2 = Lambda(crop, name='crop2')([upsample2, score_conv7])
    fuse_scores2 = average([crop2, upsample2], name='fuse_scores2')
    upsample3 = Conv2DTranspose(filters=num_classes, kernel_size=3,
                                strides=2, activation=None, padding='same',
                                kernel_initializer='glorot_uniform', use_bias=False,
                                name='upsample3')(fuse_scores2)
    score_conv3 = Conv2D(filters=num_classes, kernel_size=1,
                         strides=1, activation=None, padding='same',
                         kernel_initializer='glorot_uniform', use_bias=True,
                         name='score_conv3')(drop3)
    fuse_scores3 = average([score_conv3, upsample3], name='fuse_scores3')
    # Final 1x1 projection to per-pixel class probabilities.
    predictions = Conv2D(filters=num_classes, kernel_size=1,
                         strides=1, activation=activation, padding='same',
                         kernel_initializer='glorot_uniform', use_bias=True,
                         name='predictions')(fuse_scores3)
    model = Model(inputs=data, outputs=predictions)
    if weights is not None:
        model.load_weights(weights, by_name=True)
    sgd = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=loss,
                  metrics=['accuracy', dice_coef, jaccard_coef])
    return model
if __name__ == '__main__':
    # Smoke test: build a binary FCN, export its diagram and print a summary.
    net = fcn_model_inv((128, 128, 1), 2, num_filter=64, weights=None)
    plot_model(net, show_shapes=True, to_file='fcn_model_inv.png')
    net.summary()
| {"/train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/train_sunnybrook_unet_3d.py": ["/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/fcn_model_resnet50.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_segnet.py": ["/tfmodel/__init__.py"], "/fcn_model_resnet.py": ["/metrics_common.py", "/layer_common.py"], "/train_sunnybrook_unetres.py": ["/CardiacImageDataGenerator.py"], "/unet_model_3d_Inv.py": ["/layer_common.py"], "/pred_sunnybrook_unetres_time.py": ["/train_sunnybrook_unetres.py", "/unet_model_time.py"], "/submit_sunnybrook_unet_3d.py": ["/train_sunnybrook_unet_3d.py", "/CardiacImageDataGenerator.py", "/unet_model_3d_Inv.py"], "/unet_model.py": ["/metrics_common.py", "/layer_common.py"], "/pre_train_acdc_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/submit_sunnybrook_unetres_time.py": ["/train_sunnybrook_unet_time.py", "/unet_model_time.py", "/metrics_common.py"], "/pre_train_sunnybrook_unet_time.py": ["/CardiacImageDataGenerator.py", "/unet_model_time.py", "/DataIOProc.py"], "/unet_lstm_multi_model.py": ["/metrics_common.py", "/layer_common.py"], "/train_acdc_unetres_II.py": ["/CardiacImageDataGenerator.py"], "/tfmodel/__init__.py": ["/tfmodel/helpers.py", "/tfmodel/evaluation.py"], "/unet_model_time.py": ["/layer_common.py"], "/unet_res_model.py": ["/metrics_common.py", "/layer_common.py"], "/unet_model_inv.py": ["/layer_common.py"], "/fcn_model_inv.py": ["/layer_common.py"]} |
60,965 | murdoch3/cryptopals-set1 | refs/heads/main | /c7.py | import base64
from Crypto.Cipher import AES
def b64_to_raw(b64_string):
    """Decode a base64-encoded string and return the raw bytes."""
    decoded = base64.b64decode(b64_string)
    return decoded
def main():
    """Cryptopals 1.7: read base64 ciphertext from '7.txt', decrypt it
    with AES-128 in ECB mode under the known key, and print the result.
    """
    # First let's get all of the b64 encoded data from the file
    with open('7.txt', 'r') as file:
        data = file.read().replace('\n', '')
    # Decode the b64 encrypted data into bytes
    encrypted_bytes = b64_to_raw(data)
    # Now we want to decrypt this with aes-128 ecb
    key = b'YELLOW SUBMARINE'
    cipher = AES.new(key, AES.MODE_ECB)
    plaintext = cipher.decrypt(encrypted_bytes)
    # Printed raw (bytes repr); PKCS#7 padding, if any, is left in place.
    print(plaintext)
if __name__ == '__main__':
main()
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,966 | murdoch3/cryptopals-set1 | refs/heads/main | /c3.py | from c1 import hex_to_raw
import string
# Relative frequency (%) of each letter in typical English text; used as
# the reference distribution when scoring candidate plaintexts below.
character_frequencies = {
    'e': 12.02,
    't': 9.10,
    'a': 8.12,
    'o': 7.68,
    'i': 7.31,
    'n': 6.95,
    's': 6.28,
    'r': 6.02,
    'h': 5.92,
    'd': 4.32,
    'l': 3.98,
    'u': 2.88,
    'c': 2.71,
    'm': 2.61,
    'f': 2.30,
    'y': 2.11,
    'w': 2.09,
    'g': 2.03,
    'p': 1.82,
    'b': 1.49,
    'v': 1.11,
    'k': 0.69,
    'x': 0.17,
    'q': 0.11,
    'j': 0.10,
    'z': 0.07
}
def get_frequencies(text):
    """Return a dict mapping each character in *text* to its frequency,
    expressed as a percentage of the text length.

    Every lowercase ASCII letter is always present (0 for letters that
    never occur); any other character seen in *text* gets its own entry.
    """
    # Seed with every lowercase letter so callers can rely on those keys.
    counts = {letter: 0 for letter in string.ascii_lowercase}
    # Count every character, including punctuation and digits.
    for ch in text:
        counts[ch] = counts.get(ch, 0) + 1
    # Convert raw counts into percentages of the total length.
    total = len(text)
    if total != 0:
        for ch in counts:
            counts[ch] /= total
            counts[ch] *= 100
    return counts
def score_english(text):
    """Score how English-like *text* is; lower scores are better.

    Letters contribute the absolute deviation from the reference English
    distribution; spaces are free, common punctuation and digits carry a
    small penalty, and anything else is penalised heavily.
    """
    observed = get_frequencies(text)
    score = 0
    for ch, freq in observed.items():
        if ch in character_frequencies:
            score += abs(freq - character_frequencies[ch])
        elif ch == ' ':
            continue
        elif ch in "'\"!.?" or ch in string.digits:
            score += 15
        else:
            score += 100
    return score
def bytes_to_string(byte_string):
    """Map each byte value to the character with that code point and
    join them into a single string."""
    return "".join(chr(b) for b in byte_string)
def xor(byte_string, key):
    """XOR every byte of *byte_string* against a single-byte key.

    Parameters
    ----------
    byte_string : bytes
        Data to transform.
    key : bytes
        Single-byte key; only key[0] is used.

    Returns
    -------
    bytes
        *byte_string* with each byte XORed against key[0].
    """
    key_val = key[0]  # from key of type bytes to int
    # bytes(<generator of ints>) builds the result in one pass instead of
    # the quadratic repeated bytes-concatenation of the original loop.
    return bytes(b ^ key_val for b in byte_string)
def decrypt_single_xor(hex_string):
    """Brute-force a single-byte-XOR cipher given hex-encoded ciphertext.

    Tries every printable character as the key, scores each candidate
    plaintext with score_english, and keeps the best-scoring one.

    Returns
    -------
    tuple[str, str]
        (best plaintext, key character that produced it).

    Bug fixed: the original returned the loop variable `c` after the
    loop finished -- i.e. the *last* key tried -- instead of the key
    that actually produced the winning plaintext.
    """
    byte_string = hex_to_raw(hex_string)
    lowest_score = 100000
    lowest_string = ""
    lowest_key = ""
    for c in string.printable:
        xor_result = xor(byte_string, bytes([ord(c)]))
        xor_string = bytes_to_string(xor_result)
        score = score_english(xor_string)
        if score < lowest_score:
            lowest_score = score
            lowest_string = xor_string
            lowest_key = c
    return lowest_string, lowest_key
def decrypt_block_xor(byte_string):
    """Find the most likely single-byte XOR key for *byte_string*.

    Every printable character is tried as the key and the candidate
    whose decryption scores most English-like wins.

    Returns the winning key as a one-byte bytes object.
    """
    best_score = 100000
    best_plain = ""
    best_key = ""
    for candidate in string.printable:
        plain = bytes_to_string(xor(byte_string, bytes([ord(candidate)])))
        candidate_score = score_english(plain)
        if candidate_score < best_score:
            best_score = candidate_score
            best_plain = plain
            best_key = candidate
    return bytes([ord(best_key)])
if __name__ == '__main__':
    # Sanity check: XORing 0x3C with 0x08 via xor() and by hand should
    # print the same value (52) twice.
    b = b'\x3C'
    print(xor(b, (b'\x08'))[0])
    print(b[0] ^ (b'\x08')[0])
    # Cryptopals 1.3 ciphertext: hex string encrypted with single-byte XOR.
    hex_string = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
    #print(hex_to_raw(hex_string))
    print(decrypt_single_xor(hex_string))
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,967 | murdoch3/cryptopals-set1 | refs/heads/main | /c1.py | import base64, binascii
b64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Takes a hex encoded string and returns the bytestring representation
def hex_to_raw(hex_string):
    """Decode a hex-encoded string into its raw bytes."""
    return binascii.a2b_hex(hex_string)
# Taking a list of bytes as b64 index values, outputs a b64 encoded string
def raw_to_b64(raw_bytes):
    """Map a sequence of base64 index values (0-63) to their characters
    and join them into an encoded string."""
    return "".join(b64_chars[i] for i in raw_bytes)
# Taking a list of bytes divisible by 3, produces a list of corresponding base64 values
def hex_to_b64(hex_bytes):
    """Convert a byte sequence (length divisible by 3) into a list of
    base64 index values.

    Each 3-byte group is regrouped into four 6-bit values, appended
    most-significant first.
    """
    indices = []
    for i in range(0, len(hex_bytes), 3):
        b0, b1, b2 = hex_bytes[i], hex_bytes[i + 1], hex_bytes[i + 2]
        indices.append((b0 & 0xFC) >> 2)
        indices.append(((b0 & 0x03) << 4) + ((b1 & 0xF0) >> 4))
        indices.append(((b1 & 0x0F) << 2) + ((b2 & 0xC0) >> 6))
        indices.append(b2 & 0x3F)
    return indices
if __name__ == '__main__':
    # Cryptopals 1.1 challenge input (hex-encoded "I'm killing your brain ...").
    hex_string = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d"
    # NOTE(review): the challenge input above is immediately overwritten by
    # this test value, so only "af0000" is actually converted -- presumably
    # leftover debugging; confirm which input is intended.
    hex_string = "af0000"
    hex_raw = hex_to_raw(hex_string)
    b64_raw = hex_to_b64(hex_raw)
    print(raw_to_b64(b64_raw))
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,968 | murdoch3/cryptopals-set1 | refs/heads/main | /c5.py | from c2 import raw_to_hex
def ascii_to_raw(msg):
    """Return the list of code points of each character in *msg*."""
    return [ord(ch) for ch in msg]
def repeating_key_xor(byte_string, key):
    """XOR *byte_string* against *key*, cycling the key as needed.

    Both arguments are sequences of byte values (ints 0-255); the result
    is returned as a list of ints.
    """
    key_len = len(key)
    return [b ^ key[i % key_len] for i, b in enumerate(byte_string)]
if __name__ == '__main__':
msg = "Burning 'em, if you ain't quick and nimble\nI go crazy when I hear a cymbal"
key = "ICE"
msg_bytes = ascii_to_raw(msg)
key_bytes = ascii_to_raw(key)
encrypted_msg = repeating_key_xor(msg_bytes, key_bytes)
msg_hex = raw_to_hex(encrypted_msg)
print(msg_hex)
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,969 | murdoch3/cryptopals-set1 | refs/heads/main | /c6.py | import base64, binascii
from c3 import decrypt_block_xor
from c5sol import repeating_key_xor
def b64_to_raw(b64_string):
    """Return the raw bytes encoded by a base64 string."""
    raw = base64.b64decode(b64_string)
    return raw
def hamming_distance(byte1, byte2):
    """Return the number of differing bits between two non-negative ints.

    Counts the set bits of the XOR via bin().count('1') instead of the
    original manual shift-and-mask loop; same result, simpler and faster.
    """
    return bin(byte1 ^ byte2).count("1")
def string_hamming_distance(byte_string1, byte_string2):
    """Sum of per-byte hamming distances over the overlapping prefix of
    the two sequences (extra bytes in the longer one are ignored)."""
    # zip() truncates to the shorter sequence, matching the original's
    # explicit min-length bookkeeping.
    return sum(hamming_distance(a, b) for a, b in zip(byte_string1, byte_string2))
def shift_list_left(l):
    """Shift every element of *l* one position toward the end, in place:
    the last element is dropped and slot 0 becomes 0. Returns *l* (or a
    new empty list when *l* is empty).

    NOTE(review): despite the name this shifts toward the *right*; the
    existing behaviour is preserved -- confirm intent with callers.

    Bug fixed: the original carried an `if (i-1) >= -1` guard that is
    always true over the loop's range; removed as dead code.
    """
    if not l:
        return []
    for i in range(len(l) - 1, 0, -1):
        l[i] = l[i - 1]
    l[0] = 0
    return l
def score_vigenere_key_size(candidate_key_size, ciphertext):
    """Score how likely *candidate_key_size* is the Vigenere key length.

    Adjacent windows of twice the key size are compared pairwise by
    hamming distance; the total is normalised by the key size (else long
    keys are favoured) and by the number of measurements (so every
    candidate is evaluated the same way).

    Parameters
    ----------
    candidate_key_size : int
        Key length being evaluated.
    ciphertext : bytes
        Raw ciphertext.

    Returns
    -------
    float
        Normalised average hamming distance; *lower* means more likely.
    """
    # As suggested in the challenge, sample windows bigger than one key.
    slice_size = 2 * candidate_key_size
    # Number of samples the ciphertext length allows.
    nb_measurements = len(ciphertext) // slice_size - 1
    k = candidate_key_size
    score = 0
    for i in range(nb_measurements):
        start = i * slice_size
        # Slicing a bytes object already yields bytes, so the original's
        # manual byte-by-byte copies of each window were removed as
        # redundant; the compared values are identical.
        first = ciphertext[start:start + k]
        second = ciphertext[start + k:start + 2 * k]
        score += string_hamming_distance(first, second)
    # Normalisation -- do not remove, or long key sizes win by default.
    score /= candidate_key_size
    score /= nb_measurements
    return score
def get_norm_dist(encrypted_bytes, keysize):
    """Average normalised hamming distance between consecutive
    *keysize*-byte chunks of *encrypted_bytes*; a lower value suggests
    *keysize* is the true repeating-key length.

    Bug fixed: a leftover debug print of the first chunk's length was
    removed; the scoring arithmetic is unchanged.
    """
    # Split the ciphertext into consecutive keysize-byte chunks.
    chunks = [encrypted_bytes[i:i + keysize]
              for i in range(0, len(encrypted_bytes), keysize)]
    total_dist = 0
    count = 0
    # Compare non-overlapping pairs of neighbouring chunks.
    for i in range(0, len(chunks) - 1, 2):
        dist = string_hamming_distance(chunks[i], chunks[i + 1])
        total_dist += dist / keysize
        # NOTE(review): count advances by 2 per comparison, which halves
        # the average; preserved as-is because keysize ranking only needs
        # relative scores -- confirm before reusing this value elsewhere.
        count += 2
    return total_dist / count
def get_blocks(byte_string, keysize):
    """Split *byte_string* into consecutive *keysize*-byte chunks (the
    last chunk may be shorter).

    Bug fixed: the short-input early return referenced the misspelled
    name `bytestring`, raising NameError; it now returns the input
    wrapped in a list as intended.
    """
    if len(byte_string) <= keysize:
        return [byte_string]
    # Plain slicing replaces the original byte-by-byte rebuild loop.
    return [byte_string[i:i + keysize]
            for i in range(0, len(byte_string), keysize)]
def get_transpose(blocks, keysize):
    """Transpose a list of byte chunks: column k of the result collects
    the k-th byte of every block long enough to have one."""
    columns = []
    for k in range(keysize):
        column = b''
        for block in blocks:
            if k < len(block):
                column += bytes([block[k]])
        columns.append(column)
    return columns
def main():
    """Cryptopals 1.6: break repeating-key XOR on '6.txt'.

    Ranks candidate key sizes by normalised hamming distance, then for
    the best candidate transposes the ciphertext into per-key-byte
    columns, solves each column as a single-byte XOR, and prints the
    decrypted text.
    """
    # load data from 6.txt
    with open('6.txt', 'r') as file:
        data = file.read().replace('\n', '')
    encrypted_bytes = b64_to_raw(data)
    #encrypted_bytes = base64.b64decode(data).hex()
    #print(binascii.unhexlify(encrypted_bytes) == b64_to_raw(data))
    #priypnt(encrypted_bytes)
    # use keysizes from 2 to 40
    keysize_norms = []
    for keysize in range(2, 41):
        #first_chunk = encrypted_bytes[:keysize]
        #second_chunk = encrypted_bytes[keysize:keysize+keysize]
        #dist = string_hamming_distance(first_chunk, second_chunk)
        #norm = dist / keysize
        norm = get_norm_dist(encrypted_bytes, keysize)
        #norm = score_vigenere_key_size(keysize, encrypted_bytes)
        keysize_norms.append((keysize, norm))
    # Sort the list by norm.
    keysize_norms.sort(key=lambda tup: tup[1])
    # Get the top 3 most likely keysizes (smallest keysizes)
    keysizes = [keysize_norms[0][0], keysize_norms[1][0], keysize_norms[2][0]]
    print(keysize_norms)
    #keysizes = [29]
    # NOTE(review): range(len(keysizes)-2) iterates only once, so despite
    # collecting three candidates only the single best keysize is tried --
    # confirm whether all three were meant to be attempted.
    for ki in range(len(keysizes)-2):
        # Now break the ciphertext into blocks of keysize length
        blocks = get_blocks(encrypted_bytes, keysizes[ki])
        # Transpose the blocks.
        transpose = get_transpose(blocks, keysizes[ki])
        # Now we want to solve each block as if it were a single-char xor.
        # From this we should get the single-byte xor for each block.
        # Put them together and we should have the repeating-key.
        repeating_key = b''
        for block in transpose:
            # NOTE(review): decrypt_block_xor is called twice per column
            # (once for the debug print); harmless but doubles the work.
            print(decrypt_block_xor(block))
            repeating_key += decrypt_block_xor(block)
        # Now with the repeating key, we want to try and get the message.
        # We can call our repeating key code from a previous challenge.
        decrypted_bytes = repeating_key_xor(encrypted_bytes, repeating_key)
        print(decrypted_bytes.decode('ascii'))
if __name__ == '__main__':
main()
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,970 | murdoch3/cryptopals-set1 | refs/heads/main | /c4.py | import c3
if __name__ == '__main__':
    # Cryptopals 1.4: exactly one line of 4.txt was encrypted with a
    # single-character XOR; find it by scoring every decrypted candidate.
    lowest_score = 100000
    lowest_text = ""
    # `with` guarantees the file is closed even if scoring raises
    # (the original used a manual open()/close() pair).
    with open('4.txt', 'r') as f:
        for line in f:
            # Bug fixed: decrypt_single_xor returns a (plaintext, key)
            # tuple; the original passed the whole tuple to
            # score_english, which then "scored" two full strings as if
            # they were characters, producing meaningless results.
            text, _key = c3.decrypt_single_xor(line.strip())
            score = c3.score_english(text)
            if score < lowest_score:
                lowest_text = text
                lowest_score = score
    print(lowest_score)
    print(lowest_text)
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,971 | murdoch3/cryptopals-set1 | refs/heads/main | /c2.py | from c1 import hex_to_raw
# Takes a byte string and produces the corresponding hex encoded string
def raw_to_hex(byte_string):
    """Hex-encode a sequence of byte values.

    Accepts bytes or any iterable of ints 0-255 (c5 passes a plain
    list) and returns the lowercase, two-digits-per-byte hex string.
    """
    # f-string formatting replaces the manual hex()/zero-padding logic;
    # "02x" gives exactly the same lowercase, left-padded output.
    return "".join(f"{b:02x}" for b in byte_string)
# Taking two fixed length byte strings, produce their XOR
def fixed_xor(bytes1, bytes2):
    """XOR two equal-length byte sequences element-wise.

    Returns the result as a list of ints.
    """
    return [x ^ bytes2[i] for i, x in enumerate(bytes1)]
if __name__ == '__main__':
hex_string1 = "1c0111001f010100061a024b53535009181c"
hex_string2 = "686974207468652062756c6c277320657965"
bytes1 = hex_to_raw(hex_string1)
bytes2 = hex_to_raw(hex_string2)
xor_val = fixed_xor(bytes1, bytes2)
print(raw_to_hex(xor_val))
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,972 | murdoch3/cryptopals-set1 | refs/heads/main | /c8.py | from c1 import hex_to_raw
def main():
    """Cryptopals 1.8: detect AES-ECB-encrypted lines in '8.txt'.

    ECB maps identical 16-byte plaintext blocks to identical ciphertext
    blocks, so any line containing a repeated 16-byte chunk is a likely
    ECB ciphertext; all such lines are printed.
    """
    # Read the hex-encoded lines.
    with open('8.txt', 'r') as file:
        data = file.read().split('\n')
    data = data[:len(data) - 1]  # drop the trailing empty entry
    block_size = 16  # AES block size in bytes
    potentially_ecb = []
    for line in data:
        enc_bytes = hex_to_raw(line)
        chunks = [enc_bytes[i:i + block_size]
                  for i in range(0, len(enc_bytes), block_size)]
        # Score = number of chunks that repeat an earlier chunk.  A set
        # gives O(1) membership instead of the original list's O(n) scan;
        # the resulting score is identical.
        seen = set()
        score = 0
        for chunk in chunks:
            h = chunk.hex()
            if h in seen:
                score += 1
            seen.add(h)
        if score != 0:
            potentially_ecb.append(line)
    for line in potentially_ecb:
        print(line)
if __name__ == '__main__':
main()
| {"/c3.py": ["/c1.py"], "/c5.py": ["/c2.py"], "/c6.py": ["/c3.py"], "/c4.py": ["/c3.py"], "/c2.py": ["/c1.py"], "/c8.py": ["/c1.py"]} |
60,973 | danielcrane/l2reborn | refs/heads/master | /utils/parse_npc_spawn.py | import os
import re
from collections import namedtuple
class SpawnParser:
    """Parses the L2 server SQL spawn lists into a dict mapping each
    NPC id to a list of SpawnData(x, y) coordinates."""

    def __init__(self, sql_path=None):
        """
        Parameters
        ----------
        sql_path : str, optional
            Directory holding the spawn .sql files; defaults to
            ../server_data/sql relative to this file.
        """
        self.SpawnData = namedtuple("SpawnData", ["x", "y"])
        self.util_dir = os.path.dirname(os.path.realpath(__file__))
        # Bug fixed: a caller-supplied sql_path used to be silently
        # dropped (the attribute was only assigned in the None branch,
        # so any explicit path caused an AttributeError later).
        if sql_path is None:
            sql_path = os.path.join(self.util_dir, "..", "server_data", "sql")
        self.sql_path = sql_path

    def parse(self):
        """Parse all three spawn lists and return {npc_id: [SpawnData, ...]}."""
        self.spawn_data = {}
        self.parse_spawn_normal()
        self.parse_spawn_raidboss()
        self.parse_spawn_grandboss()
        return self.spawn_data

    # The three public parse_spawn_* methods are kept for backward
    # compatibility; they only differ in file name and row regex, so the
    # shared logic now lives in _parse_file().

    def parse_spawn_normal(self):
        """Regular spawns: 'spawnlist.sql', quoted integer columns."""
        self._parse_file("spawnlist.sql", r"\(('-?[0-9]{1,9}', ){7}('-?[0-9]')\)")

    def parse_spawn_raidboss(self):
        """Raid-boss spawns: 'raidboss_spawnlist.sql', bare integers."""
        self._parse_file("raidboss_spawnlist.sql", r"\((-?[0-9]{1,9},){9}(-?[0-9])\)")

    def parse_spawn_grandboss(self):
        """Grand-boss spawns: 'grandboss_data.sql', space-separated integers."""
        self._parse_file("grandboss_data.sql", r"\((-?[0-9]{1,9}, ){8}(-?[0-9])\)")

    def _parse_file(self, filename, regex):
        """Extract (npc_id, x, y) from every row of *filename* matching
        *regex* and accumulate it into self.spawn_data."""
        with open(f"{self.sql_path}/{filename}", "r") as f:
            lines = f.readlines()
        for line in lines:
            match = re.match(regex, line)
            if match:
                # NOTE(review): eval() on matched text -- the regex only
                # admits quoted/plain integers so this is contained, but
                # ast.literal_eval would be safer if the format changes.
                data = eval(match.group())  # Evaluate the matched line as a tuple
                data = tuple(int(d) for d in data)  # str -> int
                npc_id, loc_x, loc_y = data[0], data[1], data[2]
                self.spawn_data.setdefault(npc_id, []).append(
                    self.SpawnData(loc_x, loc_y)
                )
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,974 | danielcrane/l2reborn | refs/heads/master | /utils/parse_skills_dat.py | import os
import sys
import json
from bs4 import BeautifulSoup
from collections import namedtuple
import utils
class SkillParser:
    """Parses client .dat files (npcgrp.dat, skillgrp.dat,
    skillname-e.dat) into skill metadata and per-NPC skill listings.
    Relies on utils.read_encrypted to decrypt the tab-delimited files."""
    def __init__(self, skill_dir=None):
        # NOTE(review): skill_dir is accepted but never used -- confirm
        # whether it was meant to override dat_path.
        self.util_dir = os.path.dirname(os.path.realpath(__file__))
        self.SkillData = namedtuple("SkillData", ["name", "desc", "icon"])
        self.dat_path = os.path.join(self.util_dir, "..", "server_data", "dat_files")
        self.Skill = namedtuple("Skill", ["id", "level"])
    def parse(self):
        """Parse everything; returns (skill_data, skill_order).

        skill_data: {skill_id: {level: SkillData}}.
        skill_order: {npc_id: [Skill(id, level), ...]}.
        """
        self.skill_data = self.create_skill_db()
        self.skill_order = self.get_skill_order()
        return self.skill_data, self.skill_order
    def get_skill_order(self):
        """Return {npc_id: [Skill(id, level), ...]} read from the
        'dtab' columns of npcgrp.dat."""
        lines = utils.read_encrypted(self.dat_path, "npcgrp.dat")
        header = lines[0].split("\t")
        # Collect the column indices that hold skill data.
        skill_cols = []
        for i, col in enumerate(header):
            if "dtab" in col:
                skill_cols.append(i)
        skill_cnt_col = skill_cols[0]  # First mention of 'dtab' is the skill count for that npc
        skill_cols = skill_cols[1:]
        skill_order = {}
        for line in lines[1:]:
            line = line.split("\t")
            npc_id = int(line[0])
            skill_order[npc_id] = []
            skill_cnt = int(line[skill_cnt_col])
            if skill_cnt < 2:
                # For some reason in L2Reborn files, treasure chests have 1 skill,
                # whereas at least two skills are needed (skill id + level)
                continue
            # Skills are stored as consecutive (id, level) pairs.
            for idx in range(0, skill_cnt, 2):
                skill_id = int(line[skill_cols[idx]])
                skill_lvl = int(line[skill_cols[idx + 1]])
                skill_order[npc_id].append(self.Skill(skill_id, skill_lvl))
        return skill_order
    def create_skill_db(self):
        """Return {skill_id: {level: SkillData(name, desc, icon)}} built
        from skillgrp.dat (icons) and skillname-e.dat (names/descs)."""
        lines = utils.read_encrypted(self.dat_path, "skillgrp.dat")
        # First pass: icon per (skill id, level).
        skill_icons = {}
        for line in lines[1:]:
            line = line.split("\t")
            id, level, icon = int(line[0]), int(line[1]), line[10]
            if id not in skill_icons:
                skill_icons[id] = {}
            skill_icons[id][level] = icon
        lines = utils.read_encrypted(self.dat_path, "skillname-e.dat")
        skill_data = {}
        for line in lines[1:]:
            line = line.split("\t")
            id, level = int(line[0]), int(line[1])
            # Strip the dat-format framing ("a," prefix, "\0" suffix).
            name = line[2].strip("\\0").strip("a,")
            desc = line[3].strip("\\0").strip("a,")
            if desc == "none":
                desc = ""
            if id not in skill_data:
                skill_data[id] = {}
            skill_data[id][level] = self.SkillData(name, desc, skill_icons[id][level])
        return skill_data
if __name__ == "__main__":
    # Bug fixed: this guard was copy-pasted from parse_npc_xml.py -- it
    # instantiated NpcParser (undefined in this module, NameError) and
    # called dump(), which SkillParser does not implement.
    parser = SkillParser()
    parser.parse()
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,975 | danielcrane/l2reborn | refs/heads/master | /utils/parse_npc_xml.py | import os
import sys
import json
from bs4 import BeautifulSoup
from collections import namedtuple
class NpcParser:
    """Parses the server item and NPC XML files into a dict of NPC data
    (name, title, stats, skills, drops and spoils)."""

    def __init__(self, item_dir=None, npc_dir=None):
        """
        Parameters
        ----------
        item_dir : str, optional
            Directory of item XMLs; defaults to ../server_data/items.
        npc_dir : str, optional
            Directory of NPC XMLs; defaults to ../server_data/npcs.
        """
        self.util_dir = os.path.dirname(os.path.realpath(__file__))
        # Bug fixed: caller-supplied directories used to be silently
        # dropped (the attributes were only assigned in the None branches).
        if item_dir is None:
            item_dir = os.path.join(self.util_dir, "..", "server_data", "items")
        self.item_dir = item_dir
        if npc_dir is None:
            npc_dir = os.path.join(self.util_dir, "..", "server_data", "npcs")
        self.npc_dir = npc_dir
        # Stats to extract from NPC XMLs:
        self.stats = {
            "level",
            "type",
            "hp",
            "mp",
            "exp",
            "sp",
            "patk",
            "pdef",
            "matk",
            "mdef",
            "runspd",
        }
        self.item_data = None
        self.drop_data = None

    def parse(self):
        """Parse items first (drop entries embed item names), then NPCs;
        returns the NPC data dict."""
        self.item_data = self.parse_item_xml()
        self.drop_data = self.parse_npc_xml()
        return self.drop_data

    def dump(self, out_file="drop_data_xml.json"):
        """Write the parsed NPC data to *out_file* as JSON.

        Bug fixed: the original ignored the out_file parameter (the
        filename was hard-coded) and never closed the file handle.
        """
        with open(out_file, "w") as f:
            json.dump(self.drop_data, f)

    def parse_item_xml(self):
        """Return {item_id: Item(name, type, crystal)} parsed from every
        .xml file in self.item_dir."""
        Item = namedtuple("Item", ["name", "type", "crystal"])
        Crystal = namedtuple("Crystal", ["count", "type"])
        item_files = [f for f in os.listdir(self.item_dir) if f.endswith(".xml")]
        item_data = {}
        for file in item_files:
            with open(os.path.join(self.item_dir, file), "r") as f:
                contents = f.read()
            soup = BeautifulSoup(contents, features="html.parser")
            for item in soup.find_all("item"):
                # NOTE(review): eval() on attribute text from the XML --
                # acceptable only because these are trusted server files;
                # int() would be safer if ids are always plain numbers.
                item_id = eval(item["id"])
                # Crystal info is optional.  Explicit None checks replace
                # the original bare `except:`, which also hid real bugs.
                count_tag = item.find("set", {"name": "crystal_count"})
                type_tag = item.find("set", {"name": "crystal_type"})
                if count_tag is not None and type_tag is not None:
                    crystal = Crystal(int(count_tag["val"]), type_tag["val"])
                else:
                    crystal = Crystal(None, None)
                item_data[item_id] = Item(item["name"], item["type"], crystal)
        return item_data

    def parse_npc_xml(self):
        """Return {npc_id: {...}} with name/title/stats/skills/drops/spoils.

        Requires parse_item_xml() to have run first.
        """
        Skill = namedtuple("Skill", ["id", "level"])
        if self.item_data is None:
            # Bug fixed: was `assert ValueError(...)`, which always passes
            # (an exception *instance* is truthy); now actually raises.
            raise ValueError("self.item_data is None, first parse item xml")
        npc_files = [f for f in os.listdir(self.npc_dir) if f.endswith(".xml")]
        npc_data = {}
        for file in npc_files:
            with open(os.path.join(self.npc_dir, file), "r") as f:
                contents = f.read()
            soup = BeautifulSoup(contents, features="html.parser")
            for npc in soup.find_all("npc"):
                npc_id = eval(npc["id"])  # NOTE(review): trusted data only
                npc_data[npc_id] = {
                    "name": npc["name"],
                    "title": npc["title"],
                    "file": file,
                    "stats": [],
                    "drop": [],
                    "spoil": [],
                }
                stats = {}
                for stat in npc.find_all("set"):
                    stat_name = stat["name"].lower()
                    if stat_name in self.stats:  # stat we're interested in
                        try:  # numerical stats are rounded
                            stats[stat_name] = str(round(eval(stat["val"])))
                        except NameError:  # non-numeric values kept verbatim
                            stats[stat_name] = stat["val"]
                    elif stat_name == "dropherbgroup":
                        stats["herbs"] = "Yes" if stat["val"] != "0" else "No"
                # A missing <ai> tag now counts as non-aggressive instead
                # of crashing with AttributeError.
                ai = npc.find("ai")
                if ai is not None and ai.has_attr("aggro") and ai["aggro"] != "0":
                    stats["agro"] = "Yes"
                else:
                    stats["agro"] = "No"
                skills = []
                skills_tag = npc.find("skills")
                if skills_tag is not None:
                    for skill in skills_tag.find_all("skill"):
                        skills.append(Skill(int(skill["id"]), int(skill["level"])))
                npc_data[npc_id]["skills"] = skills
                npc_data[npc_id]["stats"] = stats
                drop_list = npc.find("drops")
                if drop_list is None:
                    continue
                for category in drop_list.find_all("category"):
                    cat = eval(category["id"])
                    for drop in category.find_all("drop"):
                        item_id = eval(drop["itemid"])
                        min_amt = eval(drop["min"])
                        max_amt = eval(drop["max"])
                        chance = eval(drop["chance"]) / 1e6  # stored as ppm
                        entry = [item_id, min_amt, max_amt, chance,
                                 self.item_data[item_id].name]
                        # Category id -1 marks spoil entries.
                        key = "drop" if cat != -1 else "spoil"
                        npc_data[npc_id][key].append(entry)
        return npc_data
if __name__ == "__main__":
parser = NpcParser()
parser.parse()
parser.dump()
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,976 | danielcrane/l2reborn | refs/heads/master | /skill_drop_data/create_skill_data.py | import getopt
import numpy as np
import sys
sys.path.append("..")
import utils
class DataBuilder:
    """Builds modified client .dat files (npcgrp, skillgrp, skillname-e)
    that embed drop/spoil/stat information as extra passive 'skills' on
    each NPC, so the info shows up in the in-game client."""
    def __init__(self, info=True, drops=True, spoils=True, VIP=False):
        self.original_data_path = "../server_data/dat_files" # Path of clean dat files
        self.new_data_path = "./new_dat_files" # Output path of new data (with drop info)
        self.npcs_xml_dir = "../server_data/npcs" # Directory containing NPC xml files
        self.items_xml_dir = "../server_data/items" # Directory containing item xml files
        self.VIP = VIP # If True, currency amount/xp/sp/drop rates are all scaled accordingly
        self.VIP_xp_sp_rate = 1.5 # Experience and SP multiplier
        self.VIP_drop_rate = 1 # 1.5 # Drop chance multipler increase for items
        self.VIP_adena_rate = 1 # 1.1 # Drop chance increase multiplier for adena
        self.VIP_adena_amount = 1.5 # Drop amount increase multiplier for adena
        # Which info types get a pseudo-skill, their fixed ids and icons.
        self.skill_include = {"Drop": drops, "Spoil": spoils, "Information": info}
        self.skill_ids = {"Drop": 20000, "Spoil": 20001, "Information": 20003}
        self.skill_icons = {
            "Drop": "icon.etc_adena_i00",
            "Spoil": "icon.skill0254",
            "Information": "icon.etc_lottery_card_i00",
        }
    def build(self):
        """This is the main class method that performs the actions required
        to build the new .dat files from scratch
        Returns
        -------
        None
            Outputs updated skillname-e.dat and skillgrp.dat to self.new_data_path
        """
        print("[] Parsing NPC .xml files")
        sys.stdout.flush()
        self.parse_npc_xmls()
        print("[] Updating skillname-e.dat")
        sys.stdout.flush()
        self.modify_skill_name()
        print("[] Updating skillgrp.dat")
        sys.stdout.flush()
        self.modify_skill_grp()
        print("[] Updating npcgrp.dat")
        sys.stdout.flush()
        self.modify_npc_grp()
        print("\n[] Build complete")
        sys.stdout.flush()
    def format_probability(self, chance, n=4):
        """Format the inputted probability as a percent or fraction depending size
        Parameters
        ----------
        chance : float
            Probability value between 0 and 1
        Returns
        -------
        string
            Formatted chance (percent if > 1%, fraction otherwise)
        """
        # NOTE(review): chance == 0 would raise ZeroDivisionError in the
        # fraction branch -- confirm zero chances can never reach here.
        if chance >= 0.01:
            return utils.round_chance(chance, n)
        else:
            return f"1 / {round(1/chance):,}"
    def parse_npc_xmls(self):
        """Parses the server XML files and creates a dict of NPC data
        including drops, spoils, stats, etc.
        Returns
        -------
        None
            Stores self.npc_data - a dict containing the information of each NPC
        """
        parser = utils.NpcParser()
        self.npc_data = parser.parse()
    def modify_npc_grp(self):
        """Takes an unmodified npcgrp.dat and first increases the number of possible
        passive skills from 13 to 16 - an extra 3 spots to accomodate for the 3 new types
        of info, and adds the skills which will store drop/spoil/other to each mob
        Note that the size of dtab_base/dtab_max are 2x the number of skills,
        since the skill id and skill level both consist of one entry each
        Returns
        -------
        None
            Outputs updated npcdrp.dat to self.new_data_path
        """
        fname = "npcgrp.dat"
        # Calculate number of additional skills needed to display required info:
        additional_skills = len(self.skill_ids)
        # additional_skills = list(self.skill_include.values()).count(True)
        dtab_base = 26 # Original max number of allowed skills = 13 (x2)
        dtab_max = 32 # New max number of allowed skills = 16 (x2)
        # Decode and convert from .dat to .txt
        lines = utils.read_encrypted(self.original_data_path, fname)
        # Now modify each line to add the skill slots, and data where appropriate:
        # NOTE(review): dtab_loc is bound only in the i == 0 branch, so
        # this assumes the header is always the first line -- confirm for
        # all npcgrp.dat variants.
        for i, line in enumerate(lines):
            line = line.split("\t") # Split the tab-delimited string into a list
            if i == 0:
                # Modify the header
                dtab_loc = line.index("dtab1[0]") # Index of first skill, denoted by dtab[0]
                for idx in range(dtab_base, dtab_max):
                    loc = dtab_loc + idx # Offset idx by starting index, dtab_loc
                    line.insert(loc, f"dtab1[{idx}]") # Insert new skill header element
                lines[i] = "\t".join(line) # Now rejoin the list to form a tab-delimited string
                continue # Move on to the next line
            # If not the header, then first add empty string to each new skill slot:
            for idx in range(dtab_base, dtab_max):
                loc = dtab_loc + idx # Offset idx by starting index, dtab_loc
                line.insert(loc, "") # Insert blank skill data for now
            npc_id = eval(line[0])
            # Now, if the NPC is in our data parsed from XML:
            if npc_id in self.npc_data:
                # Add the skills containing the additional information to the mob data
                n_skill = eval(line[dtab_loc - 1]) # Find how many skills the NPC has
                # Note that mobs with no passives have "1", so we must change to 0 before proceeding:
                n_skill = 0 if n_skill == 1 else n_skill
                # Now we must increase the number of skills the NPC has by 2 for each additional
                # field of information that we wish to add:
                line[dtab_loc - 1] = str(n_skill + 2 * additional_skills)
                for idx, skill_id in enumerate(self.skill_ids.values()):
                    loc = dtab_loc + n_skill + 2 * idx # Select first empty skill index
                    line[loc : loc + 2] = [str(skill_id), str(npc_id)] # Insert skill and npc id
            lines[i] = "\t".join(line) # Now rejoin the list to form a tab-delimited string
        # Since we'll add new skills, we must write with a custom ddf file:
        fname_ddf = fname.replace(".dat", "-custom.ddf")
        # Now encrypt and write updated lines:
        utils.write_encrypted(self.new_data_path, fname, lines, ddf=fname_ddf)
    def modify_skill_grp(self):
        """Takes an unmodified skillgrp.dat and adds the skills which will store
        drop/spoil/other info about mobs
        Returns
        -------
        None
            Outputs updated skillgrp.dat to self.new_data_path
        """
        fname = "skillgrp.dat"
        # Define the format each line takes:
        line_format = "{}\t{}\t2\t0\t-1\t0\t0.00000000\t0\t\t\t{}\t0\t0\t0\t0\t-1\t-1"
        # First decode and convert from .dat to .txt
        lines = utils.read_encrypted(self.original_data_path, fname)
        for npc_id, npc in self.npc_data.items():
            for info_type in self.skill_ids.keys():
                if not self.skill_include[info_type]:
                    # If this type of info isn't to be included, then skip
                    continue
                elif info_type == "Drop":
                    # Don't include drop skill for NPCs with no drops
                    if "drop" not in npc or len(npc["drop"]) == 0:
                        continue
                elif info_type == "Spoil":
                    # Don't include spoil skill for NPCs with no drops
                    if "spoil" not in npc or len(npc["spoil"]) == 0:
                        continue
                # Add info to line_format and append to lines:
                lines.append(
                    line_format.format(
                        self.skill_ids[info_type], npc_id, self.skill_icons[info_type]
                    )
                )
        # Now encrypt and write updated lines:
        utils.write_encrypted(self.new_data_path, fname, lines)
    def modify_skill_name(self):
        """Takes an unmodified skillname-e.dat and adds the skills which will store
        drop/spoil/other info about mobs
        Returns
        -------
        None
            Outputs updated skillname-e.dat to self.new_data_path
        """
        fname = "skillname-e.dat"
        info_header = f"a,{40*'.'}::: {'{}'} :::{40*'.'}\0" # Format for header of skill desc
        tail = "\\0\ta,none\\0\ta,none\\0" # Every line ends with this
        # First decode and convert from .dat to .txt
        lines = utils.read_encrypted(self.original_data_path, fname)
        for npc_id, npc in self.npc_data.items():
            for info_type in self.skill_ids.keys():
                if not self.skill_include[info_type]:
                    # If this type of info isn't to be included, then skip:
                    continue
                head = f"{self.skill_ids[info_type]}\t{npc_id}\t{info_header.format(info_type)}\\t\ta,"
                body = ""
                if info_type == "Information":
                    minfo = npc["stats"]
                    if self.VIP is True:
                        # If VIP, then multiply exp and sp by VIP_xp_sp_rate:
                        minfo["exp"] = int(np.floor(eval(minfo["exp"]) * self.VIP_xp_sp_rate))
                        minfo["sp"] = int(np.floor(eval(minfo["sp"]) * self.VIP_xp_sp_rate))
                    body = (
                        f"NPC ID: {npc_id} "
                        f"Level: {minfo['level']} "
                        f"Agro: {minfo['agro']}\\n"
                        f"Exp: {minfo['exp']} SP: {minfo['sp']} HP: {minfo['hp']} "
                        f"MP: {minfo['mp']}\\nP. Atk: {minfo['patk']} P. Def: {minfo['pdef']} "
                        f"M. Atk: {minfo['matk']} M. Def: {minfo['mdef']}\\n"
                    )
                elif info_type == "Drop":
                    if "drop" not in npc or len(npc["drop"]) == 0:
                        # Don't include drop skill for NPCs with no drops
                        continue
                    npc_type = npc["stats"]["type"]
                    # Here we create lists to store the info, and the drop chance:
                    drop_lines, drop_lines_chance = [], []
                    for drop in npc["drop"]:
                        id, item_min, item_max, chance, name = drop # Extract relevant info
                        if self.VIP is True:
                            # If VIP, then multiply accordingly:
                            if name == "Adena":
                                # If adena, then multiply amount by VIP_adena_amount:
                                item_min *= self.VIP_adena_amount
                                item_max *= self.VIP_adena_amount
                                # And multiply chance by VIP_adena_rate:
                                chance = min(chance * self.VIP_adena_rate, 1)
                            elif npc_type not in ["RaidBoss", "GrandBoss"]:
                                # If not adena or raid boss, then multiply chance by VIP_drop_rate (to a max of 1):
                                chance = min(chance * self.VIP_drop_rate, 1)
                        item_min, item_max = (round(item_min), round(item_max)) # Round to int
                        item_amt = ( # If item_min == item_max, then only show one:
                            f"{item_min}-{item_max}" if item_min != item_max else f"{item_min}"
                        )
                        drop_info = f"{name} [{item_amt}] {self.format_probability(chance)}\\n"
                        drop_lines.append(drop_info)
                        drop_lines_chance.append(chance)
                    else:
                        # Now we go through item by item and insert in order of decreasing drop rate:
                        for idx in np.argsort(drop_lines_chance)[::-1]:
                            body += drop_lines[idx]
                elif info_type == "Spoil":
                    if "spoil" not in npc or len(npc["spoil"]) == 0:
                        # Don't include spoil skill for NPCs with no drops
                        continue
                    # Here we create lists to store the info, and the spoil chance:
                    spoil_lines, spoil_lines_chance = [], []
                    for spoil in npc["spoil"]:
                        id, item_min, item_max, chance, name = spoil # Extract relevant info
                        item_min, item_max = (round(item_min), round(item_max)) # Round to int
                        item_amt = ( # If item_min == item_max, then only show one:
                            f"{item_min}-{item_max}" if item_min != item_max else f"{item_min}"
                        )
                        spoil_info = f"{name} [{item_amt}] {self.format_probability(chance)}\\n"
                        spoil_lines.append(spoil_info)
                        spoil_lines_chance.append(chance)
                    else:
                        # Now we go through item by item and insert in order of decreasing drop rate:
                        for idx in np.argsort(spoil_lines_chance)[::-1]:
                            body += spoil_lines[idx]
                new_line = head + body + tail # Combine the three parts to get the full line
                lines.append(new_line)
        # Now encrypt and write updated lines:
        utils.write_encrypted(self.new_data_path, fname, lines)
def main(argv):
    """Executes the builder with the specified command line arguments

    Parameters
    ----------
    argv : list
        List of command line arguments to be parsed (sys.argv minus program name)
    """
    usage = "Usage: create_skill_data.py <--no-info | --no-drops | --no-spoils | --vip >"
    try:
        opts, _ = getopt.getopt(argv, "h", ["no-info", "no-drops", "no-spoils", "vip", "help"])
    except getopt.GetoptError:
        # Unknown option: show usage and bail out with the conventional exit code.
        print(usage)
        sys.exit(2)
    # Everything is generated by default; VIP rates are off by default.
    flags = {"info": True, "drops": True, "spoils": True, "vip": False}
    for opt, _arg in opts:
        if opt in ("--help", "-h"):
            print(usage)
            sys.exit(2)
        elif opt == "--no-info":
            flags["info"] = False
        elif opt == "--no-drops":
            flags["drops"] = False
        elif opt == "--no-spoils":
            flags["spoils"] = False
        elif opt == "--vip":
            flags["vip"] = True
    info, drops, spoils, vip = flags["info"], flags["drops"], flags["spoils"], flags["vip"]
    print(f"[] Running with setup: info={info}, drops={drops}, spoils={spoils}, VIP={vip}")
    builder = DataBuilder(info=info, drops=drops, spoils=spoils, VIP=vip)
    builder.build()
if __name__ == "__main__":
    # Script entry point: forward the CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,977 | danielcrane/l2reborn | refs/heads/master | /utils/__init__.py | from .utils import read_encrypted
from .utils import write_encrypted
from .utils import round_chance
from .utils import round_sf
from .parse_npc_xml import NpcParser
from .parse_skills_dat import SkillParser
from .parse_npc_spawn import SpawnParser
from .parse_l2off import L2OffParser
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,978 | danielcrane/l2reborn | refs/heads/master | /utils/utils.py | import os
import numpy as np
# Tool locations are resolved relative to this module's own directory so the
# helpers below work regardless of the caller's current working directory.
util_path = os.path.dirname(os.path.realpath(__file__))  # directory of this file
tmp_path = os.path.join(util_path, "..", "tmp")  # scratch dir for intermediate files
asm_path = os.path.join(util_path, "l2asm-disasm_1.4.1")  # l2asm / l2disasm tool dir
l2encdec_path = os.path.join(util_path, "l2encdec")  # l2encdec encrypt/decrypt tool dir
def read_encrypted(path, fname):
    """Reads encrypted .dat file

    Decrypts the file with l2encdec, disassembles it to readable text with
    l2disasm, reads the resulting lines, and cleans up the intermediates.

    Note: The input .dat file name must use the original name, otherwise
    it'll fail to find the correct .ddf file for l2asmdism

    Parameters
    ----------
    path : string
        Path of directory containing .dat file
    fname : string
        File name of .dat file

    Returns
    -------
    list
        List containing the lines of the encrypted .dat file
    """
    if fname[-4:] != ".dat":
        raise ValueError("Input to reader must be a .dat file")
    fname_txt = fname.replace(".dat", ".txt")
    fname_ddf = fname.replace(".dat", ".ddf")
    if not os.path.exists(tmp_path):
        # If temporary directory doesn't exist, then make it
        # NOTE: When converted to class based method, this should be in __init__
        os.makedirs(tmp_path)
    # Decrypt to tmp, then disassemble into a readable .txt using the matching .ddf:
    os.system(f"{l2encdec_path}/l2encdec.exe -s {path}/{fname} {tmp_path}/dec-{fname}")
    os.system(
        f"{asm_path}/l2disasm -d {asm_path}/DAT_defs/Interlude/{fname_ddf} "
        f"{tmp_path}/dec-{fname} {tmp_path}/{fname_txt}"
    )
    # Use a context manager so the handle is closed deterministically
    # (previously the file object was opened inline and never closed):
    with open(f"{tmp_path}/{fname_txt}", "r", encoding="utf8") as f:
        lines = f.read().split("\n")
    del lines[-1]  # drop the empty element produced by the trailing newline
    os.remove(f"{tmp_path}/dec-{fname}")  # Clean up decoded .dat file
    os.remove(f"{tmp_path}/{fname_txt}")  # Clean up readable .txt file
    return lines
def write_encrypted(path, fname, lines, ddf=None):
    """Encrypt the given lines and write them out as a .dat file.

    The lines are dumped to a temporary .txt, assembled into an unencrypted
    .dat via l2asm, then encrypted into the destination with l2encdec.

    Note: The input .dat file name must use the original name, otherwise
    it'll fail to find the correct .ddf file for l2asmdism

    Parameters
    ----------
    path : string
        Path to output .dat file to
    fname : string
        File name to output .dat file to
    lines : list
        List of strings containing information to be written
    ddf : string, optional
        Explicit .ddf definition file name; derived from fname when omitted
    """
    if not fname.endswith(".dat"):
        raise ValueError("Output of writer must be a .dat file")
    fname_txt = fname.replace(".dat", ".txt")
    fname_ddf = ddf if ddf is not None else fname.replace(".dat", ".ddf")
    # Dump the readable lines into the temporary working directory:
    with open(f"{tmp_path}/{fname_txt}", "w", encoding="utf8") as handle:
        handle.write("".join(f"{line}\n" for line in lines))
    # Assemble the readable text back into an unencrypted .dat:
    os.system(
        f"{asm_path}/l2asm -d {asm_path}/DAT_defs/Interlude/{fname_ddf} "
        f"{tmp_path}/{fname_txt} {tmp_path}/unenc-{fname}"
    )
    if not os.path.exists(path):
        # Create the output directory on first use
        os.makedirs(path)
    # Encrypt (header version 413) into the final destination:
    os.system(f"{l2encdec_path}/l2encdec.exe -h 413 {tmp_path}/unenc-{fname} {path}/{fname}")
    os.remove(f"{tmp_path}/unenc-{fname}")
    # os.remove(f"{tmp_path}/{fname_txt}")  # Remove readable .txt file
def round_sf(X, n=5):
    """Round X to n significant figures, never discarding digits left of the decimal point

    Parameters
    ----------
    X : float
        Positive value to be rounded (log10 is taken, so X must be > 0)
    n : int
        Number of significant figures to keep

    Returns
    -------
    float/int
        X rounded to n significant figures
    """
    int_digits = np.floor(np.log10(X)) + 1  # digits before the decimal point
    if int_digits >= n:
        # Already n or more integer digits: plain rounding to the nearest int.
        return round(X)
    dec_digits = n - int_digits  # how many decimal places must survive
    whole = np.floor(X)  # integer part
    frac = X - whole  # fractional part
    scale = 10 ** dec_digits  # scaling factor for fractional rounding
    frac = round(frac * scale) / scale  # round the fraction to dec_digits places
    # Scale the integer part up before adding so the single final division
    # avoids the rounding error of adding a small fraction to a large whole:
    return (whole * scale + round(frac * scale)) / scale
def round_chance(X, n=5):
    """Rounds the fractional probability X as a percentage, rounded to n decimal places
    with trailing zeros removed

    Examples:
        round_chance(0.12345, 2) -> '12.35%'
        round_chance(0.12341, 2) -> '12.34%'

    Fixes over the previous string-surgery implementation:
      * round-up now carries across digits (e.g. 12.399 @ n=2 -> '12.4%',
        previously the carry produced a scrambled digit string like '12.31%')
      * n=0 no longer raises (previously IndexError/ValueError)
      * values that round up to exactly 100 now return '100%'

    Parameters
    ----------
    X : float/int
        Probability (between 0 and 1) to be rounded
    n : int
        Number of decimal places to round X to, should be in the range [0, 16] or so

    Returns
    -------
    string
        X represented as a percentage (with % symbol included) with n decimal places
    """
    from decimal import Decimal, ROUND_HALF_UP  # local import keeps module deps unchanged

    if X == 1:
        return "100%"
    elif X == 0:
        return "0%"
    elif X > 1:
        raise ValueError("Inputted probability is greater than 1")
    elif X < 0:
        raise ValueError("Inputted probability is less than 0")
    elif n < 0:
        raise ValueError("Number of decimal places n is less than 0")
    # Render with the same 16-decimal fixed-point format as before (avoids
    # scientific notation) so rounding is based on identical digits, then do
    # exact decimal half-up rounding — matching the old "next digit >= 5" rule:
    percent = Decimal(f"{X:.16f}") * 100
    quantum = Decimal(1) if n == 0 else Decimal(1).scaleb(-n)  # 1 or 10**-n
    percent = percent.quantize(quantum, rounding=ROUND_HALF_UP)
    out = format(percent, "f")  # fixed-point string, never exponent form
    if "." in out:
        out = out.rstrip("0").rstrip(".")  # strip trailing zeros (and a bare dot)
    return f"{out}%"
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
60,979 | danielcrane/l2reborn | refs/heads/master | /create_drop_site/create_site.py | import os
import sys
import cv2
import numpy as np
from collections import namedtuple
import requests
import urllib.request
from bs4 import BeautifulSoup
import time
import re
sys.path.append("..")
import utils
class PageBuilder:
    def __init__(self):
        """Prepare the site builder: output paths, world-map geometry, parsed
        game data, and the shared HTML template fragments.

        NOTE(review): assumes the map image exists on disk — cv2.imread
        returns None for a missing file and the .shape access below would
        then fail. Confirm the site/img assets are checked in.
        """
        # Output directory layout, all relative to the generated site root:
        self.site_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "site")
        self.npc_path = "npc"
        self.item_path = "item"
        self.recipe_path = "recipe"
        self.img_path = "img"
        self.loc_path = "loc"
        self.css_path = "css"
        self.map_path = f"{self.img_path}/etc/world_map_interlude_big.png"
        img = cv2.imread(f"{self.site_path}/{self.map_path}")  # Read map image file
        # (width, height) in pixels; used by spawn2map for coordinate projection:
        self.map_size = (img.shape[1], img.shape[0])
        self.set_world_info()
        # Create every output directory that does not exist yet:
        if not os.path.exists(self.site_path):
            os.makedirs(self.site_path)
        if not os.path.exists(os.path.join(self.site_path, self.npc_path)):
            os.makedirs(os.path.join(self.site_path, self.npc_path))
        if not os.path.exists(os.path.join(self.site_path, self.item_path)):
            os.makedirs(os.path.join(self.site_path, self.item_path))
        if not os.path.exists(os.path.join(self.site_path, self.recipe_path)):
            os.makedirs(os.path.join(self.site_path, self.recipe_path))
        if not os.path.exists(os.path.join(self.site_path, self.loc_path)):
            os.makedirs(os.path.join(self.site_path, self.loc_path))
        # Parse all game data up front (items first — NPC parsing needs them):
        self.item_data = utils.ItemParser().parse()
        self.npc_data = utils.NpcSqlParser(item_data=self.item_data).parse()
        self.drop_data = self.create_drop_data()
        self.spawn_data = utils.SpawnParser().parse()
        self.skill_data, self.skill_order = utils.SkillParser().parse()
        # Shared HTML fragments; "{}"/"{0}" slots are filled with a relative
        # path prefix by str.format at page-generation time.
        self.css = """
        <head>
        <link href="{}/pmfun.css" rel="stylesheet" type="text/css" />
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
        </head>
        """
        # Note that self.search as it stands will only work from one directory above the base site:
        self.search = """
        <div class="searchbar">
        <form class="example" action="../search.html">
        <input type="text" id="searchTxt" placeholder="Search.." name="search">
        <button id="searchBtn"><i class="fa fa-search"></i></button>
        </form>
        </div>
        """
        self.table_head = """
        <div class="content">
        <table width="100%" border="0" cellspacing="0" cellpadding="0">
        <tbody><tr>
        <td width="17"><img src="{0}/etc/tab_1.gif" width="17" height="21"></td>
        <td background="{0}/etc/tab_1_fon.gif" align="center"><img src="{0}/etc/tab_ornament_top.gif" width="445" height="21"></td>
        <td width="17"><img src="{0}/etc/tab_2.gif" width="17" height="21"></td>
        </tr>
        <tr>
        <td background="{0}/etc/tab_left_fon.gif"></td>
        """
        self.table_foot = """
        </tbody></table>
        </td>
        <td background="{0}/etc/tab_right_fon.gif"></td>
        </tr>
        <tr>
        <td><img src="{0}/etc/tab_3.gif" width="17" height="38"></td>
        <td background="{0}/etc/tab_bottom_fon.gif">
        <table width="100%" border="0" cellspacing="0" cellpadding="0">
        <tbody><tr>
        <td><img src="{0}/etc/tab_4.gif" width="24" height="38"></td>
        <td align="right"><img src="{0}/etc/tab_5.gif" width="24" height="38"></td>
        </tr>
        </tbody></table>
        </td>
        <td><img src="{0}/etc/tab_6.gif" width="17" height="38"></td>
        </tr>
        </tbody></table>
        </div>
        """
def set_world_info(self):
TILE_X_MIN = 16
TILE_X_MAX = 26
TILE_Y_MIN = 10
TILE_Y_MAX = 25
TILE_SIZE = 32768
self.WORLD_X_MIN = (TILE_X_MIN - 20) * TILE_SIZE
self.WORLD_X_MAX = (TILE_X_MAX - 19) * TILE_SIZE
self.WORLD_Y_MIN = (TILE_Y_MIN - 18) * TILE_SIZE
self.WORLD_Y_MAX = (TILE_Y_MAX - 17) * TILE_SIZE
    def create_search_page(self):
        """Generate site/search.html: hidden NPC and item lists plus the
        client-side JavaScript that filters them by the ?search= URL param.

        Items are sorted case-insensitively by name; NPCs by level.
        """
        img_path = self.img_path
        search_db = {"items": {}, "npcs": {}}
        search_db["items"] = {"names": [], "ids": []}
        search_db["npcs"] = {"names": [], "ids": [], "levels": []}
        names_lower = []
        for id, data in self.item_data.items():
            search_db["items"]["ids"].append(id)
            search_db["items"]["names"].append(data.name)
            names_lower.append(data.name.lower())
        # Now sort the item list in order of names for easier search:
        _, search_db["items"]["names"], search_db["items"]["ids"] = (
            list(t)
            for t in zip(
                *sorted(zip(names_lower, search_db["items"]["names"], search_db["items"]["ids"]))
            )
        )
        names_lower = []
        for id, data in self.npc_data.items():
            search_db["npcs"]["ids"].append(id)
            search_db["npcs"]["names"].append(data["name"])
            search_db["npcs"]["levels"].append(int(data["stats"]["level"]))
            names_lower.append(data["name"].lower())
        # Now sort the NPC list in order of levels for easier search:
        search_db["npcs"]["levels"], search_db["npcs"]["names"], search_db["npcs"]["ids"] = (
            list(t)
            for t in zip(
                *sorted(
                    zip(
                        search_db["npcs"]["levels"],
                        search_db["npcs"]["names"],
                        search_db["npcs"]["ids"],
                    )
                )
            )
        )
        html_top = """
        <html>
        <title>L2Reborn Database Search</title>
        <head>
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
        $CSS
        </head>
        <body>
        <div class="searchbar">
        <form id="searchbar" class="example">
        <input type="text" id="searchTxt" placeholder="Search.." name="search">
        <button id="searchBtn"><i class="fa fa-search"></i></button>
        </form>
        </div>
        <div class="content">
        """.replace(
            "$CSS", self.css.format(self.css_path)
        )
        npc_list = """
        <h3 id="npcHead" style='display:none'>NPCs</h3>
        <ul id='npcUL'>
        """
        # loc_html is an f-string *template*: it is evaluated per-NPC below via
        # eval(f'f"""..."""') so {id}/{npc_name} resolve in this scope.
        loc_html = """
        <a href="{self.loc_path}/{id}.html" title="{npc_name} location on the map">
        <img src="{img_path}/etc/flag.gif" border="0" align="absmiddle" alt="{npc_name} location on the map" title="{npc_name} location on the map">
        </a>
        """
        for i, id in enumerate(search_db["npcs"]["ids"]):
            npc_name = search_db["npcs"]["names"][i]
            npc_level = search_db["npcs"]["levels"][i]
            # Only NPCs with known spawn points get a map-location flag:
            loc = eval(f'f"""{loc_html}"""') if id in self.spawn_data else ""
            npc_list += f"<li style='display:none'><a href='{self.npc_path}/{id}.html'>{npc_name} ({npc_level}) {loc}</a></li>\n"
        npc_list += "\n</ul>"
        item_list = """
        <h3 id="itemHead" style='display:none'>Items</h3>
        <ul id='itemUL'>
        """
        for i, id in enumerate(search_db["items"]["ids"]):
            # NOTE(review): .strip("icon.") strips a *character set*, not the
            # "icon." prefix — names beginning/ending in i/c/o/n/. lose extra
            # characters; create_npc_pages uses .replace("icon.", ""). Confirm.
            icon = self.item_data[id].icon.strip("icon.").lower()
            item_list += f"<li style='display:none'><a href='{self.item_path}/{id}.html'><img src='{img_path}/icons/{icon}.png' style='position:relative; top:10px;' class='img_border'>{search_db['items']['names'][i]}</a></li>\n"
        item_list += "\n</ul>"
        html_bottom = """
        </div>
        <script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
        <script>
        var urlParams;
        (window.onpopstate = function () {
            var match,
                pl = /\+/g,  // Regex for replacing addition symbol with a space
                search = /([^&=]+)=?([^&]*)/g,
                decode = function (s) { return decodeURIComponent(s.replace(pl, " ")); },
                query = window.location.search.substring(1);
            urlParams = {};
            while (match = search.exec(query))
               urlParams[decode(match[1])] = decode(match[2]);
        })();
        if (urlParams["search"] !== undefined) {
            var filter, ul, li, a, i, txtValue, listIDs, npcHead, itemHead;
            listIds = ["npcUL", "itemUL"]
            filter = urlParams["search"].toUpperCase();
            document.getElementById("npcHead").style.display = "";
            document.getElementById("itemHead").style.display = "";
            $.each( listIds, function( index, listId) {
                ul = document.getElementById(listId);
                li = ul.getElementsByTagName('li');
                if (filter.slice(0, 3) == "ID=") {
                    for (i = 0; i < li.length; i++) {
                        a = li[i].getElementsByTagName("a")[0];
                        txtValue = a.href.split('/').pop().split('.html')[0];
                        if (txtValue.toUpperCase() == filter.split('=').pop()) {
                            li[i].style.display = "";
                        } else {
                            li[i].style.display = "none";
                        }
                    }
                }
                else {
                    for (i = 0; i < li.length; i++) {
                        a = li[i].getElementsByTagName("a")[0];
                        txtValue = a.textContent || a.innerText;
                        if (txtValue.toUpperCase().indexOf(filter) > -1) {
                            li[i].style.display = "";
                        } else {
                            li[i].style.display = "none";
                        }
                    }
                };
            });
        }
        $("#searchTxt").keyup(function(event) {
            if (event.keyCode === 13) {
                $("#myButton").click();
            }
        });
        </script>
        </body>
        </html>
        """
        html = f"{html_top}\n{npc_list}\n{item_list}\n{html_bottom}"
        with open(os.path.join(self.site_path, f"search.html"), "w") as f:
            f.write(html)
    def create_drops(self, data):
        """Build the HTML "Drop" and "Spoil" table rows for one NPC page.

        Parameters
        ----------
        data : dict
            An entry of self.npc_data; its "drop"/"spoil" lists hold tuples
            (item_id, min_amt, max_amt, chance, name).

        Returns
        -------
        string
            Concatenated <tr> rows: header, Drop section, Spoil section,
            each sorted by descending chance with alternating row colours.
        """
        img_path = f"../{self.img_path}"
        header = """
        <tr>
        <td class="first_line" align="left">Item Name</td>
        <td class="first_line">Crystals (Grade)</td>\n
        <td class="first_line">Chance</td>\n
        </tr>
        """
        header_2 = """
        <tr>
        <td colspan="3" align="left"><b>{}</b></td>
        </tr>
        """
        # template is evaluated per drop via eval(f'f"""..."""') below;
        # NOTE(review): format_probability is not defined in this view —
        # presumably a module-level helper elsewhere in the file. Confirm.
        template = """
        <tr $COLOR>
        <td align="left"><img src="{img_path}/icons/{icon}.png" align="absmiddle" class="img_border" alt="{drop[4]}" title="{drop[4]}"> <a href="../{self.item_path}/{drop[0]}.html" title="{drop[4]}">{drop[4]}</a> ($DROP)</td>
        <td>$CRYSTALS</td>
        <td>{format_probability(drop[3])}</td>
        </tr>
        """
        drops = []
        chances = []
        for i, drop in enumerate(data["drop"]):
            # NOTE(review): .strip("icon.") strips a character set, not the
            # prefix — see create_npc_pages' .replace("icon.", ""). Confirm.
            icon = self.item_data[drop[0]].icon.strip("icon.").lower()
            crystal = self.item_data[drop[0]].crystal
            drops.append(
                eval(f'f"""{template}"""')
                .replace("$DROP", f"{drop[1]}-{drop[2]}" if drop[1] != drop[2] else f"{drop[1]}")
                .replace(
                    "$CRYSTALS",
                    f"{crystal.count} {crystal.type}" if crystal.count is not None else "-",
                )
            )
            chances.append(drop[3])
        # Now sort the drop list in order of chance:
        if len(drops) > 0:
            _, drops = (list(t) for t in zip(*sorted(zip(chances, drops), reverse=True)))
        for i, drop in enumerate(drops):
            # Alternate row background colour on even rows:
            drops[i] = drop.replace(" $COLOR", " bgcolor=#1C425B" if i % 2 == 0 else "")
        drops = header_2.format("Drop") + "\n" + "\n".join(drops)
        spoils = []
        chances = []
        for i, drop in enumerate(data["spoil"]):
            icon = self.item_data[drop[0]].icon.strip("icon.").lower()
            crystal = self.item_data[drop[0]].crystal
            spoils.append(
                eval(f'f"""{template}"""')
                .replace("$DROP", f"{drop[1]}-{drop[2]}" if drop[1] != drop[2] else f"{drop[1]}")
                .replace(
                    "$CRYSTALS",
                    f"{crystal.count} {crystal.type}" if crystal.count is not None else "-",
                )
            )
            chances.append(drop[3])
        # Now sort the spoil list in order of chance:
        if len(spoils) > 0:
            _, spoils = (list(t) for t in zip(*sorted(zip(chances, spoils), reverse=True)))
        for i, spoil in enumerate(spoils):
            spoils[i] = spoil.replace(" $COLOR", " bgcolor=#1C425B" if i % 2 == 0 else "")
        spoils = header_2.format("<br>Spoils") + "\n" + "\n".join(spoils)
        return f"{header}\n{drops}\n{spoils}"
def create_npc_pages(self):
img_path = f"../{self.img_path}"
header_template = """
<td valign="top" bgcolor="#1E4863">
<table width="100%" border="0" cellpadding="5" cellspacing="0" class="show_list">
<tbody>
<tr>
<td colspan="3">
<img src="{img_path}/etc/blank.gif" height="8">
<br>
<span class="txtbig"><b>{name}</b> ({stats["level"]})</span>
$LOC
<br>
<img src="{img_path}/etc/blank.gif" height="10">
<br>
"""
loc_html = """
<a href="../{self.loc_path}/{id}.html" title="{name} location on the map">
<img src="{img_path}/etc/flag.gif" border="0" align="absmiddle" alt="{name} location on the map" title="{name} location on the map">
Location
</a>
"""
skill_template = """<img src="{0}/icons/{1}.png" width="16" align="absmiddle" class="img_border" alt="{2} ({3})\n{4}" title="{2} ({3})\n{4}">"""
stats_template = """
<b>Exp: {stats["exp"]}, SP: {stats["sp"]}</b><br>
Aggressive: {stats["agro"]}, Herbs: {stats["herbs"]}<br>
HP: {stats["hp"]}, P.Atk: {stats["patk"]}, M.Atk: {stats["matk"]}, RunSpd: {stats["runspd"]}
</td>
</tr>
"""
footer = "</tbody></table>\n</td>"
for id, data in self.npc_data.items():
name = data["name"]
stats = data["stats"]
try:
# First try to get correct skill order from game files:
skills = self.skill_order[id]
except KeyError:
try:
# If not available, get from xml files:
skills = data["skills"]
except KeyError:
# If not available, then pass:
pass
title = f"<title>{name}</title>"
header = eval(f'f"""{header_template}"""').replace(
"$LOC", eval(f'f"""{loc_html}"""') if id in self.spawn_data else ""
)
# skills = Add skills here later
stat_list = eval(f'f"""{stats_template}"""')
skill_list = ""
for skill in skills:
skill_data = self.skill_data[skill.id][skill.level]
icon = skill_data.icon.lower().replace("icon.", "")
skill_list += skill_template.format(
img_path, icon, skill_data.name, skill.level, skill_data.desc
)
skill_list += "\n<br><br>"
drops = self.create_drops(data)
css = self.css.format(f"../{self.css_path}")
html = f"<html>\n{title}\n{css}\n{self.search}\n{self.table_head.format(img_path)}\n{header}\n{skill_list}\n{stat_list}\n{drops}\n{self.table_foot.format(img_path)}\n{footer}</html>"
with open(
os.path.join(self.site_path, self.npc_path, f"{id}.html"), "w", encoding="utf-8"
) as f:
f.write(html)
def create_drop_data(self):
Drop = namedtuple("Drop", ["npc", "min", "max", "chance"])
Npc = namedtuple("Npc", ["id", "name", "level", "agro"])
drop_data = {}
for npc_id, npc in self.npc_data.items():
stats = npc["stats"]
npc_tuple = Npc(
npc_id,
npc["name"],
stats["level"],
"Passive" if stats["agro"] is "No" else "Aggressive",
)
for drop_type in ["drop", "spoil"]:
for drop in npc[drop_type]:
id, min_amt, max_amt, chance, name = drop
if id not in drop_data:
drop_data[id] = {}
drop_data[id]["name"] = name
drop_data[id]["type"] = self.item_data[id].type
drop_data[id]["crystal"] = self.item_data[id].crystal
drop_data[id]["info"] = []
drop_data[id]["drop"] = []
drop_data[id]["spoil"] = []
drop_data[id][drop_type].append(Drop(npc_tuple, min_amt, max_amt, chance))
return drop_data
    def create_item_drops(self, id):
        """Build the HTML drop/spoil table rows for one item page.

        Parameters
        ----------
        id : int
            Item id; looked up in self.drop_data. Ids with no recorded drops
            yield empty Drop/Spoil sections.

        Returns
        -------
        string
            <tr> rows: a level-filterable header, the "Drop" section and the
            "Spoil" section, each sorted by ascending NPC level.
        """
        img_path = f"../{self.img_path}"
        try:
            data = self.drop_data[id]
        except KeyError:
            # Item is never dropped/spoiled — render empty sections:
            data = {"drop": [], "spoil": []}
        header = """
        <tr>
        <td class="first_line" align="left">NPC Name</td>
        <td class="first_line" align="left">Level
        <div class="popup">
        <img src="../img/etc/filter.png" height="15" style="cursor:pointer" onclick="myFunction()">
        <span class="popuptext" id="myPopup" style=>
        <div>
        <div>
        <input id="levelMin" type="number" min="1" max="90" value="1" onchange="levelFilter()"/> - <input id="levelMax" type="number" min="1" max="90" value="90" onchange="levelFilter()"/>
        </div>
        </div>
        </span>
        </div>
        </td>
        <td class="first_line">Type</td>
        <td class="first_line">Quantity</td>
        <td class="first_line">Chance</td>
        </tr>
        """
        # Removed sorting for now:
        # header = """
        # <tr>
        # <td class="first_line" align="left">NPC Name</td>
        # <td class="first_line"><a href="{0}/{1}.html?sort=aggro">Type</a></td>
        # <td class="first_line"><a href="{0}/{1}.html?sort=quantity">Quantity</a></td>
        # <td class="first_line"><a href="{0}/{1}.html?sort=chance">Chance</a></td>
        # </tr>
        # """
        header_2 = """
        <tr>
        <td colspan="4" align="left"><b>{}</b></td>
        </tr>
        """
        # template is an f-string template evaluated per drop below.
        # NOTE(review): format_probability is not defined in this view —
        # presumably a module-level helper elsewhere in the file. Confirm.
        template = """
        <tr class="itemData" $COLOR>
        <td class="npcName" align="left">
        <a href="../{self.npc_path}/{drop.npc.id}.html" title="View {drop.npc.name} drop and spoil">
        {drop.npc.name}
        </a>
        $LOC
        </td>
        <td class="npcLevel" align="left">{drop.npc.level}</td>
        <td class="npcAgro">{drop.npc.agro}</td>
        <td class="dropCount">$DROP</td>
        <td class="dropChance">{format_probability(drop.chance)}</td>
        </tr>
        """
        # Removed location from drop portion of template:
        # <a href="/loc/{drop.npc.id}/{drop.npc.name.lower().replace(" ", "-")}.html" title="{drop.npc.name} location on the map"><img src="{img_path}/etc/flag.gif" border="0" align="absmiddle" alt="{drop.npc.name} location on the map" title="{drop.npc.name} location on the map"></a>
        drops = []
        levels = []
        loc_html = """
        <a href="../{self.loc_path}/{drop.npc.id}.html" title="{drop.npc.name} location on the map">
        <img src="{img_path}/etc/flag.gif" border="0" align="absmiddle" alt="{drop.npc.name} location on the map" title="{drop.npc.name} location on the map">
        </a>
        """
        for i, drop in enumerate(data["drop"]):
            drops.append(
                eval(f'f"""{template}"""')
                .replace(
                    "$DROP", f"{drop.min}-{drop.max}" if drop.min != drop.max else f"{drop.min}"
                )
                .replace(
                    "$LOC", eval(f'f"""{loc_html}"""') if drop.npc.id in self.spawn_data else ""
                )
            )
            levels.append(int(drop.npc.level))
        # Now sort the drop list in order of chance:
        if len(drops) > 0:
            _, drops = (list(t) for t in zip(*sorted(zip(levels, drops))))
        for i, drop in enumerate(drops):
            # Alternate row background colour on even rows:
            drops[i] = drop.replace(" $COLOR", " bgcolor=#1C425B" if i % 2 == 0 else "")
        drops = header_2.format("Drop") + "\n" + "\n".join(drops)
        spoils = []
        levels = []
        for i, drop in enumerate(data["spoil"]):
            spoils.append(
                eval(f'f"""{template}"""')
                .replace(
                    "$DROP", f"{drop.min}-{drop.max}" if drop.min != drop.max else f"{drop.min}"
                )
                .replace(
                    "$LOC", eval(f'f"""{loc_html}"""') if drop.npc.id in self.spawn_data else ""
                )
            )
            levels.append(int(drop.npc.level))
        # Now sort the spoil list in order of chance:
        if len(spoils) > 0:
            _, spoils = (list(t) for t in zip(*sorted(zip(levels, spoils))))
        for i, spoil in enumerate(spoils):
            spoils[i] = spoil.replace(" $COLOR", " bgcolor=#1C425B" if i % 2 == 0 else "")
        spoils = header_2.format("Spoil") + "\n" + "\n".join(spoils)
        # return f"{header.format(self.item_path, data['name'].lower().replace(' ', '-'))}\n{drops}<tr></tr>\n{spoils}"
        return f"{header}\n{drops}<tr></tr>\n{spoils}"
def create_item_pages(self):
img_path = f"../{self.img_path}"
header_template = """
<td valign="top" bgcolor="#1E4863">
<table width="100%" border="0" cellpadding="5" cellspacing="0" class="show_list">
<tbody id="itemDataTable"><tr><td colspan="4"><img src="{img_path}/etc/blank.gif" height="8"><br><img src="{img_path}/icons/{icon}.png" align="absmiddle" class="img_border" alt="{name}" title="{name}">
<b class="txtbig">{name}</b>{crystals}<br><img src="{img_path}/etc/blank.gif" height="8"><br>
"""
desc_template = 'Type: Blunt, P.Atk/Def: 175, M.Atk/Def: 91 <br><img src="{img_path}/etc/blank.gif" height="8"><br>Bestows either Anger, Health, or Rsk. Focus.</td></tr>'
footer = "</tbody></table>\n</td>"
for id, data in self.item_data.items():
name = data.name
title = f"<title>{name}</title>"
crystals = (
""
if data.crystal.count == None
else f" (crystals: {data.crystal.count} {data.crystal.type}) "
)
icon = data.icon.strip("icon.").lower()
header = eval(f'f"""{header_template}"""')
# Need to scrape descriptions from game files before enabling this:
desc = "" # eval(f'f"""{desc_template}"""')
drops = self.create_item_drops(id)
css = self.css.format(f"../{self.css_path}")
jquery = """
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script>
function myFunction() {
var popup = document.getElementById("myPopup");
popup.classList.toggle("show");
};
var itemDataTable = document.getElementById("itemDataTable");
var itemDatas = itemDataTable.getElementsByClassName("itemData");
function levelFilter() {
var npcLevel;
var levelMin = parseInt(document.getElementById("levelMin").value);
var levelMax = parseInt(document.getElementById("levelMax").value);
$.each(itemDatas, function(index, itemData) {
npcLevel = parseInt($(itemData.getElementsByClassName("npcLevel")[0]).text());
if ((npcLevel < levelMin) || (npcLevel > levelMax)) { itemData.style.display = "none" }
else { itemData.style.display = "" };
});
}
$(document).ready(function() {
var levelMin = 100;
var levelMax = 0;
$.each(itemDatas, function(index, itemData) {
npcLevel = parseInt($(itemData.getElementsByClassName("npcLevel")[0]).text());
if (npcLevel < levelMin) { levelMin = npcLevel };
if (npcLevel > levelMax) { levelMax = npcLevel };
document.getElementById("levelMin").value = levelMin;
document.getElementById("levelMax").value = levelMax;
});
})
</script>
"""
html = f"<html>\n{title}\n{css}\n<body>\n{self.search}\n{self.table_head.format(img_path)}\n{header}\n{desc}\n{drops}\n{self.table_foot.format(img_path)}\n{footer}\n</body>\n{jquery}\n</html>"
with open(os.path.join(self.site_path, self.item_path, f"{id}.html"), "w") as f:
f.write(html)
def spawn2map(self, spawn_point):
x_map = (
(spawn_point.x - self.WORLD_X_MIN) / (self.WORLD_X_MAX - self.WORLD_X_MIN)
) * self.map_size[0]
y_map = (
self.map_size[1]
- ((spawn_point.y - self.WORLD_Y_MIN) / (self.WORLD_Y_MAX - self.WORLD_Y_MIN))
* self.map_size[1]
)
return x_map, y_map
    def create_loc_pages(self):
        """Write one Leaflet map page per spawning NPC (site/loc/<id>.html),
        plotting every spawn point as a marker on the world-map image.

        NPCs absent from self.spawn_data are skipped.
        """
        img_path = f"../{self.img_path}"
        for id, data in self.npc_data.items():
            if id not in self.spawn_data:
                continue
            name = data["name"]
            title = f"<title>{name} Location</title>"
            # css = self.css.format(f"../{self.css_path}")
            css = """
            <head>
            <link href="{0}/pmfun.css" rel="stylesheet" type="text/css" />
            <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
            <style>
            #map {{
            margin: auto;
            height: 874px;
            width: 604px;
            }}
            </style>
            </head>
            """.format(
                f"../{self.css_path}"
            )
            spawn_points = self.spawn_data[id]
            # Hidden <ul> carries the pixel coordinates for the script below:
            spawn_list = "<ul id='coords' style='display:none;'>"
            for spawn_point in spawn_points:
                x_map, y_map = self.spawn2map(spawn_point)
                spawn_list += f"\n\t<li x={x_map} y={y_map}></li>"
            spawn_list += "\n</ul>"
            npc_title = f"<div align='center'><a href='../{self.npc_path}/{id}.html' title='View {name} drop and spoil'><h2>{name} ({data['stats']['level']})</h2></a></div>"
            map = '<div id="map" align="center"></div>'
            # Leaflet script; literal JS braces are doubled because the whole
            # string goes through str.format for the map bounds.
            jquery = """
            <link rel="stylesheet" href="https://unpkg.com/leaflet@1.6.0/dist/leaflet.css" integrity="sha512-xwE/Az9zrjBIphAcBb3F6JVqxf46+CDLwfLMHloNu6KEQCAWi6HcDUbeOfBIptF7tcCzusKFjFw2yuvEpDL9wQ==" crossorigin=""/>
            <script src="https://unpkg.com/leaflet@1.6.0/dist/leaflet.js" integrity="sha512-gZwIG9x3wUXg2hdXF6+rVkLF/0Vi9U8D2Ntg4Ga5I5BZpVkVxlJWbSQtXPSiUTtC0TjtGOmxa1AJPuV0CPthew==" crossorigin=""></script>
            <script type="text/javascript" src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
            <script type="text/javascript" src="https://code.jquery.com/ui/1.12.1/jquery-ui.min.js"></script>
            <script>
            var map = L.map('map', {{
                crs: L.CRS.Simple,
                nowrap: true,
                minZoom: -1.6
            }});
            var redIcon = new L.Icon({{
                iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-2x-red.png',
                shadowUrl: 'https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/images/marker-shadow.png',
                iconSize: [25, 41],
                iconAnchor: [12, 41],
                popupAnchor: [1, -34],
                shadowSize: [41, 41]
            }});
            var bounds = [[0, 0], [{0}, {1}]];
            var image = L.imageOverlay("../img/etc/world_map_interlude_big.png", bounds).addTo(map);
            map.fitBounds(bounds);
            var bigIcon = new L.Icon({{
                iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-2x-red.png',
                iconSize: [25, 41],
                iconAnchor: [12, 41],
                popupAnchor: [1, -34],
            }});
            var smallIcon = new L.Icon({{
                iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-2x-red.png',
                iconSize: [12.5, 20.5],
                iconAnchor: [6, 20.5],
                popupAnchor: [1, -34],
            }});
            var ul = document.getElementById("coords");
            var li = ul.getElementsByTagName('li');
            var markers = []
            for (i = 0; i < li.length; i++) {{
                x = li[i].getAttribute("x");
                y = li[i].getAttribute("y");
                markers.push(L.marker(L.latLng(y, x), {{icon: smallIcon}}).addTo(map));
            }}
            map.setMaxBounds(bounds);
            map.on('drag', function() {{ map.panInsideBounds(bounds, {{ animate: false }}); }});
            map.on('zoomend', function(ev){{
                for (i = 0; i < markers.length; i++) {{
                    marker = markers[i];
                    if (map.getZoom() > 1) {{
                        marker.setIcon(bigIcon);
                    }} else {{
                        marker.setIcon(smallIcon);
                    }}
                }}
            }})
            </script>
            """.format(
                self.map_size[1], self.map_size[0]
            )
            html = f"<html>\n{title}\n{css}\n{self.search}\n<br><br><br><br>\n{spawn_list}\n{npc_title}\n{map}\n{jquery}</html>"
            with open(os.path.join(self.site_path, self.loc_path, f"{id}.html"), "w") as f:
                f.write(html)
    def create_ingredient_table(self, recipe, first=True):
        """Recursively render a recipe's ingredient list as nested <ul> HTML.

        Ingredients that are themselves craftable recurse into their own
        (initially hidden) sub-list.

        Parameters
        ----------
        recipe : object
            Recipe record with .id, .result.id and .ingredients (each having
            .id, .count, .name)
        first : bool
            True for the top-level call; sub-lists are rendered hidden

        Returns
        -------
        tuple
            (ingredients_html, ingredient_id_set) — the markup and the set of
            every item id appearing anywhere in the (recursive) tree
        """
        img_path = f"../{self.img_path}"
        ingredient_list = set()
        if first:
            ingredients = f"<ul class='{recipe.result.id}'>\n"
        else:
            ingredients = f"<ul class='{recipe.result.id}' style = 'display:none'>\n"
        for ingredient in recipe.ingredients:
            # NOTE(review): .strip("icon.") strips a character set rather than
            # the "icon." prefix — see .replace("icon.", "") elsewhere. Confirm.
            icon = self.item_data[ingredient.id].icon.strip("icon.").lower()
            ingredients += f"\t<li class='{ingredient.id}'><img src='{img_path}/icons/{icon}.png' style='position:relative; top:10px;' class='img_border'> <text class='item_count'>{ingredient.count}</text>x <a href='../item/{ingredient.id}.html'>{ingredient.name}</a>"
            ingredient_list.add(ingredient.id)
            if ingredient.id in self.recipe_results and ingredient.id != recipe.id:
                # Craftable ingredient: link its recipe and recurse into it.
                ingredients += f" (<a href='../{self.recipe_path}/{ingredient.id}.html'>recipe</a>) <img src='../img/etc/expand.png' id='{ingredient.id}' height='12' style='cursor:pointer; position:relative; top:3px;' onclick='myFunction(this)'></li>\n"
                ingredients_, ingredient_list_ = self.create_ingredient_table(
                    self.recipe_data[self.recipe_results[ingredient.id]], first=False
                )
                ingredients += ingredients_
                # NOTE(review): closes a <details> tag that is never opened in
                # this view — presumably leftover markup; verify intended.
                ingredients += "</details>"
                ingredient_list = ingredient_list.union(ingredient_list_)
            else:
                ingredients += "</li>\n"
        ingredients += "</ul>"
        return ingredients, ingredient_list
def create_recipe_pages(self):
    """Render one static HTML page per recipe into ``self.recipe_path``.

    Each page shows the (recursive) ingredient tree plus a running
    "Totals" column; the embedded jQuery expands/contracts sub-recipes
    and keeps the totals in sync client-side.
    """
    img_path = f"../{self.img_path}"
    css = self.css.format(f"../{self.css_path}")
    self.recipe_data = utils.RecipeParser(item_data=self.item_data).parse()
    # Reverse index: crafted item id -> the recipe id that produces it.
    self.recipe_results = {}
    for recipe_id, recipe in self.recipe_data.items():
        self.recipe_results[recipe.result.id] = recipe_id
    for recipe in self.recipe_data.values():
        title = f"<title>{recipe.name}</title>"
        # NOTE(review): "sucess" typo lives in the emitted page text;
        # left as-is here because changing output is a behavior change.
        info = f"<b>{recipe.name}</b> (level {recipe.level}, quantity {recipe.result.count}, sucess chance {recipe.chance}, MP {recipe.mp}"
        ingredients, ingredient_list = self.create_ingredient_table(recipe)
        # Header cell linking the recipe item and the crafted result.
        table_0 = """
<td align="center" valign="top" bgcolor="#1E4863">
<img src="{0}/etc/blank.gif" height="8"><br>
<b class="txtbig"><a href='../item/{1}.html'>Recipe</a>:
<a href='../item/{2}.html'>{3}</a> ({4})</b><br><img src="{0}/etc/blank.gif" height="8"><br>
<table cellspacing='0' cellpadding='0' border='0' width='100%' class='txt'>\n<tbody>\n<tr>\n<td>
""".format(
            img_path, recipe.id, recipe.result.id, recipe.result.name, recipe.chance
        )
        table_1 = "</td>\n<td valign='top'><h3>Totals:</h3>"
        table_2 = "</td>\n</tr>\n</tbody>\n</table>"
        totals = "<ul id='totals'>\n"
        # Direct (top-level) ingredients start visible in the totals column;
        # everything deeper starts hidden until its sub-recipe is expanded.
        base_ingredients = [ingredient.id for ingredient in recipe.ingredients]
        base_ingredient_counts = [ingredient.count for ingredient in recipe.ingredients]
        for ingredient_id in ingredient_list:
            ingredient_data = self.item_data[ingredient_id]
            ingredient_name = ingredient_data.name
            # NOTE(review): same strip("icon.") character-set caveat as in
            # create_ingredient_table.
            icon = ingredient_data.icon.strip("icon.").lower()
            if ingredient_id in base_ingredients:
                ingredient_count = base_ingredient_counts[
                    base_ingredients.index(ingredient_id)
                ]
                style = "style = ''"
            else:
                ingredient_count = 0
                style = "style='display:none'"
            totals += f"\t<li {style} id='total_{ingredient_id}' ><img src='{img_path}/icons/{icon}.png' style='position:relative; top:10px;' class='img_border'><text class='item_count'>{ingredient_count}</text>x <a href='../item/{ingredient_id}.html'>{ingredient_name}</a>\n"
        totals += "</ul>\n"
        # Client-side expand/contract logic; moves counts between a parent
        # ingredient and its sub-ingredients inside the #totals list.
        jquery = """
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script>
var totalUL = document.getElementById("totals");
var totalLIs = totalUL.getElementsByTagName('li');
var i, childNode, childNodes, findID, findLI, parentVal, childVal, totalVal, childID;
function expand(elem, ul) {
ul.style.display = "";
elem.src = '../img/etc/collapse.png';
parentVal = parseInt($(ul).parent().find("li."+elem.id+" text.item_count").text());
findLI = document.getElementById("total_"+elem.id);
totalVal = parseInt($(findLI).find('text.item_count').text());
totalVal -= parentVal;
if (totalVal === 0) {
$(findLI).find('text.item_count').text(totalVal);
findLI.style.display = "none";
};
childNodes = ul.childNodes;
for(i = 0; i < childNodes.length; i++) {
childNode = childNodes[i];
if (childNodes[i].nodeName === "LI") {
childID = childNode.getAttribute("class");
childVal = parseInt($(childNode).find("text.item_count").text());
findLI = document.getElementById("total_"+childID);
totalVal = parseInt($(document.getElementById("total_"+childID)).find('text.item_count').text());
totalVal += childVal;
$(findLI).find('text.item_count').text(totalVal);
if (findLI.style.display == "none") {
findLI.style.display = "";
}
}
}
};
function contract(elem, ul) {
var i, childNode, childNodes, findID, findLI, parentVal, childVal, totalVal, childID;
ul.style.display = "none";
elem.src = '../img/etc/expand.png';
parentVal = parseInt($(ul).parent().find("li."+elem.id+" text.item_count").text());
findLI = document.getElementById("total_"+elem.id);
totalVal = parseInt($(findLI).find('text.item_count').text());
childNodes = ul.childNodes;
for(i = 0; i < childNodes.length; i++) {
childNode = childNodes[i];
if (childNode.nodeName === "UL") {
if (childNode.style.display === "") {
childID = childNode.getAttribute("class");
elem = document.getElementById(childID);
ul = $(elem).parent().parent().find('ul.'+elem.id)[0];
contract(elem, ul);
}
}
}
totalVal += parentVal;
if (totalVal > 0) {
findLI.style.display = "";
$(findLI).find('text.item_count').text(totalVal);
}
for(i = 0; i < childNodes.length; i++) {
childNode = childNodes[i];
if (childNode.nodeName === "LI") {
childID = childNode.getAttribute("class");
childVal = parseInt($(childNode).find("text.item_count").text());
findLI = document.getElementById("total_"+childID);
totalVal = parseInt($(document.getElementById("total_"+childID)).find('text.item_count').text());
totalVal -= childVal;
$(findLI).find('text.item_count').text(totalVal);
if (totalVal === 0) {
findLI.style.display = "none";
}
}
}
};
function myFunction(elem) {
var ul = $(elem).parent().parent().find('ul.'+elem.id)[0];
if (ul.style.display == "none") {
// Expand
expand(elem, ul);
}
else {
// Contract
contract(elem, ul)
}
};
</script>
"""
        html = f"<html>\n{title}\n{css}\n{self.search}\n{'<br>'*4}\n{self.table_head.format(img_path)}\n{table_0}\n{ingredients}\n{table_1}\n{totals}\n{self.table_foot.format(img_path)}\n{table_2}\n{jquery}\n</html>"
        with open(
            os.path.join(self.site_path, self.recipe_path, f"{recipe.id}.html"), "w"
        ) as f:
            f.write(html)
def scrape_pmfun_images(self):
    """Download a PNG for every item from lineage.pmfun.com.

    Skips items whose image already exists on disk, so the scrape is
    resumable. Performs network I/O; sleeps briefly between requests to
    be polite to the remote server.
    """
    # NOTE: the loop variable `id` shadows the builtin of the same name;
    # `data` is unpacked but never used.
    for id, data in self.item_data.items():
        file_path = os.path.join(self.site_path, self.img_path, self.item_path, f"{id}.png")
        # Already downloaded on a previous run — skip.
        if os.path.isfile(file_path):
            continue
        url = f"https://lineage.pmfun.com/item/{id}"
        r = requests.get(url)
        soup = BeautifulSoup(r.text, features="html.parser")
        # First <img> whose src points into pmfun's data/img/ tree is the
        # item's icon; raises TypeError if the page has no such image.
        loc = soup.find("img", {"src": re.compile(r"^data/img/")})["src"]
        image_url = f"https://lineage.pmfun.com/{loc}"
        with open(file_path, "wb") as f:
            f.write(requests.get(image_url).content)
        # Rate-limit the scrape.
        time.sleep(0.1)
def icons_to_lower():
    """Rename every file in the icons directory to its lowercase name.

    One-off maintenance helper. Improvements over the original:
    renames via full paths instead of mutating the process-wide working
    directory with os.chdir, drops the unused ``first`` snapshot of the
    listing, skips files that are already lowercase, and avoids shadowing
    the ``dir`` builtin.
    """
    icon_dir = r"C:\git\l2reborn\create_drop_site\site\img\icons"
    for name in os.listdir(icon_dir):
        lowered = name.lower()
        # Nothing to do if the name is already lowercase.
        if lowered != name:
            os.rename(os.path.join(icon_dir, name), os.path.join(icon_dir, lowered))
def format_probability(chance, n=4):
    """Format the inputted probability as a percent or fraction depending size

    Parameters
    ----------
    chance : float
        Probability value in (0, 1].
    n : int, optional
        Precision passed through to ``utils.round_chance`` when the value
        is rendered as a percentage (default 4).

    Returns
    -------
    string
        Formatted chance (percent if >= 1%, "1 / N" fraction otherwise,
        with N thousands-separated).

    Raises
    ------
    ValueError
        If ``chance`` is not strictly positive (previously this crashed
        with an unhelpful ZeroDivisionError for chance == 0).
    """
    if chance <= 0:
        raise ValueError("chance must be a positive probability")
    if chance >= 0.01:
        return utils.round_chance(chance, n)
    return f"1 / {round(1 / chance):,}"
if __name__ == "__main__":
    # Build the entire static site in order: NPCs, items, the search index,
    # spawn-location maps, and finally the recipe pages (which depend on
    # item_data populated by the earlier steps).
    pb = PageBuilder()
    print("Creating NPC pages")
    pb.create_npc_pages()
    print("Creating Item pages")
    pb.create_item_pages()
    print("Creating search page")
    pb.create_search_page()
    print("Creating loc pages")
    pb.create_loc_pages()
    print("Creating recipe pages")
    pb.create_recipe_pages()
| {"/utils/parse_skills_dat.py": ["/utils/__init__.py"], "/skill_drop_data/create_skill_data.py": ["/utils/__init__.py"], "/utils/__init__.py": ["/utils/utils.py", "/utils/parse_npc_xml.py", "/utils/parse_skills_dat.py", "/utils/parse_npc_spawn.py"], "/create_drop_site/create_site.py": ["/utils/__init__.py"]} |
61,001 | mkovalski/rllib | refs/heads/main | /rllib/models/dqn_model.py | #!/usr/bin/env python
import logging
import torch.nn as nn
import torch.nn.functional as F
class DQNModel(nn.Module):
    """Small fully-connected Q-network.

    Maps a flat observation of size ``inputs`` to ``outputs`` Q-values
    through two ReLU hidden layers that halve the width at each step
    (inputs -> inputs//2 -> inputs//4 -> outputs).
    """

    def __init__(self, inputs, outputs):
        super(DQNModel, self).__init__()
        # Funnel architecture: each hidden layer halves the previous width.
        width_a = inputs // 2
        width_b = inputs // 4
        self.linear1 = nn.Linear(inputs, width_a)
        self.linear2 = nn.Linear(width_a, width_b)
        self.head = nn.Linear(width_b, outputs)

    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        hidden = F.relu(self.linear2(F.relu(self.linear1(x))))
        return self.head(hidden)
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,002 | mkovalski/rllib | refs/heads/main | /rllib/agents/agent.py | #!/usr/bin/env python
from abc import ABC, abstractmethod
class Agent(ABC):
    """Abstract interface every RL agent in this package implements.

    Subclasses must provide both a training loop and an evaluation loop;
    instantiating this class directly is prevented by @abstractmethod.
    """

    @abstractmethod
    def train(self):
        # Run the agent's learning loop; must be overridden.
        raise NotImplementedError

    @abstractmethod
    def evaluate(self):
        # Run the agent greedily (no learning); must be overridden.
        raise NotImplementedError
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,003 | mkovalski/rllib | refs/heads/main | /rllib/utils/replay_buffer.py | #!/usr/bin/env python
'''Simple replay buffer for reinforcement learning tasks'''
from collections import namedtuple
import numpy as np
import pickle
import random
from tqdm import tqdm
# Reference: https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
# Field order matters: push(*args) fills a Transition positionally.
transition_items = ['state', 'legal_actions', 'action', 'next_state', 'reward', 'done']
Transition = namedtuple('Transition', tuple(transition_items))

class ReplayBuffer():
    '''Simple replay buffer for reinfocement learning

    Stores up to ``capacity`` Transitions in a ring buffer and supports
    uniform random sampling of stacked numpy batches.

    Args:
        capacity (int): Size of replay buffer
    '''
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        # Next write index; wraps around once the buffer is full.
        self.position = 0

    def push(self, *args):
        '''Saves transition.

        Args must match ``transition_items`` positionally:
        (state, legal_actions, action, next_state, reward, done).
        '''
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = Transition(*args)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        '''Randomly sample a batch from the replay buffer.

        Returns a single Transition whose fields are numpy arrays stacked
        along a new leading batch dimension.
        '''
        samples = random.sample(self.memory, batch_size)
        # Aggregate each field across the sampled transitions.
        args = {}
        for idx, field in enumerate(samples[0]._fields):
            args[field] = np.stack([samples[i][idx] for i in range(len(samples))])
        new_sample = Transition(**args)
        return new_sample

    def populate(self, env):
        '''Fill the buffer to capacity with random-action rollouts of env.'''
        state = env.reset()
        for i in tqdm(range(self.capacity)):
            action = env.sample()
            next_state, reward, done, _ = env.step(action)
            # BUG FIX: Transition has six fields but the original pushed only
            # five (legal_actions was missing), so Transition(*args) raised a
            # TypeError. Use the env's legal-action set when it exposes one,
            # falling back to None for envs that do not.
            legal_actions = getattr(env, 'legal_actions', None)
            self.push(state, legal_actions, action, next_state, reward, done)
            state = next_state
            if done:
                state = env.reset()

    def pop_all(self):
        '''Empty the buffer and return everything it held.'''
        self.position = 0
        return_list = []
        return_list, self.memory = self.memory, return_list
        return return_list

    def save(self, path):
        '''Pickle the whole buffer to ``path``.'''
        with open(path, 'wb') as myFile:
            pickle.dump(self, myFile)

    @classmethod
    def load(cls, path):
        '''Restore a previously saved buffer from ``path``.'''
        with open(path, 'rb') as myFile:
            rb = pickle.load(myFile)
        return rb

    def __len__(self):
        return len(self.memory)
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,004 | mkovalski/rllib | refs/heads/main | /rllib/agents/random_agent.py | #!/usr/bin/env python
from .agent import Agent
import math
import numpy as np
# Reference: https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
class RandomAgent():
    """Baseline agent: ignores state entirely and acts uniformly at random."""

    def step(self, legal_actions):
        """Return one action drawn uniformly from ``legal_actions``."""
        chosen = np.random.choice(legal_actions)
        return chosen
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,005 | mkovalski/rllib | refs/heads/main | /rllib/models/dqn_conv_model.py | #!/usr/bin/env python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
class DQNConvModel(nn.Module):
def __init__(self, inputs, outputs, kernel_size = 3):
super(DQNConvModel, self).__init__()
planes = inputs[0]
x = inputs[1]
y = inputs[2]
self.conv1 = nn.Conv2d(planes, 128, kernel_size = kernel_size, stride = 1,
bias = False)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = nn.Conv2d(128, 256, kernel_size = kernel_size, stride = 1,
bias = False)
self.bn2 = nn.BatchNorm2d(256)
self.conv3 = nn.Conv2d(256, 512, kernel_size = kernel_size, stride = 1,
bias = False)
self.bn3 = nn.BatchNorm2d(512)
self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2, padding = 1)
self.output_shape = self._get_output_shape(inputs)
self.linear1 = nn.Linear(self.output_shape, self.output_shape)
self.final = nn.Linear(self.output_shape, outputs)
def _get_output_shape(self, sh):
with torch.no_grad():
out = torch.rand((1, *sh))
out = self.bn1(self.conv1(out))
out = self.maxpool(out)
out = self.bn2(self.conv2(out))
out = self.maxpool(out)
out = self.bn3(self.conv3(out))
out = self.maxpool(out)
return np.prod(out.shape)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.maxpool(x)
x = F.relu(self.bn2(self.conv2(x)))
x = self.maxpool(x)
x = F.relu(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = x.view(x.shape[0], -1)
x = F.relu(self.linear1(x))
return self.final(x)
if __name__ == '__main__':
    # Smoke test: build a model for a 1x20x20 observation with 100 actions
    # and push a single random batch through it to verify shapes.
    inp_shape = (1, 20, 20)
    output_shape = 100
    model = DQNConvModel(inp_shape, output_shape, kernel_size = 3)
    print(model)
    data = torch.rand((1, *inp_shape))
    print("Input shape: {}".format(data.shape))
    out = model(data)
    print("Output shape: {}".format(out.shape))
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,006 | mkovalski/rllib | refs/heads/main | /rllib/agents/dqn_agent.py | #!/usr/bin/env python
from .agent import Agent
import copy
import math
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import time
# Reference: https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
# Small constant guarding against division by zero when averaging the loss.
EPS = 1e-8

class DQNAgent():
    """Deep-Q-Network agent: epsilon-greedy exploration, replay buffer,
    and a periodically-synced target network.

    Args:
        model: Q-network mapping states to per-action values.
        replay_buffer: buffer with push()/sample() (see ReplayBuffer).
        action_size: total number of actions in the environment.
        gamma: discount factor for future rewards.
        eps_start/eps_end/eps_decay: exponential epsilon schedule params.
        batch_size: minibatch size drawn from the replay buffer.
        device: torch device string for model inputs.
    """
    def __init__(self,
                 model,
                 replay_buffer,
                 action_size,
                 gamma = 0.95,
                 eps_start = 0.95,
                 eps_end = 0.05,
                 eps_decay = 10000000,
                 batch_size = 128,
                 device = 'cuda'):
        self.model = model
        self.replay_buffer = replay_buffer
        self.action_size = action_size
        self.gamma = gamma
        self.eps_start = eps_start
        self.eps_end = eps_end
        self.eps_decay = eps_decay
        self.batch_size = batch_size
        self.device = device
        self.optimizer = optim.RMSprop(self.model.parameters(), lr = 3e-4)
        # Target network starts as an exact copy; refreshed explicitly via
        # update_target_model().
        self.target_model = copy.deepcopy(self.model)
        self.n_steps = 0
        # Running loss statistics, reported via get_loss()/reset_loss().
        self.running_loss = 0
        self.loss_steps = 0
        # Pending (state, legal_actions, action) waiting for its outcome;
        # completed into a full transition on the NEXT call to step().
        self._prev_step = None

    def load_model(self, latest_model):
        """Load a state dict (e.g. from a trainer process) into the Q-network."""
        self.model.load_state_dict(latest_model)

    def get_loss(self):
        """Mean loss since the last reset_loss(); EPS avoids 0/0."""
        return self.running_loss / (self.loss_steps + EPS)

    def reset_loss(self):
        self.loss_steps = 0
        self.running_loss = 0

    def actions_to_mask(self, legal_actions):
        '''Use a mask of 0 here so we can easily subtract out from next_target'''
        # Illegal actions get -inf so that adding the mask to Q-values
        # removes them from any subsequent argmax/amax.
        mask = np.full(self.action_size, -np.inf)
        mask[legal_actions] = 0
        return mask

    def get_threshold(self):
        """Current exploration epsilon (exponential decay in n_steps)."""
        return self.eps_end + (self.eps_start - self.eps_end) * \
            math.exp(-1. * self.n_steps / self.eps_decay)

    def _get_action(self, state, legal_actions, is_eval = False):
        """Epsilon-greedy action choice restricted to legal_actions.

        Greedy in eval mode or with probability 1 - epsilon; uniformly
        random over legal actions otherwise.
        """
        if is_eval or np.random.random() > self.get_threshold():
            state = torch.tensor(state).float().to(self.device)
            # Add a leading batch dimension of 1.
            state = state.view((1, *state.shape))
            with torch.no_grad():
                action = self.model(state).float().cpu().numpy().flatten()
            # Argmax over the legal subset, then map back to the action id.
            action_idx = np.argmax(action[legal_actions])
            return legal_actions[action_idx]
        else:
            action = np.random.choice(legal_actions)
            return action

    def _get_next_target(self, next_state):
        """Target-network Q-values for the next states (no gradients)."""
        with torch.no_grad():
            return self.target_model(next_state).cpu().numpy()

    def update_target_model(self):
        """Sync the target network with the online network's weights."""
        self.target_model.load_state_dict(self.model.state_dict())

    def optimize(self):
        """One gradient step on a sampled minibatch; returns the loss value.

        Returns 0 without updating when the buffer is smaller than a batch.
        """
        if len(self.replay_buffer) < self.batch_size:
            return 0
        batch = self.replay_buffer.sample(self.batch_size)
        next_state = torch.tensor(batch.next_state).float().to(self.device)
        # Get the next target for our model
        next_target = self._get_next_target(next_state)
        # Update the targets so they reflect valid actions
        # Legal actions are 0, illegal are -inf
        next_target += batch.legal_actions
        #next_target[np.where(batch.legal_actions == 0)] = float('-inf')
        next_target = np.amax(next_target, axis = 1)
        # Bellman target; (1 - done) zeroes the bootstrap for terminal states.
        target = batch.reward + ((1 - batch.done) * (self.gamma * next_target))
        target = target.reshape(-1, 1)
        target = torch.tensor(target).float().to(self.device)
        # Clean up the original actions to see what we took
        state = torch.from_numpy(batch.state).float().to(self.device)
        # Gather the Q-value of the action actually taken in each row.
        pred = self.model(state).gather(
            1, torch.from_numpy(batch.action.reshape(-1, 1)).to(self.device))
        loss = F.smooth_l1_loss(pred, target)
        self.optimizer.zero_grad()
        loss.backward()
        # Clamp gradients element-wise to stabilize training.
        for param in self.model.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
        self.running_loss += loss.item()
        self.loss_steps += 1
        return loss.item()

    def _update_replay_buffer(self, state, action, legal_actions, done, reward):
        """Complete the pending transition with its observed outcome, then
        stash the current (state, legal-action mask, action) for the next call."""
        if self._prev_step is not None:
            self.replay_buffer.push(self._prev_step['state'],
                                    self._prev_step['legal_actions'],
                                    self._prev_step['action'],
                                    state,
                                    reward,
                                    done)
        if not done:
            self._prev_step = dict(state = state,
                                   legal_actions = self.actions_to_mask(legal_actions),
                                   action = action)
        else:
            # Episode boundary: do not chain transitions across episodes.
            self._prev_step = None

    def step(self, state, legal_actions, done, reward, is_eval = False):
        """Observe (state, reward, done), record the transition (train mode),
        and return the next action — or None on a terminal step."""
        action = None
        if not done:
            action = self._get_action(state = state, legal_actions = legal_actions, is_eval = is_eval)
        if not is_eval:
            self._update_replay_buffer(state, action, legal_actions, done, reward)
            self.n_steps += 1
        return action

    def pop_transitions(self):
        # NOTE(review): intentional no-op stub? No transitions are returned —
        # confirm callers do not rely on this draining the buffer.
        pass
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,007 | mkovalski/rllib | refs/heads/main | /rllib/agents/__init__.py | from .dqn_agent import DQNAgent
from .random_agent import RandomAgent
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,008 | mkovalski/rllib | refs/heads/main | /rllib/models/alpha_zero_resnet.py | import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import numpy as np
from .blocks import ConvLayer, ValueHead, PolicyHead, ResLayer
class AlphaZeroResnet(nn.Module):
    """AlphaZero-style network: shared conv+residual trunk feeding separate
    policy and value heads (ConvLayer/ResLayer/PolicyHead/ValueHead come
    from the local ``blocks`` module).

    Args:
        inp_shape: observation shape (planes, height, width).
        output_shape: size of the flat policy/action space.
        res_layer_number: number of residual blocks in the trunk.
        planes: channel width of the trunk.
        use_player_state / embedding_dict: forwarded to both heads.
    """
    def __init__(self, inp_shape, output_shape, res_layer_number = 5, planes = 128,
                 use_player_state = True, embedding_dict = {}):
        # NOTE(review): ``embedding_dict = {}`` is a mutable default shared by
        # every instance constructed without the argument — safe only if it
        # is never mutated; verify in the head implementations.
        super(AlphaZeroResnet, self).__init__()
        self.use_player_state = use_player_state
        self.embedding_dict = embedding_dict
        self.inp_shape = inp_shape
        self.inp_planes = inp_shape[0]
        self.board_shape = inp_shape[1:]
        self.output_shape = output_shape
        self.conv = ConvLayer(self.inp_planes, planes = planes)
        self.res_layers = torch.nn.ModuleList([ ResLayer(inplanes = planes, planes = planes) for i in range(res_layer_number)])
        self.policyHead = PolicyHead(planes, self.board_shape, output_shape,
                                     use_player_state = use_player_state,
                                     embedding_dict = embedding_dict)
        self.valueHead = ValueHead(planes, self.board_shape, output_shape,
                                   use_player_state = use_player_state,
                                   embedding_dict = embedding_dict)

    def forward(self, s, player_state):
        """Return (policy, value): action probabilities and a [-1, 1] value."""
        s = self.conv(s)
        for res_layer in self.res_layers:
            s = res_layer(s)
        # tanh bounds the value estimate to [-1, 1], AlphaZero convention.
        v = torch.tanh(self.valueHead(s, player_state = player_state))
        p = self.policyHead(s, player_state = player_state)
        # log_softmax(...).exp() == softmax over the action dimension.
        return F.log_softmax(p, dim = 1).exp(), v
if __name__ == '__main__':
    # Smoke test for the network's shapes on a batch of 4 random boards.
    state_shape = (1, 20, 20)
    output_shape = 32000
    net = AlphaZeroResnet(state_shape, output_shape)
    print(net)
    item = torch.tensor(np.random.random((4, *state_shape))).float()
    # NOTE(review): forward() requires a second ``player_state`` argument, so
    # this call raises TypeError as written. The correct second argument
    # depends on the head implementations — confirm and pass it here.
    policy, value = net(item)
    print(policy.shape, value.shape)
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,009 | mkovalski/rllib | refs/heads/main | /rllib/models/__init__.py | from .dqn_model import DQNModel
from .dqn_conv_model import DQNConvModel
| {"/rllib/agents/random_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/dqn_agent.py": ["/rllib/agents/agent.py"], "/rllib/agents/__init__.py": ["/rllib/agents/dqn_agent.py", "/rllib/agents/random_agent.py"], "/rllib/models/__init__.py": ["/rllib/models/dqn_model.py", "/rllib/models/dqn_conv_model.py"]} |
61,018 | bitPanG98/Juliet | refs/heads/master | /Melissa Stuff/wolfram.py | import urllib2
import xml.etree.ElementTree as ET
class Wolfram:
    """Python 2 module: answers spoken queries via the Wolfram Alpha v2 API.

    ``speaker`` is a text-to-speech object with a ``say`` method; ``key``
    is the Wolfram Alpha application id.
    """
    def __init__(self, speaker, key):
        self.speaker = speaker
        self.key = key

    def process(self, job, controller):
        """Try to answer ``job``'s recorded phrase; return True if handled.

        Short spoken answers are read aloud; long/visual results open the
        Wolfram Alpha website in the browser via ``controller``.
        """
        if job.get_is_processed():
            return False
        if not self.key:
            self.speaker.say(
                "Please provide an API key to query Wolfram Alpha.")
            return False
        response = self.query(job.recorded(), self.key)
        # NOTE(review): query() can fall through without an explicit return
        # (all pods skipped), yielding None — this .find() would then raise
        # AttributeError. Confirm whether that path is reachable in practice.
        if response.find('No results') != -1:
            return False
        elif response == "Pulling up visual.":
            self.speaker.say(response)
            self.open(False, job.recorded(), controller)
        else:
            self.speaker.say(response)
        # NOTE(review): sets the attribute directly while the check above
        # goes through job.get_is_processed() — verify both refer to the
        # same flag.
        job.is_processed = True
        return True

    def query(self, phrase, key):
        """Query the Wolfram Alpha v2 XML API and summarize the first
        usable pod; returns "No results" when nothing is answerable."""
        # Minimal URL-encoding: only spaces are escaped.
        phrase = phrase.replace(' ', '%20')
        w_url = "http://api.wolframalpha.com/v2/query?input=" + \
            phrase + "&appid=" + key
        xml_data = urllib2.urlopen(w_url).read()
        root = ET.fromstring(xml_data)
        # Parse response
        # StopIteration is (ab)used as a local "give up" signal, caught below.
        try:
            pods = root.findall('.//pod')
            if pods == []:
                raise StopIteration()
            # if first and second pods are input interpretation and response,
            # stop and ignore
            if pods[0].attrib['title'] == "Input interpretation" and \
                    pods[1].attrib['title'] == "Response":
                raise StopIteration()
            for pod in pods:
                # skip input human response (we are doing that ourselves) and
                # input interpretation
                if pod.attrib['title'] != "Response" and \
                        pod.attrib['title'] != "Input interpretation":
                    plaintexts = pod.findall('.//plaintext')
                    text = plaintexts[0].text
                    # Short textual answers are spoken; anything long or
                    # non-textual falls back to the visual (browser) path.
                    if text is not None and len(text) < 100:
                        return "the answer is " + \
                            text.replace("°", ' degrees ').encode('ascii', 'ignore')
                    else:
                        return "Pulling up visual."
        except StopIteration:
            return "No results"

    def open(self, wolfram, text, controller):
        """Open the Wolfram Alpha results page for ``text`` in the browser.

        The ``wolfram`` parameter is unused by this implementation.
        """
        wolfram_url = "http://www.wolframalpha.com/input/?i=" + \
            text.replace(" ", "+")
        controller.open(wolfram_url)
61,019 | bitPanG98/Juliet | refs/heads/master | /Juliet.py | #!/usr/bin/env python3
###############################################################################################
###############################################################################################
#
# Welcome to Juliet -- your virtual assistant.
# I'm hoping this code will read a lot like a
# book on computer science. I hope to include enough
# comments to make this code easy to understand and
# modify.
#
# You can say "Julia Help" to get started.
#
###############################################################################################
###############################################################################################
# Import system modules.
import os
# Import my own modules in sub directories.
from SpeakAndHear import talktome
from SpeakAndHear import mycommand
from GreyMatter import julibrain
# Import my own modules in this directory.
import initualizejuliet as ij
################################################################################################
# Start myVars.
def myVars():
    """Declare the names used for music-playback configuration.

    NOTE(review): these ``global`` statements have no effect as written —
    nothing is assigned in this function, and main() binds playcounter and
    totalsongstoplay as its own locals right after calling this. Confirm
    whether module-level globals were actually intended.
    """
    # Global variables that control how many songs are played at a time for "Julia play music."
    global playcounter
    # Totalsongstoplay used below in main().
    global totalsongstoplay
# End myVars.
# START MAIN PROGRAM.
# only definitions for variables and functions happen
# above this. Nevertheless, this code doesn't run either
# until it is called at the bottom of this file.
# Similarly, this file will be called by some sort of
# front-end. Currently, only startJuliet.sh exists
# to do that.
def main():
    """Entry point: initialize the speech model, greet the user, then loop
    forever listening for commands prefixed with a wake word ("Julie",
    "Julia", "Julius", "Juliet") and dispatch them to julibrain.assistant.
    """
    # Initialize.
    myVars()
    # NOTE(review): the two names below are locals of main(); the global
    # declarations in myVars() do not affect them.
    playcounter = 1
    # This is where to set the number of songs to play when you say "Julie play music."
    totalsongstoplay = 2
    try:
        # kaldi.Recognizer requires a model. CheckMyModel verifies it exists
        # and otherwise explains where to get it (vosk / Alphacephei:
        # https://github.com/alphacep/vosk-api, https://alphacephei.com/en/).
        ij.CheckMyModel()
    except SystemExit as e:
        print(e)
    # End initialize.
    # Greet the user and print the same hints. Comment these out if the
    # startup messages get tiresome.
    talktome.talkToMe("I am Julie Julie. How can I help?")
    print("How can I help?")
    talktome.talkToMe("To get started, You can say Julie Julie help.")
    print("To get started, You can say 'Julie Julie help.'")
    # Loop over and over to continuously execute multiple commands.
    while True:
        # Listen for a command: myCommand() converts microphone audio to
        # text; the [3:] slice drops the leading characters of the raw
        # recognizer result before matching.
        output = mycommand.myCommand()[3:]
        # 'juli' matches any of the wake words Julie/Julia/Julius/Juliet.
        if 'juli' in output:
            print('Julia responds:\n')
            # Don't run code for unit testing
            runtest = False
            # assistant() matches `output` against its command table and
            # performs the action; wrapped in try so a failing action
            # reports the error instead of crashing the listen loop.
            try:
                julibrain.assistant(output, playcounter, totalsongstoplay, runtest)
            except Exception as e:
                print(e)
        # Whatever you said is printed out, so you can see what Julie
        # understood — useful for diagnosing recognition problems.
        print(output)
# END MAIN FUNCTION
# None of the code up above this line runs unless main is called.
# CALL THE MAIN FUNCTION HERE
main()
| {"/Juliet.py": ["/initualizejuliet.py"]} |
61,020 | bitPanG98/Juliet | refs/heads/master | /SpeakAndHear/mycommand.py | ###############################################################################################
######## STT SPEECH TO TEXT FUNCTION THAT RETURNS THE VARIABLE: command
import pyaudio
from vosk import Model, KaldiRecognizer
def myCommand():
    """Listen on the default microphone and return one recognized utterance.

    Blocks until the vosk recognizer accepts a complete waveform, then
    tears down the audio stream and returns the recognizer output.

    NOTE(review): the return value is the raw JSON result split on the
    literal substring "text" — it still carries quote/colon characters
    (the caller slices them off with [3:]); consider json.loads instead.
    NOTE(review): Model("model-en") is reloaded on every call, which is
    slow — confirm whether it can be loaded once at module level.
    """
    # "listens for commands"
    # We imported vosk up above.
    p = pyaudio.PyAudio()
    # 16 kHz mono input matching the model's expected sample rate.
    stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)
    stream.start_stream()
    model = Model("model-en")
    rec = KaldiRecognizer(model, 16000)
    while True:
        data = stream.read(2000)
        if len(data) == 0:
            break
        # AcceptWaveform returns true once a full utterance is decoded.
        if rec.AcceptWaveform(data):
            #print(rec.Result())
            # I commented out this line and added the 3 lines below
            myResult = rec.Result()
            myList = myResult.split("text")
            command = myList[1]
            # Release the audio device before returning.
            stream.stop_stream()
            stream.close()
            p.terminate()
            return command
######## END STT SPEECH TO TEXT FUNCTION THAT RETURNS THE VARIABLE: command
############################################################################################### | {"/Juliet.py": ["/initualizejuliet.py"]} |
61,021 | bitPanG98/Juliet | refs/heads/master | /GreyMatter/julibrainUtils.py | ###############################################################################################
# This module is used to check if a process is already running
# I don't think this is being used anymore. I may delete it, but I suspect it is very useful.
# I may move it to a utility module.
# I'll comment it out later and see what breaks.
# I need to create automated tests first using assert statements.
# Then if I break something, I'll know right away.
def checkIfProcessRunning(processName):
    '''
    Check if there is any running process that contains the given name processName.
    '''
    # NOTE(review): psutil is referenced below but never imported in this
    # module, so calling this function as-is raises NameError. Add
    # ``import psutil`` (or confirm the function is dead code) before use.
    # Iterate over the all the running process
    for proc in psutil.process_iter():
        try:
            # Check if process name contains the given name string.
            if processName.lower() in proc.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible — skip it.
            pass
    return False
###############################################################################################
# End Check if a process is already running | {"/Juliet.py": ["/initualizejuliet.py"]} |
61,022 | bitPanG98/Juliet | refs/heads/master | /GreyMatter/julibrain.py |
'''
The julibrain module contains command-word/action pairs.
'''
# Import all the required modules.
# Pyaudio is for the microphone and may be required by mpg123.
# Pyautogui is for moving the mouse around robotically and automating key presses.
# Subprocess is for running operating system commands and programs.
# Os is for access operating system calls. For example, it is used to get the current working directory.
# Webrowser is used to open and control whatever your default webbrowser is.
# The time module give us access to time related functionality.
# Re is python3's regular expression module.
# Requests is used for making get requests to http servers.
# Wikipedia is python3's module to access Wikipedia's API.
# Random access's random generator functionality.
# Psutils adds process utilities -- access information about processes running on the system.
# Sys adds access to system commands. I don't seem to be using this module. (Possibly remove.)
# SpeakAndHear is a local module. You'll find this is the SpeakAndHear subdirectory.
# SkeakAndHear has modules for speech to text and text to speech.
# GreyMatter is the program's brain. It contains a large if statement that contains
# all the keywords and subsequent actions. I shouldn't need to load this, as I'm in this file already.
# (Possibly delete "import GreyMatter.")
################################################################################################
import pyaudio
import pyautogui
import subprocess
import os
import webbrowser
from time import localtime, strftime, sleep
import re
import requests
import wikipedia
from random import randrange
import psutil
# import sys (Possibly delete this line.)
from SpeakAndHear import talktome
# from GreyMatter import julibrain (Possibly delete this line.)
###############################################################################################
# end import statements
################################################################################################
###############################################################################################
# This is Juliet's brain.
# All her commands and logic are called here.
###############################################################################################
def cleanj(command):
    """Strip wake words and the 'look up' prefix from a recognized command.

    Returns the remaining text with surrounding/duplicate whitespace
    collapsed, ready to be used as a Wikipedia search term.

    BUGFIX: the old code replaced "julie" before "julie julie", so the
    double-wake-word replacement was dead code; phrases are now removed
    longest-first.
    """
    for phrase in ("julie julie", "julia", "julie", "julius", "look up"):
        command = command.replace(phrase, "")
    # Collapse the gaps left by the removed phrases.
    return " ".join(command.split())
# BEGIN GIGANTIC ASSISTANT FUNCTION
def assistant(command, playcounter, totalsongstoplay, runtest):
    """Match *command* against the known voice commands and run the action.

    command          -- recognized speech as lower-case text
    playcounter      -- 1-based counter of the song currently playing
    totalsongstoplay -- number of random songs the 'music' command plays
    runtest          -- True under unit test: skip side effects where
                        possible and return the URL for inspection

    Returns a URL string for the testable browser commands, otherwise
    None (implicitly).
    """
    # ----- open fixed websites ---------------------------------------
    if 'open reddit' in command:
        url = 'https://www.reddit.com/'
        if not runtest:
            webbrowser.open(url)
            print('Done!')
            talktome.talkToMe('reddit is opening.')
        if runtest:
            return url
    if 'open youtube' in command:
        url = 'https://www.youtube.com/'
        if not runtest:
            webbrowser.open(url)
            print('Done!')
            talktome.talkToMe('youtube is opening.')
        if runtest:
            return url
    # ----- Google Docs dictation -------------------------------------
    # Opens a new Google Doc; afterwards say "microphone" to start voice
    # typing.  The first use needs browser microphone permission.
    # NOTE(review): this branch ignores runtest and always opens a real
    # browser -- confirm whether tests should exercise it.
    if 'dict' in command:
        talktome.talkToMe(
            'Opening a new document. After the new document is open you can ask me to open the microphone.')
        url = 'https://docs.google.com/document/u/0/'
        webbrowser.open(url)
        # Maximize so the hard-coded click coordinates line up.
        pyautogui.hotkey('winleft', 'up')
        # Coordinates were captured on a 4k display with
        # "xdotool getmouselocation --shell"; adjust for other screens,
        # and raise the duration on slow connections.
        pyautogui.moveTo(777, 777, duration=.4)
        pyautogui.click()
        pyautogui.click()
        return url
    # ----- Google search ---------------------------------------------
    if 'search' in command:
        url = 'https://google.com'
        webbrowser.open_new_tab(url)
        # Maximize, then click through to the search box
        # (display-specific coordinates, see the note above).
        pyautogui.hotkey('winleft', 'up')
        pyautogui.moveTo(2716, 1209, duration=.3)
        pyautogui.click()
        pyautogui.moveTo(1302, 546, duration=.3)
        pyautogui.click()
        pyautogui.moveTo(2716, 1209, duration=.3)
        pyautogui.click()
    # ----- Google Docs voice-typing toggle ---------------------------
    if 'microphone' in command:
        pyautogui.hotkey('ctrl', 'S')
    # ----- terminal --------------------------------------------------
    elif 'terminal' in command:
        # Open a terminator window and tile it to the right half.
        subprocess.call(['terminator', '-T', 'First'])
        pyautogui.moveTo(2201, 1001, duration=.1)
        pyautogui.click()
        pyautogui.hotkey('winleft', 'right')
    # ----- open arbitrary website ------------------------------------
    # The spoken domain must include the TLD ("... dot com").
    elif 'open website' in command:
        reg_ex = re.search('open website (.+)', command)
        if reg_ex:
            domain = reg_ex.group(1)
            url = 'https://www.' + domain
            webbrowser.open(url)
            print('Done!')
        else:
            pass
    # ----- Wikipedia lookup ------------------------------------------
    elif 'look' in command:
        talktome.talkToMe("Searching Wikipedia . . . ")
        # Strip the wake words and the "look up" prefix first.
        command = cleanj(command)
        results = wikipedia.summary(command)
        wikiurl = wikipedia.page(command)
        webbrowser.open_new_tab(wikiurl.url)
        print(results)
        try:
            talktome.talkToMe(results)
        except KeyboardInterrupt:
            # Let the user cut a long summary short with Ctrl-C.
            pass
    # ----- music -----------------------------------------------------
    # Plays totalsongstoplay random songs from the playlist file by
    # recursing with an incremented playcounter.
    elif 'music' in command:
        if playcounter == 1:
            talktome.talkToMe("Choosing random song . . . ")
        with open('/home/bard/Code/Juliet/mymusiclist.txt') as f:
            if playcounter == 1:
                print("Total songs to play " + str(totalsongstoplay) + ".")
            mymusic = f.read().splitlines()
            random_index = randrange(len(mymusic))
            song = mymusic[random_index]
            print("Playing song number " + str(playcounter) + ".")
            print("Song file:")
            print(song)
            playthis = 'mpg123 -q ' + song
            p1 = subprocess.Popen(playthis, shell=True)
            try:
                # BUGFIX: wait() instead of spinning on poll(), which
                # pegged a CPU core for the whole song.
                p1.wait()
            except KeyboardInterrupt:
                # Ctrl-C skips the rest of the song.
                pass
            # Ask the player to stop in case it is still running.
            p1.terminate()
            sleep(1)
            # BUGFIX: poll() returns None while the process is alive,
            # so kill on None; the old test was inverted and only
            # "killed" processes that had already exited.
            if p1.poll() is None:
                p1.kill()
            else:
                print('process terminated')
        if playcounter < totalsongstoplay:
            playcounter = playcounter + 1
            assistant(command, playcounter, totalsongstoplay, runtest)
        playcounter = 1
    # ----- pleasantries ----------------------------------------------
    elif 'hello' in command or 'hi' in command:
        talktome.talkToMe(
            'Welcome. I am Julia, your virtual artificial intelligence assistant.')
        print('Welcome. I am Julia, your virtual artificial intelligence assistant.')
        talktome.talkToMe('How may I help you?')
        print('How may I help you?')
    elif 'thanks' in command or 'tanks' in command or 'thank you' in command:
        talktome.talkToMe('You are welcome')
        print('You are welcome')
    elif 'how are you' in command or 'and you' in command or 'are you okay' in command:
        talktome.talkToMe('Fine thank you.')
        print('Fine thank you.')
    # ----- HAL easter eggs (not listed in commandlist) ---------------
    elif 'open the pod door' in command:
        talktome.talkToMe('I am sorry, Dave. I am afraid I can not do that.')
    elif 'problem' in command:
        talktome.talkToMe('I think you know as well as I do')
    elif 'talkin' in command:
        talktome.talkToMe('This mission is too important.')
        talktome.talkToMe(' I can not to allow you to jeopardize it.')
    elif 'why do you say that' in command:
        talktome.talkToMe('I know that you want to disconnect me.')
        talktome.talkToMe('I can not allow that.')
    # ----- system commands -------------------------------------------
    elif 'shutdown' in command:
        # BUGFIX: program and arguments must be separate list items;
        # ["shutdown -h now"] looks for a binary literally named
        # "shutdown -h now" and raises FileNotFoundError.
        subprocess.call(["shutdown", "-h", "now"])
    elif 'reboot' in command:
        subprocess.call(["reboot"])
    elif 'stop' in command or 'stopped' in command or "listening" in command:
        # Exit the assistant entirely.
        talktome.talkToMe("Goodbye, Sir, powering off")
        print("Goodbye, Sir, powering off")
        quit()
    # ----- desktop interaction ---------------------------------------
    elif 'click' in command:
        pyautogui.click()
    # "other" = right click, because the word "right" tiles a window.
    elif 'other' in command:
        pyautogui.rightClick()
    elif 'middle' in command:
        pyautogui.middleClick()
    # Tile the window under the hard-coded point to the right half.
    elif 'right' in command:
        pyautogui.moveTo(400, 400, duration=.1)
        pyautogui.click()
        pyautogui.hotkey('winleft', 'right')
    # Tile the window under the hard-coded point to the left half.
    elif 'left' in command:
        pyautogui.moveTo(2200, 1000, duration=.1)
        pyautogui.click()
        pyautogui.hotkey('winleft', 'left')
    elif 'maximize' in command:
        pyautogui.click()
        pyautogui.hotkey('winleft', 'up')
    elif 'minimize' in command:
        pyautogui.click()
        pyautogui.hotkey('winleft', 'h')
    # ----- help ------------------------------------------------------
    elif 'help' in command:
        talktome.talkToMe("The wake word is Julia")
        talktome.talkToMe("You can also use Juliet, Julius, or Julie")
        talktome.talkToMe("Julie Julie works best, however")
        talktome.talkToMe("You can always say Julie Julie HELP.")
        talktome.talkToMe("Julia also runs the listed commands that follow")
        talktome.talkToMe("Also, you can always say Julie Julie list commands.")
        talktome.talkToMe("You can ask Julia to")
        with open("commandlist") as file:
            for line in file:
                talktome.talkToMe(line)
    # Read the commandlist file aloud; keep that file in sync with the
    # branches above when adding commands.
    elif 'commands' in command:
        talktome.talkToMe("You can ask Julia to")
        with open("commandlist") as file:
            for line in file:
                talktome.talkToMe(line)
    # ----- smalltalk (quick, fast command to smoke-test the system) --
    elif 'what\'s up' in command:
        talktome.talkToMe('Just doing my thing')
# -------------------------------------------------------------
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# End Miscelaneous Section
# END GIGANTIC ASSISTANT FUNCTION -- as of 4/24/20, there are 26 commands in this brain.
###############################################################################################
| {"/Juliet.py": ["/initualizejuliet.py"]} |
61,023 | bitPanG98/Juliet | refs/heads/master | /SpeakAndHear/talktome.py | ###############################################################################################
######## TTS TEXT TO SPEECH FUNCTION
# This gets used all over to speak text aloud.
# It also prints to the console for people with bad memories.
from gtts import gTTS
import os
def talkToMe(mytext):
    """Speak *mytext* aloud and echo it to the console."""
    # Echo first so the text is visible even if playback fails.
    print(mytext)
    # Synthesize the text with Google text-to-speech (British English).
    speech = gTTS(text=mytext, lang='en-uk')
    # A single scratch file is written, played, and overwritten on
    # every call.
    speech.save('audio.mp3')
    # Play it back through the mpg123 command-line player (-q = quiet).
    os.system('mpg123 -q audio.mp3')
###############################################################################################
######## END TTS TEXT TO SPEECH FUNCTION | {"/Juliet.py": ["/initualizejuliet.py"]} |
61,024 | bitPanG98/Juliet | refs/heads/master | /Tests/test_julibrain.py | import unittest
import subprocess
from GreyMatter import julibrain
from SpeakAndHear import talktome
class TestBrain(unittest.TestCase):
    """Unit tests for julibrain.assistant().

    The assistant's runtest flag makes the browser commands return
    their URL instead of opening it, which is what these tests check.
    """
    def test_open_reddit(self):
        # In test mode the branch must return the reddit URL.
        test = True
        testurl = julibrain.assistant('open reddit', 1, 2, test)
        #subprocess.call(['pip', 'list', '|', 'grep', 'webbrowser'])
        self.assertEqual(testurl, 'https://www.reddit.com/')
    #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    def test_open_youtube(self):
        # In test mode the branch must return the youtube URL.
        test = True
        testurl = julibrain.assistant('open youtube', 1, 2, test)
        #subprocess.call(['pip', 'list', '|', 'grep', 'webbrowser'])
        self.assertEqual(testurl, 'https://www.youtube.com/')
    #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    # NOTE(review): this method name lacks the 'test_' prefix, so the
    # unittest runner never discovers it.  That may be deliberate: the
    # 'dict' branch in assistant() ignores the runtest flag and opens a
    # real browser plus pyautogui clicks -- confirm before renaming.
    def dictation(self):
        test = True
        testurl = julibrain.assistant('dict', 1, 2, test)
        self.assertEqual(testurl, 'https://docs.google.com/document/u/0/')
    #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
61,025 | bitPanG98/Juliet | refs/heads/master | /initualizejuliet.py | import os
################################################################################################
######Check Model.
def CheckMyModel():
    """Verify the 'model-en' speech model folder exists in the cwd.

    Prints a download hint and exits with status 1 when it is missing;
    otherwise returns normally.
    """
    if os.path.exists("model-en"):
        return
    print ("Please download the model from https://github.com/alphacep/kaldi-android-demo/releases and unpack as 'model-en' in the current folder.")
    exit(1)
################################################################################################
######End Check Model.
| {"/Juliet.py": ["/initualizejuliet.py"]} |
61,028 | m-zajac/SimplePyWebChess | refs/heads/master | /run.py | #! /usr/bin/env python
from app import create_app
# Build the Flask application via the app factory.  The True flag is
# passed straight through to create_app -- presumably a debug switch;
# confirm in app/__init__.py.
app = create_app(True)
# Listen on every network interface so other LAN hosts can reach it.
# NOTE(review): 0.0.0.0 exposes the dev server beyond localhost --
# confirm this is intended outside local development.
app.run('0.0.0.0')
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,029 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/move_generators/tests.py | import unittest
from . import gen_minimax
from .. import board, game
class MinimaxMoveGeneratorTests(unittest.TestCase):
    """Tests for the minimax move generator on small hand-set positions.

    BUGFIX: assertEquals/assertNotEquals are deprecated aliases
    (removed in Python 3.12); replaced with assertEqual/assertNotEqual.
    """
    def setUp(self):
        # Fresh stripped game: only the pieces init'd per test remain.
        self.generator = gen_minimax.minimaxGenerator
        self.board = board.Board()
        self.game = game.Game(self.board)
        self.game.init_new()
        self.game.strip()
    def test_1Level(self):
        """At depth 1 the chosen move must land on the black pawn's square."""
        self.game.initPiece(self.game.piece_list['WK'], (4, 0))
        self.game.initPiece(self.game.piece_list['BK'], (4, 7))
        self.game.initPiece(self.game.piece_list['Wp1'], (3, 3))
        self.game.initPiece(self.game.piece_list['Bp1'], (4, 4))
        move = self.generator(self.game, 1)
        self.assertEqual(move.moves[0][1], (4, 4))
    def test_2Levels(self):
        """At depth 2 the queen must not move onto (4, 4), which (5, 5) covers."""
        self.game.initPiece(self.game.piece_list['WK'], (4, 0))
        self.game.initPiece(self.game.piece_list['BK'], (4, 7))
        self.game.initPiece(self.game.piece_list['WQ'], (3, 3))
        self.game.initPiece(self.game.piece_list['Bp1'], (4, 4))
        self.game.initPiece(self.game.piece_list['Bp2'], (5, 5))
        move = self.generator(self.game, 2)
        self.assertNotEqual(move.moves[0][1], (4, 4))
61,030 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/searchtree/tests.py | import unittest
import nodes
class DepthFirstNodeTests(unittest.TestCase):
    """Traversal-order tests for nodes.DepthFirstNode."""
    def test_nodes(self):
        """Pre- and post-order traversal must visit nodes in the documented order."""
        test_list = []

        class TestNode(nodes.DepthFirstNode):
            def evaluate(self):
                # Record the visit order as a side effect.
                test_list.append(self.data)
                return self.value
        # Tree layout: http://en.wikipedia.org/wiki/Tree_traversal#Example
        def init(algorithm):
            a = TestNode('a', None, algorithm)
            b = TestNode('b', None, algorithm)
            c = TestNode('c', None, algorithm)
            d = TestNode('d', None, algorithm)
            e = TestNode('e', None, algorithm)
            f = TestNode('f', None, algorithm)
            g = TestNode('g', None, algorithm)
            h = TestNode('h', None, algorithm)
            i = TestNode('i', None, algorithm)
            f.addNode(b).addNode(g)
            b.addNode(a).addNode(d)
            d.addNode(c).addNode(e)
            g.addNode(i)
            i.addNode(h)
            return f
        # preorder
        root = init('preorder')
        root.traverse()
        self.assertListEqual(
            test_list,
            ['f', 'b', 'a', 'd', 'c', 'e', 'g', 'i', 'h']
        )
        # postorder
        test_list = []
        # BUGFIX: was the typo 'postodrder'.  Presumably any value other
        # than 'preorder' fell through to post-order (the assertion
        # below passed with the typo), so behavior is unchanged.
        root = init('postorder')
        root.traverse()
        self.assertListEqual(
            test_list,
            ['a', 'c', 'e', 'd', 'b', 'h', 'i', 'g', 'f']
        )
class MinimaxTests(unittest.TestCase):
    """Minimax / alpha-beta tests for the searchtree node classes.

    Each init* helper hand-builds a small game tree into self.root.
    After traverse() the root carries the chosen leaf's value and data,
    plus an evaluations counter showing how much of the tree was
    visited -- the alpha-beta variants must match minimax's answers
    while evaluating fewer nodes where cut-offs exist.
    """
    def test_simple(self):
        # Plain minimax on trees 1 and 2: expected value/data plus the
        # full evaluation counts (no pruning).
        self.init1(nodes.MinNode, nodes.MaxNode)
        self.root.traverse()
        self.assertEqual(self.root.value, -7)
        self.assertEqual(self.root.data, 'l8')
        self.assertEqual(self.root.evaluations, 22)
        self.init2(nodes.MinNode, nodes.MaxNode)
        self.root.traverse()
        self.assertEqual(self.root.value, 6)
        self.assertEqual(self.root.data, 'l7')
        self.assertEqual(self.root.evaluations, 33)
    def test_ab(self):
        # Alpha-beta: identical value/data; fewer evaluations on trees
        # 2 (25 vs 33) and 3 where pruning applies.
        self.init1(nodes.MinABNode, nodes.MaxABNode)
        self.root.traverse()
        self.assertEqual(self.root.value, -7)
        self.assertEqual(self.root.data, 'l8')
        self.assertEqual(self.root.evaluations, 22)
        self.init2(nodes.MinABNode, nodes.MaxABNode)
        self.root.traverse()
        self.assertEqual(self.root.value, 6)
        self.assertEqual(self.root.data, 'l7')
        self.assertEqual(self.root.evaluations, 25)
        self.init3(nodes.MinABNode, nodes.MaxABNode)
        self.root.traverse()
        self.assertEqual(self.root.value, 4)
        self.assertEqual(self.root.data, 'l1')
        self.assertEqual(self.root.evaluations, 11)
    def init1(self, MinNode, MaxNode):
        """Build the Wikipedia minimax example tree into self.root."""
        # http://en.wikipedia.org/wiki/Minimax#Example_2
        # leafs - max nodes- level 4
        l1 = MaxNode('l1', 10)
        l2 = MaxNode('l2', 999999)
        l3 = MaxNode('l3', 5)
        l4 = MaxNode('l4', -10)
        l5 = MaxNode('l5', 7)
        l6 = MaxNode('l6', 5)
        l7 = MaxNode('l7', -999999)
        l8 = MaxNode('l8', -7)
        l9 = MaxNode('l9', -5)
        # level 3 - min nodes
        n31 = MinNode().addNode(l1).addNode(l2)
        n32 = MinNode().addNode(l3)
        n33 = MinNode().addNode(l4)
        n34 = MinNode().addNode(l5).addNode(l6)
        n35 = MinNode().addNode(l7)
        n36 = MinNode().addNode(l8).addNode(l9)
        # level 2 - max nodes
        n21 = MaxNode().addNode(n31).addNode(n32)
        n22 = MaxNode().addNode(n33)
        n23 = MaxNode().addNode(n34).addNode(n35)
        n24 = MaxNode().addNode(n36)
        # level 1 - min nodes
        n11 = MinNode().addNode(n21).addNode(n22)
        n12 = MinNode().addNode(n23).addNode(n24)
        # root - max node
        root = MaxNode().addNode(n11).addNode(n12)
        self.root = root
    def init2(self, MinNode, MaxNode):
        """Build the Wikipedia alpha-beta pruning example tree into self.root."""
        # http://en.wikipedia.org/wiki/File:AB_pruning.svg
        # leafs - max nodes- level 4
        l1 = MaxNode('l1', 5)
        l2 = MaxNode('l2', 6)
        l3 = MaxNode('l3', 7)
        l4 = MaxNode('l4', 4)
        l5 = MaxNode('l5', 5)
        l6 = MaxNode('l6', 3)
        l7 = MaxNode('l7', 6)
        l8 = MaxNode('l8', 6)
        l9 = MaxNode('l9', 9)
        l10 = MaxNode('l10', 7)
        l11 = MaxNode('l11', 5)
        l12 = MaxNode('l12', 9)
        l13 = MaxNode('l13', 8)
        l14 = MaxNode('l14', 6)
        # level 3 - min nodes
        n3_1 = MinNode().addNode(l1).addNode(l2)
        n3_2 = MinNode().addNode(l3).addNode(l4).addNode(l5)
        n3_3 = MinNode().addNode(l6)
        n3_4 = MinNode().addNode(l7)
        n3_5 = MinNode().addNode(l8).addNode(l9)
        n3_6 = MinNode().addNode(l10)
        n3_7 = MinNode().addNode(l11)
        n3_8 = MinNode().addNode(l12).addNode(l13)
        n3_9 = MinNode().addNode(l14)
        # level 2 - max nodes
        n2_1 = MaxNode().addNode(n3_1).addNode(n3_2)
        n2_2 = MaxNode().addNode(n3_3)
        n2_3 = MaxNode().addNode(n3_4).addNode(n3_5)
        n2_4 = MaxNode().addNode(n3_6)
        n2_5 = MaxNode().addNode(n3_7)
        n2_6 = MaxNode().addNode(n3_8).addNode(n3_9)
        # level 1 - min nodes
        n1_1 = MinNode().addNode(n2_1).addNode(n2_2)
        n1_2 = MinNode().addNode(n2_3).addNode(n2_4)
        n1_3 = MinNode().addNode(n2_5).addNode(n2_6)
        # root - max node
        root = MaxNode().addNode(n1_1).addNode(n1_2).addNode(n1_3)
        self.root = root
    def init3(self, MinNode, MaxNode):
        """Best a-b case"""
        # leafs - min nodes- level 3
        l1 = MinNode('l1', 4)
        l2 = MinNode('l2', 1)
        l3 = MinNode('l3', 6)
        l4 = MinNode('l4', 2)
        l5 = MinNode('l5', 3)
        l6 = MinNode('l6', 0)
        l7 = MinNode('l7', 7)
        l8 = MinNode('l8', 8)
        # level 2 - max nodes
        n2_1 = MaxNode().addNode(l1).addNode(l2)
        n2_2 = MaxNode().addNode(l3).addNode(l4)
        n2_3 = MaxNode().addNode(l5).addNode(l6)
        n2_4 = MaxNode().addNode(l7).addNode(l8)
        # level 1 - min nodes
        n1_1 = MinNode().addNode(n2_1).addNode(n2_2)
        n1_2 = MinNode().addNode(n2_3).addNode(n2_4)
        # root - max node
        root = MaxNode().addNode(n1_1).addNode(n1_2)
        self.root = root
61,031 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/tests.py | """Chess tests"""
import unittest
import operator
import json
from . import board
from . import pieces
from . import game
class BoardTests(unittest.TestCase):
    """Board testing class"""
    def setUp(self):
        self.board = board.Board()
    def test_board(self):
        """Check board geometry, square colours and the reversed view."""
        # board size, check squares
        self.assertIsInstance(self.board.squares, list)
        self.assertEqual(len(self.board.squares), 8)
        self.assertTrue(self.board.squares[0][0].is_black)
        self.assertTrue(self.board.squares[4][4].is_black)
        self.assertFalse(self.board.squares[7][0].is_black)
        self.assertFalse(self.board.squares[4][3].is_black)
        self.assertEqual(self.board.squares[0][0], self.board.squares_reversed[7][7])
        self.assertEqual(self.board.squares[1][1], self.board.squares_reversed[6][6])
        # BUGFIX: range(7) silently skipped the last row and column of
        # the 8x8 board; range(8) covers every square.
        for i in range(8):
            self.assertEqual(len(self.board.squares[i]), 8)
            for j in range(8):
                # is square
                self.assertIsInstance(self.board.squares[i][j], board.Square)
                # square piece isn't present
                self.assertIsNone(self.board.squares[i][j].piece)
                # check color: even coordinate sums are black, like (0, 0)
                if (i + j) % 2:
                    self.assertFalse(self.board.squares[i][j].is_black)
                else:
                    self.assertTrue(self.board.squares[i][j].is_black)
                # check reversed
                self.assertEqual(self.board.squares[i][j], self.board.squares_reversed[7-i][7-j], str(i) + ', ' + str(j))
    def test_piece_actions(self):
        """Init two pawns, count them per colour, then remove one."""
        board_manager = board.BoardManager
        TestPiece1 = pieces.Piece(pieces.TypePawn, False, 'WP1')
        piecePos1 = (4, 3)
        board_manager.initPiece(self.board, TestPiece1, piecePos1)
        TestPiece1.moves_count = 1
        TestPiece2 = pieces.Piece(pieces.TypePawn, True, 'BP1')
        piecePos2 = (4, 5)
        board_manager.initPiece(self.board, TestPiece2, piecePos2)
        TestPiece2.moves_count = 1
        # test count: exactly one piece of each colour on the board
        white_pieces = []
        black_pieces = []
        for row in self.board.squares:
            for square in row:
                if square.piece:
                    if square.piece.is_black:
                        black_pieces.append(square.piece)
                    else:
                        white_pieces.append(square.piece)
        self.assertEqual(len(white_pieces), 1)
        self.assertEqual(len(black_pieces), 1)
        # test board squares: both views must see the same piece
        self.assertEqual(self.board.squares[4][3].piece, TestPiece1)
        self.assertEqual(self.board.squares_reversed[3][4].piece, TestPiece1)
        self.assertEqual(self.board.squares[4][5].piece, TestPiece2)
        self.assertEqual(self.board.squares_reversed[3][2].piece, TestPiece2)
        # test remove: the removed piece loses its position
        board_manager.removePiece(self.board, TestPiece2)
        self.assertIsNone(TestPiece2.position)
class PawnTests(unittest.TestCase):
    """Pawn movement and capture tests."""
    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager
    def test_moves(self):
        """Pawns get one- and two-square opening moves, then advance."""
        # White pawn on its starting rank.
        white_pawn = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, white_pawn, (0, 1))
        # Black pawn on its starting rank.
        black_pawn = pieces.Piece(pieces.TypePawn, True)
        self.board_manager.initPiece(self.board, black_pawn, (0, 6))
        # Both sides start with exactly two forward options.
        white_moves = white_pawn.getMoves(self.board)
        self.assertEqual(len(white_moves), 2)
        self.assertEqual(white_moves[0].moves[0][1], (0, 2))
        self.assertEqual(white_moves[1].moves[0][1], (0, 3))
        black_moves = black_pawn.getMoves(self.board)
        self.assertEqual(len(black_moves), 2)
        self.assertEqual(black_moves[0].moves[0][1], (0, 5))
        self.assertEqual(black_moves[1].moves[0][1], (0, 4))
        # Play the double-step move on both sides.
        self.board_manager.move(self.board, white_moves[1])
        self.assertEqual(white_pawn.position, (0, 3))
        self.board_manager.move(self.board, black_moves[1])
        self.assertEqual(black_pawn.position, (0, 4))
        # The pawns now face each other head-on: no moves remain.
        white_moves = white_pawn.getMoves(self.board)
        black_moves = black_pawn.getMoves(self.board)
        self.assertEqual(len(white_moves), 0)
        self.assertEqual(len(black_moves), 0)
    def test_attack(self):
        """Diagonally opposed pawns can advance or capture each other."""
        white_pawn = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, white_pawn, (3, 2))
        black_pawn = pieces.Piece(pieces.TypePawn, True)
        self.board_manager.initPiece(self.board, black_pawn, (4, 3))
        # Each side: one forward move plus the capture of the other pawn.
        white_moves = white_pawn.getMoves(self.board)
        self.assertEqual(len(white_moves), 2)
        self.assertEqual(white_moves[0].moves[0][1], (3, 3))
        self.assertEqual(white_moves[1].moves[0][1], black_pawn.position)
        black_moves = black_pawn.getMoves(self.board)
        self.assertEqual(len(black_moves), 2)
        self.assertEqual(black_moves[0].moves[0][1], (4, 2))
        self.assertEqual(black_moves[1].moves[0][1], white_pawn.position)
    def test_move2squares_blocked(self):
        """A piece directly in front blocks even the double-step move."""
        white_pawn = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, white_pawn, (0, 1))
        black_pawn = pieces.Piece(pieces.TypePawn, True)
        self.board_manager.initPiece(self.board, black_pawn, (0, 2))
        self.assertEqual(len(white_pawn.getMoves(self.board)), 0)
    def test_blocked(self):
        """A pawn on the last rank has nowhere left to go."""
        white_pawn = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, white_pawn, (0, 7))
        self.assertEqual(len(white_pawn.getMoves(self.board)), 0)
class KingTests(unittest.TestCase):
    """King testing class"""
    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager
    def test_moves(self):
        """An unobstructed king gets exactly the 8 neighbouring squares."""
        TestPiece = pieces.Piece(pieces.TypeKing, False)
        self.board_manager.initPiece(self.board, TestPiece, (4, 4))
        movelists = TestPiece.getMoves(self.board)
        self.assertEqual(len(movelists), 8)
        for move in movelists:
            # Offset from the start square must be a single king step
            # (euclidean distance between 0 and sqrt(2)).
            # BUGFIX: the old code indexed the result of map(), which
            # only works on Python 2 where map() returns a list.
            dx = 4 - move.moves[0][1][0]
            dy = 4 - move.moves[0][1][1]
            self.assertLessEqual(abs(complex(dx, dy)), 1.5)
            self.assertGreaterEqual(abs(complex(dx, dy)), 0.0)
    def test_blocked(self):
        """A cornered king with all three neighbours friendly cannot move."""
        TestPiece = pieces.Piece(pieces.TypeKing, False)
        self.board_manager.initPiece(self.board, TestPiece, (0, 0))
        O1 = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, O1, (1, 0))
        O2 = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, O2, (1, 1))
        O3 = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, O3, (0, 1))
        movelists = TestPiece.getMoves(self.board)
        self.assertEqual(len(movelists), 0)
class QueenTests(unittest.TestCase):
    """Queen movement tests."""
    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager
    def test_moves(self):
        """A centred queen never reaches squares off her lines and diagonals."""
        queen = pieces.Piece(pieces.TypeQueen, False)
        self.board_manager.initPiece(self.board, queen, (4, 4))
        unreachable = [(2, 3), (3, 2), (5, 1), (5, 6), (5, 7), (6, 1)]
        for move in queen.getMoves(self.board):
            self.assertNotIn(move.moves[0][1], unreachable)
    def test_blocked(self):
        """Friendly pawns on all three corner exits leave the queen no moves."""
        queen = pieces.Piece(pieces.TypeQueen, False)
        self.board_manager.initPiece(self.board, queen, (0, 0))
        for spot in [(1, 0), (1, 1), (0, 1)]:
            blocker = pieces.Piece(pieces.TypePawn, False)
            self.board_manager.initPiece(self.board, blocker, spot)
        self.assertEqual(len(queen.getMoves(self.board)), 0)
    def test_blocked2(self):
        """A queen boxed in on three sides keeps only four moves."""
        queen = pieces.Piece(pieces.TypeQueen, False)
        self.board_manager.initPiece(self.board, queen, (3, 0))
        pawn_front = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, pawn_front, (3, 1))
        pawn_diag = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, pawn_diag, (2, 1))
        bishop = pieces.Piece(pieces.TypeBishop, False)
        self.board_manager.initPiece(self.board, bishop, (2, 0))
        king = pieces.Piece(pieces.TypeKing, False)
        self.board_manager.initPiece(self.board, king, (4, 0))
        self.assertEqual(len(queen.getMoves(self.board)), 4)
class BishopTests(unittest.TestCase):
    """Bishop movement tests."""
    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager
    def test_moves(self):
        """Every bishop move stays on the starting square's diagonals."""
        bishop = pieces.Piece(pieces.TypeBishop, False)
        self.board_manager.initPiece(self.board, bishop, (4, 4))
        start_square = self.board.squares[4][4]
        for move in bishop.getMoves(self.board):
            dest_x, dest_y = move.moves[0][1]
            # A diagonal mover never changes square colour.
            self.assertTrue(self.board.squares[dest_x][dest_y].is_black == start_square.is_black)
            # |dx| == |dy| holds for every diagonal destination.
            self.assertTrue(abs(dest_x - 4) == abs(dest_y - 4))
    def test_blocked(self):
        """A cornered bishop whose diagonal is occupied cannot move."""
        bishop = pieces.Piece(pieces.TypeBishop, False)
        self.board_manager.initPiece(self.board, bishop, (0, 0))
        for spot in [(1, 0), (1, 1), (0, 1)]:
            blocker = pieces.Piece(pieces.TypePawn, False)
            self.board_manager.initPiece(self.board, blocker, spot)
        self.assertEqual(len(bishop.getMoves(self.board)), 0)
class KnightTests(unittest.TestCase):
    """Knight movement tests."""

    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager

    def test_moves(self):
        """Every knight move lands on the opposite square color and is an
        L-shaped jump."""
        knight = pieces.Piece(pieces.TypeKnight, False)
        self.board_manager.initPiece(self.board, knight, (4, 4))
        l_jumps = [(1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)]
        start_square = self.board.squares[4][4]
        for candidate in knight.getMoves(self.board):
            tx, ty = candidate.moves[0][1]
            # the target square has the other color
            self.assertFalse(self.board.squares[tx][ty].is_black == start_square.is_black)
            # and the jump is one of the 8 L shapes
            self.assertIn((tx - 4, ty - 4), l_jumps)

    def test_nonblocked(self):
        """A knight jumps over adjacent friendly pawns: two moves remain
        from the corner."""
        knight = pieces.Piece(pieces.TypeKnight, False)
        self.board_manager.initPiece(self.board, knight, (0, 0))
        for blocker_pos in ((1, 0), (1, 1), (0, 1)):
            pawn = pieces.Piece(pieces.TypePawn, False)
            self.board_manager.initPiece(self.board, pawn, blocker_pos)
        self.assertEqual(len(knight.getMoves(self.board)), 2)
class KingSafetyTests(unittest.TestCase):
    """Moves that would expose the moving side's own king must be filtered."""

    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager

    def test_1(self):
        """A pinned white pawn cannot move at all."""
        white_king = pieces.Piece(pieces.TypeKing, False)
        self.board_manager.initPiece(self.board, white_king, (0, 0))
        pinned_pawn = pieces.Piece(pieces.TypePawn, False)
        self.board_manager.initPiece(self.board, pinned_pawn, (1, 1))
        black_queen = pieces.Piece(pieces.TypeQueen, True)
        self.board_manager.initPiece(self.board, black_queen, (7, 7))
        self.assertEqual(len(pinned_pawn.getMoves(self.board)), 0)

    def test_2(self):
        """The white queen must shield its king: exactly one legal move."""
        white_king = pieces.Piece(pieces.TypeKing, False)
        self.board_manager.initPiece(self.board, white_king, (0, 0))
        white_queen = pieces.Piece(pieces.TypeQueen, False)
        self.board_manager.initPiece(self.board, white_queen, (1, 0))
        black_queen = pieces.Piece(pieces.TypeQueen, True)
        self.board_manager.initPiece(self.board, black_queen, (7, 7))
        self.assertEqual(len(white_queen.getMoves(self.board)), 1)
class GameTests(unittest.TestCase):
    """Integration tests: a complete scripted game played through Game."""

    def setUp(self):
        self.board = board.Board()
        self.board_manager = board.BoardManager
        self.game = game.Game(self.board)

    def test_setup(self):
        """A fresh game has 16 pieces per side in the standard layout."""
        self.game.init_new()
        white_pieces = []
        black_pieces = []
        for row in self.board.squares:
            for square in row:
                if square.piece:
                    if square.piece.is_black:
                        black_pieces.append(square.piece)
                    else:
                        white_pieces.append(square.piece)
        self.assertEqual(len(white_pieces), 16)
        self.assertEqual(len(black_pieces), 16)
        # corner rooks, correct colors
        self.assertIs(self.board.squares[0][0].piece.type, pieces.TypeRook)
        self.assertFalse(self.board.squares[0][0].piece.is_black)
        self.assertIs(self.board.squares[7][7].piece.type, pieces.TypeRook)
        self.assertTrue(self.board.squares[7][7].piece.is_black)
        # king and queen on their home squares (mirrored for black)
        self.assertIs(self.board.squares[4][0].piece.type, pieces.TypeKing)
        self.assertFalse(self.board.squares[4][0].piece.is_black)
        self.assertIs(self.board.squares[3][7].piece.type, pieces.TypeQueen)
        self.assertTrue(self.board.squares[3][7].piece.is_black)
        # first two ranks of each side are full, the middle four are empty
        for i in range(8):
            for j in range(2):
                self.assertIsNotNone(self.board.squares[i][j].piece)
                self.assertIsNotNone(self.board.squares[i][7-j].piece)
            for j in range(2, 5):
                self.assertIsNone(self.board.squares[i][j].piece)
                self.assertIsNone(self.board.squares[i][7-j].piece)

    def test_board_serialization(self):
        """A board must survive a serialize -> JSON -> deserialize round trip."""
        self.game.init_new()
        serialized = self.board_manager.serialize(self.board)
        serialized = json.dumps(serialized, separators=(',', ':'))
        data = json.loads(serialized)
        newboard = board.Board()
        self.board_manager.deserialize(newboard, data)
        self.assertEqual(newboard.squares, self.board.squares)

    def test_game(self):
        """http://en.wikibooks.org/wiki/Chess/Sample_chess_game

        Plays the sample game move by move, asserting turn, capture counts
        and king safety after every half-move.
        """
        self.game.init_new()
        # w pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 1), (4, 3))),
            black_moves=True,
            is_capture=False,
            white_capture_count=0,
            black_capture_count=0,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 6), (4, 4))),
            black_moves=False,
            is_capture=False,
            white_capture_count=0,
            black_capture_count=0,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w bishop
        self.move_and_checkafter(
            move=pieces.PieceMove(((6, 0), (5, 2))),
            black_moves=True,
            is_capture=False,
            white_capture_count=0,
            black_capture_count=0,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 6), (5, 5))),
            black_moves=False,
            is_capture=False,
            white_capture_count=0,
            black_capture_count=0,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w knight captures b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 2), (4, 4))),
            black_moves=True,
            is_capture=True,
            white_capture_count=1,
            black_capture_count=0,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b pawn captures white bishop
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 5), (4, 4))),
            black_moves=False,
            is_capture=True,
            white_capture_count=1,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w queen - check!
        self.move_and_checkafter(
            move=pieces.PieceMove(((3, 0), (7, 4))),
            black_moves=True,
            is_capture=False,
            white_capture_count=1,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b king
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 7), (4, 6))),
            black_moves=False,
            is_capture=False,
            white_capture_count=1,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w queen - capture & check!
        self.move_and_checkafter(
            move=pieces.PieceMove(((7, 4), (4, 4))),
            black_moves=True,
            is_capture=True,
            white_capture_count=2,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b king
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 6), (5, 6))),
            black_moves=False,
            is_capture=False,
            white_capture_count=2,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w bishop - check
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 0), (2, 3))),
            black_moves=True,
            is_capture=False,
            white_capture_count=2,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((3, 6), (3, 4))),
            black_moves=False,
            is_capture=False,
            white_capture_count=2,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w bishop - capture & check
        self.move_and_checkafter(
            move=pieces.PieceMove(((2, 3), (3, 4))),
            black_moves=True,
            is_capture=True,
            white_capture_count=3,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b king
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 6), (6, 5))),
            black_moves=False,
            is_capture=False,
            white_capture_count=3,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((7, 1), (7, 3))),
            black_moves=True,
            is_capture=False,
            white_capture_count=3,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((7, 6), (7, 4))),
            black_moves=False,
            is_capture=False,
            white_capture_count=3,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w bishop - capture pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((3, 4), (1, 6))),
            black_moves=True,
            is_capture=True,
            white_capture_count=4,
            black_capture_count=1,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b bishop
        self.move_and_checkafter(
            move=pieces.PieceMove(((2, 7), (1, 6))),
            black_moves=False,
            is_capture=True,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w queen
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 4), (5, 4))),
            black_moves=True,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b king
        self.move_and_checkafter(
            move=pieces.PieceMove(((6, 5), (7, 5))),
            black_moves=False,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=True,
            black_king_pos=(7, 5),
            white_king_pos=(4, 0),
        )
        # w pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((3, 1), (3, 3))),
            black_moves=True,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=False,
        )
        # b pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((6, 6), (6, 4))),
            black_moves=False,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=True,
        )
        # w queen
        self.move_and_checkafter(
            move=pieces.PieceMove(((5, 4), (5, 6))),
            black_moves=True,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=True,
        )
        # b queen
        self.move_and_checkafter(
            move=pieces.PieceMove(((3, 7), (4, 6))),
            black_moves=False,
            is_capture=False,
            white_capture_count=4,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=True,
            black_king_pos=(7, 5),
            white_king_pos=(4, 0),
        )
        # w pawn
        self.move_and_checkafter(
            move=pieces.PieceMove(((7, 3), (6, 4))),
            black_moves=True,
            is_capture=True,
            white_capture_count=5,
            black_capture_count=2,
            white_king_safe=True,
            black_king_safe=False,
            black_king_pos=(7, 5),
            white_king_pos=(4, 0),
        )
        # b queen
        self.move_and_checkafter(
            move=pieces.PieceMove(((4, 6), (6, 4))),
            black_moves=False,
            is_capture=True,
            white_capture_count=5,
            black_capture_count=3,
            white_king_safe=True,
            black_king_safe=True,
            black_king_pos=(7, 5),
            white_king_pos=(4, 0),
        )
        # w rook - checkmate
        self.move_and_checkafter(
            move=pieces.PieceMove(((7, 0), (7, 4))),
            black_moves=True,
            is_capture=True,
            white_capture_count=6,
            black_capture_count=3,
            white_king_safe=True,
            black_king_safe=False,
            black_king_pos=(7, 5),
            white_king_pos=(4, 0),
            checkmate=True
        )

    def move_and_checkafter(
        self,
        move,
        black_moves,
        is_capture,
        white_capture_count,
        black_capture_count,
        white_king_safe=True,
        black_king_safe=True,
        white_king_pos=None,
        black_king_pos=None,
        checkmate=None
    ):
        """Executes *move* and asserts the resulting game state.

        black_moves is the expected side to move AFTER the move; the
        king-position arguments additionally verify board bookkeeping, and
        checkmate=True asserts that no legal move remains.
        """
        captures = self.game.move(move)
        self.assertEqual(len(captures) > 0, is_capture)
        if len(captures) > 0:
            # the captured piece belongs to the side that is now to move
            self.assertEqual(captures[0].is_black, black_moves)
        self.assertEqual(len(self.game.white_captures), white_capture_count)
        self.assertEqual(len(self.game.black_captures), black_capture_count)
        self.assertEqual(pieces.TypeKing.checkSafe(self.game.board.white_king_pos, self.game.board.squares), white_king_safe)
        self.assertEqual(pieces.TypeKing.checkSafe(self.game.board.black_king_pos, self.game.board.squares), black_king_safe)
        self.assertEqual(black_moves, self.game.black_moves)
        if white_king_pos:
            # cached king position, board square and the piece itself agree
            self.assertEqual(self.game.board.white_king_pos, white_king_pos)
            self.assertIs(self.game.board.squares[white_king_pos[0]][white_king_pos[1]].piece.type, pieces.TypeKing)
            self.assertEqual(self.game.board.squares[white_king_pos[0]][white_king_pos[1]].piece.position, white_king_pos)
        if black_king_pos:
            self.assertEqual(self.game.board.black_king_pos, black_king_pos)
            self.assertIs(self.game.board.squares[black_king_pos[0]][black_king_pos[1]].piece.type, pieces.TypeKing)
            self.assertEqual(self.game.board.squares[black_king_pos[0]][black_king_pos[1]].piece.position, black_king_pos)
        if checkmate:
            self.assertEqual(len(self.game.getAllMoves()), 0)
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,032 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/game.py | """Game module"""
import pieces
import board
class Game(object):
    """Chess game state machine.

    Owns the board, tracks both sides' pieces and captures, whose turn it
    is, and check/checkmate status.  Moves are validated against the
    pieces' legal move lists before being applied.

    Fix: the 'Invalid player' ValueError message was misspelled ('Invaid').
    """

    def __init__(self, gameboard=None, move_generator=None):
        self.board_manager = board.BoardManager
        self.board = gameboard
        if not self.board:
            self.board = board.Board()
        # optional callable(game) -> PieceMove; used by move() when the
        # caller supplies no explicit move (computer player)
        self.move_generator = move_generator
        # pieces in game, keyed by piece id
        self.piece_list = {}
        self.white_pieces = []
        self.black_pieces = []
        # captured pieces, keyed by captured piece id
        self.white_captures = {}
        self.black_captures = {}
        # game state
        self.black_moves = False
        self.is_check = False
        self.is_checkmate = False

    def init_new(self):
        """Initialize game. self.board must be present."""
        def make_piece_set(is_black, id_prefix):
            # Standard layout expressed in white's coordinates; the black
            # set is mirrored by the caller below.
            piece_set = {}
            # rooks
            piece_set[(0, 0)] = pieces.Piece(pieces.TypeRook, is_black, id_prefix + 'r1')
            piece_set[(7, 0)] = pieces.Piece(pieces.TypeRook, is_black, id_prefix + 'r2')
            # knights
            piece_set[(1, 0)] = pieces.Piece(pieces.TypeKnight, is_black, id_prefix + 'k1')
            piece_set[(6, 0)] = pieces.Piece(pieces.TypeKnight, is_black, id_prefix + 'k2')
            # bishops
            piece_set[(2, 0)] = pieces.Piece(pieces.TypeBishop, is_black, id_prefix + 'b1')
            piece_set[(5, 0)] = pieces.Piece(pieces.TypeBishop, is_black, id_prefix + 'b2')
            # king/queen swap files for black so both queens share a file
            # after mirroring
            kqpos = [(3, 0), (4, 0)]
            if is_black:
                kqpos = [(4, 0), (3, 0)]
            # queen
            piece_set[kqpos[0]] = pieces.Piece(pieces.TypeQueen, is_black, id_prefix + 'Q')
            # king
            piece_set[kqpos[1]] = pieces.Piece(pieces.TypeKing, is_black, id_prefix + 'K')
            # pawns
            for i in range(8):
                piece_set[(i, 1)] = pieces.Piece(pieces.TypePawn, is_black, id_prefix + 'p' + str(i+1))
            return piece_set
        white_pieces = make_piece_set(False, 'W')
        for pos, piece in white_pieces.iteritems():
            self.board_manager.initPiece(self.board, piece, pos, False)
            self.white_pieces.append(piece)
            self.piece_list[piece.id] = piece
        black_pieces = make_piece_set(True, 'B')
        for pos, piece in black_pieces.iteritems():
            # symmetrical to the white side
            pos = (7 - pos[0], 7 - pos[1])
            self.board_manager.initPiece(self.board, piece, pos, False)
            self.black_pieces.append(piece)
            self.piece_list[piece.id] = piece
        return self

    def initPiece(self, piece, pos):
        """Initializes piece in game.

        With a position the piece (re)enters play; with a falsy position it
        is recorded as captured.
        """
        self.board_manager.initPiece(self.board, piece, pos)
        if pos:
            # re-entering the board: drop any earlier capture record
            if piece.id in self.black_captures:
                del self.black_captures[piece.id]
            if piece.id in self.white_captures:
                del self.white_captures[piece.id]
            if piece.is_black:
                self.black_pieces.append(piece)
            else:
                self.white_pieces.append(piece)
        else:
            self.capture(piece)

    def strip(self):
        """Strips all pieces off board (for testing purpose)"""
        for row in self.board.squares:
            for square in row:
                piece = square.piece
                if piece:
                    self.capture(piece)

    def move(self, move=None):
        """Validates move and executes it. Returns captured pieces.

        When *move* is None the configured move generator supplies one.
        Raises ValueError on an empty start square, a piece of the wrong
        color, or a destination the piece cannot legally reach.
        """
        if not move:
            # no move supplied: ask the move generator
            move = self.move_generator(self)
        for move_data in move.moves:
            piece = self.board.squares[move_data[0][0]][move_data[0][1]].piece
            if not piece:
                raise ValueError('Invalid start position')
            if piece.is_black != self.black_moves:
                # fixed typo: message previously read 'Invaid player'
                raise ValueError('Invalid player')
            valid_moves = piece.getMoves(self.board)
            valid_destinations = []
            for valid_move in valid_moves:
                for m in valid_move.moves:
                    valid_destinations.append(m[1])
            if not (move_data[1][0], move_data[1][1]) in valid_destinations:
                raise ValueError('Invalid move destination')
        # captures from piece position in oponent piece position
        captures = self.board_manager.move(self.board, move)
        for capture in captures:
            self.capture(capture)
        # other captures (en passant)
        if move.capture:
            self.capture(move.capture)
        # check whether the side about to move is now in check
        if self.black_moves:
            kingpos = self.board.white_king_pos
        else:
            kingpos = self.board.black_king_pos
        if pieces.TypeKing.checkSafe(kingpos, self.board.squares):
            self.is_check = False
        else:
            self.is_check = True
        # switch player
        self.black_moves = not self.black_moves
        # checkmate/stalemate: no legal move left for the new side to move
        available_moves = self.getAllMoves()
        if len(available_moves) == 0:
            self.is_checkmate = True
        else:
            self.is_checkmate = False
        return captures

    def capture(self, piece):
        """Records *piece* as captured and removes it from the board."""
        if piece.is_black:
            self.white_captures[piece.id] = piece
            self.black_pieces.remove(piece)
        else:
            self.black_captures[piece.id] = piece
            self.white_pieces.remove(piece)
        if piece.position:
            self.board_manager.removePiece(self.board, piece)

    def getAllMoves(self):
        """Returns all available moves for current player"""
        moves = []
        for row in self.board.squares:
            for square in row:
                if square.piece:
                    p = square.piece
                    if p.is_black == self.black_moves:
                        for m in p.getMoves(self.board):
                            moves.append(m)
        return moves

    def serialize(self):
        """Returns a JSON-serializable dict of the full game state."""
        black_captures_data = []
        white_captures_data = []
        for id, p in self.black_captures.iteritems():
            black_captures_data.append(p.serialize())
        for id, p in self.white_captures.iteritems():
            white_captures_data.append(p.serialize())
        data = {
            'board': self.board_manager.serialize(self.board),
            'black_moves': self.black_moves,
            'black_captures': black_captures_data,
            'white_captures': white_captures_data,
            'is_check': self.is_check,
            'is_checkmate': self.is_checkmate
        }
        return data

    def deserialize(self, game_data):
        """Restores the game state produced by serialize()."""
        self.board_manager.deserialize(self.board, game_data['board']),
        self.black_moves = game_data['black_moves']
        self.is_check = game_data['is_check']
        self.is_checkmate = game_data['is_checkmate']
        self.black_captures = {}
        for p in game_data['black_captures']:
            piece = pieces.Piece.deserialize(p)
            self.black_captures[piece.id] = piece
        self.white_captures = {}
        for p in game_data['white_captures']:
            piece = pieces.Piece.deserialize(p)
            self.white_captures[piece.id] = piece
        # rebuild the piece lists from the restored board
        self.white_pieces = []
        self.black_pieces = []
        for row in self.board.squares:
            for square in row:
                piece = square.piece
                if not piece:
                    continue
                if piece.is_black:
                    self.black_pieces.append(piece)
                else:
                    self.white_pieces.append(piece)
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,033 | m-zajac/SimplePyWebChess | refs/heads/master | /blueprints/chess/views.py | import json
from flask import render_template, Response, request
from modules.chess import game, pieces
from modules.chess import move_generators
import modules.chess.game_factory as game_factory
def init(blueprint):
    """Registers the chess HTTP routes on *blueprint*.

    NOTE: Flask derives endpoint names from the view function names, so the
    inner handler names (including the ``init`` that shadows this function)
    are deliberate and must not be renamed.
    """
    @blueprint.route('')
    def index():
        # chess client entry page
        return render_template('chess/index.html')

    @blueprint.route('/game/init', methods=['POST'])
    def init():
        # starts a fresh game; responds with serialized state + legal moves
        chessgame = game.Game()
        chessgame.init_new()
        return prepare_game_response(chessgame)

    @blueprint.route('/game/move', methods=['POST'])
    def move():
        # applies the posted move, or lets the move generator pick one
        data = parse_game_request()
        chessgame = data['game']
        # move
        if 'move' in data:
            move = data['move']
            chessgame.move(move)
        else:
            chessgame.move()
        return prepare_game_response(chessgame)

    # Game tests
    @blueprint.route('/game/<test>', methods=['POST'])
    def make_test(test):
        # dispatches to the matching game_factory.make_<test>() fixture
        test_method_name = 'make_%s' % test
        test_method = getattr(game_factory, test_method_name)
        _game = test_method()
        return prepare_game_response(_game)
def parse_game_request(chessgame=None):
    """Builds the request context from the posted JSON payload.

    Returns a dict with a 'game' entry (a ready Game, deserialized from the
    payload when game state was posted) and an optional 'move' entry.
    """
    try:
        payload = json.loads(request.form.items()[0][0])
    except IndexError:
        # no form body posted at all
        payload = None
    if not chessgame:
        # alternative generator: move_generators.gen_rand.randomGenerator
        generator = lambda g: move_generators.gen_minimax.minimaxGenerator(g, level=2)
        chessgame = game.Game(None, generator)
        chessgame.init_new()
    if payload and 'game_data' in payload:
        chessgame.deserialize(payload['game_data']['game'])
    result = {
        'game': chessgame,
    }
    if payload and 'move' in payload:
        result['move'] = pieces.PieceMove.deserialize(payload['move'])
    return result
def prepare_game_response(chessgame):
    """Serializes the game plus every currently legal move as JSON."""
    game_payload = chessgame.serialize()
    moves_payload = []
    for piece_move in chessgame.getAllMoves():
        # resolve which piece this move belongs to from its start square
        from_pos = piece_move.moves[0][0]
        moving_piece = chessgame.board.squares[from_pos[0]][from_pos[1]].piece
        moves_payload.append({
            'pid': moving_piece.id,
            'move': piece_move.serialize()
        })
    body = json.dumps(
        {
            'game': game_payload,
            'moves': moves_payload
        },
        separators=(',', ':')
    )
    return Response(response=body, status=200, mimetype="application/json")
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,034 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/searchtree/nodes.py | import sys
class StopIterationAfterNodeTraverse(StopIteration):
    """Aborts sibling iteration; the current child's result IS still counted
    (raised by the alpha-beta nodes after a cut-off)."""
    pass
class StopIterationBeforeNodeTraverse(StopIteration):
    """Aborts sibling iteration without counting the current child
    (caught by the traversal loops; no raiser in this module)."""
    pass
class DepthFirstNode(object):
    """Generic node for depth-first tree traversal.

    ``traverse`` is bound once, at construction time, to either preorder
    (evaluate self before the children) or postorder (evaluate self after).
    ``evaluations`` counts evaluations performed in this node's subtree.
    """

    def __init__(self, data=None, value=None, alghoritm='preorder'):
        self.data = data
        self.nodes = []
        self.value = value
        self.evaluations = 0
        # bind the traversal strategy up front
        self.traverse = self.preorder if alghoritm == 'preorder' else self.postorder

    def addNode(self, node):
        """Appends a child and returns self for chaining."""
        self.nodes.append(node)
        return self

    def preorder(self):
        """Evaluate this node first, then its children."""
        self.evaluate()
        self._visit_children()
        return self

    def postorder(self):
        """Visit the children first, then evaluate this node."""
        self._visit_children()
        self.evaluate()
        return self

    def _visit_children(self):
        # StopIterationBeforeNodeTraverse aborts without counting the
        # current child; StopIterationAfterNodeTraverse aborts but still
        # counts it (alpha-beta cut-off).
        child = None
        try:
            for child in self.nodes:
                self.traverseNode(child)
                self.evaluations += child.evaluations
        except StopIterationBeforeNodeTraverse:
            pass
        except StopIterationAfterNodeTraverse:
            self.evaluations += child.evaluations

    def traverseNode(self, node):
        """Hook: traverse a single child (overridden by pruning nodes)."""
        node.traverse()

    def evaluate(self):
        self.evaluations += 1
        self.doEvaluate()

    def doEvaluate(self):
        """Hook for subclasses; the base node computes nothing."""
        pass

    def __str__(self):
        return 'data: {0}, {1} nodes, value: {2}'.format(self.data, len(self.nodes), self.value)
class MinNode(DepthFirstNode):
    """Minimizing node for the minimax algorithm (postorder evaluation)."""

    def __init__(self, data=None, value=None):
        super(MinNode, self).__init__(data, value, 'postorder')

    def evaluate(self):
        """Takes the smallest child value (and its data) as its own."""
        super(MinNode, self).evaluate()
        if not self.nodes:
            return
        best_value = sys.maxint
        best_data = self.data
        for child in self.nodes:
            if child.value < best_value:
                best_value = child.value
                best_data = child.data
        self.value = best_value
        self.data = best_data
class MaxNode(DepthFirstNode):
    """Maximizing node for the minimax algorithm (postorder evaluation)."""

    def __init__(self, data=None, value=None):
        super(MaxNode, self).__init__(data, value, 'postorder')

    def evaluate(self):
        """Takes the largest child value (and its data) as its own."""
        super(MaxNode, self).evaluate()
        if not self.nodes:
            return
        best_value = -sys.maxint
        best_data = self.data
        for child in self.nodes:
            if child.value > best_value:
                best_value = child.value
                best_data = child.data
        self.value = best_value
        self.data = best_data
class MinABNode(MinNode):
    """Minimizing node with alpha-beta pruning."""

    def __init__(self, data=None, value=None, alpha=-sys.maxint, beta=sys.maxint):
        self.alpha = alpha
        self.beta = beta
        super(MinABNode, self).__init__(data, value)

    def traverseNode(self, node):
        # push the current window down before descending
        node.alpha = self.alpha
        node.beta = self.beta
        node.traverse()
        # a min node can only lower beta
        if node.value < self.beta:
            self.beta = node.value
        # prune the remaining siblings once the window closes
        if self.beta <= self.alpha:
            raise StopIterationAfterNodeTraverse
class MaxABNode(MaxNode):
    """Maximizing node with alpha-beta pruning."""

    def __init__(self, data=None, value=None, alpha=-sys.maxint, beta=sys.maxint):
        self.alpha = alpha
        self.beta = beta
        super(MaxABNode, self).__init__(data, value)

    def traverseNode(self, node):
        # push the current window down before descending
        node.alpha = self.alpha
        node.beta = self.beta
        node.traverse()
        # a max node can only raise alpha
        if node.value > self.alpha:
            self.alpha = node.value
        # prune the remaining siblings once the window closes
        if self.beta <= self.alpha:
            raise StopIterationAfterNodeTraverse
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,035 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/game_factory.py | import board
import game
import pieces
def make_whites_check1():
    """Plays a short opening after which white has just given check."""
    _game = game.Game(board.Board())
    _game.init_new()
    for start, end in (
        ((4, 1), (4, 3)),
        ((4, 6), (4, 4)),
        ((6, 0), (5, 2)),
        ((5, 6), (5, 5)),
        ((5, 2), (4, 4)),
        ((5, 5), (4, 4)),
        ((3, 0), (7, 4)),
    ):
        _game.move(pieces.PieceMove((start, end)))
    return _game
def make_whites_checkmate1():
    """Plays a short opening after which white delivers checkmate."""
    _game = game.Game(board.Board())
    _game.init_new()
    for start, end in (
        ((4, 1), (4, 2)),
        ((5, 6), (5, 5)),
        ((3, 1), (3, 2)),
        ((6, 6), (6, 4)),
        ((3, 0), (7, 4)),
    ):
        _game.move(pieces.PieceMove((start, end)))
    return _game
def make_whites_castling_short():
    """Bare position where white can castle short (king-side)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    for piece_id, pos in (('WK', (4, 0)), ('Wr1', (7, 0)), ('BK', (4, 7))):
        _game.initPiece(_game.piece_list[piece_id], pos)
    return _game
def make_whites_castling_long():
    """Bare position where white can castle long (queen-side)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    for piece_id, pos in (('WK', (4, 0)), ('Wr1', (0, 0)), ('BK', (4, 7))):
        _game.initPiece(_game.piece_list[piece_id], pos)
    return _game
def make_whites_enpassant():
    """Position for exercising white's en passant capture (black to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    placements = (
        ('WK', (4, 0)),
        ('BK', (4, 7)),
        ('Wp1', (5, 4)),
        ('Wp2', (3, 4)),
        ('Bp1', (4, 6)),
    )
    for piece_id, pos in placements:
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = True
    return _game
def make_blacks_enpassant():
    """Position for exercising black's en passant capture (white to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    placements = (
        ('WK', (4, 0)),
        ('BK', (4, 7)),
        ('Wp1', (5, 1)),
        ('Bp1', (4, 3)),
        ('Bp2', (6, 3)),
    )
    for piece_id, pos in placements:
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = False
    return _game
def make_whites_promotion():
    """Position where white pawns are one step from promotion (white to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    placements = (
        ('WK', (4, 0)),
        ('BK', (2, 7)),
        ('Wp1', (5, 6)),
        ('Wp2', (6, 6)),
        ('Bp1', (1, 3)),
        ('Bp2', (4, 7)),
    )
    for piece_id, pos in placements:
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = False
    return _game
def make_blacks_promotion():
    """Position where a black pawn is one step from promotion (black to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    placements = (
        ('WK', (4, 0)),
        ('BK', (2, 4)),
        ('Wp1', (6, 6)),
        ('Bp1', (1, 1)),
    )
    for piece_id, pos in placements:
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = True
    return _game
def make_kings_fight():
    """Position with only the two kings near each other (black to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    for piece_id, pos in (('WK', (2, 5)), ('BK', (5, 2))):
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = True
    return _game
def make_stalemate():
    """King + queen vs. cornered king: a stalemate setup (white to move)."""
    _game = game.Game(board.Board())
    _game.init_new()
    _game.strip()
    for piece_id, pos in (('WK', (7, 5)), ('WQ', (5, 5)), ('BK', (6, 7))):
        _game.initPiece(_game.piece_list[piece_id], pos)
    _game.black_moves = False
    return _game
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,036 | m-zajac/SimplePyWebChess | refs/heads/master | /app/__init__.py | from flask import Flask
from views import init
from blueprints.chess import chess
def create_app(debug=False):
    """Application factory.

    Builds the Flask app, mounts the chess blueprint under /chess and
    installs the root views.
    """
    application = Flask(__name__)
    application.debug = debug
    application.register_blueprint(chess, url_prefix='/chess')
    init(application)
    return application
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,037 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/move_generators/__init__.py | from . import gen_rand, gen_minimax
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,038 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/pieces.py | """Pieces module"""
from collections import OrderedDict
from modules.utils import LazyDict
# piece types dictionary
# Maps the single-character codes used by serialize()/deserialize() to the
# Type* classes.  Registered lazily because those classes are defined
# further down in this module.
types_dict = LazyDict()
types_dict.addLazy('K', lambda: TypeKing)
types_dict.addLazy('Q', lambda: TypeQueen)
types_dict.addLazy('b', lambda: TypeBishop)
types_dict.addLazy('k', lambda: TypeKnight)
types_dict.addLazy('r', lambda: TypeRook)
types_dict.addLazy('p', lambda: TypePawn)
class PieceMove(object):
    """A single game move in absolute board coordinates.

    One move carries 1-2 piece relocations (two for castling), an optional
    pawn transformation and an optional extra capture (en passant).
    """

    def __init__(self, *vargs):
        # each entry: ((from_x, from_y), (to_x, to_y))
        self.moves = vargs if vargs else []
        # optional (position, type class) applied after the move,
        # e.g. a pawn promoting to TypeQueen
        self.transformation = None
        # optional extra piece captured by this move (en passant)
        self.capture = None

    def rotate(self):
        """Mirrors all coordinates to the other player's point of view."""
        flip = lambda pos: (7 - pos[0], 7 - pos[1])
        self.moves = map(lambda m: (flip(m[0]), flip(m[1])), self.moves)
        if self.transformation:
            self.transformation = (flip(self.transformation[0]), self.transformation[1])

    def serialize(self):
        code_by_type = {v: k for k, v in types_dict.items()}
        data = {
            'moves': self.moves,
            'tp': None,
            'tt': None,
            'c': None,
        }
        if self.transformation:
            data['tp'] = self.transformation[0]
            data['tt'] = code_by_type[self.transformation[1]]
        if self.capture:
            data['c'] = self.capture.serialize()
        return data

    @staticmethod
    def deserialize(data):
        move = PieceMove(*data['moves'])
        if data.get('tt'):
            move.transformation = (data['tp'], types_dict[data['tt']])
        if 'c' in data:
            move.capture = Piece.deserialize(data['c'])
        return move

    def __str__(self):
        return "moves: {m}, trans: {t}, cap: {c}".format(m=self.moves, t=self.transformation, c=self.capture)
class Piece(object):
    """A chess piece: a Type* class plus color, id and board position."""

    def __init__(self, type, is_black, id=None):
        super(Piece, self).__init__()
        self.id = id
        self.type = type
        self.is_black = is_black
        # completed moves so far (serialized as 'm')
        self.moves_count = 0
        # absolute (x, y) board position; None while off the board
        self.position = None

    def getMoves(self, board):
        """Returns this piece's legal moves in absolute coordinates.

        Black pieces are computed on the reversed board and rotated back;
        every move is then filtered for own-king safety.
        """
        if self.position is None:
            return []
        if self.is_black:
            mirrored = (7 - self.position[0], 7 - self.position[1])
            moves = self.type.getMoves(self, mirrored, board.squares_reversed)
            map(lambda m: m.rotate(), moves)
        else:
            moves = self.type.getMoves(self, self.position, board.squares)
        # drop moves that would leave the own king attacked
        return filter(lambda m: TypeKing.checkSafeAfterMove(m, board), moves)

    def serialize(self):
        code_by_type = {v: k for k, v in types_dict.items()}
        return {
            'id': self.id,
            't': code_by_type[self.type],
            'p': self.position,
            'm': self.moves_count,
            'b': self.is_black
        }

    @staticmethod
    def deserialize(data):
        if not data:
            return None
        piece = Piece(types_dict[data['t']], data['b'], data['id'])
        piece.moves_count = data['m']
        if data['p']:
            piece.position = tuple(data['p'])
        return piece

    def __eq__(self, other):
        return self.id == other.id and self.type == other.type and self.is_black == other.is_black

    def __str__(self):
        color = 'Black' if self.is_black else 'White'
        return color + ' ' + str(self.id) + ' (' + str(self.moves_count) + ' moves)'

    def __repr__(self):
        return self.__str__()
class TypeBishop(object):
    """Bishop: slides any distance along the four diagonals."""

    value = 3

    @staticmethod
    def getMoves(piece, position, squares):
        """Cast a ray down each diagonal until a piece or the board edge stops it."""
        targets = []
        for dx, dy in ((-1, -1), (-1, 1), (1, -1), (1, 1)):
            for step in range(1, 8):
                x, y = position[0] + dx * step, position[1] + dy * step
                if not (0 <= x <= 7 and 0 <= y <= 7):
                    break
                occupant = squares[x][y].piece
                if occupant is not None:
                    # An enemy piece can be captured; either way the ray stops.
                    if occupant.is_black != piece.is_black:
                        targets.append((x, y))
                    break
                targets.append((x, y))
        return [PieceMove((position, t)) for t in targets]
class TypeRook(object):
    """Rook: slides any distance along ranks and files."""

    value = 5

    @staticmethod
    def getMoves(piece, position, squares):
        """Cast a ray down each orthogonal direction until blocked or off-board."""
        targets = []
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            for step in range(1, 8):
                x, y = position[0] + dx * step, position[1] + dy * step
                if not (0 <= x <= 7 and 0 <= y <= 7):
                    break
                occupant = squares[x][y].piece
                if occupant is not None:
                    # An enemy piece can be captured; either way the ray stops.
                    if occupant.is_black != piece.is_black:
                        targets.append((x, y))
                    break
                targets.append((x, y))
        return [PieceMove((position, t)) for t in targets]
class TypeQueen(object):
    """Queen: combines rook and bishop movement."""

    value = 9

    @staticmethod
    def getMoves(piece, position, squares):
        # Orthogonal rays first, then diagonals, matching rook+bishop order.
        orthogonal = TypeRook.getMoves(piece, position, squares)
        diagonal = TypeBishop.getMoves(piece, position, squares)
        return orthogonal + diagonal
class TypeKnight(object):
    """Knight: fixed L-shaped jumps; may leap over other pieces."""

    value = 3

    @staticmethod
    def getMoves(piece, position, squares):
        """Try each of the eight L-jumps, keeping on-board, non-friendly squares."""
        jumps = ((1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2))
        targets = []
        for dx, dy in jumps:
            x, y = position[0] + dx, position[1] + dy
            if not (0 <= x <= 7 and 0 <= y <= 7):
                continue
            occupant = squares[x][y].piece
            # Empty square or enemy piece is a valid destination.
            if occupant is None or occupant.is_black != piece.is_black:
                targets.append((x, y))
        return [PieceMove((position, t)) for t in targets]
class TypePawn(object):
    """Pawn: forward pushes, diagonal captures, en passant, promotion.

    All coordinates are from white's point of view (black pawns go through
    the board mirroring in Piece.getMoves), so pawns always advance towards
    increasing y.
    """

    value = 1

    @staticmethod
    def getMoves(piece, position, squares):
        moves = []

        def push(target):
            # Append one pseudo-legal move to `target`; a move starting on
            # rank 6 (about to reach the last rank) expands into the two
            # promotion alternatives. Deduplicates the promotion logic that
            # was previously copy-pasted for pushes and captures.
            if position[1] == 6:
                for new_type in (TypeQueen, TypeKnight):
                    move = PieceMove((position, target))
                    move.transformation = (target, new_type)
                    moves.append(move)
            else:
                moves.append(PieceMove((position, target)))

        # Forward pushes: one square, plus a double step from the start rank.
        offsets = [(0, 1)]
        if piece.moves_count == 0 and position[1] == 1:
            offsets.append((0, 2))
        for offset in offsets:
            x, y = position[0] + offset[0], position[1] + offset[1]
            if max(x, y) > 7 or min(x, y) < 0:
                continue
            # A blocked square also blocks the double step behind it.
            if squares[x][y].piece:
                break
            push((x, y))

        # Diagonal captures.
        for attack in ((1, 1), (-1, 1)):
            x, y = position[0] + attack[0], position[1] + attack[1]
            if max(x, y) > 7 or min(x, y) < 0:
                continue
            other = squares[x][y].piece
            if not other or other.is_black == piece.is_black:
                continue
            push((x, y))

        # En passant: an enemy pawn alongside on rank 4 that has moved
        # exactly once. NOTE(review): this does not verify that the double
        # step was the opponent's immediately preceding move, as the full
        # rule requires -- confirm against game history if available.
        if position[1] == 4:
            for side in ((1, 0), (-1, 0)):
                x, y = position[0] + side[0], position[1] + side[1]
                if max(x, y) > 7 or min(x, y) < 0:
                    continue
                other = squares[x][y].piece
                if not other or other.is_black == piece.is_black:
                    continue
                if other.type != TypePawn or other.moves_count != 1:
                    continue
                move = PieceMove((position, (x, y + 1)))
                move.capture = other
                moves.append(move)

        return moves
class TypeKing(object):
    """King: single-step moves, castling, and the check-detection helpers
    used to filter every piece's pseudo-legal moves."""

    value = 1000
    # Slider types that threaten the king along diagonals / ranks+files.
    threats_diagonal = set([TypeQueen, TypeBishop])
    threats_orthogonal = set([TypeQueen, TypeRook])

    @staticmethod
    def getMoves(piece, position, squares):
        """One square in each direction, plus castling when available."""
        position_list = []
        for i in range(-1, 2):
            for j in range(-1, 2):
                if i == j == 0:
                    continue
                x, y = position[0] + i, position[1] + j
                if max(x, y) > 7 or min(x, y) < 0:
                    continue
                o = squares[x][y].piece
                # Empty square or enemy piece is a valid destination.
                if o and o.is_black != piece.is_black:
                    position_list.append((x, y))
                elif not o:
                    position_list.append((x, y))
        moves = [PieceMove((position, p)) for p in position_list]
        # castling
        # NOTE(review): only "king and rook unmoved" and "squares empty" are
        # checked here; the later checkSafeAfterMove filter validates the
        # king's destination but not the squares it passes through --
        # confirm the through-check rule is enforced elsewhere.
        if piece.moves_count == 0:
            # castling - short
            rook = squares[7][0].piece
            if rook and rook.type == TypeRook and rook.moves_count == 0:
                free_pass = True
                for i in [5, 6]:
                    if squares[i][0].piece:
                        free_pass = False
                        break
                if free_pass:
                    # Two simultaneous sub-moves: king to (6,0), rook to (5,0).
                    moves.append(PieceMove(
                        (position, (6, 0)),
                        ((7, 0), (5, 0))
                    ))
            # castling - long
            rook = squares[0][0].piece
            if rook and rook.type == TypeRook and rook.moves_count == 0:
                free_pass = True
                for i in range(1, 4):
                    if squares[i][0].piece:
                        free_pass = False
                        break
                if free_pass:
                    # King to (2,0), rook to (3,0).
                    moves.append(PieceMove(
                        (position, (2, 0)),
                        ((0, 0), (3, 0))
                    ))
        return moves

    @staticmethod
    def checkSafe(position, squares):
        """Return True when the king standing on `position` is not attacked.

        Raises ValueError if `position` does not actually hold a king with
        consistent position data.
        """
        king = squares[position[0]][position[1]].piece
        if king.type is not TypeKing:
            raise ValueError('Invalid king position')
        if king.position != position:
            raise ValueError('Invalid king position data! ' + str(position) + ' vs ' + str(king.position))
        king_is_black = king.is_black
        # check diagonals + orthogonals
        # (the i == j == 0 "direction" immediately finds the king itself as a
        # friendly piece and breaks, so it is harmless)
        for i in range(-1, 2):
            for j in range(-1, 2):
                for d in range(1, 8):
                    x, y = position[0] + i * d, position[1] + j * d
                    # stay on board
                    if max(x, y) > 7 or min(x, y) < 0:
                        break
                    o = squares[x][y].piece
                    if o:
                        if o.is_black == king_is_black:
                            # friendly piece, no threat from this direction
                            break
                        elif d == 1 and o.type == TypeKing:
                            # adjacent enemy king
                            return False
                        elif o.type in TypeKing.threats_diagonal and abs(i) == abs(j):
                            return False
                        elif o.type in TypeKing.threats_orthogonal and abs(i) != abs(j):
                            return False
                        else:
                            # non threatening foe
                            break
        # check knights
        knight_offsets = [(1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)]
        for offset in knight_offsets:
            x, y = position[0] + offset[0], position[1] + offset[1]
            # stay on board
            if max(x, y) > 7 or min(x, y) < 0:
                continue
            o = squares[x][y].piece
            if o and o.type == TypeKnight and o.is_black != king_is_black:
                return False
        # check pawns (enemy pawns attack from opposite directions per colour)
        if king_is_black:
            pawns_offsets = [(1, -1), (-1, -1)]
        else:
            pawns_offsets = [(1, 1), (-1, 1)]
        for offset in pawns_offsets:
            x, y = position[0] + offset[0], position[1] + offset[1]
            # stay on board
            if max(x, y) > 7 or min(x, y) < 0:
                continue
            o = squares[x][y].piece
            if o and o.type == TypePawn and o.is_black != king_is_black:
                return False
        return True

    @staticmethod
    def checkSafeAfterMove(move, board):
        """Apply `move` speculatively, test own-king safety, then revert.

        The board is mutated in place and restored before returning, so
        callers observe no net change.
        """
        start_pos = move.moves[0][0]
        end_pos = move.moves[0][1]
        piece = board.squares[start_pos[0]][start_pos[1]].piece
        # init king color and position
        if piece.type is TypeKing:
            kingpos = end_pos
        elif piece.is_black:
            kingpos = board.black_king_pos
        else:
            kingpos = board.white_king_pos
        # no king on board
        if kingpos is None:
            return True
        # fake move: record original occupants so we can undo afterwards
        backup = OrderedDict()
        backup_type = piece.type
        for m in move.moves:
            _from = m[0]
            _to = m[1]
            #backup pieces
            p = board.squares[_from[0]][_from[1]].piece
            backup[(_to[0], _to[1])] = board.squares[_to[0]][_to[1]].piece
            backup[(_from[0], _from[1])] = p
            p.position = _to
            board.squares[_to[0]][_to[1]].piece = p
            board.squares[_from[0]][_from[1]].piece = None
        if move.transformation:
            trans_pos = move.transformation[0]
            trans_piece = board.squares[trans_pos[0]][trans_pos[1]].piece
            if trans_piece:
                trans_piece.type = move.transformation[1]
        # check kings safety
        result = TypeKing.checkSafe(kingpos, board.squares)
        # revert move -- walk the backup in reverse insertion order so
        # overlapping multi-part moves (castling) restore correctly.
        # NOTE(review): reversed() on dict items works on the Python 2 list;
        # under Python 3 this would need reversed(list(backup.items())).
        for pos, p in reversed(backup.items()):
            board.squares[pos[0]][pos[1]].piece = p
            if p:
                p.position = pos
        piece.type = backup_type
        # done
        return result
# Resolve all lazily-registered piece types now that every Type* class
# above has been defined.
types_dict.load()
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,039 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/move_generators/gen_rand.py | import random
def randomGenerator(game):
    """Pick a uniformly random legal move for the side to play.

    Repeatedly samples a random own piece until one with at least one
    legal move is found, then returns a random move of that piece.
    NOTE: loops forever if the side to move has no legal moves at all
    (checkmate/stalemate), matching the original behaviour.
    """
    own_pieces = game.black_pieces if game.black_moves else game.white_pieces
    while True:
        candidate = random.choice(own_pieces)
        candidate_moves = candidate.getMoves(game.board)
        if candidate_moves:
            return random.choice(candidate_moves)
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,040 | m-zajac/SimplePyWebChess | refs/heads/master | /blueprints/chess/__init__.py | from flask import Blueprint
# NOTE(review): implicit-relative import (Python 2 style); under Python 3
# this would need to be `from .views import init`.
from views import init

# Blueprint exposing the chess UI; its routes are attached by views.init().
chess = Blueprint('chess', __name__, template_folder='templates', static_folder='static')
init(chess)
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,041 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/utils.py | "Utilities module"
class LazyDict(dict):
    """Dict that defers computing selected values until first access.

    Values registered via addLazy() may be plain objects or zero-argument
    callables; callables are invoked (once) when the key is first read or
    when load() is called.
    """

    def __init__(self, *args, **kwargs):
        super(LazyDict, self).__init__(*args, **kwargs)
        # Per-instance registry of pending lazy keys. This was previously a
        # class attribute, so all LazyDict instances shared one registry and
        # lazy entries leaked between unrelated instances.
        self._lazy_keys = {}

    def __missing__(self, key):
        # Resolve on first access, then drop the recipe. Raises KeyError
        # (from _loadLazyKey) if the key was never registered.
        self._loadLazyKey(key)
        del self._lazy_keys[key]
        return self[key]

    def addLazy(self, key, value):
        """Register `value` (or a zero-arg callable producing it) under `key`."""
        self._lazy_keys[key] = value

    def _loadLazyKey(self, key):
        # Materialize one pending value into the dict proper.
        val = self._lazy_keys[key]
        if callable(val):
            val = val()
        self[key] = val

    def load(self):
        """Eagerly resolve all pending lazy keys; returns self for chaining."""
        for k in self._lazy_keys:
            self._loadLazyKey(k)
        self._lazy_keys = {}
        return self
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,042 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/board.py | """Board module"""
import pieces
class Square(object):
    """A single board square: its colour plus the piece standing on it (or None)."""

    def __init__(self, is_black, Piece=None):
        # `Piece` keeps its historical capitalised name for keyword-call
        # compatibility, even though it holds a piece instance.
        super(Square, self).__init__()
        self.piece = Piece if Piece else None
        self.is_black = is_black

    def __eq__(self, other):
        return self.piece == other.piece and self.is_black == other.is_black

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; without this,
        # `a != b` silently compared identity.
        return not self.__eq__(other)
class Board(object):
    """Board state: the 8x8 squares plus cached king positions.

    Instances are deep-copied frequently (e.g. by move search), so this
    class is a pure data bag; all behaviour lives in BoardManager.
    """

    def __init__(self):
        """Initialize an empty board with alternating square colours."""
        # squares[row][col]; colour alternates with coordinate parity.
        self.squares = [[Square(not ((col + row) % 2)) for col in range(8)]
                        for row in range(8)]
        # Mirror view sharing the very same Square objects, used when
        # generating black's moves from white's perspective.
        self.squares_reversed = [[self.squares[7 - row][7 - col] for col in range(8)]
                                 for row in range(8)]
        # kings positions, maintained by BoardManager
        self.white_king_pos = None
        self.black_king_pos = None
class BoardManager(object):
    """Stateless helpers that mutate and (de)serialize Board instances.

    Kept separate from Board so Board stays a cheap-to-copy data bag.
    """

    # piece-type <-> single-letter code mapping used by (de)serialization
    types_dict = {
        'K': pieces.TypeKing,
        'Q': pieces.TypeQueen,
        'b': pieces.TypeBishop,
        'k': pieces.TypeKnight,
        'r': pieces.TypeRook,
        'p': pieces.TypePawn
    }

    @staticmethod
    def getDictForPiece(board, piece):
        # NOTE(review): assumes the board object carries black_pieces /
        # white_pieces collections (attached during game setup) -- confirm
        # with callers; Board.__init__ itself does not create them.
        return board.black_pieces if piece.is_black else board.white_pieces

    @staticmethod
    def initPiece(board, piece, pos, validate=True):
        """Place `piece` on `board` at `pos`, updating the cached king position.

        Raises ValueError when `validate` is set and `pos` is off-board.
        """
        if validate and not BoardManager.onBoard(pos):
            raise ValueError('Position out of board!')
        piece.position = pos
        board.squares[pos[0]][pos[1]].piece = piece
        if piece.type == pieces.TypeKing:
            if piece.is_black:
                board.black_king_pos = pos
            else:
                board.white_king_pos = pos

    @staticmethod
    def move(board, move_object):
        """Apply a PieceMove (possibly multi-part, e.g. castling) to `board`.

        Returns the list of captured pieces; raises ValueError on an
        inconsistent move (empty source square or friendly-fire capture).
        NOTE(review): move_object.capture (en passant victim) is not removed
        here -- presumably handled by the caller; confirm.
        """
        captured_pieces = []
        for move in move_object.moves:
            start_pos = move[0]
            end_pos = move[1]
            piece = board.squares[start_pos[0]][start_pos[1]].piece
            if not piece:
                raise ValueError('Invalid move position!')
            captured_piece = board.squares[end_pos[0]][end_pos[1]].piece
            if captured_piece:
                if captured_piece.is_black == piece.is_black:
                    # same color, invalid move!
                    raise ValueError('Invalid move, square occupied!')
                captured_piece.position = None
                captured_pieces.append(captured_piece)
            # move
            board.squares[start_pos[0]][start_pos[1]].piece = None
            board.squares[end_pos[0]][end_pos[1]].piece = piece
            piece.position = end_pos
            piece.moves_count += 1
        # promotion: swap the piece's type in place
        if move_object.transformation:
            pos, trans_piece_type = move_object.transformation
            trans_piece = board.squares[pos[0]][pos[1]].piece
            if trans_piece:
                trans_piece.type = trans_piece_type
        # keep the cached king position in sync (uses the last sub-move's piece)
        if piece.type == pieces.TypeKing:
            if piece.is_black:
                board.black_king_pos = piece.position
            else:
                board.white_king_pos = piece.position
        return captured_pieces

    @staticmethod
    def removePiece(board, piece):
        """Remove `piece` from `board` and clear its position."""
        pos = piece.position
        board.squares[pos[0]][pos[1]].piece = None
        piece.position = None

    @staticmethod
    def onBoard(position):
        """Return True when (x, y) lies within the 8x8 board."""
        def rangeok(val):
            if val < 0 or val > 7:
                return False
            return True
        if not rangeok(position[0]) or not rangeok(position[1]):
            return False
        return True

    @staticmethod
    def serializePiece(piece):
        """Serialize one piece to a plain dict (None passes through)."""
        if piece is None:
            return None
        reverse_types_dict = {v: k for k, v in BoardManager.types_dict.items()}
        return {
            'id': piece.id,
            't': reverse_types_dict[piece.type],
            'p': piece.position,
            'm': piece.moves_count,
            'b': piece.is_black
        }

    @staticmethod
    def deserializePiece(piecedata):
        """Rebuild one Piece from serializePiece() output (None passes through)."""
        if not piecedata:
            return None
        ptype = BoardManager.types_dict[piecedata['t']]
        p = pieces.Piece(ptype, piecedata['b'], piecedata['id'])
        p.moves_count = piecedata['m']
        if piecedata['p']:
            p.position = tuple(piecedata['p'])
        return p

    @staticmethod
    def serialize(board):
        """Serialize every piece on `board`, keyed by piece id."""
        # Local renamed from `pieces` -- the old name shadowed the imported
        # `pieces` module within this method.
        found = []
        for row in board.squares:
            for square in row:
                p = square.piece
                if p:
                    found.append(p)
        result = {}
        for p in found:
            result[p.id] = BoardManager.serializePiece(p)
        return result

    @staticmethod
    def deserialize(board, data):
        """Reset `board` in place and repopulate it from serialize() output."""
        # Re-running __init__ clears all squares and cached king positions.
        board.__init__()
        for id, piecedata in data.iteritems():
            p = BoardManager.deserializePiece(piecedata)
            BoardManager.initPiece(board, p, p.position)
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,043 | m-zajac/SimplePyWebChess | refs/heads/master | /app/views.py | from flask import render_template, redirect, url_for
def init(app):
    """Register the application's top-level routes/handlers on `app`."""
    @app.route('/')
    def index():
        # The root page simply forwards to the chess blueprint's index.
        return redirect(url_for('chess.index'))

    @app.errorhandler(404)
    def page_not_found(error):
        return render_template('page_not_found.html'), 404
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,044 | m-zajac/SimplePyWebChess | refs/heads/master | /modules/chess/move_generators/gen_minimax.py | import copy
import types
from modules.searchtree import nodes
def minimaxGenerator(game, level=1):
    """Minimax move generator.

    Builds an alpha-beta search tree `level` plies deep for the side to
    move, traverses it, and returns the best first-level move.
    """
    # Black minimizes, white maximizes (evaluation is white-positive).
    root_cls = nodes.MinABNode if game.black_moves else nodes.MaxABNode
    root = root_cls()
    _genTreeLevel(root, game, level)
    root.traverse()
    return root.data
def _make_evaluation_function(game):
    """Node doEvaluate method factory.

    Returns a closure suitable for binding as a node's doEvaluate method;
    it scores the captured `game` snapshot with _evaluateGame and stores
    the result on the node.
    """
    def evf(self):
        self.value = _evaluateGame(game)
    return evf
def _genTreeLevel(node, game, stoplevel, first_call=True):
    """Recursively expand `node` with one minimax tree level per legal move.

    Each child gets a deep-copied game with its move applied. Leaf nodes
    carry the originating first-level move in `data`, so traverse() can
    bubble the best move back up to the root.
    """
    if stoplevel <= 0:
        return node
    # Bind an evaluator for this game snapshot onto the node instance.
    node.doEvaluate = types.MethodType(
        _make_evaluation_function(game),
        node
    )
    if game.black_moves:
        pieces = game.black_pieces
    else:
        pieces = game.white_pieces
    # Alternate min/max levels down the tree.
    if isinstance(node, nodes.MinABNode):
        node_class = nodes.MaxABNode
    else:
        node_class = nodes.MinABNode
    for piece in pieces:
        for move in piece.getMoves(game.board):
            # Deep copy so sibling branches don't see each other's moves.
            new_game = copy.deepcopy(game)
            new_game.move(move)
            new_node = node_class()
            node.addNode(new_node)
            if first_call:
                # store move to check
                new_node.move = move
            else:
                # propagate move to the bottom of the tree
                new_node.move = node.move
            if stoplevel == 1:
                # at the bottom - data is the move from first level
                new_node.data = new_node.move
                new_node.doEvaluate = types.MethodType(
                    _make_evaluation_function(new_game),
                    new_node
                )
            else:
                # gen next tree level
                _genTreeLevel(
                    new_node,
                    new_game,
                    stoplevel - 1,
                    first_call=False
                )
def _evaluateGame(game):
"""Evaluates score - for white player
"""
def red_func(value, piece):
return value + piece.type.value
value = reduce(red_func, game.white_pieces, 0)
value -= reduce(red_func, game.black_pieces, 0)
return value
| {"/run.py": ["/app/__init__.py"], "/modules/chess/move_generators/tests.py": ["/modules/chess/move_generators/__init__.py"], "/blueprints/chess/views.py": ["/modules/chess/game_factory.py"], "/app/__init__.py": ["/blueprints/chess/__init__.py"], "/modules/chess/pieces.py": ["/modules/utils.py"]} |
61,045 | CIGAUNAM/SIA | refs/heads/master | /apoyo_institucional/models.py | from django.db import models
from autoslug import AutoSlugField
from nucleo.models import User, Tag, Pais, Estado, Ciudad, Ubicacion, Institucion, Dependencia, Departamento, Cargo
# Create your models here.
class Comision(models.Model):
    """Catalogue entry: a committee (comisión) name with optional description."""
    comision = models.CharField(max_length=255, unique=True)
    slug = AutoSlugField(populate_from='comision', unique=True)
    descripcion = models.TextField(blank=True)

    def __str__(self):
        return self.comision

    class Meta:
        verbose_name_plural = 'Comisiones'
class Actividad(models.Model):
    """Catalogue entry: an activity name with optional description."""
    actividad = models.CharField(max_length=255, unique=True)
    slug = AutoSlugField(populate_from='actividad', unique=True)
    descripcion = models.TextField(blank=True)

    def __str__(self):
        return self.actividad

    class Meta:
        verbose_name_plural = 'Actividades'
class Representacion(models.Model):
    """Catalogue entry: a representation role name with optional description."""
    representacion = models.CharField(max_length=255, unique=True)
    slug = AutoSlugField(populate_from='representacion', unique=True)
    descripcion = models.TextField(blank=True)

    def __str__(self):
        return self.representacion

    class Meta:
        ordering = ['representacion']
        verbose_name = 'Representación'
        verbose_name_plural = 'Representaciones'
"""
class OrganoColegiado(models.Model):
organo_colegiado = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='organo_colegiado', unique=True)
def __str__(self):
return self.organo_colegiado
class Meta:
verbose_name_plural = 'Organos Colegiados'
"""
class CargoAcademicoAdministrativo(models.Model):
    """An academic-administrative position held by a user over a date range."""
    cargo = models.ForeignKey(Cargo)
    user = models.ForeignKey(User)
    descripcion = models.TextField(blank=True)
    dependencia = models.ForeignKey(Dependencia)
    cargo_inicio = models.DateField(auto_now=False)
    cargo_fin = models.DateField(auto_now=False)
    # NOTE(review): populate_from points at a ForeignKey; the slug derives
    # from the related object's string form -- confirm AutoSlugField
    # behaviour matches expectations here.
    slug = AutoSlugField(populate_from='cargo', unique=True)
    tags = models.ManyToManyField(Tag, related_name='cargo_academico_administrativo_tags', blank=True)

    def __str__(self):
        return "[ {} : {} ] : {} : {} : {} : {}".format(self.user, self.cargo, self.dependencia.dependencia, self.dependencia.institucion, self.cargo_inicio, self.cargo_fin)

    class Meta:
        verbose_name_plural = 'Cargos Académico-Administrativos'
        unique_together = ('cargo', 'user', 'dependencia', 'cargo_inicio')
        ordering = ['-cargo_inicio']
        get_latest_by = ['user', 'cargo']
class RepresentanteAnteOrganoColegiado(models.Model):
    """A user acting as representative before a collegiate body for a date range."""
    representante = models.ForeignKey(User)
    representacion = models.ForeignKey(Representacion)
    ante = models.ForeignKey(Departamento)
    descripcion = models.TextField(blank=True)
    cargo_inicio = models.DateField(auto_now=False)
    cargo_fin = models.DateField(auto_now=False)
    tags = models.ManyToManyField(Tag, related_name='representante_ante_organo_colegiado_tags', blank=True)

    def __str__(self):
        return "{} : {} : {} : {} - {}".format(self.representante, self.representacion, self.ante, self.cargo_inicio, self.cargo_fin)

    class Meta:
        verbose_name_plural = 'Representantes Ante Organos Colegiados'
        unique_together = ('representante', 'representacion', 'cargo_inicio')
        ordering = ['-cargo_inicio']
class ComisionAcademica(models.Model):
    """A user's participation in an academic committee over a date range."""
    comision_academica = models.ForeignKey(Comision)
    slug = AutoSlugField(populate_from='comision_academica', unique=True, max_length=255)
    descripcion = models.TextField(blank=True)
    user = models.ForeignKey(User)
    # whether this committee performs evaluation work
    es_evaluacion = models.BooleanField(default=False)
    dependencias = models.ManyToManyField(Dependencia)
    ubicacion = models.ForeignKey(Ubicacion)
    fecha_inicio = models.DateField(auto_now=False)
    fecha_fin = models.DateField(auto_now=False)
    tags = models.ManyToManyField(Tag, related_name='comision_academica_tags', blank=True)

    def __str__(self):
        return "[{}] : {} : {} : {}".format(self.user, self.comision_academica, self.fecha_inicio, self.fecha_fin)

    class Meta:
        verbose_name_plural = 'Comisiones Académicas'
        unique_together = ('comision_academica', 'user', 'fecha_inicio')
        ordering = ['fecha_inicio']
        get_latest_by = ['user', 'comision_academica']
"""
class ComisionEvaluacion(models.Model):
comision_evaluacion = models.ForeignKey(Comision)
descripcion = models.TextField()
user = models.ForeignKey(User)
dependencia = models.ForeignKey(Dependencia)
ubicacion = models.ForeignKey(Ubicacion)
es_academica = models.BooleanField(default=False)
comision_inicio = models.DateField(auto_now=False)
comision_fin = models.DateField(auto_now=False)
tags = models.ManyToManyField(Tag)
slug = AutoSlugField(populate_from='comision_evaluacion', unique=True)
def __str__(self):
return "[{}] : {} : {} : {}".format(self.user, self.comision_evaluacion, self.comision_inicio, self.comision_fin)
class Meta:
verbose_name_plural = 'Comisiones de Evaluación'
unique_together = ('comision_evaluacion', 'user', 'dependencia', 'comision_inicio')
ordering = ['-comision_inicio']
get_latest_by = ['user', 'comision_evaluacion']
"""
class ApoyoTecnico(models.Model):
    """Technical support activity carried out by a user over a date range."""
    apoyo_tecnico = models.ForeignKey(Actividad)
    descripcion = models.TextField()
    user = models.ForeignKey(User)
    dependencia = models.ForeignKey(Dependencia)
    ubicacion = models.ForeignKey(Ubicacion)
    apoyo_inicio = models.DateField(auto_now=False)
    apoyo_fin = models.DateField(auto_now=False)
    slug = AutoSlugField(populate_from='apoyo_tecnico', unique=True)
    # The duplicate plain `tags = models.ManyToManyField(Tag)` declaration
    # was removed: it was immediately overwritten by this one and never
    # reached the model.
    tags = models.ManyToManyField(Tag, related_name='apoyo_tecnico_tags', blank=True)

    def __str__(self):
        return "[{}] : {} : {} : {}".format(self.user, self.apoyo_tecnico, self.apoyo_inicio, self.apoyo_fin)

    class Meta:
        verbose_name_plural = 'Apoyos de Técnicos'
        unique_together = ('apoyo_tecnico', 'user', 'dependencia', 'apoyo_inicio')
        ordering = ['-apoyo_inicio']
        get_latest_by = ['user', 'apoyo_tecnico']
class ApoyoOtraActividad(models.Model):
    """Support in other (miscellaneous) activities by a user over a date range."""
    apoyo_actividad = models.ForeignKey(Actividad)
    descripcion = models.TextField()
    user = models.ForeignKey(User)
    dependencia = models.ForeignKey(Dependencia)
    ubicacion = models.ForeignKey(Ubicacion)
    apoyo_inicio = models.DateField(auto_now=False)
    apoyo_fin = models.DateField(auto_now=False)
    # Fixed: populate_from pointed at 'apoyo_otra_actividad_tags' (the tags
    # related_name), which is not a model attribute and would break slug
    # generation; slugs now derive from the activity itself, matching the
    # other models in this module.
    slug = AutoSlugField(populate_from='apoyo_actividad', unique=True)
    tags = models.ManyToManyField(Tag, related_name='apoyo_otra_actividad_tags', blank=True)

    def __str__(self):
        return "[{}] : {} : {} : {}".format(self.user, self.apoyo_actividad, self.apoyo_inicio, self.apoyo_fin)

    class Meta:
        verbose_name_plural = 'Apoyos en Otras Actividades'
        unique_together = ('apoyo_actividad', 'user', 'dependencia', 'apoyo_inicio')
        ordering = ['-apoyo_inicio']
        get_latest_by = ['user', 'apoyo_actividad']
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,046 | CIGAUNAM/SIA | refs/heads/master | /experiencia_laboral/serializers.py | from rest_framework import serializers
from experiencia_laboral.models import *
class ExperienciaLaboralSerializer(serializers.ModelSerializer):
    """Serializes ExperienciaLaboral rows; `usuario` is read-only."""
    # Declared at class level so DRF picks it up as a serializer field;
    # renders the owning user's username instead of the raw FK.
    usuario = serializers.ReadOnlyField(source='usuario.username')

    class Meta:
        model = ExperienciaLaboral
        fields = ('id', 'dependencia', 'nombramiento', 'es_nombramiento_definitivo', 'cargo', 'descripcion', 'fecha_inicio', 'fecha_fin', 'usuario')
class LineaInvestigacionSerializer(serializers.ModelSerializer):
    """Serializes LineaInvestigacion rows; `usuario` is read-only."""
    # Declared at class level so DRF picks it up as a serializer field.
    usuario = serializers.ReadOnlyField(source='usuario.username')

    class Meta:
        model = LineaInvestigacion
        fields = ('id', 'linea_investigacion', 'descripcion', 'dependencia', 'fecha_inicio', 'fecha_fin', 'usuario')
class CapacidadPotencialidadSerializer(serializers.ModelSerializer):
    """Serializes CapacidadPotencialidad rows; `usuario` is read-only."""
    # Declared at class level so DRF picks it up as a serializer field.
    usuario = serializers.ReadOnlyField(source='usuario.username')

    class Meta:
        model = CapacidadPotencialidad
        fields = ('id', 'competencia', 'descripcion', 'fecha_inicio', 'fecha_fin', 'usuario')
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,047 | CIGAUNAM/SIA | refs/heads/master | /vinculacion/admin.py | from django.contrib import admin
# Register every vinculacion model with the default admin site.
from . models import ArbitrajePublicacionAcademica, ArbitrajeProyectoInvestigacion, ArbitrajeOtrasActividades, RedAcademica, \
    ConvenioEntidadNoAcademica, ClasificacionServicio, ServicioExternoEntidadNoAcademica, OtroProgramaVinculacion

for _model in (
    ArbitrajePublicacionAcademica,
    ArbitrajeProyectoInvestigacion,
    ArbitrajeOtrasActividades,
    RedAcademica,
    ConvenioEntidadNoAcademica,
    ClasificacionServicio,
    ServicioExternoEntidadNoAcademica,
    OtroProgramaVinculacion,
):
    admin.site.register(_model)
["/nucleo/models.py"], "/distinciones/models.py": ["/nucleo/models.py"]} |
61,048 | CIGAUNAM/SIA | refs/heads/master | /nucleo/serializers.py | from rest_framework import serializers
from nucleo.models import *
from formacion_academica.models import *
from autoslug import AutoSlugField
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('id', 'tag')
class ZonaPaisSerializer(serializers.ModelSerializer):
class Meta:
model = ZonaPais
fields = ('id', 'zona')
class PaisSerializer(serializers.ModelSerializer):
class Meta:
model = Pais
fields = ('id', 'pais', 'nombre_extendido', 'zona', 'codigo')
class EstadoSerializer(serializers.ModelSerializer):
class Meta:
model = Estado
fields = ('id', 'estado', 'pais')
class CiudadSerializer(serializers.ModelSerializer):
class Meta:
model = Ciudad
fields = ('id', 'ciudad', 'estado')
class UserSerializer(serializers.ModelSerializer):
cursos_especializacion = serializers.PrimaryKeyRelatedField(many=True, queryset=CursoEspecializacion.objects.all())
licenciaturas = serializers.PrimaryKeyRelatedField(many=True, queryset=Licenciatura.objects.all())
maestrias = serializers.PrimaryKeyRelatedField(many=True, queryset=Maestria.objects.all())
doctorados = serializers.PrimaryKeyRelatedField(many=True, queryset=Doctorado.objects.all())
postdoctorados = serializers.PrimaryKeyRelatedField(many=True, queryset=PostDoctorado.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name', 'tipo', 'fecha_nacimiento', 'pais_origen', 'rfc',
'direccion1', 'direccion2', 'ciudad', 'telefono', 'celular', 'url', 'sni', 'pride', 'ingreso_unam', 'ingreso_entidad',
'cursos_especializacion', 'licenciaturas', 'maestrias', 'doctorados', 'postdoctorados')
read_only_fields = ('username',)
class InstitucionSerializer(serializers.ModelSerializer):
class Meta:
model = Institucion
fields = ('id', 'institucion', 'pais')
class DependenciaSerializer(serializers.ModelSerializer):
class Meta:
model = Dependencia
fields = ('id', 'dependencia', 'institucion', 'ciudad', 'subsistema_unam')
class CargoSerializer(serializers.ModelSerializer):
class Meta:
model = Cargo
fields = ('id', 'cargo', 'descripcion', 'tipo_cargo')
class NombramientoSerializer(serializers.ModelSerializer):
class Meta:
model = Nombramiento
fields = ('id', 'nombramiento', 'clave', 'descripcion')
class AreaConocimientoSerializer(serializers.ModelSerializer):
class Meta:
model = AreaConocimiento
fields = ('id', 'area_conocimiento', 'categoria', 'descripcion')
class AreaEspecialidadSerializer(serializers.ModelSerializer):
class Meta:
model = AreaEspecialidad
fields = ('id', 'especialidad', 'descripcion', 'area_conocimiento')
class ImpactoSocialSerializer(serializers.ModelSerializer):
class Meta:
model = ImpactoSocial
fields = ('id', 'impacto_social', 'descripcion')
class ProgramaFinanciamientoSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramaFinanciamiento
fields = ('id', 'programa_financiamiento', 'descripcion')
class FinanciamientoSerializer(serializers.ModelSerializer):
class Meta:
model = Financiamiento
fields = ('id', 'tipo_financiamiento', 'descripcion', 'programas_financiamiento', 'dependencias_financiamiento', 'clave_proyecto')
class MetodologiaSerializer(serializers.ModelSerializer):
class Meta:
model = Metodologia
fields = ('id', 'metodologia', 'descripcion')
class ProgramaLicenciaturaSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramaLicenciatura
fields = ('id', 'programa', 'descripcion', 'area_conocimiento')
class ProgramaMaestriaSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramaMaestria
fields = ('id', 'programa', 'descripcion', 'area_conocimiento')
class ProgramaDoctoradoSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramaDoctorado
fields = ('id', 'programa', 'descripcion', 'area_conocimiento')
class ProyectoSerializer(serializers.ModelSerializer):
class Meta:
model = Proyecto
fields = ('id', 'nombre_proyecto', 'descripcion', 'es_permanente', 'fecha_inicio', 'fecha_fin', 'responsables', 'participantes', 'status', 'clasificacion', 'organizacion', 'modalidad', 'tematica_genero', 'dependencias', 'financiamientos', 'metodologias', 'especialidades', 'impactos_sociales', 'tecnicos', 'alumnos_doctorado', 'alumnos_maestria', 'alumnos_licenciatura')
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,049 | CIGAUNAM/SIA | refs/heads/master | /desarrollo_tecnologico/apps.py | from django.apps import AppConfig
class DesarrolloTecnologicoConfig(AppConfig):
name = 'desarrollo_tecnologico'
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,050 | CIGAUNAM/SIA | refs/heads/master | /formacion_academica/serializers.py | from rest_framework import serializers
from formacion_academica.models import *
class CursoEspecializacionSerializer(serializers.ModelSerializer):
class Meta:
model = CursoEspecializacion
usuario = serializers.ReadOnlyField(source='usuario.username')
fields = ('id', 'nombre_curso', 'descripcion', 'tipo', 'horas', 'fecha_inicio', 'fecha_fin', 'modalidad', 'area_conocimiento', 'dependencia', 'usuario')
class LicenciaturaSerializer(serializers.ModelSerializer):
class Meta:
model = Licenciatura
usuario = serializers.ReadOnlyField(source='usuario.username')
#fields = ('id', 'carrera', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
fields = ('id', 'carrera', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
class MaestriaSerializer(serializers.ModelSerializer):
class Meta:
model = Maestria
usuario = serializers.ReadOnlyField(source='usuario.username')
#fields = ('id', 'programa', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
fields = ('id', 'programa', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
class DoctoradoSerializer(serializers.ModelSerializer):
class Meta:
model = Doctorado
usuario = serializers.ReadOnlyField(source='usuario.username')
#fields = ('id', 'programa', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
fields = ('id', 'programa', 'descripcion', 'dependencia', 'titulo_tesis', 'tesis_url', 'fecha_inicio', 'fecha_fin', 'fecha_grado', 'usuario')
class PostDoctoradoSerializer(serializers.ModelSerializer):
class Meta:
model = PostDoctorado
usuario = serializers.ReadOnlyField(source='usuario.username')
fields = ('titulo', 'descripcion', 'area_conocimiento', 'dependencia', 'proyecto', 'fecha_inicio', 'fecha_fin', 'usuario', 'tags')
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,051 | CIGAUNAM/SIA | refs/heads/master | /formacion_recursos_humanos/admin.py | from django.contrib import admin
# Register your models here.
from . models import AsesorEstancia, DireccionTesis, ComiteTutoral, ComiteCandidaturaDoctoral
admin.site.register(AsesorEstancia)
admin.site.register(DireccionTesis)
admin.site.register(ComiteTutoral)
admin.site.register(ComiteCandidaturaDoctoral) | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": 
["/nucleo/models.py"], "/distinciones/models.py": ["/nucleo/models.py"]} |
61,052 | CIGAUNAM/SIA | refs/heads/master | /movilidad_academica/models.py | from django.db import models
from autoslug import AutoSlugField
from nucleo.models import User, Tag, Dependencia, Financiamiento, Proyecto
from vinculacion.models import RedAcademica
# Create your models here.
class Vinculacion(models.Model):
tipo = models.CharField(max_length=30, choices=(('INVITACION', 'Invitación'), ('ESTANCIA', 'Estancia de colaboración'), ('SABATICO', 'Sabático')))
academico = models.ForeignKey(User)
descripcion = models.TextField(blank=True)
dependencia = models.ForeignKey(Dependencia)
actividades = models.TextField()
fecha_inicio = models.DateField()
fecha_fin = models.DateField()
intercambio_unam = models.BooleanField(default=False)
financiamiento = models.ForeignKey(Financiamiento)
redes_academicas = models.ManyToManyField(RedAcademica, related_name='vinculacion_redes_academicas', blank=True)
proyectos_investigacion = models.ManyToManyField(Proyecto, related_name='vinculacion_proyectos_investigacion', blank=True)
tags = models.ForeignKey(Tag, related_name='vinculacion_tags')
def __str__(self):
return "{} : {}".format(str(self.academico), str(self.dependencia))
class Meta:
ordering = ['-fecha_inicio']
verbose_name = 'Actividad de vinculación'
verbose_name_plural = 'Actividades de vinculación'
class Invitado(models.Model):
invitado = models.ForeignKey(User)
descripcion = models.TextField(blank=True)
dependencia_procedencia = models.ForeignKey(Dependencia)
actividades = models.TextField()
fecha_inicio = models.DateField()
fecha_fin = models.DateField()
intercambio_unam = models.BooleanField(default=False)
financiamiento = models.ForeignKey(Financiamiento)
redes_academicas = models.ManyToManyField(RedAcademica, related_name='invitado_redes_academicas', blank=True)
proyectos_investigacion = models.ManyToManyField(Proyecto, related_name='invitado_proyectos_investigacion', blank=True)
tags = models.ForeignKey(Tag, related_name='invitado_tags')
def __str__(self):
return "{} : {}".format(str(self.invitado), str(self.dependencia_procedencia))
class Meta:
ordering = ['-fecha_inicio']
verbose_name = 'Invitado nacional'
verbose_name_plural = 'Invitados nacionales'
class EstanciaColaboracion(models.Model):
academico = models.ForeignKey(User)
descripcion = models.TextField(blank=True)
dependencia_visitada = models.ForeignKey(Dependencia)
actividades = models.TextField()
fecha_inicio = models.DateField()
fecha_fin = models.DateField()
intercambio_unam = models.BooleanField(default=False)
financiamiento = models.ForeignKey(Financiamiento)
convocatoria_financiamiento_unam = models.CharField(max_length=255, blank=True)
redes_academicas = models.ManyToManyField(RedAcademica, related_name='estancia_colaboracion_academicas', blank=True)
proyectos_investigacion = models.ManyToManyField(Proyecto, related_name='estancia_colaboracion_investigacion', blank=True)
tags = models.ForeignKey(Tag, related_name='estancia_tags')
def __str__(self):
return "{} : {}".format(str(self.academico), str(self.dependencia_visitada))
class Meta:
ordering = ['-fecha_inicio']
verbose_name = 'Estancia de colaboración'
verbose_name_plural = 'Estancias de colaboración' | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": 
["/nucleo/models.py"], "/distinciones/models.py": ["/nucleo/models.py"]} |
61,053 | CIGAUNAM/SIA | refs/heads/master | /difusion_cientifica/models.py | from django.db import models
from django.conf import settings
#from django.contrib.auth.models import User
from autoslug import AutoSlugField
from nucleo.models import User, Tag, Pais, Ciudad, Ubicacion, Proyecto, TipoEvento, Evento, Libro, Revista, Indice
EVENTO__AMBITO = getattr(settings, 'EVENTO__AMBITO', (('INSTITUCIONAL', 'Institucional'), ('REGIONAL', 'Regional'), ('NACIONAL', 'Nacional'), ('INTERNACIONAL', 'Internacional'), ('OTRO', 'Otro')))
EVENTO__RESPONSABILIDAD = getattr(settings, 'EVENTO__RESPONSABILIDAD', (('COORDINADOR', 'Coordinador general'), ('COMITE', 'Comité organizador'), ('AYUDANTE', 'Ayudante'), ('TECNICO', 'Apoyo técnico'), ('OTRO', 'Otro')))
# Create your models here.
class MemoriaInExtenso(models.Model):
titulo = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='titulo', unique=True)
descipcion = models.TextField(blank=True)
ciudad = models.ForeignKey(Ciudad)
fecha = models.DateField()
evento = models.ForeignKey(Evento)
autores = models.ManyToManyField(User, related_name='memoria_in_extenso_autores_externos')
editores = models.ManyToManyField(User, related_name='memoria_in_extenso_editores', blank=True)
indices = models.ManyToManyField(Indice, related_name='memoria_in_extenso_indices', blank=True)
agradecimientos = models.ManyToManyField(User, related_name='memoria_in_extenso_agradecimientos', blank=True)
pais_origen = models.ForeignKey(Pais)
pagina_inicio = models.PositiveIntegerField()
pagina_fin = models.PositiveIntegerField()
issn = models.SlugField(max_length=20, blank=True)
proyectos = models.ForeignKey(Proyecto)
url = models.URLField(blank=True)
def __str__(self):
return self.titulo
class Meta:
verbose_name = 'Memoria in extenso'
verbose_name_plural = 'Memorias in extenso'
class PrologoLibro(models.Model):
descipcion = models.TextField(blank=True)
autor_prologo = models.ForeignKey(User)
autores = models.ManyToManyField(User, related_name='prologo_libro_autores', blank=True)
editores = models.ManyToManyField(User, related_name='prologo_libro_editores', blank=True)
coordinadores = models.ManyToManyField(User, related_name='prologo_libro_coordinadores', blank=True)
libro = models.ForeignKey(Libro, related_name='prologo_libro_libro')
pagina_inicio = models.PositiveIntegerField()
pagina_fin = models.PositiveIntegerField()
url = models.URLField(blank=True)
tags = models.ManyToManyField(Tag, related_name='prologo_libro_tags', blank=True)
def __str__(self):
return '{} : {}'.format(self.autor_prologo, self.libro)
class Meta:
verbose_name = 'Prólogo de libro'
verbose_name_plural = 'Prólogos de libros'
class Resena(models.Model):
titulo_resena = models.CharField(max_length=255, unique=True)
tipo_publicacion = models.CharField(max_length=20, choices=(('LIBRO', 'Libro'), ('REVISTA', 'Revista'), ('OTRO', 'Otro')))
libro = models.ForeignKey(Libro, null=True, related_name='resena_libro')
revista = models.ForeignKey(Revista, related_name='resena_revista', null=True)
volumen = models.CharField(max_length=10, blank=True)
slug = AutoSlugField(populate_from='titulo_resena', unique=True)
descipcion = models.TextField(blank=True)
revistas = models.ManyToManyField(Revista, related_name='resena_revistas', blank=True)
libros = models.ManyToManyField(Libro, related_name='resena_libros', blank=True)
pagina_inicio = models.PositiveIntegerField()
pagina_fin = models.PositiveIntegerField()
autor_resena = models.ForeignKey(User)
autores = models.ManyToManyField(User, related_name='resena_autores', blank=True)
editores = models.ManyToManyField(User, related_name='resena_editores', blank=True)
coordinadores = models.ManyToManyField(User, related_name='resena_coordinadores', blank=True)
url = models.URLField(blank=True)
tags = models.ManyToManyField(Tag, related_name='resena_tags', blank=True)
def __str__(self):
return '{} : {}'.format(self.autor_resena, self.titulo_resena)
class Meta:
verbose_name = 'Reseña de libro'
verbose_name_plural = 'Reseñas de libros'
class OrganizacionEventoAcademico(models.Model):
evento = models.ForeignKey(Evento)
descipcion = models.TextField(blank=True)
responsabilidad = models.CharField(max_length=30, choices=EVENTO__RESPONSABILIDAD)
numero_ponentes = models.PositiveIntegerField()
numero_asistentes = models.PositiveIntegerField()
ambito = models.CharField(max_length=20, choices=EVENTO__AMBITO)
tags = models.ManyToManyField(Tag, related_name='organizacion_evento_academico_tags', blank=True)
def __str__(self):
return str(self.evento)
class Meta:
verbose_name = 'Organización de evento académico'
verbose_name_plural= 'Organización de eventos académicos'
class ParticipacionEventoAcademico(models.Model):
titulo = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='titulo', unique=True)
descipcion = models.TextField(blank=True)
evento = models.ForeignKey(Evento)
resumen_publicado = models.BooleanField(default=False)
autores = models.ManyToManyField(User, related_name='participacion_evento_academico_autores')
ambito = models.CharField(max_length=20, choices=EVENTO__AMBITO)
por_invitacion = models.BooleanField(default=False)
ponencia_magistral = models.BooleanField(default=False)
tags = models.ManyToManyField(Tag, related_name='participacion_evento_academico_tags', blank=True)
def __str__(self):
return "{} : {}".format(self.titulo, self.evento)
class Meta:
verbose_name = 'Participación en evento académico'
verbose_name_plural= 'Participación en eventos académicos'
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,054 | CIGAUNAM/SIA | refs/heads/master | /experiencia_laboral/apps.py | from django.apps import AppConfig
class ExperienciaLaboralConfig(AppConfig):
name = 'experiencia_laboral'
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,055 | CIGAUNAM/SIA | refs/heads/master | /experiencia_laboral/views.py | from django.shortcuts import render
from django.http.response import HttpResponse
from . permissions import IsOwnerOrReadOnly
from rest_framework import permissions
from experiencia_laboral.serializers import *
from rest_framework import generics
# Create your views here.
class ExperienciaLaboralList(generics.ListCreateAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = ExperienciaLaboral.objects.all()
serializer_class = ExperienciaLaboralSerializer
def perform_create(self, serializer):
serializer.save(usuario=self.request.user)
class ExperienciaLaboralDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = ExperienciaLaboral.objects.all()
serializer_class = ExperienciaLaboralSerializer
class LineaInvestigacionList(generics.ListCreateAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = LineaInvestigacion.objects.all()
serializer_class = LineaInvestigacionSerializer
def perform_create(self, serializer):
serializer.save(usuario=self.request.user)
class LineaInvestigacionDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = LineaInvestigacion.objects.all()
serializer_class = LineaInvestigacionSerializer
class CapacidadPotencialidadList(generics.ListCreateAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = CapacidadPotencialidad.objects.all()
serializer_class = CapacidadPotencialidadSerializer
def perform_create(self, serializer):
serializer.save(usuario=self.request.user)
class CapacidadPotencialidadDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
queryset = CapacidadPotencialidad.objects.all()
serializer_class = CapacidadPotencialidadSerializer | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": 
["/nucleo/models.py"], "/distinciones/models.py": ["/nucleo/models.py"]} |
61,056 | CIGAUNAM/SIA | refs/heads/master | /nucleo/views.py | from django.shortcuts import render
from django.http import HttpResponse
from nucleo.models import *
from nucleo.serializers import *
from rest_framework import generics
from . permissions import IsOwnerOrReadOnly, UserListReadOnly, IsAdminUserOrReadOnly
from rest_framework import permissions
def inicio(request):
return render(request=request, context=None, template_name='dashboard.html')
class TagLista(generics.ListCreateAPIView):
def get(self):
return Tag.objects.all()
class TagList(generics.ListCreateAPIView):
queryset = Tag.objects.all()
serializer_class = TagSerializer
class TagDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Tag.objects.all()
serializer_class = TagSerializer
class ZonaPaisList(generics.ListCreateAPIView):
queryset = ZonaPais.objects.all()
serializer_class = ZonaPaisSerializer
class ZonaPaisDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ZonaPais.objects.all()
serializer_class = ZonaPaisSerializer
class PaisList(generics.ListCreateAPIView):
queryset = Pais.objects.all()
serializer_class = PaisSerializer
class PaisDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Pais.objects.all()
serializer_class = PaisSerializer
class EstadoList(generics.ListCreateAPIView):
queryset = Estado.objects.all()
serializer_class = EstadoSerializer
class EstadoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Estado.objects.all()
serializer_class = EstadoSerializer
class CiudadList(generics.ListCreateAPIView):
queryset = Ciudad.objects.all()
serializer_class = CiudadSerializer
class CiudadDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Ciudad.objects.all()
serializer_class = CiudadSerializer
class UserList(generics.ListCreateAPIView):
permission_classes = (UserListReadOnly,)
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (IsAdminUserOrReadOnly,)
queryset = User.objects.all()
serializer_class = UserSerializer
class InstitucionList(generics.ListCreateAPIView):
queryset = Institucion.objects.all()
serializer_class = InstitucionSerializer
class InstitucionDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Institucion.objects.all()
serializer_class = InstitucionSerializer
class DependenciaList(generics.ListCreateAPIView):
queryset = Dependencia.objects.all()
serializer_class = DependenciaSerializer
class DependenciaDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Dependencia.objects.all()
serializer_class = DependenciaSerializer
class CargoList(generics.ListCreateAPIView):
queryset = Cargo.objects.all()
serializer_class = CargoSerializer
class CargoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Cargo.objects.all()
serializer_class = CargoSerializer
class NombramientoList(generics.ListCreateAPIView):
permission_classes = (UserListReadOnly,)
queryset = Nombramiento.objects.all()
serializer_class = NombramientoSerializer
class NombramientoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (UserListReadOnly,)
queryset = Nombramiento.objects.all()
serializer_class = NombramientoSerializer
class AreaConocimientoList(generics.ListCreateAPIView):
permission_classes = (UserListReadOnly,)
queryset = AreaConocimiento.objects.all()
serializer_class = AreaConocimientoSerializer
class AreaConocimientoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (UserListReadOnly,)
queryset = AreaConocimiento.objects.all()
serializer_class = AreaConocimientoSerializer
class AreaEspecialidadList(generics.ListCreateAPIView):
queryset = AreaEspecialidad.objects.all()
serializer_class = AreaEspecialidadSerializer
class AreaEspecialidadDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = AreaEspecialidad.objects.all()
serializer_class = AreaEspecialidadSerializer
class ImpactoSocialList(generics.ListCreateAPIView):
queryset = ImpactoSocial.objects.all()
serializer_class = ImpactoSocialSerializer
class ImpactoSocialDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ImpactoSocial.objects.all()
serializer_class = ImpactoSocialSerializer
class ProgramaFinanciamientoList(generics.ListCreateAPIView):
queryset = ProgramaFinanciamiento.objects.all()
serializer_class = ProgramaFinanciamientoSerializer
class ProgramaFinanciamientoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ProgramaFinanciamiento.objects.all()
serializer_class = ProgramaFinanciamientoSerializer
class FinanciamientoList(generics.ListCreateAPIView):
queryset = Financiamiento.objects.all()
serializer_class = FinanciamientoSerializer
class FinanciamientoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ProgramaFinanciamiento.objects.all()
serializer_class = FinanciamientoSerializer
class MetodologiaList(generics.ListCreateAPIView):
queryset = Metodologia.objects.all()
serializer_class = MetodologiaSerializer
class MetodologiaDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Metodologia.objects.all()
serializer_class = MetodologiaSerializer
class ProgramaLicenciaturaList(generics.ListCreateAPIView):
queryset = ProgramaLicenciatura.objects.all()
serializer_class = ProgramaLicenciaturaSerializer
class ProgramaLicenciaturaDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ProgramaLicenciatura.objects.all()
serializer_class = ProgramaLicenciaturaSerializer
class ProgramaMaestriaList(generics.ListCreateAPIView):
queryset = ProgramaMaestria.objects.all()
serializer_class = ProgramaMaestriaSerializer
class ProgramaMaestriaDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ProgramaMaestria.objects.all()
serializer_class = ProgramaMaestriaSerializer
class ProgramaDoctoradoList(generics.ListCreateAPIView):
queryset = ProgramaDoctorado.objects.all()
serializer_class = ProgramaDoctoradoSerializer
class ProgramaDoctoradoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = ProgramaDoctorado.objects.all()
serializer_class = ProgramaDoctoradoSerializer
class ProyectoList(generics.ListCreateAPIView):
queryset = Proyecto.objects.all()
serializer_class = ProyectoSerializer
class ProyectoDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
queryset = Proyecto.objects.all()
serializer_class = ProyectoSerializer
#@permission_classes((permissions.IsAuthenticatedOrReadOnly,)) | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": 
["/nucleo/models.py"], "/distinciones/models.py": ["/nucleo/models.py"]} |
61,057 | CIGAUNAM/SIA | refs/heads/master | /difusion_cientifica/apps.py | from django.apps import AppConfig
class DifusionCientificaConfig(AppConfig):
name = 'difusion_cientifica'
verbose_name = "Difusión Científica" | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], 
"/distinciones/models.py": ["/nucleo/models.py"]} |
61,058 | CIGAUNAM/SIA | refs/heads/master | /movilidad_academica/apps.py | from django.apps import AppConfig
class MovilidadAcademicaConfig(AppConfig):
name = 'movilidad_academica'
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
61,059 | CIGAUNAM/SIA | refs/heads/master | /SIA/settings.py | """
Django settings for SIA project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm-y6($7)(5vy-*e!2f6pqxt6%^jqrnu4!&tbm2($ku^5i@dtiz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '10.10.2.203', '10.1.11.2', '201.144.41.229']
STATUS_PUBLICACION = (('PUBLICADO', 'Publicado'), ('EN_PRENSA', 'En prensa'), ('ACEPTADO', 'Aceptado'), ('ENVIADO', 'Enviado'), ('OTRO', 'Otro'))
STATUS_PROYECTO = (('NUEVO', 'Nuevo'), ('EN_PROCESO', 'En proceso'), ('CONCLUIDO', 'Concluído'), ('OTRO', 'Otro'))
CLASIFICACION_PROYECTO = (('BASICO', 'Básico'), ('APLICADO', 'Aplicado'), ('DESARROLLO_TECNOLOGICO', 'Desarrollo tecnológico'), ('INNOVACION', 'Innovación'), ('INVESTIGACION_FRONTERA', 'Investigación de frontera'), ('OTRA', 'Otra'))
ORGANIZACION_PROYECTO = (('INDIVIDUAL', 'Individual'), ('COLECTIVO', 'Colectivo'))
MODALIDAD_PROYECTO = (('DISCIPLINARIO', 'Disciplinario'), ('MULTIDISCIPLINARIO', 'Multidisciplinario'), ('INTERDISCIPLINARIO', 'Interisciplinario'), ('TRANSDISCIPLINARIO', 'Transdisciplinario'), ('OTRA', 'Otra'))
FINANCIAMIENTO_UNAM = (('ASIGNADO', 'Presupuesto asignado a la entidad'), ('CONCURSADO', 'Presupuesto concursado por la entidad'), ('AUTOGENERADO', 'Recursos autogenerados (extraordinarios)'), ('OTRO', 'Otro'))
FINANCIAMIENTO_EXTERNO = (('ESTATAL', 'Gubernamental Estatal'), ('FEDERAL', 'Gubernamental Federal'), ('LUCRATIVO', 'Privado lucrativo'), ('NO_LUCRATIVO', 'Privado no lucrativo'), ('EXTRANJERO', 'Recursos del extranjero'))
FINANCIAMIENTO_TIPO = (('UNAM', FINANCIAMIENTO_UNAM), ('Externo', FINANCIAMIENTO_EXTERNO))
CURSO_ESPECIALIZACION_TIPO = (('CURSO', 'Curso'), ('DIPLOMADO', 'Diplomado'), ('CERTIFICACION', 'Certificación'), ('OTRO', 'Otro'))
CURSO_ESPECIALIZACION_MODALIDAD = (('PRESENCIAL', 'Presencial'), ('EN_LINEA', 'En línea'), ('MIXTO', 'Mixto'), ('OTRO', 'Otro'))
CARGO__TIPO_CARGO = (('ACADEMICO', 'Académico'), ('ADMINISTRATIVO', 'Administrativo'))
EVENTO__AMBITO = (('INSTITUCIONAL', 'Institucional'), ('REGIONAL', 'Regional'), ('NACIONAL', 'Nacional'), ('INTERNACIONAL', 'Internacional'), ('OTRO', 'Otro'))
EVENTO__RESPONSABILIDAD = (('COORDINADOR', 'Coordinador general'), ('COMITE', 'Comité organizador'), ('AYUDANTE', 'Ayudante'), ('TECNICO', 'Apoyo técnico'), ('OTRO', 'Otro'))
RED_ACADEMICA__CLASIFICACION = (('LOCAL', 'Local'), ('REGIONAL', 'Regional'), ('NACIONAL', 'Nacional'), ('INTERNACIONAL', 'Internacional'), ('OTRO', 'Otro'))
ENTIDAD_NO_ACADEMICA__CLASIFICACION = (('FEDERAL', 'Gubernamental federal'), ('ESTATAL', 'Gubernamental estatal'), ('PRIVADO', 'Sector privado'), ('NO_LUCRATIVO', 'Sector privado no lucrativo'), ('EXTRANJERO', 'Extranjero'), ('OTRO', 'Otro'))
GRADO_ACADEMICO = (('LICENCIATURA', 'licenciatura'), ('MAESTRIA', 'Maestría'), ('DOCTORADO', 'Doctorado'))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'nucleo.apps.NucleoConfig',
'formacion_academica.apps.FormacionAcademicaConfig',
'experiencia_laboral.apps.ExperienciaLaboralConfig',
'investigacion.apps.InvestigacionConfig',
'difusion_cientifica.apps.DifusionCientificaConfig',
'divulgacion_cientifica.apps.DivulgacionCientificaConfig',
'vinculacion.apps.VinculacionConfig',
'apoyo_institucional.apps.ApoyoInstitucionalConfig',
'movilidad_academica.apps.MovilidadAcademicaConfig',
'docencia.apps.DocenciaConfig',
'formacion_recursos_humanos.apps.FormacionRecursosHumanosConfig',
'desarrollo_tecnologico.apps.DesarrolloTecnologicoConfig',
'rest_framework',
'sekizai',
'treebeard',
'filer',
'easy_thumbnails',
]
AUTH_USER_MODEL = 'nucleo.User'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SIA.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SIA.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'sia',
'PASSWORD': '',
'PORT': '3306',
'USER': 'root'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
#MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
#STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/' | {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], 
"/distinciones/models.py": ["/nucleo/models.py"]} |
61,060 | CIGAUNAM/SIA | refs/heads/master | /desarrollo_tecnologico/models.py | from django.db import models
#from django.contrib.auth.models import User
from autoslug import AutoSlugField
from nucleo.models import User, Tag, Ubicacion, Region, Dependencia, ProgramaFinanciamiento, ImpactoSocial, Proyecto, Indice
# Create your models here.
class TipoDesarrollo(models.Model):
tipo_desarrollo = models.CharField(max_length=255, unique=True)
descripcion = models.TextField()
slug = AutoSlugField(populate_from='tipo_desarrollo', unique=True)
def __str__(self):
return self.tipo_desarrollo
class Meta:
ordering = ['tipo_desarrollo']
verbose_name = 'Tipo de desarrollo'
verbose_name_plural = 'Tipos de desarrollo'
class Licencia(models.Model):
licencia = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='licencia', unique=True)
descripcion = models.TextField()
url = models.URLField()
def __str__(self):
return self.licencia
class Meta:
ordering = ['licencia']
"""
class TipoParticipacionProyecto(models.Model):
tipo = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='tipo', unique=True)
descripcion = models.TextField()
def __str__(self):
return self.tipo
class Meta:
verbose_name = 'Tipo de participación en proyecto'
verbose_name_plural = 'Tipos de participación en proyectos'
class StatusProyecto(models.Model):
status = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='status', unique=True)
descripcion = models.TextField()
def __str__(self):
return self.status
class Meta:
verbose_name = 'Status de proyecto'
verbose_name_plural = 'Status de proyectos'
class ClasificacionProyecto(models.Model):
clasificacion = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='clasificacion', unique=True)
descripcion = models.TextField()
def __str__(self):
return self.clasificacion
class Meta:
verbose_name = 'Clasificación de proyecto'
verbose_name_plural = 'Clasificación de proyectos'
class OrganizacionProyecto(models.Model):
organizacion = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='organizacion', unique=True)
descripcion = models.TextField()
def __str__(self):
return self.organizacion
class Meta:
verbose_name = 'Organización de proyecto'
verbose_name_plural = 'Organizaciones de proyectos'
class ModalidadProyecto(models.Model):
modalidad = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(populate_from='modalidad', unique=True)
descripcion = models.TextField()
def __str__(self):
return self.modalidad
class Meta:
verbose_name = 'Organización de proyecto'
verbose_name_plural = 'Organizaciones de proyectos'
"""
class DesarrolloTecnologico(models.Model):
nombre_desarrollo_tecnologico = models.CharField(max_length=255, unique=True)
tipo_desarrollo_tecnologico = models.ForeignKey(TipoDesarrollo)
proyectos = models.ManyToManyField(Proyecto, related_name='desarrollo_tecnologico_proyectos')
descripcion = models.TextField()
version = models.CharField(max_length=100)
patente = models.CharField(max_length=255, blank=True)
licencia = models.ForeignKey(Licencia)
url = models.URLField(blank=True)
autores = models.ManyToManyField(User, related_name='desarrollo_tecnologico_autores')
agradecimientos = models.ManyToManyField(User, related_name='desarrollo_tecnologico_agradecimientos')
tags = models.ManyToManyField(Tag, related_name='desarrollo_tecnologico_tags')
fecha = models.DateField()
slug = AutoSlugField(populate_from=nombre_desarrollo_tecnologico, unique=True)
def __str__(self):
return self.nombre_desarrollo_tecnologico
class Meta:
ordering = ['nombre_desarrollo_tecnologico']
get_latest_by = ['fecha', 'nombre_desarrollo_tecnologico']
verbose_name_plural = 'Desarrollos Tecnológicos'
| {"/apoyo_institucional/models.py": ["/nucleo/models.py"], "/experiencia_laboral/serializers.py": ["/experiencia_laboral/models.py"], "/vinculacion/admin.py": ["/vinculacion/models.py"], "/nucleo/serializers.py": ["/nucleo/models.py", "/formacion_academica/models.py"], "/formacion_academica/serializers.py": ["/formacion_academica/models.py"], "/formacion_recursos_humanos/admin.py": ["/formacion_recursos_humanos/models.py"], "/movilidad_academica/models.py": ["/nucleo/models.py", "/vinculacion/models.py"], "/difusion_cientifica/models.py": ["/nucleo/models.py"], "/experiencia_laboral/views.py": ["/experiencia_laboral/serializers.py"], "/nucleo/views.py": ["/nucleo/models.py", "/nucleo/serializers.py"], "/desarrollo_tecnologico/models.py": ["/nucleo/models.py"], "/formacion_academica/admin.py": ["/formacion_academica/models.py"], "/investigacion/admin.py": ["/investigacion/models.py"], "/difusion_cientifica/admin.py": ["/difusion_cientifica/models.py"], "/investigacion/models.py": ["/nucleo/models.py"], "/formacion_academica/models.py": ["/nucleo/models.py"], "/movilidad_academica/admin.py": ["/movilidad_academica/models.py"], "/geom/envolvente.py": ["/geom/funciones.py"], "/nucleo/admin.py": ["/nucleo/models.py"], "/vinculacion/models.py": ["/nucleo/models.py", "/investigacion/models.py"], "/divulgacion_cientifica/admin.py": ["/divulgacion_cientifica/models.py"], "/experiencia_laboral/models.py": ["/nucleo/models.py"], "/docencia/models.py": ["/nucleo/models.py", "/vinculacion/models.py", "/formacion_academica/models.py"], "/experiencia_laboral/admin.py": ["/experiencia_laboral/models.py"], "/desarrollo_tecnologico/admin.py": ["/desarrollo_tecnologico/models.py"], "/divulgacion_cientifica/models.py": ["/nucleo/models.py"], "/formacion_academica/views.py": ["/formacion_academica/serializers.py"], "/apoyo_institucional/admin.py": ["/apoyo_institucional/models.py"], "/formacion_recursos_humanos/models.py": ["/nucleo/models.py"], "/distinciones/models.py": 
["/nucleo/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.