code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# def functions are not used purposefully. Code will be condensed once it is approved. The code is verbose considering jit, but it is not critical.
# pip3 is assumed to be installed. Replace pip3 with pip in if pip is used instead.
# First few lines of code will install and set up HD-BET from https://github.com/MIC-DKFZ/HD-BET and DeepBrainSeg from https://github.com/koriavinash1/DeepBrainSeg. HD-BET is the most up-to-date brain segmentation algorithm (6/16/21). DeepBrainSeg has a pretrained model for tumor segmentation that can also run on a Macbook Pro. Delete or comment out lines 21-26 once HD-BET and DeepBrainSeg are installed.
# Line 221 comment : FLIRT before brain extraction vs. FLIRT after brain extraction?
# Line 348 comment: Tumor segmentation has not been done yet due to poor processing speed in Macbook Pro. Confirming the location of saved files is necessary.
import os
import numpy as np
import nibabel as nib
from nipype.interfaces import fsl
from nipype.testing import example_data
if __name__ == "__main__":
    # Task 0 : Remind the requirements and install HD-BET.
    # (DeepBrainSeg install stays commented out until intranet access is confirmed.)
    print("\nAll MNI files must be in the same directory as the python script without any additional subfolders. T1w and MNI152 reference files must be included, whereas T2w file is recommended.\n\n")
    if not os.path.exists("HD-BET"):
        os.system("git clone https://github.com/MIC-DKFZ/HD-BET")
        # BUG FIX: every os.system() call spawns its own shell, so a separate
        # os.system("cd HD-BET") did NOT change the directory for the pip call.
        # Chain the commands in a single shell so the editable install runs
        # inside the cloned HD-BET directory.
        os.system("cd HD-BET && pip3 install -e .")
    #os.system("pip3 install DeepBrainSeg") / remove sharp if this works in hospital intranet
    #from DeepBrainSeg import deepSeg / remove sharp if this works in hospital intranet

    # Task 1 : Query. T1w and MNI152 files are mandatory, while T2w files are optional.
    T1w_name = input("\n(REQUIRED) Type in the name of the input T1w file. Make sure you include nii.gz format.\n\n")
    T2w_name = input("\n(OPTIONAL) Type in the name of the input T2w file. Make sure you include nii.gz format. Write down N/A if T2w file is not applicable or available.\n\n")
    MNI_name = input("\n(REQUIRED) Type in the name of the MNI152 reference file. Make sure you include nii.gz format.\n\n")
    print("\nInput complete.\n")
    os.rename(MNI_name, "MNI-template.nii.gz")
    os.rename(T1w_name, "input-t1w.nii.gz")
    if T2w_name == 'N/A':
        # No T2w supplied: write an all-zero placeholder volume with the same
        # shape as the MNI template so the later overlay step still works.
        MNIreplace = nib.load('MNI-template.nii.gz')
        MNIreplace_array = np.array(MNIreplace.dataobj)
        replace = np.zeros((MNIreplace_array.shape[0], MNIreplace_array.shape[1], MNIreplace_array.shape[2]))
        replace_nib = nib.Nifti1Image(replace, affine=np.eye(4))
        nib.save(replace_nib, "input-t2w.nii.gz")
    else:
        os.rename(T2w_name, "input-t2w.nii.gz")
    # Completion 1 notified.
    input("\nFile labels standardized for alignment. Press enter to continue.")

    # Task 2 : Alignment of T1w and T2w files to the MNI152 reference.
    flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt.inputs.in_file = 'input-t1w.nii.gz'
    flt.inputs.reference = 'MNI-template.nii.gz'
    flt.inputs.output_type = "NIFTI_GZ"
    flt.cmdline  # builds the FLIRT command string; kept from original (no side effect)
    flt.run()
    os.remove("input-t1w_flirt.mat")
    flt2 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt2.inputs.in_file = 'input-t2w.nii.gz'
    flt2.inputs.reference = 'MNI-template.nii.gz'
    flt2.inputs.output_type = "NIFTI_GZ"
    flt2.cmdline
    flt2.run()
    os.remove("input-t2w_flirt.mat")
    # Register the MNI-aligned T2w onto the MNI-aligned T1w.
    flt3 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt3.inputs.in_file = 'input-t2w_flirt.nii.gz'
    flt3.inputs.reference = 'input-t1w_flirt.nii.gz'
    flt3.inputs.output_type = "NIFTI_GZ"
    flt3.cmdline
    flt3.run()
    os.remove("input-t2w_flirt_flirt.mat")
    os.rename("input-t2w_flirt_flirt.nii.gz", "input-t2w_flirt_t1w.nii.gz")
    os.remove("input-t2w_flirt.nii.gz")
    # Completion 2 notified.
    input("Files registered for overlay. Press enter to continue.")

    # Task 3 : Overlay of the MNI-aligned T1w and T2w volumes.
    processedt1w = nib.load('input-t1w_flirt.nii.gz')
    processedt2w = nib.load('input-t2w_flirt_t1w.nii.gz')
    processedt1w_array = np.array(processedt1w.dataobj)
    processedt2w_array = np.array(processedt2w.dataobj)
    if processedt1w_array.shape == processedt2w_array.shape:
        input("\nOverlay possible. Press enter to continue.\n")
        t1w_t2w_array = np.add(processedt1w_array, processedt2w_array)
        t1w_t2w_overlay = nib.Nifti1Image(t1w_t2w_array, affine=np.eye(4))
        nib.save(t1w_t2w_overlay, "t1w_t2w_overlay_MNIenabled.nii.gz")
    else:
        print("Overlay not possible. Check dimensions.")
    # Completion 3 notified.
    input("Files ready for brain mask segmentation. Press enter to continue.")

    # Task 4 : Brain mask segmentation with HD-BET.
    os.system("hd-bet -i t1w_t2w_overlay_MNIenabled.nii.gz -device cpu -mode fast -tta 0")
    #remove "-device cpu -mode fast -tta 0" if GPU support is available.
    os.remove("t1w_t2w_overlay_MNIenabled_bet.nii.gz")
    os.rename("t1w_t2w_overlay_MNIenabled_bet_mask.nii.gz", "t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz")
    # Completion 4 notified. MNI-normalized BRAIN MASK EXTRACTION COMPLETE.
    input("MNI-normalized BRAIN MASK EXTRACTION COMPLETE. Saved as t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz. Files ready for scalp segmentation. Press enter to continue.")

    # Task 5 : Manual scalp segmentation. Parameters may be changed as necessary.
    brain_mask = nib.load('t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz')
    t1w_t2w = nib.load('t1w_t2w_overlay_MNIenabled.nii.gz')
    brain_mask_A = np.array(brain_mask.dataobj)
    t1w_t2w_A = np.array(t1w_t2w.dataobj)
    # 5.1 : Check dimensional congruency between brain mask and overlaid file.
    if brain_mask_A.shape == t1w_t2w_A.shape:
        # 5.2 : Remove brain voxels from the overlaid file.
        for x in range(0, brain_mask_A.shape[0]-1):
            for y in range(0, brain_mask_A.shape[1]-1):
                for z in range(0, brain_mask_A.shape[2]-1):
                    if brain_mask_A[x][y][z] > 0:
                        t1w_t2w_A[x][y][z] = 0
    else:
        print("Comparison not possible due to difference in dimensions.")
    # 5.3 : Isolate the scalp with an enclosed coordinate volume.
    for x in range(0, t1w_t2w_A.shape[0]-1):
        for y in range(0, t1w_t2w_A.shape[1]-1):
            for z in range(0, t1w_t2w_A.shape[2]-1):
                if (x < ((t1w_t2w_A.shape[0]-1)*0.03) or x > ((t1w_t2w_A.shape[0]-1)*0.96) or y < ((t1w_t2w_A.shape[1]-1)*0.01) or y > ((t1w_t2w_A.shape[1]-1)*0.99) or z < ((-(t1w_t2w_A.shape[2]-1)*y*0.000275)+85)):
                    t1w_t2w_A[x][y][z] = 0
    # 5.4 : Find the threshold intensity for scalp segmentation.
    def paraMAX():
        # Maximum intensity over the central ~90% of the volume.
        M = 0
        for x in range(int(0.05*(t1w_t2w_A.shape[0]-1)), int(0.95*(t1w_t2w_A.shape[0]-1))):
            for y in range(int(0.05*(t1w_t2w_A.shape[1]-1)), int(0.95*(t1w_t2w_A.shape[1]-1))):
                for z in range(int(0.05*(t1w_t2w_A.shape[2]-1)), int(0.95*(t1w_t2w_A.shape[2]-1))):
                    if M < t1w_t2w_A[x][y][z]:
                        M = t1w_t2w_A[x][y][z]
        return M
    MAX = paraMAX()
    MAX_thres = MAX*0.225
    # Multiplication constant was determined by optimizing scalp segmentation on the most
    # updated pre-/post-operative T1w files (Patient 3) in the OPENNEURO database:
    # https://openneuro.org/datasets/ds001226/versions/1.0.0 and
    # https://openneuro.org/datasets/ds002080/versions/1.0.1. Patient 3 has right-sided
    # (lateral view), parietal meningioma I. Full patient information available at
    # Supplementary Table 1 of https://onlinelibrary.wiley.com/doi/abs/10.1002/pon.5195.
    # For scalp segmentation, excluding T2w input may reduce scalp intensity values.
    # 5.5 : Segment the scalp using the threshold intensity.
    for x in range(0, t1w_t2w_A.shape[0]-1):
        for y in range(0, t1w_t2w_A.shape[1]-1):
            for z in range(0, t1w_t2w_A.shape[2]-1):
                if t1w_t2w_A[x][y][z] < MAX_thres:
                    t1w_t2w_A[x][y][z] = 0
    # Task 5.6 : Remove non-scalp voxels by 3x3x3 neighborhood inspection.
    # NOTE(review): the filter mutates t1w_t2w_A in place while scanning it,
    # so earlier zeroed voxels influence later neighborhoods (kept from original).
    ns_thres = MAX*0.34
    for x in range(1, t1w_t2w_A.shape[0]-1):
        for y in range(1, t1w_t2w_A.shape[1]-1):
            for z in range(1, t1w_t2w_A.shape[2]-1):
                M = 0
                for k in range(-1, 2):
                    for m in range(-1, 2):
                        for n in range(-1, 2):
                            if t1w_t2w_A[x+k][y+m][z+n] >= M:
                                M = t1w_t2w_A[x+k][y+m][z+n]
                if M < ns_thres:
                    t1w_t2w_A[x][y][z] = 0
    # Task 5.7 : Extraction.
    scalp_array = nib.Nifti1Image(t1w_t2w_A, affine=np.eye(4))
    nib.save(scalp_array, "t1w_t2w_overlay_MNIenabled_SCALP.nii.gz")
    # Completion 5 notified. MNI-normalized SCALP EXTRACTION COMPLETE.
    input("MNI-normalized SCALP EXTRACTION COMPLETE. Saved as t1w_t2w_overlay_MNIenabled_SCALP.nii.gz. Files ready for tumor segmentation. For this segmentation, {T1w, T2w, T1ce, FLAIR} files are required. Press enter to continue.")

    # Task 6 : Brain tumor segmentation with DeepBrainSeg. Pretrained weights will be used.
    # Comment : nnUNet is ideal for brain tumor segmentation, but a simpler package is used
    # instead so as to be implementable on a Macbook Pro. Code may be changed if the hospital
    # intranet makes nnUNet installation possible.
    # Task 6.1 : Brain extraction from MNI-aligned T1w, T2w, T1ce, and FLAIR files.
    # Comment : FLIRT before brain extraction vs. FLIRT after brain extraction?
    # T1w
    os.system("hd-bet -i input-t1w_flirt.nii.gz -device cpu -mode fast -tta 0")
    #remove "-device cpu -mode fast -tta 0" if GPU support is available.
    os.rename("input-t1w_flirt_bet.nii.gz", "input-t1w_flirt_BRAIN.nii.gz")
    os.remove("input-t1w_flirt_bet_mask.nii.gz")
    T1w_pretumor_name = "input-t1w_flirt_BRAIN.nii.gz"
    # T2w
    if T2w_name == 'N/A':
        # A real T2w is mandatory here: the placeholder written in Task 1 is all zeros.
        T2w_name = input("\n(REQUIRED) Type in the name of the input T2w file. Make sure you include nii.gz format. You cannot write N/A for this one. If you wrote N/A previously, then avoid writing input-t2w.nii.gz for this one.\n\n")
        flt1 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
        flt1.inputs.in_file = T2w_name
        flt1.inputs.reference = 'MNI-template.nii.gz'
        flt1.inputs.output_type = "NIFTI_GZ"
        flt1.cmdline
        flt1.run()
        os.remove(str(T2w_name[:-7]) + "_flirt.mat")
        T2w_name = str(T2w_name[:-7]) + "_flirt.nii.gz"
        flt2 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
        flt2.inputs.in_file = T2w_name
        flt2.inputs.reference = 'input-t1w_flirt.nii.gz'
        flt2.inputs.output_type = "NIFTI_GZ"
        flt2.cmdline
        flt2.run()
        os.remove(str(T2w_name[:-7]) + "_flirt.mat")
        os.rename(str(T2w_name[:-7]) + "_flirt.nii.gz", str(T2w_name[:-7]) + "_t1w.nii.gz")
        T2w_name = str(T2w_name[:-7]) + "_t1w.nii.gz"
    else:
        T2w_name = "input-t2w_flirt_t1w.nii.gz"
    os.system("hd-bet -i " + str(T2w_name) + " -device cpu -mode fast -tta 0")
    #remove "-device cpu -mode fast -tta 0" if GPU support is available.
    os.rename(str(T2w_name[:-7]) + "_bet.nii.gz", str(T2w_name[:-7]) + "_BRAIN.nii.gz")
    os.remove(str(T2w_name[:-7]) + "_bet_mask.nii.gz")
    T2w_pretumor_name = str(T2w_name[:-7]) + "_BRAIN.nii.gz"
    #T1ce
    T1ce_name = input("\n(REQUIRED) Type in the name of the input T1ce file. Make sure you include nii.gz format.\n\n")
    flt1 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt1.inputs.in_file = T1ce_name
    flt1.inputs.reference = 'MNI-template.nii.gz'
    flt1.inputs.output_type = "NIFTI_GZ"
    flt1.cmdline
    flt1.run()
    os.remove(str(T1ce_name[:-7]) + "_flirt.mat")
    T1ce_name = str(T1ce_name[:-7]) + "_flirt.nii.gz"
    flt2 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt2.inputs.in_file = T1ce_name
    flt2.inputs.reference = 'input-t1w_flirt.nii.gz'
    flt2.inputs.output_type = "NIFTI_GZ"
    flt2.cmdline
    flt2.run()
    os.remove(str(T1ce_name[:-7]) + "_flirt.mat")
    os.rename(str(T1ce_name[:-7]) + "_flirt.nii.gz", str(T1ce_name[:-7]) + "_t1w.nii.gz")
    T1ce_name = str(T1ce_name[:-7]) + "_t1w.nii.gz"
    os.system("hd-bet -i " + str(T1ce_name) + " -device cpu -mode fast -tta 0")
    #remove "-device cpu -mode fast -tta 0" if GPU support is available.
    os.rename(str(T1ce_name[:-7]) + "_bet.nii.gz", str(T1ce_name[:-7]) + "_BRAIN.nii.gz")
    os.remove(str(T1ce_name[:-7]) + "_bet_mask.nii.gz")
    T1ce_pretumor_name = str(T1ce_name[:-7]) + "_BRAIN.nii.gz"
    #FLAIR
    FLAIR_name = input("\n(REQUIRED) Type in the name of the input FLAIR file. Make sure you include nii.gz format.\n\n")
    flt1 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt1.inputs.in_file = FLAIR_name
    flt1.inputs.reference = 'MNI-template.nii.gz'
    flt1.inputs.output_type = "NIFTI_GZ"
    flt1.cmdline
    flt1.run()
    os.remove(str(FLAIR_name[:-7]) + "_flirt.mat")
    FLAIR_name = str(FLAIR_name[:-7]) + "_flirt.nii.gz"
    flt2 = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt2.inputs.in_file = FLAIR_name
    flt2.inputs.reference = 'input-t1w_flirt.nii.gz'
    flt2.inputs.output_type = "NIFTI_GZ"
    flt2.cmdline
    flt2.run()
    os.remove(str(FLAIR_name[:-7]) + "_flirt.mat")
    os.rename(str(FLAIR_name[:-7]) + "_flirt.nii.gz", str(FLAIR_name[:-7]) + "_t1w.nii.gz")
    FLAIR_name = str(FLAIR_name[:-7]) + "_t1w.nii.gz"
    os.system("hd-bet -i " + str(FLAIR_name) + " -device cpu -mode fast -tta 0")
    #remove "-device cpu -mode fast -tta 0" if GPU support is available.
    os.rename(str(FLAIR_name[:-7]) + "_bet.nii.gz", str(FLAIR_name[:-7]) + "_BRAIN.nii.gz")
    os.remove(str(FLAIR_name[:-7]) + "_bet_mask.nii.gz")
    FLAIR_pretumor_name = str(FLAIR_name[:-7]) + "_BRAIN.nii.gz"
    # Location Specification
    #t1_path = T1w_pretumor_name
    #t2_path = T2w_pretumor_name
    #t1ce_path = T1ce_pretumor_name
    #flair_path = FLAIR_pretumor_name
    #segmentor = deepSeg(quick=True) #put a sharp if this doesn't work in hospital intranet
    #segmentor.get_segmentation(t1_path, t2_path, t1ce_path, flair_path, save = True) / remove sharp if this works in hospital intranet
    #Tumor segmentation has not been done yet due to poor processing speed in Macbook Pro. Confirming the location of saved files is necessary.
    #Unlike HD-BET, nnUNet requires GPU, so the code cannot be done in regular laptop. Link to nnUNet: https://github.com/MIC-DKFZ/nnUNet. Installation and setup cannot be done in Macbook Pro as of right now.
    #Other examples: https://github.com/Mr-TalhaIlyas/Brain-Tumor-Segmentation, https://github.com/galprz/brain-tumor-segmentation.
| [
"os.path.exists",
"numpy.eye",
"nibabel.save",
"nipype.interfaces.fsl.FLIRT",
"nibabel.load",
"numpy.add",
"os.rename",
"numpy.array",
"numpy.zeros",
"os.system",
"os.remove"
] | [((2267, 2309), 'os.rename', 'os.rename', (['MNI_name', '"""MNI-template.nii.gz"""'], {}), "(MNI_name, 'MNI-template.nii.gz')\n", (2276, 2309), False, 'import os\n'), ((2314, 2353), 'os.rename', 'os.rename', (['T1w_name', '"""input-t1w.nii.gz"""'], {}), "(T1w_name, 'input-t1w.nii.gz')\n", (2323, 2353), False, 'import os\n'), ((3021, 3064), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (3030, 3064), False, 'from nipype.interfaces import fsl\n'), ((3239, 3271), 'os.remove', 'os.remove', (['"""input-t1w_flirt.mat"""'], {}), "('input-t1w_flirt.mat')\n", (3248, 3271), False, 'import os\n'), ((3284, 3327), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (3293, 3327), False, 'from nipype.interfaces import fsl\n'), ((3507, 3539), 'os.remove', 'os.remove', (['"""input-t2w_flirt.mat"""'], {}), "('input-t2w_flirt.mat')\n", (3516, 3539), False, 'import os\n'), ((3552, 3595), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (3561, 3595), False, 'from nipype.interfaces import fsl\n'), ((3784, 3822), 'os.remove', 'os.remove', (['"""input-t2w_flirt_flirt.mat"""'], {}), "('input-t2w_flirt_flirt.mat')\n", (3793, 3822), False, 'import os\n'), ((3827, 3898), 'os.rename', 'os.rename', (['"""input-t2w_flirt_flirt.nii.gz"""', '"""input-t2w_flirt_t1w.nii.gz"""'], {}), "('input-t2w_flirt_flirt.nii.gz', 'input-t2w_flirt_t1w.nii.gz')\n", (3836, 3898), False, 'import os\n'), ((3904, 3939), 'os.remove', 'os.remove', (['"""input-t2w_flirt.nii.gz"""'], {}), "('input-t2w_flirt.nii.gz')\n", (3913, 3939), False, 'import os\n'), ((4126, 4160), 'nibabel.load', 'nib.load', (['"""input-t1w_flirt.nii.gz"""'], {}), "('input-t1w_flirt.nii.gz')\n", (4134, 4160), True, 'import nibabel as nib\n'), ((4180, 4218), 
'nibabel.load', 'nib.load', (['"""input-t2w_flirt_t1w.nii.gz"""'], {}), "('input-t2w_flirt_t1w.nii.gz')\n", (4188, 4218), True, 'import nibabel as nib\n'), ((4245, 4275), 'numpy.array', 'np.array', (['processedt1w.dataobj'], {}), '(processedt1w.dataobj)\n', (4253, 4275), True, 'import numpy as np\n'), ((4301, 4331), 'numpy.array', 'np.array', (['processedt2w.dataobj'], {}), '(processedt2w.dataobj)\n', (4309, 4331), True, 'import numpy as np\n'), ((4956, 5052), 'os.system', 'os.system', (['"""hd-bet -i t1w_t2w_overlay_MNIenabled.nii.gz -device cpu -mode fast -tta 0"""'], {}), "(\n 'hd-bet -i t1w_t2w_overlay_MNIenabled.nii.gz -device cpu -mode fast -tta 0'\n )\n", (4965, 5052), False, 'import os\n'), ((5122, 5172), 'os.remove', 'os.remove', (['"""t1w_t2w_overlay_MNIenabled_bet.nii.gz"""'], {}), "('t1w_t2w_overlay_MNIenabled_bet.nii.gz')\n", (5131, 5172), False, 'import os\n'), ((5177, 5283), 'os.rename', 'os.rename', (['"""t1w_t2w_overlay_MNIenabled_bet_mask.nii.gz"""', '"""t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz"""'], {}), "('t1w_t2w_overlay_MNIenabled_bet_mask.nii.gz',\n 't1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz')\n", (5186, 5283), False, 'import os\n'), ((5652, 5707), 'nibabel.load', 'nib.load', (['"""t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz"""'], {}), "('t1w_t2w_overlay_MNIenabled_BRAINMASK.nii.gz')\n", (5660, 5707), True, 'import nibabel as nib\n'), ((5722, 5767), 'nibabel.load', 'nib.load', (['"""t1w_t2w_overlay_MNIenabled.nii.gz"""'], {}), "('t1w_t2w_overlay_MNIenabled.nii.gz')\n", (5730, 5767), True, 'import nibabel as nib\n'), ((5788, 5816), 'numpy.array', 'np.array', (['brain_mask.dataobj'], {}), '(brain_mask.dataobj)\n', (5796, 5816), True, 'import numpy as np\n'), ((5833, 5858), 'numpy.array', 'np.array', (['t1w_t2w.dataobj'], {}), '(t1w_t2w.dataobj)\n', (5841, 5858), True, 'import numpy as np\n'), ((9114, 9178), 'nibabel.save', 'nib.save', (['scalp_array', '"""t1w_t2w_overlay_MNIenabled_SCALP.nii.gz"""'], {}), "(scalp_array, 
't1w_t2w_overlay_MNIenabled_SCALP.nii.gz')\n", (9122, 9178), True, 'import nibabel as nib\n'), ((9995, 10070), 'os.system', 'os.system', (['"""hd-bet -i input-t1w_flirt.nii.gz -device cpu -mode fast -tta 0"""'], {}), "('hd-bet -i input-t1w_flirt.nii.gz -device cpu -mode fast -tta 0')\n", (10004, 10070), False, 'import os\n'), ((10148, 10219), 'os.rename', 'os.rename', (['"""input-t1w_flirt_bet.nii.gz"""', '"""input-t1w_flirt_BRAIN.nii.gz"""'], {}), "('input-t1w_flirt_bet.nii.gz', 'input-t1w_flirt_BRAIN.nii.gz')\n", (10157, 10219), False, 'import os\n'), ((10224, 10268), 'os.remove', 'os.remove', (['"""input-t1w_flirt_bet_mask.nii.gz"""'], {}), "('input-t1w_flirt_bet_mask.nii.gz')\n", (10233, 10268), False, 'import os\n'), ((12013, 12056), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (12022, 12056), False, 'from nipype.interfaces import fsl\n'), ((12340, 12383), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (12349, 12383), False, 'from nipype.interfaces import fsl\n'), ((13262, 13305), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (13271, 13305), False, 'from nipype.interfaces import fsl\n'), ((13593, 13636), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (13602, 13636), False, 'from nipype.interfaces import fsl\n'), ((1336, 1360), 'os.path.exists', 'os.path.exists', (['"""HD-BET"""'], {}), "('HD-BET')\n", (1350, 1360), False, 'import os\n'), ((1371, 1428), 'os.system', 'os.system', (['"""git clone https://github.com/MIC-DKFZ/HD-BET"""'], {}), "('git clone https://github.com/MIC-DKFZ/HD-BET')\n", (1380, 1428), False, 'import os\n'), ((1437, 1459), 'os.system', 'os.system', (['"""cd 
HD-BET"""'], {}), "('cd HD-BET')\n", (1446, 1459), False, 'import os\n'), ((1468, 1498), 'os.system', 'os.system', (['"""pip3 install -e ."""'], {}), "('pip3 install -e .')\n", (1477, 1498), False, 'import os\n'), ((1507, 1525), 'os.system', 'os.system', (['"""cd .."""'], {}), "('cd ..')\n", (1516, 1525), False, 'import os\n'), ((2403, 2434), 'nibabel.load', 'nib.load', (['"""MNI-template.nii.gz"""'], {}), "('MNI-template.nii.gz')\n", (2411, 2434), True, 'import nibabel as nib\n'), ((2462, 2490), 'numpy.array', 'np.array', (['MNIreplace.dataobj'], {}), '(MNIreplace.dataobj)\n', (2470, 2490), True, 'import numpy as np\n'), ((2509, 2604), 'numpy.zeros', 'np.zeros', (['(MNIreplace_array.shape[0], MNIreplace_array.shape[1], MNIreplace_array.\n shape[2])'], {}), '((MNIreplace_array.shape[0], MNIreplace_array.shape[1],\n MNIreplace_array.shape[2]))\n', (2517, 2604), True, 'import numpy as np\n'), ((2674, 2715), 'nibabel.save', 'nib.save', (['replace_nib', '"""input-t2w.nii.gz"""'], {}), "(replace_nib, 'input-t2w.nii.gz')\n", (2682, 2715), True, 'import nibabel as nib\n'), ((2734, 2773), 'os.rename', 'os.rename', (['T2w_name', '"""input-t2w.nii.gz"""'], {}), "(T2w_name, 'input-t2w.nii.gz')\n", (2743, 2773), False, 'import os\n'), ((4501, 4547), 'numpy.add', 'np.add', (['processedt1w_array', 'processedt2w_array'], {}), '(processedt1w_array, processedt2w_array)\n', (4507, 4547), True, 'import numpy as np\n'), ((4631, 4693), 'nibabel.save', 'nib.save', (['t1w_t2w_overlay', '"""t1w_t2w_overlay_MNIenabled.nii.gz"""'], {}), "(t1w_t2w_overlay, 't1w_t2w_overlay_MNIenabled.nii.gz')\n", (4639, 4693), True, 'import nibabel as nib\n'), ((10624, 10667), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (10633, 10667), False, 'from nipype.interfaces import fsl\n'), ((11003, 11046), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, 
cost_func='mutualinfo')\n", (11012, 11046), False, 'from nipype.interfaces import fsl\n'), ((9099, 9108), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9105, 9108), True, 'import numpy as np\n'), ((2655, 2664), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2661, 2664), True, 'import numpy as np\n'), ((4612, 4621), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4618, 4621), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
from typedecorator import params, returns
from roadnet import RoadNetwork
from utils import greate_circle_distance
from graph import seq2graph
__all__ = ['Trajectory']
class Trajectory(object):
    """An object to represent the daily mobility of individuals.

    The dwelling time at each location is controlled by
    :param dwelling_split_ratio: (default 0.8). With two timestamps at
    successive locations, :param dwelling_split_ratio: * elapsed_duration
    contributes to the first location and the rest to the second.
    """

    def __init__(self, user_id, dtstart, timestamps, locations, coordinates, dwelling_split_ratio=0.8):
        assert isinstance(dtstart, datetime)
        assert len(timestamps) == len(locations) == len(coordinates)
        self.id = user_id
        self.dtstart = dtstart
        self.dwelling = []
        self.accdwelling = {}  # accumulative dwelling time in secs, keyed by coordinate
        # Remove consecutive duplicate records.
        nodup = []
        last_location = None
        for i in range(len(locations)):
            if locations[i] != last_location:
                nodup.append(i)
                last_location = locations[i]
        self.timestamps = [timestamps[i] for i in nodup]    # timestamp sequence
        self.locations = [locations[i] for i in nodup]      # location sequence
        self.coordinates = [coordinates[i] for i in nodup]  # coordinate sequence
        self.circles = self._mine_circles(self.locations)
        # Calculate raw dwelling time over the RAW (possibly duplicated) records.
        last_timestamp = None
        last_location = None
        for i in range(len(locations)):
            if last_timestamp is None:
                last_timestamp = timestamps[i]
                self.dwelling.append(0)
            else:
                # Remove transmission slot
                delta = timestamps[i] - last_timestamp
                # Adjust dwelling time: split the elapsed duration between
                # the previous location and the current one.
                self.dwelling[-1] += int(delta * dwelling_split_ratio)
                split_left = int(delta * (1 - dwelling_split_ratio))
                if locations[i] != last_location:
                    self.dwelling.append(split_left)
                else:
                    self.dwelling[-1] += split_left
            # NOTE(review): trackers are updated every iteration so that a
            # repeated location merges into the same dwelling entry — keeps
            # len(self.dwelling) aligned with len(self.coordinates).
            last_location = locations[i]
            last_timestamp = timestamps[i]
        # Accumulative dwelling times per coordinate.
        for i in range(len(self.coordinates)):
            coord = self.coordinates[i]
            if coord not in self.accdwelling:
                self.accdwelling[coord] = 0
            self.accdwelling[coord] += self.dwelling[i]
        # Transition frequency between successive distinct coordinates.
        self.freq = {}
        last_coord = None
        for coord in coordinates:
            if last_coord is None or coord == last_coord:
                last_coord = coord
                continue
            if (last_coord, coord) not in self.freq:
                self.freq[(last_coord, coord)] = 1
            else:
                self.freq[(last_coord, coord)] += 1
            last_coord = coord

    def __str__(self):
        return 'User %d: %s %d %s' % (
            self.id,
            self.dtstart.strftime('%Y%m%d'),
            len(self.circles),
            self.locations)

    def __len__(self):
        return len(self.locations)

    def is_strict_valid(self):
        # TODO: validity check not implemented yet.
        pass

    def which_day(self):
        """Return the month-day string (e.g. '0131') of this trajectory."""
        return self.dtstart.strftime("%m%d")

    @params(self=object, locs=[object])
    def _mine_circles(self, locs):
        """Extract movement circles (i, j) with locs[i] == locs[j] from a
        location sequence, recursing into the interior of each circle.
        """
        i = 0
        n = len(locs)
        circles = []
        while i < n:
            found = False
            for j in range(i+1, n):
                if locs[j] == locs[i]:
                    found = True
                    circles.append((i, j))
                    deeper = self._mine_circles(locs[i+1:j])
                    # Shift nested circle indices back into the outer frame.
                    deeper = [(t[0]+i+1, t[1]+i+1) for t in deeper]
                    circles.extend(deeper)
                    break
            i = j if found else (i + 1)
        return circles

    @params(self=object, road_network=RoadNetwork)
    def get_distances_from(self, road_network):
        """Get geographical distances for each movement."""
        N = len(self.coordinates)
        distances = []
        # BUG FIX: original used self.coordinates[1, N], which raises
        # TypeError on a list; the slice [1:N] pairs each point with its successor.
        for p1, p2 in zip(self.coordinates[0:N-1], self.coordinates[1:N]):
            distances.append(road_network.shortest_path_distance(p1, p2))
        return distances

    @params(self=object, road_network=RoadNetwork, directed=bool,
            edge_weighted_by_distance=bool, node_weighted_by_dwelling=bool)
    def convert2graph(self, road_network=None, directed=True,
                      edge_weighted_by_distance=True,
                      node_weighted_by_dwelling=True):
        """Return a graph representation of human mobility, one which
        is weighted by traveling distance for edge and dwelling time for node.

        **PerfStat** (PersonNum,Calls,AccTime): 100,1519,54.191s
        """
        graph = seq2graph(self.coordinates, directed)
        if edge_weighted_by_distance:
            for edge in graph.edges_iter():
                if road_network:
                    dist = road_network.shortest_path_distance(edge[0], edge[1])
                else:
                    # Fall back to great-circle distance when no road network is given.
                    dist = greate_circle_distance(edge[0][0], edge[0][1], edge[1][0], edge[1][1])
                graph.edge[edge[0]][edge[1]]['weight'] = dist
                if edge in self.freq:
                    graph.edge[edge[0]][edge[1]]['frequency'] = self.freq[edge]
                else:
                    graph.edge[edge[0]][edge[1]]['frequency'] = 1
        if node_weighted_by_dwelling:
            for node in graph.nodes_iter():
                graph.node[node]['weight'] = self.accdwelling.get(node)
        return graph

    def radius_of_gyration(self):
        """R_g based on great-circle distances to the centroid coordinate."""
        clon = np.average([coord[0] for coord in self.coordinates])
        clat = np.average([coord[1] for coord in self.coordinates])
        return np.average([greate_circle_distance(clon, clat, coord[0], coord[1]) for coord in self.coordinates])

    def travel_dist(self):
        """Calculate the total travelling distance."""
        if len(self.coordinates) < 2:
            return 0
        total = 0
        for i in range(0, len(self.coordinates)-1):
            lon1, lat1 = self.coordinates[i]
            lon2, lat2 = self.coordinates[i+1]
            total += greate_circle_distance(lon1, lat1, lon2, lat2)
        return total

    def distinct_loc_num(self):
        """Number of distinct locations visited."""
        return len(set(self.locations))
| [
"graph.seq2graph",
"utils.greate_circle_distance",
"typedecorator.params",
"numpy.average"
] | [((3414, 3448), 'typedecorator.params', 'params', ([], {'self': 'object', 'locs': '[object]'}), '(self=object, locs=[object])\n', (3420, 3448), False, 'from typedecorator import params, returns\n'), ((4074, 4119), 'typedecorator.params', 'params', ([], {'self': 'object', 'road_network': 'RoadNetwork'}), '(self=object, road_network=RoadNetwork)\n', (4080, 4119), False, 'from typedecorator import params, returns\n'), ((4467, 4595), 'typedecorator.params', 'params', ([], {'self': 'object', 'road_network': 'RoadNetwork', 'directed': 'bool', 'edge_weighted_by_distance': 'bool', 'node_weighted_by_dwelling': 'bool'}), '(self=object, road_network=RoadNetwork, directed=bool,\n edge_weighted_by_distance=bool, node_weighted_by_dwelling=bool)\n', (4473, 4595), False, 'from typedecorator import params, returns\n'), ((5019, 5056), 'graph.seq2graph', 'seq2graph', (['self.coordinates', 'directed'], {}), '(self.coordinates, directed)\n', (5028, 5056), False, 'from graph import seq2graph\n'), ((5922, 5974), 'numpy.average', 'np.average', (['[coord[0] for coord in self.coordinates]'], {}), '([coord[0] for coord in self.coordinates])\n', (5932, 5974), True, 'import numpy as np\n'), ((5990, 6042), 'numpy.average', 'np.average', (['[coord[1] for coord in self.coordinates]'], {}), '([coord[1] for coord in self.coordinates])\n', (6000, 6042), True, 'import numpy as np\n'), ((6495, 6541), 'utils.greate_circle_distance', 'greate_circle_distance', (['lon1', 'lat1', 'lon2', 'lat2'], {}), '(lon1, lat1, lon2, lat2)\n', (6517, 6541), False, 'from utils import greate_circle_distance\n'), ((6071, 6125), 'utils.greate_circle_distance', 'greate_circle_distance', (['clon', 'clat', 'coord[0]', 'coord[1]'], {}), '(clon, clat, coord[0], coord[1])\n', (6093, 6125), False, 'from utils import greate_circle_distance\n'), ((5303, 5373), 'utils.greate_circle_distance', 'greate_circle_distance', (['edge[0][0]', 'edge[0][1]', 'edge[1][0]', 'edge[1][1]'], {}), '(edge[0][0], edge[0][1], edge[1][0], 
edge[1][1])\n', (5325, 5373), False, 'from utils import greate_circle_distance\n')] |
import numpy as np
import cv2
import numba
def prim_mst(graph, W, N):
    """Prim's minimum spanning tree over an N-pixel image of width W.

    graph is indexed as graph[u, v] and is only read for 4-connected
    neighbor pairs (left/upper/right/bottom in the W-wide grid).
    Returns an int array `mst` where mst[u] is the parent of node u
    (mst[0] == 0 for the root at pixel (0, 0)).
    """
    visited = np.zeros(N, dtype=bool)
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    mst = -np.ones(N, dtype=int)   # parent pointers, -1 = not yet attached
    cost = np.inf * np.ones(N)     # cheapest known attachment cost per node
    pi = np.zeros(N, dtype=int)     # candidate parent per node
    # Start from the pixel at (0, 0).
    visited[0] = True
    mst[0] = 0
    cost[1], pi[1] = graph[0, 1], 0  # cost to the right neighbor of (0, 0)
    cost[W], pi[W] = graph[0, W], 0  # cost to the bottom neighbor of (0, 0)
    while True:
        if np.sum(visited) == N:
            break
        # Select the node with minimal attachment cost.
        u = np.argmin(cost)
        visited[u] = True
        mst[u] = pi[u]
        cost[u] = np.inf
        # Relax the 4-connected neighbors of u.
        v = u - 1  # left
        if u % W > 0 and (not visited[v]) and graph[u, v] < cost[v]:
            cost[v], pi[v] = graph[u, v], u
        v = u - W  # upper
        if v >= 0 and (not visited[v]) and graph[u, v] < cost[v]:
            cost[v], pi[v] = graph[u, v], u
        v = u + 1  # right
        if v % W > 0 and (not visited[v]) and graph[u, v] < cost[v]:
            cost[v], pi[v] = graph[u, v], u
        v = u + W  # bottom
        if v < N and (not visited[v]) and graph[u, v] < cost[v]:
            cost[v], pi[v] = graph[u, v], u
    return mst
@numba.jit
def labeling(ind, W):
    """Label connected components of the MST parent array `ind`.

    Returns (num, labels): the number of components and, for each of the
    ind.size pixels, its component id. Connectivity is delegated to visit().
    """
    N = ind.size
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    labels = -np.ones(N, dtype=int)  # -1 = not yet labeled
    num = 0
    for i in range(N):
        if labels[i] < 0:
            visit(i, num, ind, labels, W)
            num += 1
    return num, labels
@numba.jit
def visit(i, num, ind, labels, W):
    """Recursively flood-fill component id `num` from pixel i.

    Two grid-adjacent pixels belong to the same component when either
    one's MST parent pointer (`ind`) refers to the other.
    """
    labels[i] = num
    nbr = i - 1  # left neighbor
    if i % W > 0 and labels[nbr] < 0 and (ind[i] == nbr or ind[nbr] == i):
        visit(nbr, num, ind, labels, W)
    nbr = i - W  # upper neighbor
    if i - W >= 0 and labels[nbr] < 0 and (ind[i] == nbr or ind[nbr] == i):
        visit(nbr, num, ind, labels, W)
    nbr = i + 1  # right neighbor
    if nbr % W > 0 and labels[nbr] < 0 and (ind[i] == nbr or ind[nbr] == i):
        visit(nbr, num, ind, labels, W)
    nbr = i + W  # bottom neighbor
    if nbr < ind.size and labels[nbr] < 0 and (ind[i] == nbr or ind[nbr] == i):
        visit(nbr, num, ind, labels, W)
class FitnessEvaluation(object):
    """Two-objective fitness for image segmentation candidates.

    img_arr is a height x width x 3 array (see dist(..., axis=2) and the
    reshape(-1, 3) in __call__). A candidate `ind` encodes, per pixel, the
    index of a connected neighbor; labeling() turns it into regions.
    __call__ returns (overall_deviation, -mean_edge_value); both are np.inf
    when the region-count/region-size constraints are violated.
    """

    def __init__(self, img_arr, min_region_num=1, max_region_num=50, min_region_size=100):
        super().__init__()
        self.img_arr = img_arr  # height x width (x 3 channels)
        self.N = img_arr.shape[0] * img_arr.shape[1]
        self.W = img_arr.shape[1]
        self.min_region_num = min_region_num
        self.max_region_num = max_region_num
        self.min_region_size = min_region_size
        # Precompute per-pixel color distances to each 4-connected neighbor,
        # used for the edge-value objective. Border entries stay 0.
        self.left_edge = np.zeros((self.img_arr.shape[0], self.img_arr.shape[1]))
        self.left_edge[:, 1:] = self.dist(self.img_arr[:, 1:], self.img_arr[:, :-1], axis=2)
        self.upper_edge = np.zeros((self.img_arr.shape[0], self.img_arr.shape[1]))
        self.upper_edge[1:, :] = self.dist(self.img_arr[1:, :], self.img_arr[:-1, :], axis=2)
        self.right_edge = np.zeros((self.img_arr.shape[0], self.img_arr.shape[1]))
        self.right_edge[:, :-1] = self.dist(self.img_arr[:, :-1], self.img_arr[:, 1:], axis=2)
        self.bottom_edge = np.zeros((self.img_arr.shape[0], self.img_arr.shape[1]))
        self.bottom_edge[:-1, :] = self.dist(self.img_arr[:-1, :], self.img_arr[1:, :], axis=2)

    @staticmethod
    def dist(x, y, axis=0):
        """Euclidean distance between x and y along `axis`."""
        return np.sqrt(np.sum((x - y) ** 2, axis=axis))

    def __call__(self, ind):
        ind = ind.flatten()
        num, labels = labeling(ind, self.W)
        # Check the constraints. Set infinity if any constraint is violated.
        _, count = np.unique(labels, return_counts=True)
        if num < self.min_region_num or num > self.max_region_num or np.min(count) < self.min_region_size:
            return np.inf, np.inf
        # Overall deviation: color distance of each pixel to its region mean.
        im_flat = self.img_arr.reshape(-1, 3)
        dev = np.sum(
            [np.sum(self.dist(im_flat[labels == n], im_flat[labels == n].mean(axis=0), axis=1)) for n in range(num)])
        # Edge value: sum of color distances across region boundaries.
        labels_arr = labels.reshape(self.img_arr.shape[0], self.img_arr.shape[1])
        edge = 0.
        # BUG FIX (x4 below): np.int was removed in NumPy 1.24; use builtin int.
        # Left
        left = np.zeros_like(labels_arr)
        left[:, 1:] = labels_arr[:, 1:] - labels_arr[:, :-1]
        left_mask = left.astype(int) != 0
        edge += self.left_edge[left_mask].sum()
        # Upper
        upper = np.zeros_like(labels_arr)
        upper[1:, :] = labels_arr[1:, :] - labels_arr[:-1, :]
        upper_mask = upper.astype(int) != 0
        edge += self.upper_edge[upper_mask].sum()
        # Right
        right = np.zeros_like(labels_arr)
        right[:, :-1] = labels_arr[:, :-1] - labels_arr[:, 1:]
        right_mask = right.astype(int) != 0
        edge += self.right_edge[right_mask].sum()
        # Bottom
        bottom = np.zeros_like(labels_arr)
        bottom[:-1, :] = labels_arr[:-1, :] - labels_arr[1:, :]
        bottom_mask = bottom.astype(int) != 0
        edge += self.bottom_edge[bottom_mask].sum()
        # Note: For edge value, the equation in the CEC paper is wrong. It does not include the division by the number
        # of edge pixels, but it should be divided by the number.
        bound_num = (np.sum(left_mask) + np.sum(upper_mask) + np.sum(right_mask) + np.sum(bottom_mask))
        if bound_num == 0:
            return dev, 0.
        else:
            return dev, - edge / bound_num
@numba.jit
def mutation(ind, width, toolbox, mutate_rate=0.0001):
    """Point-mutate a cloned copy of `ind`.

    Each pixel is independently selected with probability mutate_rate; a
    selected pixel is re-pointed at a uniformly chosen direction (left,
    up, right, down, or itself). Moves that would leave the image are
    silently skipped, leaving that pixel unchanged.
    """
    mutant = toolbox.clone(ind)
    flip = np.random.rand(ind.size) < mutate_rate
    for idx in np.where(flip)[0]:
        direction = np.random.randint(5)
        if direction == 0 and idx % width > 0:
            mutant[0][idx] = idx - 1  # point left
        elif direction == 1 and idx - width >= 0:
            mutant[0][idx] = idx - width  # point up
        elif direction == 2 and (idx + 1) % width > 0:
            mutant[0][idx] = idx + 1  # point right
        elif direction == 3 and idx + width < ind.size:
            mutant[0][idx] = idx + width  # point down
        elif direction == 4:
            mutant[0][idx] = idx  # point at itself (singleton)
    return mutant
@numba.jit
def crossover(ind1, ind2, toolbox, cross_rate=0.7):
    """Uniform crossover of two individuals.

    With probability (1 - cross_rate) the clones pass through unchanged;
    otherwise roughly half of the gene positions are swapped between the
    two children. The parents are never modified.
    """
    child_a = toolbox.clone(ind1)
    child_b = toolbox.clone(ind2)
    if np.random.rand() > cross_rate:
        return child_a, child_b
    swap = np.random.rand(ind1.size) < 0.5
    child_a[0][swap] = ind2[0][swap]
    child_b[0][swap] = ind1[0][swap]
    return child_a, child_b
@numba.jit
def reproduction(pop, offspring_size, width, toolbox, mutate_rate=0.0001, cross_rate=0.7):
    """Breed offspring_size children from `pop` by crossover + mutation.

    Parents are drawn uniformly at random (without replacement per pair).
    Children violating the fitness constraints (deviation == inf) are
    discarded and replaced by resampling.
    """
    children = []
    n_parents = len(pop)
    while len(children) < offspring_size:
        # Random selection of two distinct parents
        i, j = np.random.choice(np.arange(n_parents), 2, replace=False)
        # Crossover and mutation
        child_a, child_b = crossover(pop[i], pop[j], toolbox, cross_rate)
        child_a = mutation(child_a, width, toolbox, mutate_rate=mutate_rate)
        child_b = mutation(child_b, width, toolbox, mutate_rate=mutate_rate)
        # Fitness evaluation
        child_a.fitness.values = toolbox.evaluate(child_a)
        child_b.fitness.values = toolbox.evaluate(child_b)
        # Keep only children that satisfy the constraints
        for child in (child_a, child_b):
            if child.fitness.values[0] != np.inf:
                children.append(child)
    # The final pair may overshoot by one; trim to the requested size
    return children[:offspring_size]
def save_segment_img(ind, W, H, file_name=None):
    """Render the segmentation encoded by `ind` as a boundary image.

    Returns an H x W uint8 array where 255 = region interior and
    0 = boundary pixel (a pixel whose right or bottom neighbour lies in a
    different region). When file_name is given the image is also written
    to disk via OpenCV.
    """
    # Calculate connected components
    _, labels = labeling(ind, W)
    labels_arr = labels.reshape(H, W)
    # Create segmentation image. Vectorised neighbour comparison replaces
    # the original per-pixel Python loop (same result, O(N) C-level work
    # instead of O(N) interpreter iterations).
    seg_img = np.ones((H, W))
    right_diff = labels_arr[:, :-1] != labels_arr[:, 1:]
    seg_img[:, :-1][right_diff] = 0
    down_diff = labels_arr[:-1, :] != labels_arr[1:, :]
    seg_img[:-1, :][down_diff] = 0
    img_arr = np.asarray(seg_img * 255).astype(np.uint8)
    if file_name is not None:
        cv2.imwrite(file_name, img_arr)
    return img_arr
| [
"cv2.imwrite",
"numpy.unique",
"numpy.random.rand",
"numpy.ones",
"numpy.where",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.min",
"numpy.argmin",
"numpy.zeros_like",
"numpy.arange"
] | [((86, 109), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'bool'}), '(N, dtype=bool)\n', (94, 109), True, 'import numpy as np\n'), ((187, 198), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (195, 198), True, 'import numpy as np\n'), ((7586, 7601), 'numpy.ones', 'np.ones', (['(H, W)'], {}), '((H, W))\n', (7593, 7601), True, 'import numpy as np\n'), ((122, 146), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.int'}), '(N, dtype=np.int)\n', (129, 146), True, 'import numpy as np\n'), ((167, 177), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (174, 177), True, 'import numpy as np\n'), ((1384, 1408), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.int'}), '(N, dtype=np.int)\n', (1391, 1408), True, 'import numpy as np\n'), ((2660, 2716), 'numpy.zeros', 'np.zeros', (['(self.img_arr.shape[0], self.img_arr.shape[1])'], {}), '((self.img_arr.shape[0], self.img_arr.shape[1]))\n', (2668, 2716), True, 'import numpy as np\n'), ((2836, 2892), 'numpy.zeros', 'np.zeros', (['(self.img_arr.shape[0], self.img_arr.shape[1])'], {}), '((self.img_arr.shape[0], self.img_arr.shape[1]))\n', (2844, 2892), True, 'import numpy as np\n'), ((3013, 3069), 'numpy.zeros', 'np.zeros', (['(self.img_arr.shape[0], self.img_arr.shape[1])'], {}), '((self.img_arr.shape[0], self.img_arr.shape[1]))\n', (3021, 3069), True, 'import numpy as np\n'), ((3192, 3248), 'numpy.zeros', 'np.zeros', (['(self.img_arr.shape[0], self.img_arr.shape[1])'], {}), '((self.img_arr.shape[0], self.img_arr.shape[1]))\n', (3200, 3248), True, 'import numpy as np\n'), ((3647, 3684), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (3656, 3684), True, 'import numpy as np\n'), ((4187, 4212), 'numpy.zeros_like', 'np.zeros_like', (['labels_arr'], {}), '(labels_arr)\n', (4200, 4212), True, 'import numpy as np\n'), ((4399, 4424), 'numpy.zeros_like', 'np.zeros_like', (['labels_arr'], {}), '(labels_arr)\n', (4412, 4424), True, 'import numpy as np\n'), ((4616, 4641), 
'numpy.zeros_like', 'np.zeros_like', (['labels_arr'], {}), '(labels_arr)\n', (4629, 4641), True, 'import numpy as np\n'), ((4836, 4861), 'numpy.zeros_like', 'np.zeros_like', (['labels_arr'], {}), '(labels_arr)\n', (4849, 4861), True, 'import numpy as np\n'), ((5546, 5570), 'numpy.random.rand', 'np.random.rand', (['ind.size'], {}), '(ind.size)\n', (5560, 5570), True, 'import numpy as np\n'), ((5598, 5612), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (5606, 5612), True, 'import numpy as np\n'), ((5629, 5649), 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), '(5)\n', (5646, 5649), True, 'import numpy as np\n'), ((6207, 6223), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6221, 6223), True, 'import numpy as np\n'), ((6284, 6309), 'numpy.random.rand', 'np.random.rand', (['ind1.size'], {}), '(ind1.size)\n', (6298, 6309), True, 'import numpy as np\n'), ((7975, 8006), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'img_arr'], {}), '(file_name, img_arr)\n', (7986, 8006), False, 'import cv2\n'), ((473, 488), 'numpy.sum', 'np.sum', (['visited'], {}), '(visited)\n', (479, 488), True, 'import numpy as np\n'), ((571, 583), 'numpy.min', 'np.min', (['cost'], {}), '(cost)\n', (577, 583), True, 'import numpy as np\n'), ((585, 600), 'numpy.argmin', 'np.argmin', (['cost'], {}), '(cost)\n', (594, 600), True, 'import numpy as np\n'), ((3415, 3446), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {'axis': 'axis'}), '((x - y) ** 2, axis=axis)\n', (3421, 3446), True, 'import numpy as np\n'), ((5302, 5321), 'numpy.sum', 'np.sum', (['bottom_mask'], {}), '(bottom_mask)\n', (5308, 5321), True, 'import numpy as np\n'), ((6668, 6687), 'numpy.arange', 'np.arange', (['pop_size'], {}), '(pop_size)\n', (6677, 6687), True, 'import numpy as np\n'), ((7894, 7919), 'numpy.asarray', 'np.asarray', (['(seg_img * 255)'], {}), '(seg_img * 255)\n', (7904, 7919), True, 'import numpy as np\n'), ((3754, 3767), 'numpy.min', 'np.min', (['count'], {}), '(count)\n', (3760, 3767), True, 
'import numpy as np\n'), ((5281, 5299), 'numpy.sum', 'np.sum', (['right_mask'], {}), '(right_mask)\n', (5287, 5299), True, 'import numpy as np\n'), ((5240, 5257), 'numpy.sum', 'np.sum', (['left_mask'], {}), '(left_mask)\n', (5246, 5257), True, 'import numpy as np\n'), ((5260, 5278), 'numpy.sum', 'np.sum', (['upper_mask'], {}), '(upper_mask)\n', (5266, 5278), True, 'import numpy as np\n')] |
"""
Responsible for production of data visualisations and rendering this data as inline
base64 data for various django templates to use.
"""
from datetime import datetime, timedelta
from collections import Counter, defaultdict
from typing import Iterable, Callable
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from pandas.core.base import NoNewAttributesMixin
import plotnine as p9
from lazydict import LazyDictionary
from django.contrib.auth import get_user_model
from app.models import (
Timeframe,
timing,
user_purchases,
all_available_dates,
)
from app.data import (
make_portfolio_dataframe,
cache_plot,
make_portfolio_performance_dataframe,
price_change_bins,
calc_ma_crossover_points,
)
from plotnine.layer import Layers
def cached_portfolio_performance(user):
    """Return three cached portfolio plots (overall, per-stock, contributors) for `user`.

    Each plot is produced via cache_plot() keyed by the username, sharing a
    single LazyDictionary so the (expensive) performance dataframe is built
    at most once, and only if a plot is not already cached.
    """
    assert isinstance(user, get_user_model())
    username = user.username
    # Per-user cache keys, one per plot
    overall_key = f"{username}-portfolio-performance"
    stock_key = f"{username}-stock-performance"
    contributors_key = f"{username}-contributor-performance"
    def data_factory(
        ld: LazyDictionary,
    ):  # dont create the dataframe unless we have to - avoid expensive call!
        # Collect every purchase (and its buy date) across all stocks held
        purchase_buy_dates = []
        purchases = []
        stocks = []
        for stock, purchases_for_stock in user_purchases(user).items():
            stocks.append(stock)
            for purchase in purchases_for_stock:
                purchase_buy_dates.append(purchase.buy_date)
                purchases.append(purchase)
        purchase_buy_dates = sorted(purchase_buy_dates)
        # NOTE(review): raises IndexError if the user has no purchases at all
        # (purchase_buy_dates[0] below) -- confirm callers guarantee >= 1 purchase.
        # Timeframe spans the earliest purchase through the latest known date
        timeframe = Timeframe(
            from_date=str(purchase_buy_dates[0]), to_date=all_available_dates()[-1]
        )
        return make_portfolio_performance_dataframe(stocks, timeframe, purchases)
    ld = LazyDictionary()
    ld["df"] = lambda ld: data_factory(ld)
    return (
        cache_plot(overall_key, plot_overall_portfolio, datasets=ld),
        cache_plot(stock_key, plot_portfolio_stock_performance, datasets=ld),
        cache_plot(contributors_key, plot_portfolio_contributors, datasets=ld),
    )
def user_theme(
    plot: p9.ggplot,
    x_axis_label: str = "",
    y_axis_label: str = "",
    title: str = "",
    **plot_theme,
) -> p9.ggplot:
    """Apply the site-wide theme and axis labels to `plot` and return it.

    Common attributes (tick sizes, figure size, legend position) are set for
    every plot here; callers may override any of them via **plot_theme.
    The asxtrade_want_* keys are asxtrade-specific switches controlling the
    colour/fill scales and are stripped before the rest reaches p9.theme().
    """
    # TODO FIXME... make defaults chosen from user profile
    theme_settings = {
        "axis_text_x": p9.element_text(size=7),
        "axis_text_y": p9.element_text(size=7),
        "figure_size": (12, 6),
        "legend_position": "none",
    }
    theme_settings.update(**plot_theme)
    # Remove asxtrade-specific kwargs before they reach plotnine
    use_cmap_d = theme_settings.pop("asxtrade_want_cmap_d", True)
    # most graphs dont fill, so False by default
    use_fill_d = theme_settings.pop("asxtrade_want_fill_d", False)
    use_fill_continuous = theme_settings.pop("asxtrade_want_fill_continuous", False)
    themed = plot + p9.theme_bw()  # TODO FIXME... make chosen theme from user profile
    themed = themed + p9.labs(x=x_axis_label, y=y_axis_label, title=title)
    themed = themed + p9.theme(**theme_settings)
    if use_cmap_d:
        themed += p9.scale_colour_cmap_d()
    if use_fill_d:
        themed += p9.scale_fill_cmap_d()
    elif use_fill_continuous:
        themed += p9.scale_fill_cmap()
    return themed
def make_sentiment_plot(sentiment_df, exclude_zero_bin=True, plot_text_labels=True):
    """Heatmap of daily price-change bins over time.

    sentiment_df must contain one "bin_YYYY-MM-DD" column per date whose
    values are bin labels; each (date, bin) cell shows how many stocks fell
    into that bin that day. The dominant no-change ("0.0") bin is excluded
    by default so it does not swamp the colour scale.
    """
    rows = []
    print(
        "Sentiment plot: exclude zero bins? {} show text? {}".format(
            exclude_zero_bin, plot_text_labels
        )
    )
    for column in filter(lambda c: c.startswith("bin_"), sentiment_df.columns):
        c = Counter(sentiment_df[column])
        date = column[4:]  # strip the "bin_" prefix to recover the date
        for bin_name, val in c.items():
            if exclude_zero_bin and (
                bin_name == "0.0" or not isinstance(bin_name, str)
            ):
                continue
            bin_name = str(bin_name)
            assert isinstance(bin_name, str)
            val = int(val)
            rows.append(
                {
                    "date": datetime.strptime(date, "%Y-%m-%d"),
                    "bin": bin_name,
                    "value": val,
                }
            )
    df = pd.DataFrame.from_records(rows)
    bins, labels = price_change_bins()  # pylint: disable=unused-variable
    # Build a concrete list rather than the original lazy filter() object:
    # an iterator can only be consumed once and is a fragile argument for
    # Categorical categories. The no-change bin is dropped since it
    # dominates the activity heatmap.
    order = [label for label in labels if label != "0.0"]
    df["bin_ordered"] = pd.Categorical(df["bin"], categories=order)
    plot = p9.ggplot(df, p9.aes("date", "bin_ordered", fill="value")) + p9.geom_tile(
        show_legend=False
    )
    if plot_text_labels:
        plot = plot + p9.geom_text(p9.aes(label="value"), size=8, color="white")
    return user_theme(plot, y_axis_label="Daily change (%)")
@timing
def plot_fundamentals(
    df: pd.DataFrame,
    stock: str,
    line_size=1.5,  # pylint: disable=unused-argument
    columns_to_report=(
        "pe",
        "eps",
        "annual_dividend_yield",
        "volume",
        "last_price",
        "change_in_percent_cumulative",
        "change_price",
        "market_cap",
        "number_of_shares",
    ),
) -> str:
    """One free-scaled time-series facet per fundamental indicator for `stock`.

    df must contain a fetch_date column plus the columns_to_report columns;
    non-numeric values are coerced and dropped before plotting.
    """
    long_df = pd.melt(
        df,
        id_vars="fetch_date",
        value_vars=columns_to_report,
        var_name="indicator",
        value_name="value",
    )
    long_df = long_df[long_df["indicator"].isin(columns_to_report)]
    long_df["value"] = pd.to_numeric(long_df["value"])
    long_df = long_df.dropna(axis=0, subset=["value"], how="any")
    n_indicators = len(columns_to_report)
    plot = (
        p9.ggplot(long_df, p9.aes("fetch_date", "value", colour="indicator"))
        + p9.geom_path(show_legend=False, size=line_size)
        + p9.facet_wrap("~ indicator", nrow=n_indicators, ncol=1, scales="free_y")
    )
    return user_theme(plot, figure_size=(12, n_indicators))
def plot_overall_portfolio(
    ld: LazyDictionary,
    figure_size=(12, 4),
    line_size=1.5,
    date_text_size=7,
) -> p9.ggplot:
    """
    Given a daily snapshot of virtual purchases plot both overall and per-stock
    performance. Return a ggplot instance representing the visualisation
    """
    snapshot_df = ld["df"]
    # One facet per aggregate: cost, worth and profit over time
    wanted_columns = ["portfolio_cost", "portfolio_worth", "portfolio_profit", "date"]
    long_df = snapshot_df.filter(items=wanted_columns).melt(
        id_vars=["date"], var_name="field"
    )
    plot = (
        p9.ggplot(long_df, p9.aes("date", "value", group="field", color="field"))
        + p9.geom_line(size=line_size)
        + p9.facet_wrap("~ field", nrow=3, ncol=1, scales="free_y")
    )
    return user_theme(
        plot,
        y_axis_label="$ AUD",
        axis_text_x=p9.element_text(angle=30, size=date_text_size),
    )
def plot_portfolio_contributors(ld: LazyDictionary, figure_size=(11, 5)) -> p9.ggplot:
    """Bar chart of the latest per-stock profit, faceted into winners/losers.

    Uses only the most recent snapshot date in the portfolio dataframe.
    """
    df = ld["df"]
    melted_df = make_portfolio_dataframe(df, melt=True)
    all_dates = sorted(melted_df["date"].unique())
    df = melted_df[melted_df["date"] == all_dates[-1]]
    # .copy() because we add a column to this boolean-filtered slice below:
    # without it pandas raises SettingWithCopyWarning and the assignment may
    # silently not stick on some pandas versions.
    df = df[df["field"] == "stock_profit"].copy()  # only latest profit is plotted
    df["contribution"] = [
        "positive" if profit >= 0.0 else "negative" for profit in df["value"]
    ]
    # 2. plot contributors ie. winners and losers
    plot = (
        p9.ggplot(df, p9.aes("stock", "value", group="stock", fill="stock"))
        + p9.geom_bar(stat="identity")
        + p9.facet_grid("contribution ~ field", scales="free_y")
    )
    return user_theme(
        plot, y_axis_label="$ AUD", figure_size=figure_size, asxtrade_want_fill_d=True
    )
def plot_portfolio_stock_performance(
    ld: LazyDictionary, figure_width: int = 12, date_text_size=7
) -> p9.ggplot:
    """Smoothed per-stock profit over time, grouped into facets by mean profit.

    Stocks are quantile-binned (pd.qcut) on their mean profit so that
    similarly-performing stocks share a facet; each stock's code is drawn
    at its latest data point.
    """
    df = ld["df"]
    # only stocks with an actual cost basis are interesting
    df = df[df["stock_cost"] > 0.0]
    # latest_date = df.iloc[-1, 6]
    # latest_profit = df[df["date"] == latest_date]
    # print(df)
    # rows = stocks, columns = dates, values = profit at that date
    pivoted_df = df.pivot(index="stock", columns="date", values="stock_profit")
    latest_date = pivoted_df.columns[-1]
    # print(latest_date)
    mean_profit = pivoted_df.mean(axis=1)
    n_stocks = len(mean_profit)
    # if we want ~4 stocks per facet plot, then we need to specify the appropriate calculation for df.qcut()
    bins = pd.qcut(mean_profit, int(100 / n_stocks) + 1)
    # print(bins)
    # attach each stock's quantile bin to every one of its rows
    df = df.merge(bins.to_frame(name="bins"), left_on="stock", right_index=True)
    # print(df)
    # rows at the latest date only: used to position the stock-code labels
    textual_df = df[df["date"] == latest_date]
    # print(textual_df)
    # melted_df = make_portfolio_dataframe(df, melt=True)
    plot = (
        p9.ggplot(df, p9.aes("date", "stock_profit", group="stock", colour="stock"))
        + p9.geom_smooth(size=1.0, span=0.3, se=False)
        # NOTE(review): len(bins) is the number of *stocks*, not the number of
        # quantile bins -- confirm nrow here is intended (it also drives the
        # figure height below).
        + p9.facet_wrap("~bins", ncol=1, nrow=len(bins), scales="free_y")
        + p9.geom_text(
            p9.aes(x="date", y="stock_profit", label="stock"),
            color="black",
            size=9,
            data=textual_df,
            position=p9.position_jitter(width=10, height=10),
        )
    )
    return user_theme(
        plot,
        y_axis_label="$ AUD",
        figure_size=(figure_width, int(len(bins) * 1.2)),
        axis_text_x=p9.element_text(angle=30, size=date_text_size),
    )
def plot_company_rank(ld: LazyDictionary) -> p9.ggplot:
    """Smoothed rank-over-time line per company, one facet per rank bin.

    Expects ld["rank"] to provide date, rank, asx_code, bin and per-stock
    label coordinates in the x and y columns.
    """
    df = ld["rank"]
    n_bin = len(df["bin"].unique())
    smoothed_lines = p9.geom_smooth(span=0.3, se=False)
    # label each line with its stock code at the pre-computed (x, y) position
    code_labels = p9.geom_text(
        p9.aes(label="asx_code", x="x", y="y"),
        nudge_x=1.2,
        size=6,
        show_legend=False,
    )
    facets = p9.facet_wrap("~bin", nrow=n_bin, ncol=1, scales="free_y")
    plot = (
        p9.ggplot(df, p9.aes("date", "rank", group="asx_code", color="asx_code"))
        + smoothed_lines
        + code_labels
        + facets
    )
    return user_theme(
        plot,
        figure_size=(12, 20),
        subplots_adjust={"right": 0.8},
    )
def plot_company_versus_sector(
    df: pd.DataFrame, stock: str, sector: str  # pylint: disable=unused-argument
) -> p9.ggplot:
    """Line plot comparing a company's cumulative change against its sector.

    df must contain date, value and group columns (one group per series).
    Returns None when there is no data to plot.
    """
    if df is None or len(df) < 1:
        print("No data for stock vs. sector plot... ignored")
        return None
    df["date"] = pd.to_datetime(df["date"])
    base = p9.ggplot(
        df, p9.aes("date", "value", group="group", color="group", fill="group")
    )
    plot = base + p9.geom_line(size=1.5)
    return user_theme(
        plot,
        y_axis_label="Change since start (%)",
        subplots_adjust={"right": 0.8},
        legend_position="right",
    )
def plot_market_wide_sector_performance(ld: LazyDictionary) -> p9.ggplot:
    """
    Display specified dates for average sector performance. Each company is assumed to have at zero
    at the start of the observation period. A plot as base64 data is returned.
    """
    all_stocks_cip = ld["sector_cumsum_df"]
    n_stocks = len(all_stocks_cip)
    # merge in sector information for each company
    code_and_sector = ld["stocks_by_sector"]
    n_unique_sectors = len(code_and_sector["sector_name"].unique())
    print("Found {} unique sectors".format(n_unique_sectors))
    df = all_stocks_cip.merge(code_and_sector, left_index=True, right_on="asx_code")
    print(
        "Found {} stocks, {} sectors and merged total: {}".format(
            n_stocks, len(code_and_sector), len(df)
        )
    )
    # numeric_only=True: the merged frame carries string columns (asx_code,
    # sector_name); pandas >= 2.0 raises TypeError when mean() hits them,
    # while older pandas silently dropped them -- this keeps the old result.
    grouped_df = df.groupby("sector_name").mean(numeric_only=True)
    # ready the dataframe for plotting: one row per (sector, date)
    grouped_df = pd.melt(
        grouped_df,
        ignore_index=False,
        var_name="date",
        value_name="cumulative_change_percent",
    )
    grouped_df["sector"] = grouped_df.index
    grouped_df["date"] = pd.to_datetime(grouped_df["date"])
    n_col = 3
    plot = (
        p9.ggplot(
            grouped_df, p9.aes("date", "cumulative_change_percent", color="sector")
        )
        + p9.geom_line(size=1.5)
        + p9.facet_wrap(
            "~sector", nrow=n_unique_sectors // n_col + 1, ncol=n_col, scales="free_y"
        )
    )
    return user_theme(
        plot,
        y_axis_label="Average sector change (%)",
        panel_spacing=0.3,
        axis_text_x=p9.element_text(angle=30, size=7),
    )
def plot_series(
    df,
    x=None,
    y=None,
    tick_text_size=6,
    line_size=1.5,
    y_axis_label="Point score",
    x_axis_label="",
    color="stock",
    use_smooth_line=False,
):
    """General-purpose line plot of df[y] against df[x], one line per `color` group.

    Returns None when df is empty or None; otherwise a themed ggplot.
    """
    if df is None or len(df) < 1:
        return None
    # Validate plot configuration before building the aesthetic mapping
    assert len(x) > 0 and len(y) > 0
    assert line_size > 0.0
    assert isinstance(tick_text_size, int) and tick_text_size > 0
    assert y_axis_label is not None
    assert x_axis_label is not None
    aes_kwargs = {"x": x, "y": y}
    if color:
        aes_kwargs["color"] = color
    plot = p9.ggplot(df, p9.aes(**aes_kwargs))
    if use_smooth_line:
        # plotnine doesnt support confidence intervals with Loess smoothings, so se=False
        plot += p9.geom_smooth(size=line_size, span=0.3, se=False)
    else:
        plot += p9.geom_line(size=line_size)
    return user_theme(
        plot,
        x_axis_label=x_axis_label,
        y_axis_label=y_axis_label,
        axis_text_x=p9.element_text(angle=30, size=tick_text_size),
        axis_text_y=p9.element_text(size=tick_text_size),
    )
def plot_market_cap_distribution(ld: LazyDictionary) -> p9.ggplot:
    """Log-scale boxplots of market capitalisation per market, one facet per bin."""
    df = ld["market_cap_df"]
    required_columns = {"market", "market_cap", "bin"}
    assert set(df.columns).intersection(required_columns) == required_columns
    # a log axis cannot display non-positive market caps
    positive_caps = df[df["market_cap"] > 0.0]
    plot = (
        p9.ggplot(positive_caps)
        + p9.geom_boxplot(p9.aes(x="market", y="market_cap"))
        + p9.facet_wrap("bin", scales="free_y")
        + p9.scales.scale_y_log10()
    )
    return user_theme(
        plot,
        y_axis_label="Market cap. ($AUD Millions)",
        subplots_adjust={"wspace": 0.30},
    )
def plot_breakdown(ld: LazyDictionary) -> p9.ggplot:
    """Stacked bar plot of increasing and decreasing stocks per sector in the specified df"""
    cip_df = ld["cip_df"]
    # drop any pre-computed "bin_*" helper columns before summing across dates
    bin_columns = [c for c in cip_df.columns if c.startswith("bin_")]
    totals = cip_df.drop(columns=bin_columns).sum(axis="columns")
    df = pd.DataFrame(totals, columns=["sum"])
    ss = ld["stocks_by_sector"]
    # ss should be:
    #             asx_code             sector_name
    # asx_code
    # 14D              14D             Industrials
    # 1AD              1AD             Health Care
    # 1AG              1AG             Industrials
    # 1AL              1AL  Consumer Discretionary........
    df = df.merge(ss, left_index=True, right_index=True)
    if len(df) == 0:  # no stock in cip_df have a sector? ie. ETF?
        return None
    assert set(df.columns) == set(["sum", "asx_code", "sector_name"])
    df["increasing"] = df.apply(
        lambda row: "up" if row["sum"] >= 0.0 else "down", axis=1
    )
    # sort bars by value count (ascending)
    sector_names = df["sector_name"].value_counts().index.tolist()
    df = df.assign(
        sector_name_cat=pd.Categorical(df["sector_name"], categories=sector_names)
    )
    plot = (
        p9.ggplot(df, p9.aes(x="factor(sector_name_cat)", fill="factor(increasing)"))
        + p9.geom_bar()
        + p9.coord_flip()
    )
    return user_theme(
        plot,
        x_axis_label="Sector",
        y_axis_label="Number of stocks",
        subplots_adjust={"left": 0.2, "right": 0.85},
        legend_title=p9.element_blank(),
        asxtrade_want_fill_d=True,
    )
def plot_heatmap(
    timeframe: Timeframe, ld: LazyDictionary, bin_cb=price_change_bins
) -> p9.ggplot:
    """
    Plot the specified data matrix as binned values (heatmap) with X axis being dates over the specified timeframe and Y axis being
    the percentage change on the specified date (other metrics may also be used, but you will likely need to adjust the bins)
    :rtype: p9.ggplot instance representing the heatmap
    """
    df = ld["cip_df"]
    bins, labels = bin_cb()
    try:
        # NB: this may fail if no prices are available so we catch that error and handle accordingly...
        for date in df.columns:
            df["bin_{}".format(date)] = pd.cut(df[date], bins, labels=labels)
        # show counts per bin iff not too many bins
        return make_sentiment_plot(df, plot_text_labels=timeframe.n_days <= 30)
    except KeyError:
        return None
def plot_sector_performance(dataframe: pd.DataFrame, descriptor: str):
    """Line facets of stocks up >5% / down >5% / remaining, per date, for a sector."""
    assert len(dataframe) > 0
    dataframe["date"] = pd.to_datetime(dataframe["date"], format="%Y-%m-%d")
    labels = [
        "Number of stocks up >5%",
        "Number of stocks down >5%",
        "Remaining stocks",
    ]
    # NOTE(review): relies on the first three columns arriving in exactly
    # this order -- confirm against the caller that builds this dataframe.
    dataframe.columns = labels + ["date"]
    melted_df = dataframe.melt(value_vars=labels, id_vars="date")
    plot = (
        p9.ggplot(
            melted_df,
            p9.aes("date", "value", colour="variable", group="factor(variable)"),
        )
        + p9.facet_wrap("~variable", ncol=1, scales="free_y")
        + p9.geom_line(size=1.3)
    )
    return user_theme(plot)
def auto_dates():
    """Return a (locator, formatter) pair for concise matplotlib date axes.

    The formatter adapts tick labels to the dominant time scale of the data
    (years, months, days, hours, minutes, seconds, in that order).
    """
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)
    # one format per tick level: years, months, days, hours, minutes, seconds
    formatter.formats = ["%y", "%b", "%d", "%H:%M", "%H:%M", "%S.%f"]
    # a "zero" tick (start of a larger unit) shows the level above...
    formatter.zero_formats = [""] + formatter.formats[:-1]
    # ...except for ticks that are mostly hours, where month-day reads better
    formatter.zero_formats[3] = "%d-%b"
    # the offset string shown at the axis corner, per tick level
    formatter.offset_formats = [
        "",
        "%Y",
        "%b %Y",
        "%d %b %Y",
        "%d %b %Y",
        "%d %b %Y %H:%M",
    ]
    return (locator, formatter)
def relative_strength(prices, n=14):
    """Relative Strength Index (RSI) of a pandas price series, EWMA-smoothed.

    see https://stackoverflow.com/questions/20526414/relative-strength-index-in-python-pandas
    A trailing NaN row is appended under today's date plus a trailing space
    (so it can never collide with an existing date) to keep the result the
    same length as `prices` for matplotlib.
    """
    assert n > 0
    assert prices is not None
    # Day-over-day changes; drop the leading NaN that diff() produces
    changes = prices.diff()[1:]
    # Split into gain and loss series
    gains, losses = changes.copy(), changes.copy()
    gains[gains < 0] = 0
    losses[losses > 0] = 0
    # Exponentially-weighted moving averages of gains and (absolute) losses
    avg_gain = gains.ewm(span=n).mean()
    avg_loss = losses.abs().ewm(span=n).mean()
    # RSI from the gain/loss ratio
    strength = avg_gain / avg_loss
    rsi = 100.0 - (100.0 / (1.0 + strength))
    # NB: format is carefully handled here, so downstream code doesnt break
    new_date = datetime.strftime(
        datetime.now(), "%Y-%m-%d "
    )  # make sure it is not an existing date
    rsi.at[new_date] = np.nan  # ensure data series are the same length for matplotlib
    return rsi
@timing
def plot_momentum(stock: str, timeframe: Timeframe, ld: LazyDictionary) -> plt.Figure:
    """Three-panel matplotlib momentum chart for one stock.

    Top panel: 14-period RSI with overbought/oversold bands.
    Middle panel: daily low-high price bars with 20/200-day moving averages,
    plus dollar volume on a twinned axis.
    Bottom panel: MACD (12, 26) with its 9-period signal line.
    Returns the (closed) matplotlib Figure for the caller to render/cache.
    """
    assert len(stock) > 0
    assert "stock_df" in ld or "stock_df_200" in ld
    start_date = timeframe.earliest_date
    # prefer the 200-day dataframe when available so the MA(200) has history
    stock_df = ld["stock_df_200"] if "stock_df_200" in ld else ld["stock_df"]
    last_price = stock_df["last_price"]
    volume = stock_df["volume"]
    day_low_price = stock_df["day_low_price"]
    day_high_price = stock_df["day_high_price"]
    plt.rc("axes", grid=True)
    plt.rc("grid", color="0.75", linestyle="-", linewidth=0.5)
    textsize = 8
    # three stacked axes sharing the X (date) axis: fractions of the figure
    left, width = 0.1, 0.8
    rect1 = [left, 0.7, width, 0.2]
    rect2 = [left, 0.3, width, 0.4]
    rect3 = [left, 0.1, width, 0.2]
    fig = plt.figure(facecolor="white", figsize=(12, 6))
    axescolor = "#f6f6f6"  # the axes background color
    ax1 = fig.add_axes(rect1, facecolor=axescolor)  # left, bottom, width, height
    ax2 = fig.add_axes(rect2, facecolor=axescolor, sharex=ax1)
    ax2t = ax2.twinx()  # second y-axis on the middle panel for volume
    ax3 = fig.add_axes(rect3, facecolor=axescolor, sharex=ax1)
    fig.autofmt_xdate()
    # plot the relative strength indicator
    rsi = relative_strength(last_price)
    fillcolor = "darkgoldenrod"
    timeline = pd.to_datetime(last_price.index, format="%Y-%m-%d")
    ax1.plot(timeline, rsi, color=fillcolor)
    ax1.axhline(70, color="darkgreen")
    ax1.axhline(30, color="darkgreen")
    # shade the regions beyond the 70/30 thresholds
    ax1.fill_between(
        timeline, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor
    )
    ax1.fill_between(
        timeline, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor
    )
    ax1.text(
        0.6,
        0.9,
        ">70 = overbought",
        va="top",
        transform=ax1.transAxes,
        fontsize=textsize,
    )
    ax1.text(0.6, 0.1, "<30 = oversold", transform=ax1.transAxes, fontsize=textsize)
    ax1.set_ylim(0, 100)
    ax1.set_yticks([30, 70])
    ax1.text(
        0.025, 0.95, "RSI (14)", va="top", transform=ax1.transAxes, fontsize=textsize
    )
    # plot the price and volume data
    dx = 0.0
    low = day_low_price + dx
    high = day_high_price + dx
    # mark up-days vs down-days by the sign of the day-over-day price change
    deltas = np.zeros_like(last_price)
    deltas[1:] = np.diff(last_price)
    up = deltas > 0
    ax2.vlines(timeline[up], low[up], high[up], color="black", label="_nolegend_")
    ax2.vlines(timeline[~up], low[~up], high[~up], color="black", label="_nolegend_")
    ma20 = last_price.rolling(window=20).mean()
    ma200 = last_price.rolling(window=200, min_periods=50).mean()
    (linema20,) = ax2.plot(timeline, ma20, color="blue", lw=2, label="MA (20)")
    (linema200,) = ax2.plot(timeline, ma200, color="red", lw=2, label="MA (200)")
    assert linema20 is not None
    assert linema200 is not None
    props = font_manager.FontProperties(size=10)
    leg = ax2.legend(loc="lower left", shadow=True, fancybox=True, prop=props)
    leg.get_frame().set_alpha(0.5)
    volume = (last_price * volume) / 1e6  # dollar volume in millions
    vmax = np.nanmax(volume)
    poly = ax2t.fill_between(
        timeline,
        volume.to_list(),
        0,
        alpha=0.5,
        label="Volume",
        facecolor=fillcolor,
        edgecolor=fillcolor,
    )
    assert poly is not None  # avoid unused variable from pylint
    # keep the volume polygon in the lower fifth of the panel
    ax2t.set_ylim(0, 5 * vmax)
    ax2t.set_yticks([])
    # compute the MACD indicator
    fillcolor = "darkslategrey"
    n_fast = 12
    n_slow = 26
    n_ema = 9
    emafast = last_price.ewm(span=n_fast, adjust=False).mean()
    emaslow = last_price.ewm(span=n_slow, adjust=False).mean()
    macd = emafast - emaslow
    nema = macd.ewm(span=n_ema, adjust=False).mean()
    ax3.plot(timeline, macd, color="black", lw=2)
    ax3.plot(timeline, nema, color="blue", lw=1)
    # shade the MACD histogram (MACD minus its signal line)
    ax3.fill_between(
        timeline, macd - nema, 0, alpha=0.3, facecolor=fillcolor, edgecolor=fillcolor
    )
    ax3.text(
        0.025,
        0.95,
        "MACD ({}, {}, {})".format(n_fast, n_slow, n_ema),
        va="top",
        transform=ax3.transAxes,
        fontsize=textsize,
    )
    ax3.set_yticks([])
    # concise auto date ticks on every axis
    locator, formatter = auto_dates()
    for ax in ax1, ax2, ax2t, ax3:
        ax.xaxis.set_major_locator(locator)
        ax.xaxis.set_major_formatter(formatter)
    plt.xticks(fontsize=8)
    try:
        plt.xlim(left=datetime.strptime(start_date, "%Y-%m-%d"))
    except IndexError:
        print("WARNING: unable to set plot start_date - things may look weird")
    # NOTE(review): plt.plot() adds an empty line to the current axes and
    # plt.gcf() should return the same `fig` created above -- presumably
    # these force a draw of the pyplot state; confirm both are needed.
    plt.plot()
    fig = plt.gcf()
    plt.close(fig)
    return fig
@timing
def plot_trend(sample_period="M", ld: LazyDictionary = None) -> str:
    """
    Given a dataframe of a single stock from company_prices() this plots the highest price
    in each month over the time period of the dataframe.
    sample_period is any pandas resample() rule string (default "M" = monthly).
    NOTE(review): ld defaults to None but is immediately indexed below, so a
    missing ld raises TypeError rather than AssertionError -- confirm callers
    always pass one.
    """
    assert "stock_df" in ld
    def inner_date_fmt(dates_to_format):
        # X-axis labeller: shift each break back ~one month then print YYYY-MM
        results = []
        for d in dates_to_format:
            d -= timedelta(
                weeks=4
            )  # breaks are set to the end of the month rather than the start... so
            results.append(d.strftime("%Y-%m"))
        return results
    stock_df = ld["stock_df"]
    dataframe = stock_df.filter(items=["last_price"])
    dataframe.index = pd.to_datetime(dataframe.index, format="%Y-%m-%d")
    # highest price within each sample period (eg. month)
    dataframe = dataframe.resample(sample_period).max()
    plot = (
        p9.ggplot(
            dataframe,
            # NOTE(review): "dataframe.index" is evaluated by plotnine as an
            # expression in the calling environment -- confirm it resolves here
            p9.aes(
                x="dataframe.index", y=dataframe.columns[0], fill=dataframe.columns[0]
            ),
        )
        + p9.geom_bar(stat="identity", alpha=0.7)
        + p9.scale_x_datetime(
            labels=inner_date_fmt
        )  # dont print day (always 1st day of month due to resampling)
    )
    return user_theme(plot, y_axis_label="$ AUD", asxtrade_want_fill_continuous=True)
def plot_points_by_rule(net_points_by_rule: dict) -> p9.ggplot:
    """Horizontal bar chart of net points contributed by each scoring rule.

    net_points_by_rule maps a rule (any str()-able key) to its net points.
    Returns None when there is nothing to plot.

    NB: the original annotation was ``defaultdict(int)`` -- a *call* that
    constructed a defaultdict instance at import time, not a type; any
    mapping is accepted, so plain ``dict`` is the honest annotation.
    """
    if net_points_by_rule is None or len(net_points_by_rule) < 1:
        return None
    rows = [
        {"rule": str(k), "net_points": v} for k, v in net_points_by_rule.items()
    ]
    df = pd.DataFrame.from_records(rows)
    plot = (
        p9.ggplot(df, p9.aes(x="rule", y="net_points", fill="net_points"))
        + p9.geom_bar(stat="identity", alpha=0.7)
        + p9.coord_flip()
    )
    return user_theme(
        plot,
        x_axis_label="Rule",
        y_axis_label="Contributions to points by rule",
        subplots_adjust={"left": 0.2},
        asxtrade_want_fill_continuous=True,
    )
def plot_boxplot_series(df, normalisation_method=None):
    """
    Treating each column as a separate boxplot and each row as an independent observation
    (ie. different company) render a series of box plots to identify a shift in
    performance from the observations. normalisation_method should be one of the
    values present in SectorSentimentSearchForm.normalisation_choices
    """
    # choose the normalisation to apply before plotting
    if normalisation_method is None or normalisation_method == "1":
        scaled_df = df
        y_label = "Percentage change"
    elif normalisation_method == "2":
        scaled_df = (df - df.min()) / (df.max() - df.min())  # min/max scaling
        y_label = "Percentage change (min/max. scaled)"
    else:
        scaled_df = df / df.max(axis=0)  # div by max if all else fails...
        y_label = "Percentage change (normalised by dividing by max)"
    # figure height scales with the number of boxplots (columns)
    n_inches = len(df.columns) / 5
    melted = scaled_df.melt(ignore_index=False).dropna()
    plot = (
        p9.ggplot(melted, p9.aes(x="fetch_date", y="value"))
        + p9.geom_boxplot(outlier_colour="blue")
        + p9.coord_flip()
    )
    return user_theme(plot, y_axis_label=y_label, figure_size=(12, n_inches))
def plot_sector_field(df: pd.DataFrame, field, n_col=3):
    """
    Plot the given per-sector `field` over time, one line per sector,
    faceted into a grid n_col columns wide.  Returns the themed plot.
    """
    sector_count = df["sector"].nunique()
    df["date"] = pd.to_datetime(df["date"])
    facet_rows = sector_count // n_col + 1
    plot = (
        p9.ggplot(df, p9.aes("date", field, group="sector", color="sector"))
        + p9.geom_line(size=1.0)
        + p9.facet_wrap("~sector", nrow=facet_rows, ncol=n_col, scales="free_y")
    )
    return user_theme(
        plot,
        y_axis_label=f"Sector-wide {field}",
        panel_spacing=0.3,
        axis_text_x=p9.element_text(angle=30, size=7),
    )
def plot_sector_top_eps_contributors(
    df: pd.DataFrame, stocks_by_sector_df: pd.DataFrame
) -> p9.ggplot:
    """
    Returns a plot of the top 20 contributors per sector, based on the most recent EPS value per stock in the dataframe. If no
    stocks in a given sector have positive EPS, the sector will not be plotted.
    """
    latest_date = df.columns[-1]
    eps_df = df[latest_date]
    # keep only stocks with non-negative EPS at the latest date
    eps_df = eps_df[eps_df >= 0.0].to_frame()
    eps_df = eps_df.merge(
        stocks_by_sector_df, left_index=True, right_on="asx_code"
    )
    # rank stocks within their sector by latest EPS (1 == highest)
    eps_df["rank"] = eps_df.groupby("sector_name")[latest_date].rank(
        "dense", ascending=False
    )
    eps_df = eps_df[eps_df["rank"] <= 10.0]
    sector_count = eps_df["sector_name"].nunique()
    eps_df["eps"] = eps_df[latest_date]
    aesthetics = p9.aes(
        y="eps",
        x="reorder(asx_code,eps)",  # sort bars by eps within each sub-plot
        group="sector_name",
        fill="sector_name",
    )
    plot = (
        p9.ggplot(eps_df, aesthetics)
        + p9.geom_bar(stat="identity")
        + p9.facet_wrap("~sector_name", ncol=1, nrow=sector_count, scales="free")
        + p9.coord_flip()
    )
    return user_theme(
        plot,
        y_axis_label="EPS ($AUD)",
        x_axis_label="Top 10 ASX stocks per sector as at {}".format(latest_date),
        subplots_adjust={"hspace": 0.4},
        figure_size=(12, int(sector_count * 1.5)),
        asxtrade_want_cmap_d=False,
        asxtrade_want_fill_d=True,
    )
def plot_monthly_returns(
    timeframe: Timeframe, stock: str, ld: LazyDictionary
) -> p9.ggplot:
    """
    Bar chart of the month-over-month percentage change in last_price for
    the given stock, sampled at business-month-start dates in timeframe.
    """
    month_starts = pd.date_range(
        timeframe.earliest_date, timeframe.most_recent_date, freq="BMS"
    )
    df = ld["stock_df"]
    wanted_rows = [d.strftime("%Y-%m-%d") for d in month_starts]
    df = df.filter(wanted_rows, axis=0)
    df["percentage_change"] = df["last_price"].pct_change(periods=1) * 100.0
    df.index = pd.to_datetime(df.index, format="%Y-%m-%d")
    df = df.fillna(0.0)  # NB: avoid plotnine warning plotting missing data
    plot = p9.ggplot(
        df, p9.aes(x="df.index", y="percentage_change", fill="percentage_change")
    ) + p9.geom_bar(stat="identity")
    return user_theme(
        plot, asxtrade_want_cmap_d=False, asxtrade_want_fill_continuous=True
    )
def plot_sector_monthly_mean_returns(ld: LazyDictionary) -> dict:
    """
    Compute the average monthly return across all stocks and per sector,
    then plot them as a faceted bar chart (one facet per dataset).

    ld must provide "monthly_returns_by_stock" (DataFrame indexed by date,
    one column per stock) and "stocks_by_sector" (DataFrame with asx_code
    and sector_name columns).
    Returns a dict mapping plot name -> cached plot.
    """
    all_stocks = ld["monthly_returns_by_stock"]
    ret = {}
    ss = ld["stocks_by_sector"]
    all_stock_average_df = all_stocks.mean(axis=1).to_frame(name="average")
    all_stock_average_df["dataset"] = "All stock average"
    frames = [all_stock_average_df]
    for current_sector in ss["sector_name"].unique():
        wanted_stocks = set(ss[ss["sector_name"] == current_sector]["asx_code"])
        sector_df = (
            all_stocks.filter(items=wanted_stocks, axis="columns")
            .mean(axis=1)
            .to_frame(name="average")
        )
        sector_df["dataset"] = current_sector
        frames.append(sector_df)
    # DataFrame.append() was removed in pandas 2.0; concatenate all frames once
    final_df = pd.concat(frames)
    final_df["date"] = pd.to_datetime(final_df.index, format="%Y-%m-%d")
    plot = (
        p9.ggplot(final_df, p9.aes(x="date", y="average", fill="average"))
        + p9.geom_bar(stat="identity")
        + p9.facet_wrap("~dataset", ncol=2, scales="free_y")
    )
    # lambda parameter renamed: the old name `ld` shadowed the outer argument
    ret["month-by-month-average-returns"] = cache_plot(
        "monthly-mean-returns",
        lambda _ld: user_theme(
            plot,
            y_axis_label="Average percent return per month",
            figure_size=(12, 10),
            subplots_adjust={"wspace": 0.15},
            axis_text_x=p9.element_text(angle=30, size=7),
            asxtrade_want_fill_continuous=True,
        ),
    )
    return ret
| [
"plotnine.ggplot",
"app.models.all_available_dates",
"plotnine.scales.scale_y_log10",
"plotnine.coord_flip",
"plotnine.geom_bar",
"plotnine.aes",
"plotnine.geom_smooth",
"plotnine.scale_fill_cmap",
"datetime.timedelta",
"pandas.to_datetime",
"pandas.date_range",
"matplotlib.dates.ConciseDateFo... | [((1985, 2001), 'lazydict.LazyDictionary', 'LazyDictionary', ([], {}), '()\n', (1999, 2001), False, 'from lazydict import LazyDictionary\n'), ((4554, 4585), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['rows'], {}), '(rows)\n', (4579, 4585), True, 'import pandas as pd\n'), ((4637, 4656), 'app.data.price_change_bins', 'price_change_bins', ([], {}), '()\n', (4654, 4656), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((4850, 4893), 'pandas.Categorical', 'pd.Categorical', (["df['bin']"], {'categories': 'order'}), "(df['bin'], categories=order)\n", (4864, 4893), True, 'import pandas as pd\n'), ((5576, 5686), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': '"""fetch_date"""', 'value_vars': 'columns_to_report', 'var_name': '"""indicator"""', 'value_name': '"""value"""'}), "(df, id_vars='fetch_date', value_vars=columns_to_report, var_name=\n 'indicator', value_name='value')\n", (5583, 5686), True, 'import pandas as pd\n'), ((5820, 5851), 'pandas.to_numeric', 'pd.to_numeric', (["plot_df['value']"], {}), "(plot_df['value'])\n", (5833, 5851), True, 'import pandas as pd\n'), ((7229, 7268), 'app.data.make_portfolio_dataframe', 'make_portfolio_dataframe', (['df'], {'melt': '(True)'}), '(df, melt=True)\n', (7253, 7268), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((10445, 10471), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (10459, 10471), True, 'import pandas as pd\n'), ((11784, 11885), 'pandas.melt', 'pd.melt', (['grouped_df'], {'ignore_index': '(False)', 'var_name': '"""date"""', 'value_name': '"""cumulative_change_percent"""'}), "(grouped_df, ignore_index=False, var_name='date', value_name=\n 'cumulative_change_percent')\n", (11791, 11885), True, 'import pandas as pd\n'), 
((11989, 12023), 'pandas.to_datetime', 'pd.to_datetime', (["grouped_df['date']"], {}), "(grouped_df['date'])\n", (12003, 12023), True, 'import pandas as pd\n'), ((15350, 15408), 'pandas.Categorical', 'pd.Categorical', (["df['sector_name']"], {'categories': 'sector_names'}), "(df['sector_name'], categories=sector_names)\n", (15364, 15408), True, 'import pandas as pd\n'), ((16986, 17038), 'pandas.to_datetime', 'pd.to_datetime', (["dataframe['date']"], {'format': '"""%Y-%m-%d"""'}), "(dataframe['date'], format='%Y-%m-%d')\n", (17000, 17038), True, 'import pandas as pd\n'), ((17624, 17648), 'matplotlib.dates.AutoDateLocator', 'mdates.AutoDateLocator', ([], {}), '()\n', (17646, 17648), True, 'import matplotlib.dates as mdates\n'), ((17665, 17701), 'matplotlib.dates.ConciseDateFormatter', 'mdates.ConciseDateFormatter', (['locator'], {}), '(locator)\n', (17692, 17701), True, 'import matplotlib.dates as mdates\n'), ((20075, 20100), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'grid': '(True)'}), "('axes', grid=True)\n", (20081, 20100), True, 'import matplotlib.pyplot as plt\n'), ((20105, 20163), 'matplotlib.pyplot.rc', 'plt.rc', (['"""grid"""'], {'color': '"""0.75"""', 'linestyle': '"""-"""', 'linewidth': '(0.5)'}), "('grid', color='0.75', linestyle='-', linewidth=0.5)\n", (20111, 20163), True, 'import matplotlib.pyplot as plt\n'), ((20328, 20374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""', 'figsize': '(12, 6)'}), "(facecolor='white', figsize=(12, 6))\n", (20338, 20374), True, 'import matplotlib.pyplot as plt\n'), ((20840, 20891), 'pandas.to_datetime', 'pd.to_datetime', (['last_price.index'], {'format': '"""%Y-%m-%d"""'}), "(last_price.index, format='%Y-%m-%d')\n", (20854, 20891), True, 'import pandas as pd\n'), ((21813, 21838), 'numpy.zeros_like', 'np.zeros_like', (['last_price'], {}), '(last_price)\n', (21826, 21838), True, 'import numpy as np\n'), ((21856, 21875), 'numpy.diff', 'np.diff', (['last_price'], {}), '(last_price)\n', 
(21863, 21875), True, 'import numpy as np\n'), ((22457, 22493), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'size': '(10)'}), '(size=10)\n', (22484, 22493), True, 'import matplotlib.font_manager as font_manager\n'), ((22710, 22727), 'numpy.nanmax', 'np.nanmax', (['volume'], {}), '(volume)\n', (22719, 22727), True, 'import numpy as np\n'), ((23972, 23994), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (23982, 23994), True, 'import matplotlib.pyplot as plt\n'), ((24176, 24186), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (24184, 24186), True, 'import matplotlib.pyplot as plt\n'), ((24197, 24206), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (24204, 24206), True, 'import matplotlib.pyplot as plt\n'), ((24211, 24225), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (24220, 24225), True, 'import matplotlib.pyplot as plt\n'), ((24945, 24995), 'pandas.to_datetime', 'pd.to_datetime', (['dataframe.index'], {'format': '"""%Y-%m-%d"""'}), "(dataframe.index, format='%Y-%m-%d')\n", (24959, 24995), True, 'import pandas as pd\n'), ((25828, 25859), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['rows'], {}), '(rows)\n', (25853, 25859), True, 'import pandas as pd\n'), ((27714, 27740), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (27728, 27740), True, 'import pandas as pd\n'), ((30057, 30094), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""BMS"""'}), "(start, end, freq='BMS')\n", (30070, 30094), True, 'import pandas as pd\n'), ((30292, 30335), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {'format': '"""%Y-%m-%d"""'}), "(df.index, format='%Y-%m-%d')\n", (30306, 30335), True, 'import pandas as pd\n'), ((31479, 31528), 'pandas.to_datetime', 'pd.to_datetime', (['final_df.index'], {'format': '"""%Y-%m-%d"""'}), "(final_df.index, format='%Y-%m-%d')\n", (31493, 31528), True, 'import pandas as 
pd\n'), ((941, 957), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (955, 957), False, 'from django.contrib.auth import get_user_model\n'), ((1908, 1974), 'app.data.make_portfolio_performance_dataframe', 'make_portfolio_performance_dataframe', (['stocks', 'timeframe', 'purchases'], {}), '(stocks, timeframe, purchases)\n', (1944, 1974), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((2066, 2126), 'app.data.cache_plot', 'cache_plot', (['overall_key', 'plot_overall_portfolio'], {'datasets': 'ld'}), '(overall_key, plot_overall_portfolio, datasets=ld)\n', (2076, 2126), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((2136, 2204), 'app.data.cache_plot', 'cache_plot', (['stock_key', 'plot_portfolio_stock_performance'], {'datasets': 'ld'}), '(stock_key, plot_portfolio_stock_performance, datasets=ld)\n', (2146, 2204), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((2214, 2284), 'app.data.cache_plot', 'cache_plot', (['contributors_key', 'plot_portfolio_contributors'], {'datasets': 'ld'}), '(contributors_key, plot_portfolio_contributors, datasets=ld)\n', (2224, 2284), False, 'from app.data import make_portfolio_dataframe, cache_plot, make_portfolio_performance_dataframe, price_change_bins, calc_ma_crossover_points\n'), ((2755, 2778), 'plotnine.element_text', 'p9.element_text', ([], {'size': '(7)'}), '(size=7)\n', (2770, 2778), True, 'import plotnine as p9\n'), ((2803, 2826), 'plotnine.element_text', 'p9.element_text', ([], {'size': '(7)'}), '(size=7)\n', (2818, 2826), True, 'import plotnine as p9\n'), ((3412, 3434), 'plotnine.theme', 'p9.theme', ([], {}), '(**theme_args)\n', (3420, 3434), True, 'import plotnine as p9\n'), ((3477, 
3501), 'plotnine.scale_colour_cmap_d', 'p9.scale_colour_cmap_d', ([], {}), '()\n', (3499, 3501), True, 'import plotnine as p9\n'), ((3538, 3560), 'plotnine.scale_fill_cmap_d', 'p9.scale_fill_cmap_d', ([], {}), '()\n', (3558, 3560), True, 'import plotnine as p9\n'), ((3983, 4012), 'collections.Counter', 'Counter', (['sentiment_df[column]'], {}), '(sentiment_df[column])\n', (3990, 4012), False, 'from collections import Counter, defaultdict\n'), ((4967, 4998), 'plotnine.geom_tile', 'p9.geom_tile', ([], {'show_legend': '(False)'}), '(show_legend=False)\n', (4979, 4998), True, 'import plotnine as p9\n'), ((6143, 6204), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~ indicator"""'], {'nrow': 'n', 'ncol': '(1)', 'scales': '"""free_y"""'}), "('~ indicator', nrow=n, ncol=1, scales='free_y')\n", (6156, 6204), True, 'import plotnine as p9\n'), ((6901, 6958), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~ field"""'], {'nrow': '(3)', 'ncol': '(1)', 'scales': '"""free_y"""'}), "('~ field', nrow=3, ncol=1, scales='free_y')\n", (6914, 6958), True, 'import plotnine as p9\n'), ((7769, 7823), 'plotnine.facet_grid', 'p9.facet_grid', (['"""contribution ~ field"""'], {'scales': '"""free_y"""'}), "('contribution ~ field', scales='free_y')\n", (7782, 7823), True, 'import plotnine as p9\n'), ((10002, 10060), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~bin"""'], {'nrow': 'n_bin', 'ncol': '(1)', 'scales': '"""free_y"""'}), "('~bin', nrow=n_bin, ncol=1, scales='free_y')\n", (10015, 10060), True, 'import plotnine as p9\n'), ((10598, 10620), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': '(1.5)'}), '(size=1.5)\n', (10610, 10620), True, 'import plotnine as p9\n'), ((12207, 12300), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~sector"""'], {'nrow': '(n_unique_sectors // n_col + 1)', 'ncol': 'n_col', 'scales': '"""free_y"""'}), "('~sector', nrow=n_unique_sectors // n_col + 1, ncol=n_col,\n scales='free_y')\n", (12220, 12300), True, 'import plotnine as p9\n'), ((13048, 13062), 
'plotnine.aes', 'p9.aes', ([], {}), '(**args)\n', (13054, 13062), True, 'import plotnine as p9\n'), ((13104, 13154), 'plotnine.geom_smooth', 'p9.geom_smooth', ([], {'size': 'line_size', 'span': '(0.3)', 'se': '(False)'}), '(size=line_size, span=0.3, se=False)\n', (13118, 13154), True, 'import plotnine as p9\n'), ((13286, 13314), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (13298, 13314), True, 'import plotnine as p9\n'), ((14009, 14034), 'plotnine.scales.scale_y_log10', 'p9.scales.scale_y_log10', ([], {}), '()\n', (14032, 14034), True, 'import plotnine as p9\n'), ((15612, 15627), 'plotnine.coord_flip', 'p9.coord_flip', ([], {}), '()\n', (15625, 15627), True, 'import plotnine as p9\n'), ((17533, 17555), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': '(1.3)'}), '(size=1.3)\n', (17545, 17555), True, 'import plotnine as p9\n'), ((19236, 19250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19248, 19250), False, 'from datetime import datetime, timedelta\n'), ((25322, 25364), 'plotnine.scale_x_datetime', 'p9.scale_x_datetime', ([], {'labels': 'inner_date_fmt'}), '(labels=inner_date_fmt)\n', (25341, 25364), True, 'import plotnine as p9\n'), ((25587, 25603), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (25598, 25603), False, 'from collections import Counter, defaultdict\n'), ((26008, 26023), 'plotnine.coord_flip', 'p9.coord_flip', ([], {}), '()\n', (26021, 26023), True, 'import plotnine as p9\n'), ((27356, 27371), 'plotnine.coord_flip', 'p9.coord_flip', ([], {}), '()\n', (27369, 27371), True, 'import plotnine as p9\n'), ((27874, 27967), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~sector"""'], {'nrow': '(n_unique_sectors // n_col + 1)', 'ncol': 'n_col', 'scales': '"""free_y"""'}), "('~sector', nrow=n_unique_sectors // n_col + 1, ncol=n_col,\n scales='free_y')\n", (27887, 27967), True, 'import plotnine as p9\n'), ((29527, 29542), 'plotnine.coord_flip', 'p9.coord_flip', ([], {}), '()\n', 
(29540, 29542), True, 'import plotnine as p9\n'), ((30541, 30569), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""'}), "(stat='identity')\n", (30552, 30569), True, 'import plotnine as p9\n'), ((31666, 31716), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~dataset"""'], {'ncol': '(2)', 'scales': '"""free_y"""'}), "('~dataset', ncol=2, scales='free_y')\n", (31679, 31716), True, 'import plotnine as p9\n'), ((3349, 3401), 'plotnine.labs', 'p9.labs', ([], {'x': 'x_axis_label', 'y': 'y_axis_label', 'title': 'title'}), '(x=x_axis_label, y=y_axis_label, title=title)\n', (3356, 3401), True, 'import plotnine as p9\n'), ((3608, 3628), 'plotnine.scale_fill_cmap', 'p9.scale_fill_cmap', ([], {}), '()\n', (3626, 3628), True, 'import plotnine as p9\n'), ((4920, 4963), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""bin_ordered"""'], {'fill': '"""value"""'}), "('date', 'bin_ordered', fill='value')\n", (4926, 4963), True, 'import plotnine as p9\n'), ((6085, 6132), 'plotnine.geom_path', 'p9.geom_path', ([], {'show_legend': '(False)', 'size': 'line_size'}), '(show_legend=False, size=line_size)\n', (6097, 6132), True, 'import plotnine as p9\n'), ((6862, 6890), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (6874, 6890), True, 'import plotnine as p9\n'), ((7052, 7098), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': 'date_text_size'}), '(angle=30, size=date_text_size)\n', (7067, 7098), True, 'import plotnine as p9\n'), ((7730, 7758), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""'}), "(stat='identity')\n", (7741, 7758), True, 'import plotnine as p9\n'), ((9120, 9169), 'plotnine.aes', 'p9.aes', ([], {'x': '"""date"""', 'y': '"""stock_profit"""', 'label': '"""stock"""'}), "(x='date', y='stock_profit', label='stock')\n", (9126, 9169), True, 'import plotnine as p9\n'), ((9470, 9516), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': 'date_text_size'}), '(angle=30, 
size=date_text_size)\n', (9485, 9516), True, 'import plotnine as p9\n'), ((10522, 10589), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""value"""'], {'group': '"""group"""', 'color': '"""group"""', 'fill': '"""group"""'}), "('date', 'value', group='group', color='group', fill='group')\n", (10528, 10589), True, 'import plotnine as p9\n'), ((12174, 12196), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': '(1.5)'}), '(size=1.5)\n', (12186, 12196), True, 'import plotnine as p9\n'), ((12459, 12492), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': '(7)'}), '(angle=30, size=7)\n', (12474, 12492), True, 'import plotnine as p9\n'), ((13442, 13488), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': 'tick_text_size'}), '(angle=30, size=tick_text_size)\n', (13457, 13488), True, 'import plotnine as p9\n'), ((13510, 13546), 'plotnine.element_text', 'p9.element_text', ([], {'size': 'tick_text_size'}), '(size=tick_text_size)\n', (13525, 13546), True, 'import plotnine as p9\n'), ((13961, 13998), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""bin"""'], {'scales': '"""free_y"""'}), "('bin', scales='free_y')\n", (13974, 13998), True, 'import plotnine as p9\n'), ((15588, 15601), 'plotnine.geom_bar', 'p9.geom_bar', ([], {}), '()\n', (15599, 15601), True, 'import plotnine as p9\n'), ((15818, 15836), 'plotnine.element_blank', 'p9.element_blank', ([], {}), '()\n', (15834, 15836), True, 'import plotnine as p9\n'), ((16593, 16630), 'pandas.cut', 'pd.cut', (['df[date]', 'bins'], {'labels': 'labels'}), '(df[date], bins, labels=labels)\n', (16599, 16630), True, 'import pandas as pd\n'), ((17471, 17522), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~variable"""'], {'ncol': '(1)', 'scales': '"""free_y"""'}), "('~variable', ncol=1, scales='free_y')\n", (17484, 17522), True, 'import plotnine as p9\n'), ((24626, 24644), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4)'}), '(weeks=4)\n', (24635, 24644), False, 'from datetime import datetime, 
timedelta\n'), ((25272, 25311), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""', 'alpha': '(0.7)'}), "(stat='identity', alpha=0.7)\n", (25283, 25311), True, 'import plotnine as p9\n'), ((25958, 25997), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""', 'alpha': '(0.7)'}), "(stat='identity', alpha=0.7)\n", (25969, 25997), True, 'import plotnine as p9\n'), ((27307, 27345), 'plotnine.geom_boxplot', 'p9.geom_boxplot', ([], {'outlier_colour': '"""blue"""'}), "(outlier_colour='blue')\n", (27322, 27345), True, 'import plotnine as p9\n'), ((27841, 27863), 'plotnine.geom_line', 'p9.geom_line', ([], {'size': '(1.0)'}), '(size=1.0)\n', (27853, 27863), True, 'import plotnine as p9\n'), ((28122, 28155), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': '(7)'}), '(angle=30, size=7)\n', (28137, 28155), True, 'import plotnine as p9\n'), ((29448, 29516), 'plotnine.facet_wrap', 'p9.facet_wrap', (['"""~sector_name"""'], {'ncol': '(1)', 'nrow': 'n_sectors', 'scales': '"""free"""'}), "('~sector_name', ncol=1, nrow=n_sectors, scales='free')\n", (29461, 29516), True, 'import plotnine as p9\n'), ((30463, 30532), 'plotnine.aes', 'p9.aes', ([], {'x': '"""df.index"""', 'y': '"""percentage_change"""', 'fill': '"""percentage_change"""'}), "(x='df.index', y='percentage_change', fill='percentage_change')\n", (30469, 30532), True, 'import plotnine as p9\n'), ((31627, 31655), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""'}), "(stat='identity')\n", (31638, 31655), True, 'import plotnine as p9\n'), ((1399, 1419), 'app.models.user_purchases', 'user_purchases', (['user'], {}), '(user)\n', (1413, 1419), False, 'from app.models import Timeframe, timing, user_purchases, all_available_dates\n'), ((3272, 3285), 'plotnine.theme_bw', 'p9.theme_bw', ([], {}), '()\n', (3283, 3285), True, 'import plotnine as p9\n'), ((5073, 5094), 'plotnine.aes', 'p9.aes', ([], {'label': '"""value"""'}), "(label='value')\n", (5079, 5094), True, 
'import plotnine as p9\n'), ((6014, 6063), 'plotnine.aes', 'p9.aes', (['"""fetch_date"""', '"""value"""'], {'colour': '"""indicator"""'}), "('fetch_date', 'value', colour='indicator')\n", (6020, 6063), True, 'import plotnine as p9\n'), ((6797, 6850), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""value"""'], {'group': '"""field"""', 'color': '"""field"""'}), "('date', 'value', group='field', color='field')\n", (6803, 6850), True, 'import plotnine as p9\n'), ((7665, 7718), 'plotnine.aes', 'p9.aes', (['"""stock"""', '"""value"""'], {'group': '"""stock"""', 'fill': '"""stock"""'}), "('stock', 'value', group='stock', fill='stock')\n", (7671, 7718), True, 'import plotnine as p9\n'), ((8965, 9009), 'plotnine.geom_smooth', 'p9.geom_smooth', ([], {'size': '(1.0)', 'span': '(0.3)', 'se': '(False)'}), '(size=1.0, span=0.3, se=False)\n', (8979, 9009), True, 'import plotnine as p9\n'), ((9268, 9307), 'plotnine.position_jitter', 'p9.position_jitter', ([], {'width': '(10)', 'height': '(10)'}), '(width=10, height=10)\n', (9286, 9307), True, 'import plotnine as p9\n'), ((9795, 9829), 'plotnine.geom_smooth', 'p9.geom_smooth', ([], {'span': '(0.3)', 'se': '(False)'}), '(span=0.3, se=False)\n', (9809, 9829), True, 'import plotnine as p9\n'), ((9866, 9904), 'plotnine.aes', 'p9.aes', ([], {'label': '"""asx_code"""', 'x': '"""x"""', 'y': '"""y"""'}), "(label='asx_code', x='x', y='y')\n", (9872, 9904), True, 'import plotnine as p9\n'), ((12094, 12153), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""cumulative_change_percent"""'], {'color': '"""sector"""'}), "('date', 'cumulative_change_percent', color='sector')\n", (12100, 12153), True, 'import plotnine as p9\n'), ((13858, 13888), 'plotnine.ggplot', 'p9.ggplot', (['pos_market_cap_only'], {}), '(pos_market_cap_only)\n', (13867, 13888), True, 'import plotnine as p9\n'), ((15514, 15576), 'plotnine.aes', 'p9.aes', ([], {'x': '"""factor(sector_name_cat)"""', 'fill': '"""factor(increasing)"""'}), "(x='factor(sector_name_cat)', 
fill='factor(increasing)')\n", (15520, 15576), True, 'import plotnine as p9\n'), ((17381, 17449), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""value"""'], {'colour': '"""variable"""', 'group': '"""factor(variable)"""'}), "('date', 'value', colour='variable', group='factor(variable)')\n", (17387, 17449), True, 'import plotnine as p9\n'), ((24026, 24067), 'datetime.datetime.strptime', 'datetime.strptime', (['start_date', '"""%Y-%m-%d"""'], {}), "(start_date, '%Y-%m-%d')\n", (24043, 24067), False, 'from datetime import datetime, timedelta\n'), ((25142, 25220), 'plotnine.aes', 'p9.aes', ([], {'x': '"""dataframe.index"""', 'y': 'dataframe.columns[0]', 'fill': 'dataframe.columns[0]'}), "(x='dataframe.index', y=dataframe.columns[0], fill=dataframe.columns[0])\n", (25148, 25220), True, 'import plotnine as p9\n'), ((25895, 25946), 'plotnine.aes', 'p9.aes', ([], {'x': '"""rule"""', 'y': '"""net_points"""', 'fill': '"""net_points"""'}), "(x='rule', y='net_points', fill='net_points')\n", (25901, 25946), True, 'import plotnine as p9\n'), ((27262, 27295), 'plotnine.aes', 'p9.aes', ([], {'x': '"""fetch_date"""', 'y': '"""value"""'}), "(x='fetch_date', y='value')\n", (27268, 27295), True, 'import plotnine as p9\n'), ((27776, 27829), 'plotnine.aes', 'p9.aes', (['"""date"""', 'field'], {'group': '"""sector"""', 'color': '"""sector"""'}), "('date', field, group='sector', color='sector')\n", (27782, 27829), True, 'import plotnine as p9\n'), ((29409, 29437), 'plotnine.geom_bar', 'p9.geom_bar', ([], {'stat': '"""identity"""'}), "(stat='identity')\n", (29420, 29437), True, 'import plotnine as p9\n'), ((31570, 31615), 'plotnine.aes', 'p9.aes', ([], {'x': '"""date"""', 'y': '"""average"""', 'fill': '"""average"""'}), "(x='date', y='average', fill='average')\n", (31576, 31615), True, 'import plotnine as p9\n'), ((1856, 1877), 'app.models.all_available_dates', 'all_available_dates', ([], {}), '()\n', (1875, 1877), False, 'from app.models import Timeframe, timing, user_purchases, 
all_available_dates\n'), ((4404, 4439), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (4421, 4439), False, 'from datetime import datetime, timedelta\n'), ((8892, 8953), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""stock_profit"""'], {'group': '"""stock"""', 'colour': '"""stock"""'}), "('date', 'stock_profit', group='stock', colour='stock')\n", (8898, 8953), True, 'import plotnine as p9\n'), ((9725, 9783), 'plotnine.aes', 'p9.aes', (['"""date"""', '"""rank"""'], {'group': '"""asx_code"""', 'color': '"""asx_code"""'}), "('date', 'rank', group='asx_code', color='asx_code')\n", (9731, 9783), True, 'import plotnine as p9\n'), ((13915, 13949), 'plotnine.aes', 'p9.aes', ([], {'x': '"""market"""', 'y': '"""market_cap"""'}), "(x='market', y='market_cap')\n", (13921, 13949), True, 'import plotnine as p9\n'), ((29184, 29272), 'plotnine.aes', 'p9.aes', ([], {'y': '"""eps"""', 'x': '"""reorder(asx_code,eps)"""', 'group': '"""sector_name"""', 'fill': '"""sector_name"""'}), "(y='eps', x='reorder(asx_code,eps)', group='sector_name', fill=\n 'sector_name')\n", (29190, 29272), True, 'import plotnine as p9\n'), ((32025, 32058), 'plotnine.element_text', 'p9.element_text', ([], {'angle': '(30)', 'size': '(7)'}), '(angle=30, size=7)\n', (32040, 32058), True, 'import plotnine as p9\n')] |
import torch
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
from joblib import load
def split_train_eval_test(ids, train_scenes, test_scenes, eval_prop=0.8):
    """
    Partition sample ids (formatted "<scene>_<...>") into train/eval/test.

    Ids whose scene is in test_scenes go to the test split.  Ids whose
    scene is in train_scenes are split per-scene: the first eval_prop
    fraction goes to train, the remainder to eval.  Ids belonging to any
    other scene are dropped.
    Returns (train_ids, eval_ids, test_ids).
    """
    test_ids = []
    per_scene = {}
    for sample_id in ids:
        scene = sample_id.split("_")[0]
        if scene in test_scenes:
            test_ids.append(sample_id)
        elif scene in train_scenes:
            per_scene.setdefault(scene, []).append(sample_id)
    train_ids, eval_ids = [], []
    for scene_ids in per_scene.values():
        cutoff = int(eval_prop * len(scene_ids))
        train_ids.extend(scene_ids[:cutoff])
        eval_ids.extend(scene_ids[cutoff:])
    return train_ids, eval_ids, test_ids
"""
Train loop for an epoch
Uses cuda if available
LOss is averaged for a batch
THen averaged batch losses are averaged
over the number of batches
"""
def train(model, device, train_loader,criterion, optimizer, epoch,batch_size,print_every = 100):
    """
    Run one pass over train_loader for the given epoch.

    NOTE(review): the forward/backward/optimizer-step code below is
    commented out, so as written this loop only unpacks each batch, zeroes
    the gradients and prints timing every `print_every` batches.
    `epoch_loss` therefore stays 0.0 and `batches_loss` stays empty;
    confirm whether the training step should be re-enabled.

    Returns (epoch_loss, batches_loss).
    """
    model.train()
    epoch_loss = 0.
    batches_loss = []
    start_time = time.time()
    for batch_idx, data in enumerate(train_loader):
        # each batch yields (inputs, labels, ids)
        inputs, labels, ids = data
        # inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        # outputs = model(inputs)
        # ####################
        # mask = mask_loss(labels.detach().cpu().numpy())
        # outputs = outputs.contiguous().view([outputs.size()[0] * outputs.size()[1]] + list(outputs.size()[2:]))
        # labels = labels.contiguous().view([labels.size()[0] * labels.size()[1]] + list(labels.size()[2:]))
        # outputs = outputs[mask]
        # labels = labels[mask]
        # ###########################
        # loss = criterion(outputs, labels)
        # loss.backward()
        # optimizer.step()
        # epoch_loss += loss.item()
        # batches_loss.append(loss.item())
        if batch_idx % print_every == 0:
            # print(batch_idx,loss.item(),time.time()-start_time)
            print(batch_idx,time.time()-start_time)
    # average per-batch loss over the number of batches
    epoch_loss /= float(len(train_loader))
    print('Epoch n {} Loss: {}'.format(epoch,epoch_loss))
    return epoch_loss,batches_loss
"""
Evaluation loop for an epoch
Uses cuda if available
LOss is averaged for a batch
THen averaged batch losses are averaged
over the number of batches
FDE loss is added using MSEerror on the last point of prediction and target
sequences
model: 0 rnn_mlp
1 iatcnn
"""
def evaluate(model, device, eval_loader, criterion, epoch, batch_size, scalers_path, multiple_scalers, model_type):
    """
    Evaluate the model for one epoch over eval_loader.

    Batch losses are averaged over the number of batches.  ADE and FDE are
    computed after reverting the scaling on labels/predictions so they are
    expressed in the original coordinate space.
    model_type: 0 for rnn_mlp, 1 for iatcnn (whose outputs carry extra
    channels — only the first two are positions).
    Returns (eval_loss, fde, ade).
    Raises ValueError for an unknown model_type (previously this crashed
    later with inv_outputs left as None).
    """
    model.eval()
    eval_loss = 0.
    fde = 0.
    ade = 0.
    # no gradients are needed for evaluation; saves memory and time
    with torch.no_grad():
        for data in eval_loader:
            inputs, labels, ids = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            # flatten batch and time dimensions, then keep only the valid
            # (unmasked) timesteps before computing the loss
            mask = mask_loss(labels.detach().cpu().numpy())
            outputs = outputs.contiguous().view(
                [outputs.size()[0] * outputs.size()[1]] + list(outputs.size()[2:])
            )
            labels = labels.contiguous().view(
                [labels.size()[0] * labels.size()[1]] + list(labels.size()[2:])
            )
            outputs = outputs[mask]
            labels = labels[mask]
            loss = criterion(outputs, labels)
            if model_type == 0:
                inv_labels, inv_outputs = revert_scaling(
                    ids, labels, outputs, scalers_path, multiple_scalers
                )
                inv_outputs = inv_outputs.view(inv_labels.size())
            elif model_type == 1:
                # iatcnn: positions are the first two output channels
                inv_labels, inv_outputs = revert_scaling(
                    ids, labels, outputs[:, :, :2], scalers_path, multiple_scalers
                )
            else:
                raise ValueError("unknown model_type: {}".format(model_type))
            ade += ade_loss(inv_outputs, inv_labels).item()
            fde += fde_loss(inv_outputs, inv_labels).item()
            eval_loss += loss.item()
    # average each metric over the number of batches
    eval_loss /= float(len(eval_loader))
    ade /= float(len(eval_loader))
    fde /= float(len(eval_loader))
    print('Epoch n {} Evaluation Loss: {}, ADE: {}, FDE: {}'.format(epoch,eval_loss,ade,fde))
    return eval_loss, fde, ade
"""
Saves model and optimizer states as dict
THe current epoch is stored
THe different losses at previous time_steps are loaded
"""
def save_model(epoch,net,optimizer,train_losses,eval_losses,batch_losses,fde_losses,save_root = "./learning/data/models/" ):
    """
    Persist a training checkpoint to <save_root>model_<timestamp>.tar.

    The checkpoint dict stores the current epoch, the model and optimizer
    state dicts, and the train/eval/batch/FDE loss histories so that
    training can later be resumed.
    """
    save_path = "{}model_{}.tar".format(save_root, time.time())
    checkpoint = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'train_losses': train_losses,
        'eval_losses': eval_losses,
        'batch_losses': batch_losses,
        'fde_losses': fde_losses,
    }
    torch.save(checkpoint, save_path)
    print("model saved in {}".format(save_path))
"""
Training loop
For NUMBER OF EPOCHS calls train and evaluate
if a model path is given, loads model
and resume training
if plot, display the different losses
If exception during training, model is stored
"""
def training_loop(n_epochs, batch_size, net, device, train_loader, eval_loader, criterion_train, criterion_eval, optimizer, scalers_path, multiple_scalers, model_type, plot=True, early_stopping=True, load_path=None):
    """
    Train and evaluate ``net`` for ``n_epochs`` epochs.

    If ``load_path`` is a non-empty string, a checkpoint is loaded first and
    training resumes from its stored epoch. After the loop, the model is saved
    with ``save_model`` and, when ``plot`` is True, the loss curves are shown.

    NOTE(review): ``early_stopping`` is accepted but currently unused, and the
    ADE history is collected but not persisted by ``save_model`` -- both kept
    as in the original implementation.

    Returns
    -------
    (list, list, list)
        train_losses, eval_losses, batch_losses.
    """
    train_losses = []
    eval_losses = []
    batch_losses = []
    fde_losses = []
    ade_losses = []
    start_epoch = 0
    # BUG FIX: guard with truthiness so the default load_path=None does not
    # fall through to torch.load(None); only a non-empty path triggers a resume.
    if load_path:
        print("loading former model from {}".format(load_path))
        checkpoint = torch.load(load_path)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        train_losses = checkpoint["train_losses"]
        eval_losses = checkpoint["eval_losses"]
        batch_losses = checkpoint["batch_losses"]
        fde_losses = checkpoint["fde_losses"]
        start_epoch = checkpoint["epoch"]
    s = time.time()
    # BUG FIX: if start_epoch >= n_epochs the loop body never runs and `epoch`
    # was unbound, making the save_model call below raise NameError.
    epoch = start_epoch
    for epoch in range(start_epoch, n_epochs):
        train_loss, batches_loss = train(net, device, train_loader, criterion_train, optimizer, epoch, batch_size)
        batch_losses += batches_loss
        train_losses.append(train_loss)
        eval_loss, fde, ade = evaluate(net, device, eval_loader, criterion_eval, epoch, batch_size, scalers_path, multiple_scalers, model_type)
        eval_losses.append(eval_loss)
        fde_losses.append(fde)
        ade_losses.append(ade)
        print(time.time() - s)
    save_model(epoch, net, optimizer, train_losses, eval_losses, batch_losses, fde_losses)
    if plot:
        plt.plot(train_losses)
        plt.plot(eval_losses)
        plt.show()
        plt.plot(ade_losses)
        plt.plot(fde_losses)
        plt.show()
    return train_losses, eval_losses, batch_losses
def train_sophie(
        generator,
        discriminator,
        device,
        train_loader,
        criterion_gan,
        criterion_gen,
        optimizer_gen,
        optimizer_disc,
        epoch,
        batch_size,
        obs_length,
        pred_length,
        output_size,
        print_every = 100):
    """
    Train the SoPhie GAN (generator + discriminator) for one epoch.

    For every batch, the discriminator is updated on the real trajectories and
    on detached generated trajectories, then the generator is updated with the
    adversarial loss plus an MSE loss against the ground truth.

    Returns
    -------
    (dict, dict)
        Per-epoch averaged losses and per-batch loss lists, both keyed by
        "mse", "real", "fake", "gen".
    """
    losses = {
        "mse": 0.,
        "real": 0.,
        "fake": 0.,
        "gen": 0.
    }
    batch_losses = {
        "mse": [],
        "real": [],
        "fake": [],
        "gen": []
    }
    batch_idx = 0
    start_time = time.time()
    for batch_idx, data in enumerate(train_loader):
        inputs, images, labels, ids = data
        inputs, images, labels = inputs.to(device), images.to(device), labels.to(device)
        # ---- train discriminator ----
        optimizer_disc.zero_grad()
        # Groundtruth batch: observed + real future trajectory of the first agent.
        traj_obs = inputs[:, 0].view(batch_size, obs_length, output_size)
        traj_pred_real = labels[:, 0].view(batch_size, pred_length, output_size)
        real_traj = torch.cat([traj_obs, traj_pred_real], dim=1)
        real_labels = torch.ones(batch_size).to(device)
        disc_class = discriminator(real_traj).view(batch_size)
        real_loss = criterion_gan(disc_class, real_labels)
        real_loss.backward()
        # Generated batch (detached, so only the discriminator is updated here).
        z = generator.gaussian.sample((batch_size, 1,)).to(device)
        traj_pred_fake = generator(inputs, images, z)
        fake_traj = torch.cat([traj_obs, traj_pred_fake], dim=1)
        fake_labels = torch.zeros(batch_size).to(device)
        disc_class = discriminator(fake_traj.detach()).view(batch_size)
        fake_loss = criterion_gan(disc_class, fake_labels)
        fake_loss.backward()
        optimizer_disc.step()
        # ---- train generator ----
        gen_labels = real_labels  # we aim for the discriminator to predict 1
        optimizer_gen.zero_grad()
        disc_class = discriminator(fake_traj).view(batch_size)
        gen_loss_gan = criterion_gan(disc_class, gen_labels)
        mse_loss = criterion_gen(traj_pred_fake, traj_pred_real)
        loss = gen_loss_gan + mse_loss
        loss.backward()
        optimizer_gen.step()
        losses["mse"] += mse_loss.item()
        losses["real"] += real_loss.item()
        losses["fake"] += fake_loss.item()
        losses["gen"] += gen_loss_gan.item()
        batch_losses["mse"].append(mse_loss.item())
        batch_losses["real"].append(real_loss.item())
        batch_losses["fake"].append(fake_loss.item())
        batch_losses["gen"].append(gen_loss_gan.item())
        if batch_idx % print_every == 0:
            print(batch_idx, time.time() - start_time)
            print("average mse loss summed over trajectory: {}, gan loss real: {},gan loss fake: {}, gen loss: {}".format(batch_losses["mse"][-1], batch_losses["real"][-1], batch_losses["fake"][-1], batch_losses["gen"][-1]))
    # BUG FIX: batch_idx is the *index* of the last batch, so the number of
    # batches is batch_idx + 1. The original divided by batch_idx, which is
    # off by one and raises ZeroDivisionError when the loader has one batch.
    num_batches = batch_idx + 1
    for key in losses:
        losses[key] /= num_batches
    print('Epoch n {} Loss: {}, gan loss real: {},gan loss fake: {}, gen loss: {}'.format(epoch, losses["mse"], losses["real"], losses["fake"], losses["gen"]))
    return losses, batch_losses
def eval_sophie(
        generator,
        discriminator,
        device,
        eval_loader,
        criterion_gan,
        criterion_gen,
        epoch,
        batch_size,
        obs_length,
        pred_length,
        output_size,
        scalers_path,
        multiple_scalers,
        print_every = 100):
    """
    Evaluate the SoPhie GAN for one epoch, without gradient tracking.

    Accumulates the generator MSE, the discriminator losses on real and
    generated trajectories, and the ADE/FDE metrics (computed on coordinates
    reverted to their original scale via ``revert_scaling``), then averages
    them over the number of batches and prints a summary.

    NOTE(review): ``print_every`` is accepted but unused here, and the "gen"
    entry is never accumulated (stays 0) -- both as in the original code.

    Returns
    -------
    dict
        Averaged losses with keys "mse", "real", "fake", "gen", "ade", "fde".
    """
    with torch.no_grad():
        losses = {
            "mse": 0.,
            "real": 0.,
            "fake": 0.,
            "gen": 0.,
            "ade": 0.,
            "fde": 0.
        }
        batch_idx = 0
        for batch_idx, data in enumerate(eval_loader):
            inputs, images, labels, ids = data
            inputs, images, labels = inputs.to(device), images.to(device), labels.to(device)
            #### groundtruth batch
            traj_obs = inputs[:, 0].view(batch_size, obs_length, output_size)
            traj_pred_real = labels[:, 0].view(batch_size, pred_length, output_size)
            real_traj = torch.cat([traj_obs, traj_pred_real], dim=1)
            real_labels = torch.ones(batch_size).to(device)
            disc_class = discriminator(real_traj).view(batch_size)
            real_loss = criterion_gan(disc_class, real_labels)
            #### generated batch
            z = generator.gaussian.sample((batch_size, 1,))
            z = z.to(device)
            traj_pred_fake = generator(inputs, images, z)
            fake_traj = torch.cat([traj_obs, traj_pred_fake], dim=1)
            fake_labels = torch.zeros(batch_size).to(device)
            disc_class = discriminator(fake_traj.detach()).view(batch_size)
            fake_loss = criterion_gan(disc_class, fake_labels)
            mse_loss = criterion_gen(traj_pred_fake, traj_pred_real)
            # ADE/FDE are computed after undoing the feature scaling.
            inv_labels, inv_outputs = revert_scaling(ids, traj_pred_real, traj_pred_fake, scalers_path, multiple_scalers)
            inv_outputs = inv_outputs.view(inv_labels.size())
            losses["ade"] += ade_loss(inv_outputs, inv_labels).item()
            losses["fde"] += fde_loss(inv_outputs, inv_labels).item()
            losses["mse"] += mse_loss.item()
            losses["real"] += real_loss.item()
            losses["fake"] += fake_loss.item()
        # BUG FIX: average over the number of batches (batch_idx + 1); the
        # original divided by batch_idx, which is off by one and raises
        # ZeroDivisionError when the loader holds a single batch.
        num_batches = batch_idx + 1
        for key in losses:
            losses[key] /= num_batches
        print('Eval Epoch n {} Loss: {}, gan loss real: {},gan loss fake: {}'.format(epoch, losses["mse"], losses["real"], losses["fake"]))
        print('Eval Epoch n {} ade: {},fde: {}'.format(epoch, losses["ade"], losses["fde"]))
    return losses
def save_sophie(epoch, generator, discriminator, optimizer_gen, optimizer_disc, losses, save_root="./learning/data/models/"):
    """
    Save the generator/discriminator/optimizer states and loss history as a
    single checkpoint dict named ``sophie_<timestamp>.tar`` under ``save_root``
    (created if missing).
    """
    import os  # local import keeps this function self-contained
    # Robustness fix (consistent with save_model): torch.save raises
    # FileNotFoundError if the target directory does not exist.
    os.makedirs(save_root, exist_ok=True)
    save_path = save_root + "sophie_{}.tar".format(time.time())
    state = {
        'epoch': epoch,
        'state_dict_d': discriminator.state_dict(),
        'state_dict_g': generator.state_dict(),
        'optimizer_g': optimizer_gen.state_dict(),
        'optimizer_d': optimizer_disc.state_dict(),
        'losses': losses
    }
    torch.save(state, save_path)
    print("model saved in {}".format(save_path))
def sophie_training_loop(n_epochs, batch_size, generator, discriminator, optimizer_gen, optimizer_disc, device,
                         train_loader, eval_loader, obs_length, criterion_gan, criterion_gen,
                         pred_length, output_size, scalers_path, multiple_scalers, plot=True, load_path=None):
    """
    Full training loop for the SoPhie GAN.

    For each epoch, trains on ``train_loader`` and evaluates on
    ``eval_loader``. If ``load_path`` is a non-empty string, that checkpoint is
    loaded first and training resumes from its stored epoch. The model is
    always saved at the end (even after a training failure), and the
    train/eval MSE curves are plotted when ``plot`` is True.

    Returns
    -------
    dict
        Nested "train"/"eval" loss histories.
    """
    losses = {
        "train": {
            "mse": [],
            "real": [],
            "fake": [],
            "gen": []
        },
        "eval": {
            "mse": [],
            "real": [],
            "fake": [],
            "gen": [],
            "ade": [],
            "fde": []
        }
    }
    start_epoch = 0
    # BUG FIX: guard with truthiness so the default load_path=None does not
    # fall through to torch.load(None); only a non-empty path triggers a resume.
    if load_path:
        print("loading former model from {}".format(load_path))
        checkpoint = torch.load(load_path)
        generator.load_state_dict(checkpoint['state_dict_g'])
        discriminator.load_state_dict(checkpoint['state_dict_d'])
        optimizer_gen.load_state_dict(checkpoint['optimizer_g'])
        optimizer_disc.load_state_dict(checkpoint['optimizer_d'])
        losses = checkpoint["losses"]
        start_epoch = checkpoint["epoch"]
    s = time.time()
    # BUG FIX: if start_epoch >= n_epochs (or training fails immediately) the
    # loop variable `epoch` was unbound and save_sophie below raised NameError.
    epoch = start_epoch
    try:
        for epoch in range(start_epoch, n_epochs):
            train_losses, _ = train_sophie(generator, discriminator, device, train_loader, criterion_gan, criterion_gen,
                                           optimizer_gen, optimizer_disc, epoch, batch_size, obs_length, pred_length, output_size)
            for key in train_losses:
                losses["train"][key].append(train_losses[key])
            test_losses = eval_sophie(generator, discriminator, device, eval_loader, criterion_gan, criterion_gen, epoch, batch_size, obs_length, pred_length, output_size, scalers_path, multiple_scalers)
            for key in test_losses:
                losses["eval"][key].append(test_losses[key])
            print(time.time() - s)
    except Exception:
        # BUG FIX: the original bare `except: pass` also swallowed
        # KeyboardInterrupt/SystemExit and hid the failure entirely. Keep the
        # best-effort behaviour (the model is still saved below) but log it.
        import traceback
        traceback.print_exc()
    save_sophie(epoch, generator, discriminator, optimizer_gen, optimizer_disc, losses)
    if plot:
        plt.plot(losses["train"]["mse"])
        plt.plot(losses["eval"]["mse"])
        plt.show()
    return losses
def revert_scaling(ids,labels,outputs,scalers_root,multiple_scalers = 1):
    """
    Undo the feature scaling of trajectories so metrics (ADE/FDE) can be
    computed in the original coordinate space.

    Parameters
    ----------
    ids : list of str
        Sample identifiers. When ``multiple_scalers`` is truthy, each sample's
        scaler file is derived from its id: everything before the last "_"
        plus ".joblib", prefixed by ``scalers_root``.
    labels, outputs : torch.Tensor
        Scaled ground-truth and predicted trajectories.
    scalers_root : str
        Directory/prefix of per-sample scaler files (multiple scalers) or the
        path of a single joblib scaler file.
    multiple_scalers : int
        Truthy when each sample has its own scaler file.

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        Unscaled labels and outputs.

    NOTE(review): the multiple-scaler branch mutates ``labels``/``outputs``
    in place and returns them, while the single-scaler branch returns new
    tensors; both branches call ``.cuda()`` and therefore assume a GPU is
    available -- confirm before running on CPU-only hosts.
    """
    if multiple_scalers:
        # Group sample indices by the scaler file they share, so each scaler
        # is loaded only once.
        scaler_ids = ["_".join(id_.split("_")[:-1]) for id_ in ids]
        scalers_path = [scalers_root + id_ +".joblib" for id_ in scaler_ids]
        scaler_sample = {}
        for scaler in scalers_path:
            if scaler not in scaler_sample:
                scaler_sample[scaler] = []
                for i,scaler1 in enumerate(scalers_path):
                    if scaler == scaler1:
                        scaler_sample[scaler].append(i)
        for scaler_id in scaler_sample:
            scaler = load(scaler_id)
            samples_ids = scaler_sample[scaler_id]
            sub_labels_torch = labels[samples_ids]
            # b,a,s,i = sub_labels.size()
            # Flatten to one column for the sklearn-style scaler, then restore
            # the original tensor shape.
            sub_labels = sub_labels_torch.contiguous().view(-1,1).cpu().numpy()
            inv_sub_labels = torch.FloatTensor(scaler.inverse_transform(sub_labels)).view(sub_labels_torch.size()).cuda()
            labels[samples_ids] = inv_sub_labels
            sub_outputs_torch = outputs[samples_ids]
            # b,a,s,i = sub_outputs.size()
            sub_outputs = sub_outputs_torch.contiguous().view(-1,1).cpu().detach().numpy()
            inv_sub_outputs = torch.FloatTensor(scaler.inverse_transform(sub_outputs)).view(sub_outputs_torch.size()).cuda()
            outputs[samples_ids] = inv_sub_outputs
        return labels,outputs
    else:
        scaler = load(scalers_root)
        torch_labels = labels.contiguous().view(-1,1).cpu().numpy()
        torch_outputs = outputs.contiguous().view(-1,1).cpu().detach().numpy()
        # Only unscale the non-zero entries -- zeros appear to be padding and
        # are left untouched (NOTE(review): confirm zeros are always padding).
        non_zeros_labels = np.argwhere(torch_labels.reshape(-1))
        non_zeros_outputs = np.argwhere(torch_outputs.reshape(-1))
        torch_labels[non_zeros_labels] = np.expand_dims( scaler.inverse_transform(torch_labels[non_zeros_labels].squeeze(-1)) ,axis = 1)
        torch_outputs[non_zeros_outputs] = np.expand_dims( scaler.inverse_transform(torch_outputs[non_zeros_outputs].squeeze(-1)),axis = 1)
        inv_labels = torch.FloatTensor(torch_labels).cuda()
        inv_outputs = torch.FloatTensor(torch_outputs).cuda()
        inv_labels = inv_labels.view(labels.size())
        inv_outputs = inv_outputs.view(outputs.size())
        return inv_labels,inv_outputs
def mask_loss(targets):
    """
    Return the flat indices of (batch, agent) pairs whose target trajectory
    contains at least one non-zero value, for masking out padded agents.
    """
    nb_batches, nb_agents = targets.shape[0], targets.shape[1]
    # Collapse each agent's trajectory to a single sum; zero means padding.
    per_agent_sum = targets.reshape(nb_batches, nb_agents, -1).sum(axis=2)
    return np.argwhere(per_agent_sum.reshape(-1)).reshape(-1)
def ade_loss(outputs, targets):
    """
    Average displacement error: the mean Euclidean distance between predicted
    and ground-truth 2D points, over every point of every trajectory.
    """
    pred = outputs.contiguous().view(-1, 2)
    gt = targets.contiguous().view(-1, 2)
    squared_err = nn.MSELoss(reduction="none")(pred, gt)
    distances = torch.sqrt(squared_err.sum(dim=1))
    return distances.mean()
def fde_loss(outputs, targets):
    """
    Final displacement error: the mean Euclidean distance between predicted
    and ground-truth positions at the last timestep of each trajectory.
    """
    last_pred = outputs[:, -1, :]
    last_gt = targets[:, -1, :]
    squared_err = nn.MSELoss(reduction="none")(last_pred, last_gt)
    distances = torch.sqrt(squared_err.sum(dim=1))
    return distances.mean()
"torch.ones",
"torch.mean",
"torch.load",
"matplotlib.pyplot.plot",
"torch.sqrt",
"torch.FloatTensor",
"numpy.sum",
"torch.nn.MSELoss",
"numpy.argwhere",
"torch.sum",
"torch.save",
"joblib.load",
"torch.no_grad",
"time.time",
"torch.zeros",
"torch.cat",
"matplotlib.pyplot.show"
] | [((1128, 1139), 'time.time', 'time.time', ([], {}), '()\n', (1137, 1139), False, 'import time\n'), ((2814, 2825), 'time.time', 'time.time', ([], {}), '()\n', (2823, 2825), False, 'import time\n'), ((4911, 4939), 'torch.save', 'torch.save', (['state', 'save_path'], {}), '(state, save_path)\n', (4921, 4939), False, 'import torch\n'), ((6057, 6068), 'time.time', 'time.time', ([], {}), '()\n', (6066, 6068), False, 'import time\n'), ((7682, 7693), 'time.time', 'time.time', ([], {}), '()\n', (7691, 7693), False, 'import time\n'), ((13617, 13645), 'torch.save', 'torch.save', (['state', 'save_path'], {}), '(state, save_path)\n', (13627, 13645), False, 'import torch\n'), ((14810, 14821), 'time.time', 'time.time', ([], {}), '()\n', (14819, 14821), False, 'import time\n'), ((18236, 18256), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (18242, 18256), True, 'import numpy as np\n'), ((18488, 18516), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (18498, 18516), True, 'import torch.nn as nn\n'), ((18571, 18597), 'torch.sum', 'torch.sum', (['mse_loss'], {'dim': '(1)'}), '(mse_loss, dim=1)\n', (18580, 18597), False, 'import torch\n'), ((18615, 18635), 'torch.sqrt', 'torch.sqrt', (['mse_loss'], {}), '(mse_loss)\n', (18625, 18635), False, 'import torch\n'), ((18652, 18672), 'torch.mean', 'torch.mean', (['mse_loss'], {}), '(mse_loss)\n', (18662, 18672), False, 'import torch\n'), ((18804, 18832), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (18814, 18832), True, 'import torch.nn as nn\n'), ((18887, 18913), 'torch.sum', 'torch.sum', (['mse_loss'], {'dim': '(1)'}), '(mse_loss, dim=1)\n', (18896, 18913), False, 'import torch\n'), ((18931, 18951), 'torch.sqrt', 'torch.sqrt', (['mse_loss'], {}), '(mse_loss)\n', (18941, 18951), False, 'import torch\n'), ((18968, 18988), 'torch.mean', 'torch.mean', (['mse_loss'], {}), '(mse_loss)\n', (18978, 18988), False, 'import 
torch\n'), ((5676, 5697), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (5686, 5697), False, 'import torch\n'), ((6896, 6918), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses'], {}), '(train_losses)\n', (6904, 6918), True, 'import matplotlib.pyplot as plt\n'), ((6927, 6948), 'matplotlib.pyplot.plot', 'plt.plot', (['eval_losses'], {}), '(eval_losses)\n', (6935, 6948), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6967), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6965, 6967), True, 'import matplotlib.pyplot as plt\n'), ((6977, 6997), 'matplotlib.pyplot.plot', 'plt.plot', (['ade_losses'], {}), '(ade_losses)\n', (6985, 6997), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7026), 'matplotlib.pyplot.plot', 'plt.plot', (['fde_losses'], {}), '(fde_losses)\n', (7014, 7026), True, 'import matplotlib.pyplot as plt\n'), ((7035, 7045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7043, 7045), True, 'import matplotlib.pyplot as plt\n'), ((8141, 8185), 'torch.cat', 'torch.cat', (['[traj_obs, traj_pred_real]'], {'dim': '(1)'}), '([traj_obs, traj_pred_real], dim=1)\n', (8150, 8185), False, 'import torch\n'), ((8563, 8607), 'torch.cat', 'torch.cat', (['[traj_obs, traj_pred_fake]'], {'dim': '(1)'}), '([traj_obs, traj_pred_fake], dim=1)\n', (8572, 8607), False, 'import torch\n'), ((10679, 10694), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10692, 10694), False, 'import torch\n'), ((14438, 14459), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (14448, 14459), False, 'import torch\n'), ((15675, 15707), 'matplotlib.pyplot.plot', 'plt.plot', (["losses['train']['mse']"], {}), "(losses['train']['mse'])\n", (15683, 15707), True, 'import matplotlib.pyplot as plt\n'), ((15716, 15747), 'matplotlib.pyplot.plot', 'plt.plot', (["losses['eval']['mse']"], {}), "(losses['eval']['mse'])\n", (15724, 15747), True, 'import matplotlib.pyplot as plt\n'), ((15756, 15766), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (15764, 15766), True, 'import matplotlib.pyplot as plt\n'), ((17253, 17271), 'joblib.load', 'load', (['scalers_root'], {}), '(scalers_root)\n', (17257, 17271), False, 'from joblib import load\n'), ((4599, 4610), 'time.time', 'time.time', ([], {}), '()\n', (4608, 4610), False, 'import time\n'), ((11394, 11438), 'torch.cat', 'torch.cat', (['[traj_obs, traj_pred_real]'], {'dim': '(1)'}), '([traj_obs, traj_pred_real], dim=1)\n', (11403, 11438), False, 'import torch\n'), ((11889, 11933), 'torch.cat', 'torch.cat', (['[traj_obs, traj_pred_fake]'], {'dim': '(1)'}), '([traj_obs, traj_pred_fake], dim=1)\n', (11898, 11933), False, 'import torch\n'), ((13319, 13330), 'time.time', 'time.time', ([], {}), '()\n', (13328, 13330), False, 'import time\n'), ((16417, 16432), 'joblib.load', 'load', (['scaler_id'], {}), '(scaler_id)\n', (16421, 16432), False, 'from joblib import load\n'), ((18297, 18314), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (18308, 18314), True, 'import numpy as np\n'), ((6593, 6604), 'time.time', 'time.time', ([], {}), '()\n', (6602, 6604), False, 'import time\n'), ((8209, 8231), 'torch.ones', 'torch.ones', (['batch_size'], {}), '(batch_size)\n', (8219, 8231), False, 'import torch\n'), ((8631, 8654), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (8642, 8654), False, 'import torch\n'), ((17871, 17902), 'torch.FloatTensor', 'torch.FloatTensor', (['torch_labels'], {}), '(torch_labels)\n', (17888, 17902), False, 'import torch\n'), ((17932, 17964), 'torch.FloatTensor', 'torch.FloatTensor', (['torch_outputs'], {}), '(torch_outputs)\n', (17949, 17964), False, 'import torch\n'), ((2072, 2083), 'time.time', 'time.time', ([], {}), '()\n', (2081, 2083), False, 'import time\n'), ((9778, 9789), 'time.time', 'time.time', ([], {}), '()\n', (9787, 9789), False, 'import time\n'), ((11466, 11488), 'torch.ones', 'torch.ones', (['batch_size'], {}), '(batch_size)\n', (11476, 11488), False, 'import torch\n'), ((11961, 11984), 
'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (11972, 11984), False, 'import torch\n'), ((15520, 15531), 'time.time', 'time.time', ([], {}), '()\n', (15529, 15531), False, 'import time\n')] |
from fastai.conv_learner import *
from fastai.dataset import *
from tensorboard_cb_old import *
#from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import scipy.optimize as opt
from sklearn.model_selection import StratifiedKFold
from itertools import chain
from collections import Counter
import pickle as pkl
import warnings
warnings.filterwarnings("ignore")
#=======================================================================================================================
# Something
#=======================================================================================================================
# Input locations, relative to the Kaggle competition layout (../input/...).
# LABELS is the official HPA train set; LABELS_ext the external HPAv18 data.
PATH = './'
TRAIN = '../input/train/'
TEST = '../input/test/'
LABELS = '../input/train.csv'
LABELS_ext = '../input/HPAv18RBGY_wodpl.csv'
SAMPLE = '../input/sample_submission.csv'
# Mapping from the 28 HPA protein-localization class ids to readable names.
name_label_dict = {
0: 'Nucleoplasm',
1: 'Nuclear membrane',
2: 'Nucleoli',
3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles',
5: 'Nuclear bodies',
6: 'Endoplasmic reticulum',
7: 'Golgi apparatus',
8: 'Peroxisomes',
9: 'Endosomes',
10: 'Lysosomes',
11: 'Intermediate filaments',
12: 'Actin filaments',
13: 'Focal adhesion sites',
14: 'Microtubules',
15: 'Microtubule ends',
16: 'Cytokinetic bridge',
17: 'Mitotic spindle',
18: 'Microtubule organizing center',
19: 'Centrosome',
20: 'Lipid droplets',
21: 'Plasma membrane',
22: 'Cell junctions',
23: 'Mitochondria',
24: 'Aggresome',
25: 'Cytosol',
26: 'Cytoplasmic bodies',
27: 'Rods & rings' }
#=======================================================================================================================
# Data
#=======================================================================================================================
# image_df = pd.read_csv(LABELS)
# image_df = image_df[(image_df.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
#                     (image_df.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
#                     (image_df.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')]
# Load the external label file and drop one known-bad id.
image_df = pd.read_csv(LABELS_ext)
image_df = image_df[(image_df.Id != '27751_219_G10_1') ]
# 'Target' is a space-separated list of class ids; parse each into a list of ints.
image_df['target_list'] = image_df['Target'].map(lambda x: [int(a) for a in x.split(' ')])
# Per-class occurrence counts over the whole (multi-label) dataset.
all_labels = list(chain.from_iterable(image_df['target_list'].values))
c_val = Counter(all_labels)
n_keys = c_val.keys()
max_idx = max(n_keys)
#==================================================================================
# visualize train distribution
# NOTE(review): plt is not imported explicitly in this file -- presumably it
# comes from fastai's star imports; confirm.
fig, ax1 = plt.subplots(1,1, figsize = (10, 5))
ax1.bar(n_keys, [c_val[k] for k in n_keys])
ax1.set_xticks(range(max_idx))
ax1.set_xticklabels([name_label_dict[k] for k in range(max_idx)], rotation=90)
plt.show()
#==================================================================================
# Print the per-class counts of the training labels.
for k,v in c_val.items():
    print(name_label_dict[k], 'count:', v)
# create a categorical vector
image_df['target_vec'] = image_df['target_list'].map(lambda ck: [i in ck for i in range(max_idx+1)])
# Hold out 15% for validation; stratify on a prefix of the raw Target string
# (with class 27 mapped to '0', since it is too rare to stratify on directly).
raw_train_df, valid_df = train_test_split(image_df,
                 test_size = 0.15,
                  # hack to make stratification work
                 stratify = image_df['Target'].map(lambda x: x[:3] if '27' not in x else '0'),
                 random_state= 42)
print(raw_train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
# #=================================================================================
# # # Balance data
# #================================================================================
# Oversample (with replacement) a fixed number of images for every class that
# occurs more than 40 times overall, flattening the class distribution.
TRAIN_IMAGES_PER_CATEGORY=50
out_df_list = []
for k,v in c_val.items():
    if v>40:
        keep_rows = raw_train_df['target_list'].map(lambda x: k in x)
        out_df_list += [raw_train_df[keep_rows].sample(TRAIN_IMAGES_PER_CATEGORY,
                                                       replace=True)]
train_df = pd.concat(out_df_list, ignore_index=True)
# Compare class distributions of the balanced train set vs the validation set.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5))
train_sum_vec = np.sum(np.stack(train_df['target_vec'].values, 0), 0)
valid_sum_vec = np.sum(np.stack(valid_df['target_vec'].values, 0), 0)
ax1.bar(n_keys, [train_sum_vec[k] for k in n_keys])
ax1.set_title('Training Distribution')
ax2.bar(n_keys, [valid_sum_vec[k] for k in n_keys])
ax2.set_title('Validation Distribution')
plt.show()
#=======================================================================================================================
# Persist the splits so downstream training scripts can reload them.
train_df.to_csv('../input/train_ext_balanced.csv', index=False)
raw_train_df.to_csv('../input/train_ext_unbalanced.csv', index=False)
valid_df.to_csv('../input/valid_ext_unbalanced.csv', index=False)
tr_n = raw_train_df['Id'].values.tolist()
val_n = valid_df['Id'].values.tolist()
tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
test_names = list({f[:36] for f in os.listdir(TEST)}) | [
"os.listdir",
"pandas.read_csv",
"collections.Counter",
"itertools.chain.from_iterable",
"numpy.stack",
"pandas.concat",
"warnings.filterwarnings"
] | [((479, 512), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (502, 512), False, 'import warnings\n'), ((2160, 2183), 'pandas.read_csv', 'pd.read_csv', (['LABELS_ext'], {}), '(LABELS_ext)\n', (2171, 2183), True, 'import pandas as pd\n'), ((2413, 2432), 'collections.Counter', 'Counter', (['all_labels'], {}), '(all_labels)\n', (2420, 2432), False, 'from collections import Counter\n'), ((3989, 4030), 'pandas.concat', 'pd.concat', (['out_df_list'], {'ignore_index': '(True)'}), '(out_df_list, ignore_index=True)\n', (3998, 4030), True, 'import pandas as pd\n'), ((2352, 2403), 'itertools.chain.from_iterable', 'chain.from_iterable', (["image_df['target_list'].values"], {}), "(image_df['target_list'].values)\n", (2371, 2403), False, 'from itertools import chain\n'), ((4111, 4153), 'numpy.stack', 'np.stack', (["train_df['target_vec'].values", '(0)'], {}), "(train_df['target_vec'].values, 0)\n", (4119, 4153), True, 'import numpy as np\n'), ((4181, 4223), 'numpy.stack', 'np.stack', (["valid_df['target_vec'].values", '(0)'], {}), "(valid_df['target_vec'].values, 0)\n", (4189, 4223), True, 'import numpy as np\n'), ((4932, 4948), 'os.listdir', 'os.listdir', (['TEST'], {}), '(TEST)\n', (4942, 4948), False, 'import os\n')] |
from collections import defaultdict
from typing import Dict, List
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...dataset import TextDataset
from ...data_indexer import DataIndexer
def __can_be_converted_to_multiple_true_false(dataset: TextDataset) -> bool:
    """
    This method checks that dataset matches the assumptions we make about question data: that
    it is a list of sentences corresponding to four-choice questions, with one correct answer
    for every four instances.

    So, specifically, we check that the number of instances is a multiple of four, and we check
    that each group of four instances has exactly one instance with label True, and all other
    labels are False (i.e., no None labels for validation data).
    """
    for instance in dataset.instances:
        if isinstance(instance, MultipleTrueFalseInstance):
            return False
    if len(dataset.instances) % 4 != 0:
        return False
    questions = zip(*[dataset.instances[i::4] for i in range(4)])
    for question in questions:
        question_labels = [instance.label for instance in question]
        label_counts = {x: question_labels.count(x) for x in set(question_labels)}
        # BUG FIX: use .get with a default of 0 so that a group containing no
        # True (or no False) labels is reported as unconvertible instead of
        # raising KeyError on the missing dict key.
        if label_counts.get(True, 0) != 1:
            return False
        if label_counts.get(False, 0) != 3:
            return False
    return True
def convert_dataset_to_multiple_true_false(dataset: TextDataset) -> TextDataset:
    """
    Regroup a ``Dataset`` of binary-labeled ``TextClassificationInstances``
    into ``MultipleTrueFalseInstance``s: every consecutive run of 4 instances
    is treated as one question whose single ``True`` member is the answer.
    """
    assert __can_be_converted_to_multiple_true_false(dataset)
    # instances[0::4], [1::4], [2::4], [3::4] zipped together yield one
    # 4-tuple of answer options per question.
    grouped = zip(*(dataset.instances[i::4] for i in range(4)))
    return TextDataset([MultipleTrueFalseInstance(group) for group in grouped])
class MultipleTrueFalseInstance(TextInstance):
    """
    A grouping of other Instances, exactly one of which must be labeled True.
    The underlying options are expected to be TextClassificationInstances with
    binary labels (possibly wrapped in BackgroundInstances).

    As training data, the whole group becomes a single instance whose label is
    the index of the correct option; if every option is unlabeled, the group's
    label is None.
    """
    def __init__(self, options: List[TextInstance]):
        self.options = options
        if all(option.label is None for option in options):
            # Unlabeled question (e.g. test data).
            label = None
        else:
            true_indices = [index for index, option in enumerate(options) if option.label is True]
            assert len(true_indices) == 1
            label = true_indices[0]
        super(MultipleTrueFalseInstance, self).__init__(label, None)
    def __str__(self):
        joined_options = ',\n '.join(str(option) for option in self.options)
        return 'MultipleTrueFalseInstance( \n(\n ' + joined_options + '\n ),\n ' + \
               str(self.label) + '\n)'
    @overrides
    def words(self):
        # Merge the per-namespace word lists of every option.
        merged_words = defaultdict(list)
        for option in self.options:
            for namespace, namespace_words in option.words().items():
                merged_words[namespace].extend(namespace_words)
        return merged_words
    @overrides
    def to_indexed_instance(self, data_indexer: DataIndexer):
        indexed_options = [option.to_indexed_instance(data_indexer) for option in self.options]
        return IndexedMultipleTrueFalseInstance(indexed_options, self.label)
class IndexedMultipleTrueFalseInstance(IndexedInstance):
    """
    A MultipleTrueFalseInstance that has been indexed.  MultipleTrueFalseInstance has a better
    description of what this represents.  ``label`` is the index of the correct
    option (or None when unlabeled).
    """
    def __init__(self, options: List[IndexedInstance], label):
        # `index` is unused for this instance type.
        super(IndexedMultipleTrueFalseInstance, self).__init__(label=label, index=None)
        self.options = options
    @classmethod
    @overrides
    def empty_instance(cls):
        """Return an instance with no options and no label (used for padding)."""
        return IndexedMultipleTrueFalseInstance([], None)
    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        """
        Here we return the max of get_padding_lengths on all of the Instances in self.options,
        plus the number of options under the 'num_options' key.
        """
        padding_lengths = {}
        padding_lengths['num_options'] = len(self.options)
        lengths = [instance.get_padding_lengths() for instance in self.options]
        if not lengths:
            # No options: only the option count is relevant.
            return padding_lengths
        for key in lengths[0]:
            padding_lengths[key] = max(x[key] for x in lengths)
        return padding_lengths
    @overrides
    def pad(self, padding_lengths: Dict[str, int]):
        """
        This method pads all of the underlying Instances in self.options.
        """
        num_options = padding_lengths['num_options']
        # First we pad the number of options.
        # NOTE(review): assumes self.options is non-empty whenever
        # num_options > 0, otherwise options[0] raises IndexError -- confirm
        # against callers.
        while len(self.options) < num_options:
            self.options.append(self.options[0].empty_instance())
        self.options = self.options[:num_options]
        # Then we pad each option.
        for instance in self.options: # type: IndexedInstance
            instance.pad(padding_lengths)
    @overrides
    def as_training_data(self):
        """
        Stack the training data of every option into one sample.  The label is
        a one-hot numpy vector over the options, or None when unlabeled.  When
        an option's input is itself a tuple, the option inputs are transposed
        so each tuple slot becomes one stacked array.
        """
        inputs = []
        unzip_inputs = False
        for option in self.options:
            option_input, _ = option.as_training_data()
            if isinstance(option_input, tuple):
                unzip_inputs = True
            inputs.append(option_input)
        if unzip_inputs:
            inputs = tuple(zip(*inputs))  # pylint: disable=redefined-variable-type
            inputs = tuple([numpy.asarray(x) for x in inputs])
        else:
            inputs = numpy.asarray(inputs)
        if self.label is None:
            label = None
        else:
            label = numpy.zeros(len(self.options))
            label[self.label] = 1
        return inputs, label
| [
"numpy.asarray",
"collections.defaultdict"
] | [((3405, 3422), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3416, 3422), False, 'from collections import defaultdict\n'), ((6025, 6046), 'numpy.asarray', 'numpy.asarray', (['inputs'], {}), '(inputs)\n', (6038, 6046), False, 'import numpy\n'), ((5955, 5971), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (5968, 5971), False, 'import numpy\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from cogdl.layers import SELayer
from .. import BaseModel
from cogdl.layers import MLP, GATLayer, GINLayer
from cogdl.utils import batch_sum_pooling, batch_mean_pooling, batch_max_pooling
from cogdl.layers import Set2Set
class ApplyNodeFunc(nn.Module):
    """Update the node feature hv with MLP, BN and ReLU."""

    def __init__(self, mlp, use_selayer):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        # Normalization layer: either a squeeze-and-excitation layer or a
        # plain batch norm, sized from the MLP's output dimension.
        if use_selayer:
            self.bn = SELayer(self.mlp.output_dim, int(np.sqrt(self.mlp.output_dim)))
        else:
            self.bn = nn.BatchNorm1d(self.mlp.output_dim)

    def forward(self, h):
        # MLP -> normalization -> ReLU.
        return F.relu(self.bn(self.mlp(h)))
class GATModel(nn.Module):
    """
    Stack of multi-head GAT layers.

    Layer 0 consumes ``in_feats`` input features; every deeper layer consumes
    the concatenation of the previous layer's ``nhead`` heads, i.e.
    ``hidden_size`` features, while each layer outputs ``hidden_size // nhead``
    features per head.  Leaky-ReLU is applied between layers but not after the
    last one.

    NOTE(review): ``dropout`` is accepted for interface compatibility but is
    unused, as in the original implementation.
    """
    def __init__(self, in_feats, hidden_size, num_layers, nhead, dropout=0.0, attn_drop=0.0, alpha=0.2, residual=False):
        super(GATModel, self).__init__()
        assert hidden_size % nhead == 0
        # BUG FIX: the original passed `in_feats if i > 0 else hidden_size // nhead`,
        # giving the *first* layer hidden_size // nhead inputs and the deeper
        # layers in_feats inputs -- backwards on both counts.
        # Also forward attn_drop/alpha/residual instead of hard-coding them;
        # their defaults equal the previously hard-coded values, so behaviour
        # is unchanged for existing callers.
        self.layers = nn.ModuleList(
            [
                GATLayer(
                    in_feats=in_feats if i == 0 else hidden_size,
                    out_feats=hidden_size // nhead,
                    nhead=nhead,
                    attn_drop=attn_drop,
                    alpha=alpha,
                    residual=residual,
                    activation=F.leaky_relu if i + 1 < num_layers else None,
                )
                for i in range(num_layers)
            ]
        )

    def forward(self, graph, x):
        """Apply every GAT layer in sequence to the node features ``x``."""
        for layer in self.layers:
            x = layer(graph, x)
        return x
class GINModel(nn.Module):
    """
    Graph Isomorphism Network (GIN) for graph-level prediction.

    ``num_layers - 1`` GIN convolutions (each an MLP aggregator followed by
    batch norm and ReLU) produce one node representation per depth.  Every
    depth's representation is pooled over the graph and fed to its own linear
    head; the final score is the dropout-regularised sum of all heads.

    NOTE(review): ``self.dropout`` is created but never applied in ``forward``
    (only ``final_drop`` is used) -- kept here to preserve the module/state
    layout.
    """
    def __init__(
        self,
        num_layers,
        in_feats,
        hidden_dim,
        out_feats,
        num_mlp_layers,
        eps=0,
        pooling="sum",
        train_eps=False,
        dropout=0.5,
        final_dropout=0.2,
    ):
        super(GINModel, self).__init__()
        self.gin_layers = nn.ModuleList()
        self.batch_norm = nn.ModuleList()
        self.num_layers = num_layers
        # num_layers - 1 convolutions: the first maps in_feats -> hidden_dim,
        # the rest hidden_dim -> hidden_dim.
        for i in range(num_layers - 1):
            if i == 0:
                mlp = MLP(in_feats, hidden_dim, hidden_dim, num_mlp_layers, norm="batchnorm")
            else:
                mlp = MLP(hidden_dim, hidden_dim, hidden_dim, num_mlp_layers, norm="batchnorm")
            self.gin_layers.append(GINLayer(mlp, eps, train_eps))
            self.batch_norm.append(nn.BatchNorm1d(hidden_dim))
        # One prediction head per depth (depth 0 reads the raw features).
        self.linear_prediction = nn.ModuleList()
        for i in range(self.num_layers):
            if i == 0:
                self.linear_prediction.append(nn.Linear(in_feats, out_feats))
            else:
                self.linear_prediction.append(nn.Linear(hidden_dim, out_feats))
        self.dropout = nn.Dropout(dropout)
        if pooling == "sum":
            self.pool = batch_sum_pooling
        elif pooling == "mean":
            self.pool = batch_mean_pooling
        elif pooling == "max":
            self.pool = batch_max_pooling
        else:
            raise NotImplementedError
        self.final_drop = nn.Dropout(final_dropout)

    def forward(self, batch, n_feat):
        """
        Run the GIN over a batched graph.

        ``batch`` carries a ``.batch`` node-to-graph assignment vector used
        for pooling; ``n_feat`` is the node feature tensor.  Returns the
        summed per-depth predictions and the pooled representations of every
        depth except the first (the raw input features).
        """
        h = n_feat
        # device = h.device
        # batchsize = int(torch.max(batch.batch)) + 1
        layer_rep = [h]
        for i in range(self.num_layers - 1):
            h = self.gin_layers[i](batch, h)
            h = self.batch_norm[i](h)
            h = F.relu(h)
            layer_rep.append(h)
        score_over_layer = 0
        all_outputs = []
        for i, h in enumerate(layer_rep):
            pooled_h = self.pool(h, batch.batch)
            all_outputs.append(pooled_h)
            score_over_layer += self.final_drop(self.linear_prediction[i](pooled_h))
        return score_over_layer, all_outputs[1:]
class GCCModel(BaseModel):
"""
MPNN from
`Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__
Parameters
----------
node_input_dim : int
Dimension of input node feature, default to be 15.
edge_input_dim : int
Dimension of input edge feature, default to be 15.
output_dim : int
Dimension of prediction, default to be 12.
node_hidden_dim : int
Dimension of node feature in hidden layers, default to be 64.
edge_hidden_dim : int
Dimension of edge feature in hidden layers, default to be 128.
num_step_message_passing : int
Number of message passing steps, default to be 6.
num_step_set2set : int
Number of set2set steps
num_layer_set2set : int
Number of set2set layers
"""
@staticmethod
def add_args(parser):
parser.add_argument("--hidden-size", type=int, default=64)
parser.add_argument("--positional-embedding-size", type=int, default=32)
parser.add_argument("--degree-embedding-size", type=int, default=16)
parser.add_argument("--max-node-freq", type=int, default=16)
parser.add_argument("--max-edge-freq", type=int, default=16)
parser.add_argument("--max-degree", type=int, default=512)
parser.add_argument("--freq-embedding-size", type=int, default=16)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--num-heads", type=int, default=2)
parser.add_argument("--output-size", type=int, default=32)
@classmethod
def build_model_from_args(cls, args):
return cls(
positional_embedding_size=args.positional_embedding_size,
max_node_freq=args.max_node_freq,
max_edge_freq=args.max_edge_freq,
max_degree=args.max_degree,
num_layers=args.num_layers,
num_heads=args.num_heads,
degree_embedding_size=args.degree_embedding_size,
node_hidden_dim=args.hidden_size,
output_dim=args.output_size,
)
def __init__(
self,
positional_embedding_size=32,
max_node_freq=8,
max_edge_freq=8,
max_degree=128,
freq_embedding_size=32,
degree_embedding_size=32,
output_dim=32,
node_hidden_dim=32,
edge_hidden_dim=32,
num_layers=6,
num_heads=4,
num_step_set2set=6,
num_layer_set2set=3,
norm=False,
gnn_model="gin",
degree_input=False,
):
super(GCCModel, self).__init__()
if degree_input:
node_input_dim = positional_embedding_size + degree_embedding_size + 1
else:
node_input_dim = positional_embedding_size + 1
# node_input_dim = (
# positional_embedding_size + freq_embedding_size + degree_embedding_size + 3
# )
# edge_input_dim = freq_embedding_size + 1
if gnn_model == "gat":
self.gnn = GATModel(
in_feats=node_input_dim,
hidden_size=node_hidden_dim,
num_layers=num_layers,
nhead=num_heads,
dropout=0.0,
)
elif gnn_model == "gin":
self.gnn = GINModel(
num_layers=num_layers,
num_mlp_layers=2,
in_feats=node_input_dim,
hidden_dim=node_hidden_dim,
out_feats=output_dim,
final_dropout=0.5,
train_eps=False,
pooling="sum",
# neighbor_pooling_type="sum",
# use_selayer=False,
)
self.gnn_model = gnn_model
self.max_node_freq = max_node_freq
self.max_edge_freq = max_edge_freq
self.max_degree = max_degree
self.degree_input = degree_input
# self.node_freq_embedding = nn.Embedding(
# num_embeddings=max_node_freq + 1, embedding_dim=freq_embedding_size
# )
if degree_input:
self.degree_embedding = nn.Embedding(num_embeddings=max_degree + 1, embedding_dim=degree_embedding_size)
# self.edge_freq_embedding = nn.Embedding(
# num_embeddings=max_edge_freq + 1, embedding_dim=freq_embedding_size
# )
self.set2set = Set2Set(node_hidden_dim, num_step_set2set, num_layer_set2set)
if gnn_model != "gin":
self.lin_readout = nn.Sequential(
nn.Linear(2 * node_hidden_dim, node_hidden_dim),
nn.ReLU(),
nn.Linear(node_hidden_dim, output_dim),
)
else:
self.lin_readout = None
self.norm = norm
def forward(self, g, return_all_outputs=False):
"""Predict molecule labels
Parameters
----------
g : Graph
n_feat : tensor of dtype float32 and shape (B1, D1)
Node features. B1 for number of nodes and D1 for
the node feature size.
e_feat : tensor of dtype float32 and shape (B2, D2)
Edge features. B2 for number of edges and D2 for
the edge feature size.
Returns
-------
res : Predicted labels
"""
# nfreq = g.ndata["nfreq"]
device = self.device
pos_undirected = g.pos_undirected
seed_emb = g.seed.unsqueeze(1).float()
if not torch.is_tensor(seed_emb):
seed_emb = torch.Tensor(seed_emb)
if self.degree_input:
degrees = g.degrees()
if device != torch.device("cpu"):
degrees = degrees.cuda(device)
deg_emb = self.degree_embedding(degrees.clamp(0, self.max_degree))
n_feat = torch.cat((pos_undirected, deg_emb, seed_emb), dim=-1)
else:
n_feat = torch.cat(
(
pos_undirected,
# self.node_freq_embedding(nfreq.clamp(0, self.max_node_freq)),
# self.degree_embedding(degrees.clamp(0, self.max_degree)),
seed_emb,
# nfreq.unsqueeze(1).float() / self.max_node_freq,
# degrees.unsqueeze(1).float() / self.max_degree,
),
dim=-1,
)
if self.gnn_model == "gin":
x, all_outputs = self.gnn(g, n_feat)
else:
x, all_outputs = self.gnn(g, n_feat), None
x = self.set2set(g, x)
x = self.lin_readout(x)
if self.norm:
x = F.normalize(x, p=2, dim=-1, eps=1e-5)
if return_all_outputs:
return x, all_outputs
else:
return x
# --------------------------------------------------------------------
# --------------------------------------------------------------------
def warmup_linear(x, warmup=0.002):
"""Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero."""
if x < warmup:
return x / warmup
return max((x - 1.0) / (warmup - 1.0), 0)
| [
"cogdl.layers.MLP",
"torch.nn.Dropout",
"torch.nn.ReLU",
"numpy.sqrt",
"torch.device",
"torch.nn.ModuleList",
"torch.Tensor",
"torch.nn.functional.normalize",
"torch.nn.BatchNorm1d",
"torch.is_tensor",
"torch.cat",
"cogdl.layers.GATLayer",
"torch.nn.functional.relu",
"torch.nn.Linear",
"... | [((787, 796), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (793, 796), True, 'import torch.nn.functional as F\n'), ((2023, 2038), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2036, 2038), True, 'import torch.nn as nn\n'), ((2065, 2080), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2078, 2080), True, 'import torch.nn as nn\n'), ((2552, 2567), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2565, 2567), True, 'import torch.nn as nn\n'), ((2831, 2850), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2841, 2850), True, 'import torch.nn as nn\n'), ((3149, 3174), 'torch.nn.Dropout', 'nn.Dropout', (['final_dropout'], {}), '(final_dropout)\n', (3159, 3174), True, 'import torch.nn as nn\n'), ((8194, 8255), 'cogdl.layers.Set2Set', 'Set2Set', (['node_hidden_dim', 'num_step_set2set', 'num_layer_set2set'], {}), '(node_hidden_dim, num_step_set2set, num_layer_set2set)\n', (8201, 8255), False, 'from cogdl.layers import Set2Set\n'), ((655, 690), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.mlp.output_dim'], {}), '(self.mlp.output_dim)\n', (669, 690), True, 'import torch.nn as nn\n'), ((3484, 3493), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (3490, 3493), True, 'import torch.nn.functional as F\n'), ((7943, 8028), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': '(max_degree + 1)', 'embedding_dim': 'degree_embedding_size'}), '(num_embeddings=max_degree + 1, embedding_dim=degree_embedding_size\n )\n', (7955, 8028), True, 'import torch.nn as nn\n'), ((9270, 9295), 'torch.is_tensor', 'torch.is_tensor', (['seed_emb'], {}), '(seed_emb)\n', (9285, 9295), False, 'import torch\n'), ((9320, 9342), 'torch.Tensor', 'torch.Tensor', (['seed_emb'], {}), '(seed_emb)\n', (9332, 9342), False, 'import torch\n'), ((9603, 9657), 'torch.cat', 'torch.cat', (['(pos_undirected, deg_emb, seed_emb)'], {'dim': '(-1)'}), '((pos_undirected, deg_emb, seed_emb), dim=-1)\n', (9612, 9657), False, 
'import torch\n'), ((9693, 9738), 'torch.cat', 'torch.cat', (['(pos_undirected, seed_emb)'], {'dim': '(-1)'}), '((pos_undirected, seed_emb), dim=-1)\n', (9702, 9738), False, 'import torch\n'), ((10414, 10452), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {'p': '(2)', 'dim': '(-1)', 'eps': '(1e-05)'}), '(x, p=2, dim=-1, eps=1e-05)\n', (10425, 10452), True, 'import torch.nn.functional as F\n'), ((1112, 1328), 'cogdl.layers.GATLayer', 'GATLayer', ([], {'in_feats': '(in_feats if i > 0 else hidden_size // nhead)', 'out_feats': '(hidden_size // nhead)', 'nhead': 'nhead', 'attn_drop': '(0.0)', 'alpha': '(0.2)', 'residual': '(False)', 'activation': '(F.leaky_relu if i + 1 < num_layers else None)'}), '(in_feats=in_feats if i > 0 else hidden_size // nhead, out_feats=\n hidden_size // nhead, nhead=nhead, attn_drop=0.0, alpha=0.2, residual=\n False, activation=F.leaky_relu if i + 1 < num_layers else None)\n', (1120, 1328), False, 'from cogdl.layers import MLP, GATLayer, GINLayer\n'), ((2203, 2274), 'cogdl.layers.MLP', 'MLP', (['in_feats', 'hidden_dim', 'hidden_dim', 'num_mlp_layers'], {'norm': '"""batchnorm"""'}), "(in_feats, hidden_dim, hidden_dim, num_mlp_layers, norm='batchnorm')\n", (2206, 2274), False, 'from cogdl.layers import MLP, GATLayer, GINLayer\n'), ((2315, 2388), 'cogdl.layers.MLP', 'MLP', (['hidden_dim', 'hidden_dim', 'hidden_dim', 'num_mlp_layers'], {'norm': '"""batchnorm"""'}), "(hidden_dim, hidden_dim, hidden_dim, num_mlp_layers, norm='batchnorm')\n", (2318, 2388), False, 'from cogdl.layers import MLP, GATLayer, GINLayer\n'), ((2424, 2453), 'cogdl.layers.GINLayer', 'GINLayer', (['mlp', 'eps', 'train_eps'], {}), '(mlp, eps, train_eps)\n', (2432, 2453), False, 'from cogdl.layers import MLP, GATLayer, GINLayer\n'), ((2490, 2516), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_dim'], {}), '(hidden_dim)\n', (2504, 2516), True, 'import torch.nn as nn\n'), ((8349, 8396), 'torch.nn.Linear', 'nn.Linear', (['(2 * node_hidden_dim)', 'node_hidden_dim'], 
{}), '(2 * node_hidden_dim, node_hidden_dim)\n', (8358, 8396), True, 'import torch.nn as nn\n'), ((8414, 8423), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8421, 8423), True, 'import torch.nn as nn\n'), ((8441, 8479), 'torch.nn.Linear', 'nn.Linear', (['node_hidden_dim', 'output_dim'], {}), '(node_hidden_dim, output_dim)\n', (8450, 8479), True, 'import torch.nn as nn\n'), ((9433, 9452), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9445, 9452), False, 'import torch\n'), ((580, 608), 'numpy.sqrt', 'np.sqrt', (['self.mlp.output_dim'], {}), '(self.mlp.output_dim)\n', (587, 608), True, 'import numpy as np\n'), ((2678, 2708), 'torch.nn.Linear', 'nn.Linear', (['in_feats', 'out_feats'], {}), '(in_feats, out_feats)\n', (2687, 2708), True, 'import torch.nn as nn\n'), ((2774, 2806), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'out_feats'], {}), '(hidden_dim, out_feats)\n', (2783, 2806), True, 'import torch.nn as nn\n')] |
import numpy as np
import math
def kepler_3rd(planet_x, p1, p2, a1):
"""Function that gets as input the orbital period of a planet in years and returns the orbital distance of a planet to the Sun.
Input: Planet of interest(planet_x), orbital period planet Earth(p1) days, orbital period planet x(p2) days, distane planet Earth(a1) AU
Output: Orbital distance planet x(a2) AU"""
a2 = np.round(np.cbrt(((p2**2)*(a1**3))/(p1**2)), 2)
print('The distance from the planet', planet_x, 'is', a2, 'AU.')
return a2
def piston(V,P0,V0,T0,gamma):
"""Calculates the pressures for the piston volumes in V, using the adiabatic law, and calculates the temperatures using the ideal gas law.
Input: Volume(L), inital pressure (ATM), inital temperature (K), gamma
Output: pressure(ATM), temperature(K)"""
const = (P0*V0)/T0
const1 = P0*(V0**gamma)
P = const1/(V**gamma)
T = P*V/ const
Piston = np.array([P, V, T])
return Piston | [
"numpy.array",
"numpy.cbrt"
] | [((976, 995), 'numpy.array', 'np.array', (['[P, V, T]'], {}), '([P, V, T])\n', (984, 995), True, 'import numpy as np\n'), ((414, 450), 'numpy.cbrt', 'np.cbrt', (['(p2 ** 2 * a1 ** 3 / p1 ** 2)'], {}), '(p2 ** 2 * a1 ** 3 / p1 ** 2)\n', (421, 450), True, 'import numpy as np\n')] |
import copy
from PyQt4 import QtGui
import numpy as np
from core.region.region import Region
from gui.graph_widget.edge import Edge
from gui.graph_widget.graph_line import LineType, GraphLine
from gui.graph_widget.node import Node
from gui.graph_widget_loader import FROM_TOP, SPACE_BETWEEN_HOR, SPACE_BETWEEN_VER, GAP
from gui.img_controls.gui_utils import cvimg2qtpixmap
import numbers
__author__ = '<NAME>'
class Column:
def __init__(self, frame, scene, im_manager, relative_margin, width, height, empty=False):
self.scene = scene
self.im_manager = im_manager
self.empty = empty
self.x = 0
self.frame = frame
self.relative_margin = relative_margin
self.width = width
self.height = height
self.frame_sign = None
self.objects = []
self.edges = {}
self.items_nodes = {}
self.regions_images = {}
self.compress_marker = QtGui.QGraphicsTextItem()
self.objects.append(0)
self.compress_marker.setDefaultTextColor(QtGui.QColor(0, 0, 0, 120))
self.scene.addItem(self.compress_marker)
self.def_img = np.zeros((self.height, self.width, 3), dtype=np.uint8)
self.def_img[:, :, :] = 255
# color borders
self.def_img[:3, :, :] = self.def_img[:, :3, :] = self.def_img[-3:, :, :] = self.def_img[:, -3:, :] = 30
def get_start_frame(self):
if isinstance(self.frame, tuple):
return self.frame[0]
else:
return self.frame
def get_end_frame(self):
if isinstance(self.frame, tuple):
return self.frame[1]
else:
return self.frame
def add_object(self, to_add, position):
if position < len(self.objects):
# if not (self.objects[position] == to_add or isinstance(self.objects[position], GraphLine)):
self.objects[position] = to_add
else:
while len(self.objects) < position:
self.objects.append(None)
else:
self.objects.append(to_add)
def is_free(self, position=0, item=None):
if position < 0:
return False
elif position > len(self.objects) - 1:
return True
elif item == self.objects[position]:
return True
elif isinstance(self.objects[position], (Region, Node)):
return False
elif isinstance(self.objects[position], GraphLine):
if self.objects[position].type == LineType.TRACKLET or \
self.objects[position].type == LineType.PARTIAL_TRACKLET:
return False
return True
def contains(self, item):
if item in self.objects:
return True
else:
for obj in self.objects:
if isinstance(obj, GraphLine):
if obj.region_from is item or obj.region_to is item:
return True
return False
def get_position_item(self, item_to_locate):
# # Filip's code...
if isinstance(item_to_locate, Region):
for i, item in enumerate(self.objects):
if isinstance(item, GraphLine):
if item.region_from == item_to_locate or item.region_to == item_to_locate:
return i
if item_to_locate in self.objects:
return self.objects.index(item_to_locate)
else:
for i, item in enumerate(self.objects):
if isinstance(item, GraphLine):
if item.region_from == item_to_locate or item.region_to == item_to_locate:
return i
elif isinstance(item, Node):
if item_to_locate == item.region:
return i
def get_position_with_chunk_id(self, ch_id):
position = 0
for item in self.objects:
if isinstance(item, GraphLine):
if item.id == ch_id:
return position
position += 1
def prepare_images(self):
for item in self.objects:
if not (item in self.items_nodes or item in self.regions_images or item is None):
if isinstance(item, GraphLine):
if item.region_from.frame_ == self.frame:
region = item.region_from
elif item.region_to.frame_ == self.frame:
region = item.region_to
else:
continue
else:
region = item
if region in self.items_nodes:
continue
if not isinstance(region, numbers.Integral):
img = self.def_img
# img = self.im_manager.get_crop(self.frame, region, width=self.width, height=self.height, relative_margin=self.relative_margin)
self.regions_images[region] = img
def add_crop_to_col(self):
for item in self.objects:
if item not in self.items_nodes:
if not item:
continue
if isinstance(item, GraphLine):
if item.region_from.frame_ == self.frame:
item = item.region_from
elif item.region_to.frame_ == self.frame:
item = item.region_to
else:
continue
if item in self.items_nodes:
continue
if item not in self.regions_images:
img = self.def_img
self.regions_images[item] = img
# img = self.im_manager.get_crop(self.frame, item, width=self.width, height=self.height, relative_margin=self.relative_margin)
else:
img = self.regions_images[item]
pixmap = cvimg2qtpixmap(img)
node = Node(self.scene.addPixmap(pixmap), self.scene, item, self.im_manager, self.relative_margin,
self.width, self.height)
node.parent_pixmap.hide()
self.items_nodes[item] = node
def set_x(self, x):
self.x = x
def draw(self, compress_axis, vertically, frames_columns):
if self.empty:
self.show_compress_marker(compress_axis, vertically)
else:
self.show_frame_number(vertically)
for item in self.objects:
if isinstance(item, Region):
self.show_node(item, vertically)
elif isinstance(item, GraphLine):
if item.region_from.frame_ == self.frame:
self.show_node(item.region_from, vertically)
elif item.region_to.frame_ == self.frame:
self.show_node(item.region_to, vertically)
self.show_edge(item, frames_columns, vertically)
def show_edge(self, edge, frame_columns, vertically, direction=None, node=None):
from_x = self.x
if node is None:
node = edge.region_to
position = self.get_position_item(node)
if edge.type == LineType.PARTIAL_TRACKLET:
pass
from_y = GAP + FROM_TOP + position * self.height + self.height / 2 + SPACE_BETWEEN_VER * position
column_left = frame_columns[edge.region_from.frame_]
position = column_left.get_position_item(edge.region_from)
to_x = column_left.x + self.width
to_y = GAP + FROM_TOP + position * self.height + self.height / 2 + SPACE_BETWEEN_VER * position
z_value = -1 if edge.type == LineType.TRACKLET or LineType.PARTIAL_TRACKLET else -3
self.draw_edge(from_x, from_y, to_x, to_y, vertically, z_value, edge)
if edge.type == LineType.PARTIAL_TRACKLET:
z_value = -2
if edge.overlaps_left():
self.draw_edge(column_left.x, from_y, column_left.x - SPACE_BETWEEN_HOR / 2.5, from_y, vertically, z_value, edge, partial=True)
if edge.overlaps_right():
self.draw_edge(self.x + self.width, from_y, self.x + self.width + SPACE_BETWEEN_HOR / 2.5, from_y, vertically, z_value, edge, partial=True)
# to_y = from_y
# to_x = self.x - SPACE_BETWEEN_HOR / 2.5
# if not direction == "left":
# from_x += self.width
# to_x += self.width + SPACE_BETWEEN_HOR * 4 / 5.0
def draw_edge(self, from_x, from_y, to_x, to_y, vertically, z_value, edge, partial=False):
if vertically:
from_x, from_y, to_x, to_y = from_y, from_x, to_y, to_x
edge_obj = Edge(from_x, from_y, to_x, to_y, edge, self.scene, vertically, partial)
if edge in self.edges:
self.edges[edge].append(edge_obj)
else:
self.edges[edge] = [edge_obj]
edge_obj.graphical_object.setZValue(z_value)
self.scene.addItem(edge_obj.graphical_object)
def delete_scene(self):
for key, object in list(self.edges.items()):
for o in object:
self.scene.removeItem(o.graphical_object)
del self.edges[key]
def show_node(self, region, vertically, compressed=True):
position = self.get_position_item(region)
x = self.x
y = GAP + FROM_TOP + position * self.height + SPACE_BETWEEN_VER * position
if vertically:
x, y = y, x
if region not in self.items_nodes:
if region not in self.regions_images:
if compressed:
img = self.def_img
else:
img = self.im_manager.get_crop(self.frame, region, width=self.width, height=self.height,
relative_margin=self.relative_margin)
self.regions_images[region] = img
else:
img = self.regions_images[region]
pixmap = cvimg2qtpixmap(img)
node = Node(self.scene.addPixmap(pixmap), self.scene, region, self.im_manager, self.relative_margin,
self.width, self.height)
self.items_nodes[region] = node
self.items_nodes[region].setPos(x, y)
self.items_nodes[region].parent_pixmap.show()
def show_compress_marker(self, compress_axis, vertically):
if isinstance(self.frame, tuple):
x = self.x + self.width / 4 - 12.5
y = FROM_TOP
if vertically:
x, y = y, x - 17.5
string_len = len(str(self.frame[0] if isinstance(self.frame, tuple) else self.frame)) / 2
self.compress_marker.setPlainText((" " * string_len + ".\n") * 3)
else:
self.compress_marker.setPlainText(". . .")
self.compress_marker.setPos(x, y)
self.compress_marker.show()
if not compress_axis:
self.compress_marker.hide()
else:
self.show_frame_number(vertically, compress_axis, True)
if not compress_axis:
self.frame_sign.hide()
def show_frame_number(self, vertically, compress_axis=True, empty=False):
text = str(self.frame)
text_obj = QtGui.QGraphicsTextItem(text) if self.frame_sign is None else self.frame_sign
y = FROM_TOP
if empty:
text_obj.setDefaultTextColor(QtGui.QColor(0, 0, 0, 120))
x = self.x + self.width / (4 if empty else 2)
if vertically:
x, y = y, x - 10
else:
x -= (len(text)) / 2.0 * 10
if self.frame_sign is None:
text_obj.setPos(x, y)
self.frame_sign = text_obj
self.scene.addItem(text_obj)
else:
self.frame_sign.setPos(x, y)
self.frame_sign.show() | [
"PyQt4.QtGui.QColor",
"PyQt4.QtGui.QGraphicsTextItem",
"numpy.zeros",
"gui.graph_widget.edge.Edge",
"gui.img_controls.gui_utils.cvimg2qtpixmap"
] | [((942, 967), 'PyQt4.QtGui.QGraphicsTextItem', 'QtGui.QGraphicsTextItem', ([], {}), '()\n', (965, 967), False, 'from PyQt4 import QtGui\n'), ((1148, 1202), 'numpy.zeros', 'np.zeros', (['(self.height, self.width, 3)'], {'dtype': 'np.uint8'}), '((self.height, self.width, 3), dtype=np.uint8)\n', (1156, 1202), True, 'import numpy as np\n'), ((8730, 8801), 'gui.graph_widget.edge.Edge', 'Edge', (['from_x', 'from_y', 'to_x', 'to_y', 'edge', 'self.scene', 'vertically', 'partial'], {}), '(from_x, from_y, to_x, to_y, edge, self.scene, vertically, partial)\n', (8734, 8801), False, 'from gui.graph_widget.edge import Edge\n'), ((1048, 1074), 'PyQt4.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(120)'], {}), '(0, 0, 0, 120)\n', (1060, 1074), False, 'from PyQt4 import QtGui\n'), ((10028, 10047), 'gui.img_controls.gui_utils.cvimg2qtpixmap', 'cvimg2qtpixmap', (['img'], {}), '(img)\n', (10042, 10047), False, 'from gui.img_controls.gui_utils import cvimg2qtpixmap\n'), ((11307, 11336), 'PyQt4.QtGui.QGraphicsTextItem', 'QtGui.QGraphicsTextItem', (['text'], {}), '(text)\n', (11330, 11336), False, 'from PyQt4 import QtGui\n'), ((5949, 5968), 'gui.img_controls.gui_utils.cvimg2qtpixmap', 'cvimg2qtpixmap', (['img'], {}), '(img)\n', (5963, 5968), False, 'from gui.img_controls.gui_utils import cvimg2qtpixmap\n'), ((11465, 11491), 'PyQt4.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(120)'], {}), '(0, 0, 0, 120)\n', (11477, 11491), False, 'from PyQt4 import QtGui\n')] |
import numpy as np
class FirFilter:
def __init__(self, filter_coeff: np.ndarray, buffer_init = 0):
self._buffer = np.zeros(len(filter_coeff)) * buffer_init
self._filter_coeff = np.flip(filter_coeff,0)
self.last_filtered_value = None
def filter(self, input: float) -> float:
# push new input into buffer
# https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
self._buffer[:-1] = self._buffer[1:]
self._buffer[-1] = input
# apply FIR filter to buffer
self.last_filtered_value = np.sum(self._buffer * self._filter_coeff)
return self.last_filtered_value
class RateLimiter:
def __init__(self, sample_rate, rate_limit, inital_value = 0):
self._discrete_rate_limit = 1/sample_rate * rate_limit
self._prev_value = inital_value
def limit(self, input):
if input - self._prev_value > self._discrete_rate_limit:
self._prev_value = self._prev_value + self._discrete_rate_limit
elif input - self._prev_value < -self._discrete_rate_limit:
self._prev_value = self._prev_value - self._discrete_rate_limit
else:
self._prev_value = input
return self._prev_value
class Unwrapper:
def __init__(self, initial_value = 0, tol = np.pi+0.1):
self._tol = tol
self._prev_value = initial_value
def unwrap(self, input):
self._prev_value = np.unwrap([self._prev_value, input] , discont = self._tol)[1]
return self._prev_value
class Integrator:
'''
Discrete integrator expected to be called at fixed rate
equal to sample_rate (hz)
'''
def __init__(self, sample_rate, inital_value = 0):
self.current_value = inital_value
self.sample_rate = sample_rate
def integrate(self, input):
self.current_value = self.current_value + 1/self.sample_rate * input
return self.current_value
class AntiWindup:
'''
Attempt at discrete anti-windup
'''
def __init__(self, gain, initial_value = 0):
self._saturation_diff = initial_value
self.gain = gain
def get_feedback(self):
return self._saturation_diff * self.gain
def update(self, saturation_diff):
self._saturation_diff = saturation_diff
def wrapToPi(input):
# https://stackoverflow.com/questions/15927755/opposite-of-numpy-unwrap/15927914
return (input + np.pi) % (2 * np.pi) - np.pi | [
"numpy.flip",
"numpy.unwrap",
"numpy.sum"
] | [((198, 222), 'numpy.flip', 'np.flip', (['filter_coeff', '(0)'], {}), '(filter_coeff, 0)\n', (205, 222), True, 'import numpy as np\n'), ((617, 658), 'numpy.sum', 'np.sum', (['(self._buffer * self._filter_coeff)'], {}), '(self._buffer * self._filter_coeff)\n', (623, 658), True, 'import numpy as np\n'), ((1490, 1545), 'numpy.unwrap', 'np.unwrap', (['[self._prev_value, input]'], {'discont': 'self._tol'}), '([self._prev_value, input], discont=self._tol)\n', (1499, 1545), True, 'import numpy as np\n')] |
"""
Author : <NAME>
01 October 2021
Hacktoberfest Mozilla Campus Club
Cummins College of Engineering for Women Pune
"""
import re
import numpy as np
#makes all the ones that are part of the same island 0
def remove_ones(x,y):
global r
global c
global grid
#check that indices x and y exist in grid
if (x<0 or x>=r or y<0 or y>=c):
return
if grid[x][y]==0:
return
#Marks the cell as 0
grid[x][y] = 0
#this function should keep calling itself till the entire island
#has been traversed and all the ones in it made to 0
#checking
#horizontal and vertical
remove_ones(x+1,y)
remove_ones(x-1,y)
remove_ones(x,y+1)
remove_ones(x,y-1)
#diagonal
remove_ones(x+1,y+1)
remove_ones(x+1,y-1)
remove_ones(x-1,y+1)
remove_ones(x-1,y-1)
def iterate():
count=0
#this is simple ireator that calls the remove_ones
#function for the first time for a particular island
for i in range(0,r):
for j in range(0,c):
if grid[i][j]==1:
count+=1
remove_ones(i,j)
#print(grid)
##uncomment above to visialize the islands being removed
return(count)
#the grid must be entered in the following format {{0,1},{1,0},{1,1},{1,0}}
s=input("grid = ")
#no of rows
r=s.count("{") - 1
#grid is initially a 1D numpy array
#the regex removes all the characters that are not 0 or 1
grid=np.array(list(re.sub(r"[^0-1]","",s)), dtype= int)
#reshape the grid
c=(int) (len(grid)/r) #columns
grid = np.reshape(grid, (r,c))
#print(grid)
##uncomment above line to visualize grid
islands = iterate()
print(islands)
#print (sum(sum(grid)))
##if the above statement prints 0, the program has taken
##all islands into account | [
"re.sub",
"numpy.reshape"
] | [((1423, 1447), 'numpy.reshape', 'np.reshape', (['grid', '(r, c)'], {}), '(grid, (r, c))\n', (1433, 1447), True, 'import numpy as np\n'), ((1329, 1352), 're.sub', 're.sub', (['"""[^0-1]"""', '""""""', 's'], {}), "('[^0-1]', '', s)\n", (1335, 1352), False, 'import re\n')] |
from random import randint
import matplotlib.pyplot as plt
import numpy as np
class Solution:
def rand5(self):
r7 = randint(1, 7)
while r7 > 5:
r7 = randint(1, 7)
return r7
soln = Solution()
randint_sample = np.array([randint(1, 5) for i in range(10_000)])
rand5_sample = np.array([soln.rand5() for i in range(10_000)])
hist1 = np.hstack(randint_sample)
plt.hist(hist1, bins='auto')
plt.title("randint(1, 5) histogram")
plt.show()
hist2 = np.hstack(rand5_sample)
plt.hist(hist2, bins='auto')
plt.title("soln.rand5() histogram")
plt.show()
| [
"matplotlib.pyplot.hist",
"numpy.hstack",
"matplotlib.pyplot.title",
"random.randint",
"matplotlib.pyplot.show"
] | [((343, 368), 'numpy.hstack', 'np.hstack', (['randint_sample'], {}), '(randint_sample)\n', (352, 368), True, 'import numpy as np\n'), ((369, 397), 'matplotlib.pyplot.hist', 'plt.hist', (['hist1'], {'bins': '"""auto"""'}), "(hist1, bins='auto')\n", (377, 397), True, 'import matplotlib.pyplot as plt\n'), ((398, 434), 'matplotlib.pyplot.title', 'plt.title', (['"""randint(1, 5) histogram"""'], {}), "('randint(1, 5) histogram')\n", (407, 434), True, 'import matplotlib.pyplot as plt\n'), ((435, 445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (443, 445), True, 'import matplotlib.pyplot as plt\n'), ((455, 478), 'numpy.hstack', 'np.hstack', (['rand5_sample'], {}), '(rand5_sample)\n', (464, 478), True, 'import numpy as np\n'), ((479, 507), 'matplotlib.pyplot.hist', 'plt.hist', (['hist2'], {'bins': '"""auto"""'}), "(hist2, bins='auto')\n", (487, 507), True, 'import matplotlib.pyplot as plt\n'), ((508, 543), 'matplotlib.pyplot.title', 'plt.title', (['"""soln.rand5() histogram"""'], {}), "('soln.rand5() histogram')\n", (517, 543), True, 'import matplotlib.pyplot as plt\n'), ((544, 554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (552, 554), True, 'import matplotlib.pyplot as plt\n'), ((121, 134), 'random.randint', 'randint', (['(1)', '(7)'], {}), '(1, 7)\n', (128, 134), False, 'from random import randint\n'), ((232, 245), 'random.randint', 'randint', (['(1)', '(5)'], {}), '(1, 5)\n', (239, 245), False, 'from random import randint\n'), ((159, 172), 'random.randint', 'randint', (['(1)', '(7)'], {}), '(1, 7)\n', (166, 172), False, 'from random import randint\n')] |
"""<https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm>"""
import sys
import numba as nb
import numpy as np
input = sys.stdin.readline
# Dijkstra algorithm without priority queue (this is slow for sparse graphs)
@nb.njit("i8[:](i8,i8[:,:],i8,i8)", cache=True)
def dijkstra(V, G, s, INF):
# Shortest path from vertex s
dist = np.full(shape=V, fill_value=INF, dtype=np.int64)
dist[s] = 0
unvisited = [True] * V
while len(unvisited) != 0:
min_dist = INF
u = -1
for v in range(V):
if unvisited[v] and dist[v] < min_dist:
min_dist = dist[v]
u = v
# when planning a complete traversal; occurs when there is no connection
# between the initial node and remaining unvisited nodes
if min_dist == INF:
break
unvisited[u] = False
for v in range(V):
if unvisited[v] and G[u][v] != INF:
alt = dist[u] + G[u][v]
if alt < dist[v]:
dist[v] = alt
return dist
def main():
N = int(input())
INF = 1 << 30
G = np.full(shape=(N, N), fill_value=INF, dtype=np.int64)
for _ in range(N):
u, k, *vc = map(int, input().split())
for i in range(k):
v = vc[2 * i]
c = vc[2 * i + 1]
G[u][v] = c
for s in range(N):
print(s, dijkstra(N, G, s, INF))
if __name__ == "__main__":
main()
"""
<https://onlinejudge.u-aizu.ac.jp/courses/lesson/1/ALDS1/12/ALDS1_12_B>
Example for input
5
0 3 2 3 3 1 1 2
1 2 0 2 3 4
2 3 0 3 3 1 4 1
3 4 2 1 0 1 1 4 4 3
4 2 2 1 3 3
0 [0 2 2 1 3]
1 [2 0 4 3 5]
2 [2 4 0 1 1]
3 [1 3 1 0 2]
4 [3 5 1 2 0]
"""
| [
"numpy.full",
"numba.njit"
] | [((219, 265), 'numba.njit', 'nb.njit', (['"""i8[:](i8,i8[:,:],i8,i8)"""'], {'cache': '(True)'}), "('i8[:](i8,i8[:,:],i8,i8)', cache=True)\n", (226, 265), True, 'import numba as nb\n'), ((339, 387), 'numpy.full', 'np.full', ([], {'shape': 'V', 'fill_value': 'INF', 'dtype': 'np.int64'}), '(shape=V, fill_value=INF, dtype=np.int64)\n', (346, 387), True, 'import numpy as np\n'), ((1123, 1176), 'numpy.full', 'np.full', ([], {'shape': '(N, N)', 'fill_value': 'INF', 'dtype': 'np.int64'}), '(shape=(N, N), fill_value=INF, dtype=np.int64)\n', (1130, 1176), True, 'import numpy as np\n')] |
from typing import Dict
import numpy as np
from amazon_review import AmazonReview
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
EarlyStoppingCallback,
EvalPrediction,
Trainer,
TrainingArguments,
)
# Read data
# About slice https://huggingface.co/docs/datasets/splits.html
review = AmazonReview(lang="ja")
# Define pretrained tokenizer and model
model_name = "cl-tohoku/bert-base-japanese-whole-word-masking"
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(model_name)
dataset = review.load("validation")
dataset = dataset.train_test_split(test_size=0.2)
dataset_train = review.format(dataset["train"], tokenizer)
dataset_validation = review.format(dataset["test"], tokenizer)
print(review.statistics(dataset_train))
print(review.statistics(dataset_validation))
# Define Trainer parameters
def compute_metrics(eval_pred: EvalPrediction) -> Dict[str, float]:
    """Compute binary-classification metrics for the HF Trainer.

    Args:
        eval_pred: ``(predictions, label_ids)`` pair supplied by ``Trainer``
            at evaluation time; predictions are raw logits of shape
            ``(n_samples, 2)``.

    Returns:
        Mapping with ``accuracy``, ``precision``, ``recall`` and ``f1``.
    """
    # Parameter renamed from `eval`, which shadowed the builtin; Trainer
    # invokes this hook positionally, so the rename is backward-compatible.
    pred, labels = eval_pred
    # Logits -> hard class labels.
    pred = np.argmax(pred, axis=1)
    accuracy = accuracy_score(y_true=labels, y_pred=pred)
    recall = recall_score(y_true=labels, y_pred=pred)
    precision = precision_score(y_true=labels, y_pred=pred)
    f1 = f1_score(y_true=labels, y_pred=pred)
    return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
# Define Trainer
# NOTE(review): load_best_model_at_end=True normally requires matching save
# and evaluation strategies; here they differ ("epoch" vs "steps"), which
# recent transformers versions reject -- confirm the pinned version accepts it.
args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    evaluation_strategy="steps",
    eval_steps=100,
    save_strategy="epoch",
    seed=0,
    load_best_model_at_end=True,
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset_train,
    eval_dataset=dataset_validation,
    compute_metrics=compute_metrics,
    # Stop early if the monitored metric fails to improve for 3 evaluations.
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
# Train pre-trained model
trainer.train()
| [
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"transformers.EarlyStoppingCallback",
"transformers.TrainingArguments",
"numpy.argmax",
"sklearn.metrics.precision_score",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"sklearn.metrics.recall_score",
"transformers.Au... | [((423, 446), 'amazon_review.AmazonReview', 'AmazonReview', ([], {'lang': '"""ja"""'}), "(lang='ja')\n", (435, 446), False, 'from amazon_review import AmazonReview\n'), ((559, 635), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['model_name'], {'num_labels': '(2)'}), '(model_name, num_labels=2)\n', (609, 635), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, EvalPrediction, Trainer, TrainingArguments\n'), ((648, 689), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (677, 689), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, EvalPrediction, Trainer, TrainingArguments\n'), ((1470, 1707), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': '"""output"""', 'per_device_train_batch_size': '(8)', 'per_device_eval_batch_size': '(8)', 'num_train_epochs': '(3)', 'evaluation_strategy': '"""steps"""', 'eval_steps': '(100)', 'save_strategy': '"""epoch"""', 'seed': '(0)', 'load_best_model_at_end': '(True)'}), "(output_dir='output', per_device_train_batch_size=8,\n per_device_eval_batch_size=8, num_train_epochs=3, evaluation_strategy=\n 'steps', eval_steps=100, save_strategy='epoch', seed=0,\n load_best_model_at_end=True)\n", (1487, 1707), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, EvalPrediction, Trainer, TrainingArguments\n'), ((1114, 1137), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1123, 1137), True, 'import numpy as np\n'), ((1154, 1196), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1168, 1196), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, 
recall_score\n'), ((1210, 1250), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1222, 1250), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n'), ((1267, 1310), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1282, 1310), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n'), ((1320, 1356), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1328, 1356), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n'), ((1909, 1957), 'transformers.EarlyStoppingCallback', 'EarlyStoppingCallback', ([], {'early_stopping_patience': '(3)'}), '(early_stopping_patience=3)\n', (1930, 1957), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, EvalPrediction, Trainer, TrainingArguments\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 15 16:40:12 2016
@author: mark
This file contains methods for calculating kinetics values that don't
necessarily require cantera or another outside less known software package.
all rates are currently only in kcal/mol (except arrhenius)
"""
# -*- coding: utf-8 -*-
import numpy as np
def calculate_arr_rate(a, n, ea, T):
    """Modified Arrhenius rate constant: k = a * T**n * exp(-ea / (R * T)).

    The gas constant is taken in J/(mol K), so ``ea`` is expected in J/mol.
    """
    gas_constant = 8.314  # J/(mol K)
    exponent = -ea / (T * gas_constant)
    return a * T ** n * np.exp(exponent)
def calculate_troe_rate_constant(troe_parameters, high_parameters, low_parameters, temperature, molar_density, efficiencies=None, concentrations=None):
    """Troe falloff rate constant.

    parameter order is based off of cantera's code for troe formulas

    Args:
        troe_parameters: (A, T3, T1[, T2]) Troe centering parameters; the
            4th parameter is optional.
        high_parameters: (a, n, ea) Arrhenius parameters, high-pressure limit.
        low_parameters: (a, n, ea) Arrhenius parameters, low-pressure limit.
        temperature: temperature in K.
        molar_density: total molar density of the mixture.
        efficiencies: per-species collider efficiencies (currently ignored
            downstream); defaults to an empty dict.
        concentrations: per-species concentrations (currently ignored
            downstream); defaults to an empty dict.

    Returns:
        The falloff-corrected rate constant F * k_lindemann.
    """
    # Fix: `{}` mutable default arguments replaced with None sentinels so a
    # fresh dict is created per call.
    if efficiencies is None:
        efficiencies = {}
    if concentrations is None:
        concentrations = {}
    # ignoring efficienies for now
    corrected_density = calculate_falloff_efficienies(molar_density, efficiencies, concentrations)
    reduced_pressure = calculate_reduced_pressure(high_parameters, low_parameters, corrected_density, temperature)
    F_cent = (1-troe_parameters[0])*np.exp(-temperature/troe_parameters[1]) + troe_parameters[0]*np.exp(-temperature/troe_parameters[2])
    # The optional 4th Troe parameter adds exp(-T2/T); when absent the whole
    # extra term is omitted.
    if len(troe_parameters) > 3:
        F_cent += np.exp(-troe_parameters[3]/temperature)
    log_F_numerator = np.log10(F_cent)
    C = -0.4 - 0.67*log_F_numerator
    N = 0.75 - 1.27*log_F_numerator
    log_pressure = np.log10(reduced_pressure)
    log_F_denominator = 1. + ((log_pressure + C)/(N - .14*(log_pressure + C)))**2
    F = 10.**(log_F_numerator/log_F_denominator)
    # Blend with the Lindemann falloff form to get the final rate constant.
    k_lind = calculate_falloff_lindemann(high_parameters, low_parameters, corrected_density, temperature)
    return F*k_lind
def calculate_falloff_efficienies(molar_denisty, efficiencies, concentration):
    """Return the effective third-body concentration.

    Collider efficiencies are not applied yet; a fixed factor of 1.45
    stands in for the mixture-averaged efficiency.
    """
    HARD_CODED_EFFICIENCY = 1.45
    return HARD_CODED_EFFICIENCY * molar_denisty
def calculate_falloff_lindemann(high_parameters, low_parameters, density_molar, temperature):
    """Lindemann falloff blend of the high- and low-pressure limit rates:
    k = k_inf / (1 + k_inf / (k_0 * [M])).
    """
    k_inf = calculate_arr_rate(high_parameters[0], high_parameters[1], high_parameters[2], temperature)
    k_0 = calculate_arr_rate(low_parameters[0], low_parameters[1], low_parameters[2], temperature)
    return k_inf / (1 + k_inf / (k_0 * density_molar))
def calculate_reduced_pressure(high_parameters, low_parameters, corrected_density, temperature):
    """Reduced pressure P_r = k_0 * [M] / k_inf used by the Troe formula."""
    k_inf = calculate_arr_rate(high_parameters[0], high_parameters[1], high_parameters[2], temperature)
    k_0 = calculate_arr_rate(low_parameters[0], low_parameters[1], low_parameters[2], temperature)
    return k_0 * corrected_density / k_inf
| [
"numpy.exp",
"numpy.log10"
] | [((1257, 1273), 'numpy.log10', 'np.log10', (['F_cent'], {}), '(F_cent)\n', (1265, 1273), True, 'import numpy as np\n'), ((1369, 1395), 'numpy.log10', 'np.log10', (['reduced_pressure'], {}), '(reduced_pressure)\n', (1377, 1395), True, 'import numpy as np\n'), ((415, 437), 'numpy.exp', 'np.exp', (['(-ea / T / Rkin)'], {}), '(-ea / T / Rkin)\n', (421, 437), True, 'import numpy as np\n'), ((1195, 1236), 'numpy.exp', 'np.exp', (['(-troe_parameters[3] / temperature)'], {}), '(-troe_parameters[3] / temperature)\n', (1201, 1236), True, 'import numpy as np\n'), ((965, 1006), 'numpy.exp', 'np.exp', (['(-temperature / troe_parameters[1])'], {}), '(-temperature / troe_parameters[1])\n', (971, 1006), True, 'import numpy as np\n'), ((1027, 1068), 'numpy.exp', 'np.exp', (['(-temperature / troe_parameters[2])'], {}), '(-temperature / troe_parameters[2])\n', (1033, 1068), True, 'import numpy as np\n')] |
"""This module contains the mathematical formulas to calculate several
stock indicators
"""
__author__ = '<NAME>'
__version__ = '1.0'
import numpy as np
def movingaverage(values, window):
    """Simple moving average of ``values`` over a fixed window.

    Args:
        values: sequence of numeric samples.
        window (int): averaging window length.

    Returns:
        numpy.ndarray: 'valid'-mode averages, length ``len(values) - window + 1``.
    """
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'valid')
def rsi_function(prices, period=14):
    """Relative Strength Index (RSI) with Wilder-style smoothing.

    Args:
        prices: sequence of prices (floats).
        period (int): smoothing period for gains/losses.

    Returns:
        numpy.ndarray: RSI values, one per price; the first ``period``
        entries all carry the seed value.
    """
    deltas = np.diff(prices)
    rsi = np.zeros_like(prices)
    # Seed the averages from the leading price changes (the slice takes
    # period+1 deltas, matching the widely circulated reference snippet).
    seed = deltas[:period + 1]
    avg_gain = seed[seed >= 0].sum() / period
    avg_loss = -seed[seed < 0].sum() / period
    rs = avg_gain / avg_loss
    rsi[:period] = 100. - 100. / (1. + rs)
    # Exponential-style update: each new delta is folded into the running
    # gain/loss averages with weight 1/period.
    for i in range(period, len(prices)):
        delta = deltas[i - 1]
        gain = delta if delta > 0 else 0.
        loss = -delta if delta <= 0 else 0.
        avg_gain = (avg_gain * (period - 1) + gain) / period
        avg_loss = (avg_loss * (period - 1) + loss) / period
        rs = avg_gain / avg_loss
        rsi[i] = 100. - 100. / (1. + rs)
    return rsi
| [
"numpy.convolve",
"numpy.zeros_like",
"numpy.diff",
"numpy.repeat"
] | [((526, 563), 'numpy.convolve', 'np.convolve', (['values', 'weights', '"""valid"""'], {}), "(values, weights, 'valid')\n", (537, 563), True, 'import numpy as np\n'), ((917, 932), 'numpy.diff', 'np.diff', (['prices'], {}), '(prices)\n', (924, 932), True, 'import numpy as np\n'), ((1067, 1088), 'numpy.zeros_like', 'np.zeros_like', (['prices'], {}), '(prices)\n', (1080, 1088), True, 'import numpy as np\n'), ((485, 507), 'numpy.repeat', 'np.repeat', (['(1.0)', 'window'], {}), '(1.0, window)\n', (494, 507), True, 'import numpy as np\n')] |
### prop predict RNN-LSTM ###
## tensor board ##
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow.contrib import rnn
import warnings
# Silence deprecation chatter from the TF1-era APIs used below.
warnings.filterwarnings('ignore')
tf.set_random_seed(777)
tf.reset_default_graph()
# NOTE(review): `time` axis labels are built but never used below -- confirm.
time = ["%i"%(i) + "-%i"%(j) for i in range(2010, 2022) for j in range(3, 12, 4)]
## parameter ##
seq_length = 10 # sequence length of the data (related samples) -> output rows
data_dim = 1 # input dimension --> one population count (per district)
output_dim = 1 # output dimension --> one predicted value
#hidden_size = 20 # output columns after the cell operation
learning_rate = 0.1
iteration = 8000
m = 105 # --> None
MSE_list = []
predict_list = []
is_training = True
l2norm = 0.0001
### data preprocessing ###
all_data = pd.read_csv("d:/project_data/peopleDataAll01.csv", sep=",", encoding='cp949')
## Cheongunhyoja-dong LSTM ## (one model per district column)
for k in range(1,10):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(dtype=tf.float32)
    test1 = all_data.iloc[:, [k]] # shape(105,1) m = 105
    # train scaling #
    # NOTE(review): the scaler is fit on the entire series before the
    # train/test split (information leakage into the test set) -- confirm.
    mm1 = StandardScaler()
    test1 = mm1.fit_transform(test1)
    ## split ## --> time series (chronological order)
    train_size = int(len(test1) * 0.8)
    train_set = test1[:train_size, :] # shape(512, 5)
    test_set = test1[train_size:, :] # test(220, 5)
    # RNN data building #
def build(time_series, seq_length):
x_data = []
y_data = []
for i in range(0, len(time_series) - seq_length):
x_tmp = time_series[i: i + seq_length, :]
y_tmp = time_series[i + seq_length, [-1]]
x_data.append(x_tmp)
y_data.append(y_tmp)
return np.array(x_data), np.array(y_data)
x_train, y_train = build(train_set, seq_length)
x_test, y_test = build(test_set, seq_length)
predict_x = test_set[-seq_length:].reshape(1, seq_length, 1)
## RNN building ##
# cell #
def lstm_cell(hidden_size):
cell = tf.nn.rnn_cell.LSTMCell(num_units=hidden_size, activation=tf.tanh)
return cell
    # Two dropout-wrapped LSTM cells (15 and 10 units) are stacked below.
    cell1 = rnn.DropoutWrapper(lstm_cell(15), input_keep_prob=keep_prob, output_keep_prob=keep_prob, seed=77)
    cell2 = rnn.DropoutWrapper(lstm_cell(10), input_keep_prob=keep_prob, output_keep_prob=keep_prob, seed=77)
    # cell3 = rnn.DropoutWrapper(lstm_cell(10), input_keep_prob=keep_prob, output_keep_prob=keep_prob, seed=77)
    # cell4 = rnn.DropoutWrapper(lstm_cell(), input_keep_prob=keep_prob, output_keep_prob=keep_prob, seed=77)
    #cell5 = rnn.DropoutWrapper(lstm_cell(), input_keep_prob=keep_prob, output_keep_prob=keep_prob, seed=77)
    #cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True, activation=tf.tanh)
    cell = rnn.MultiRNNCell([cell1, cell2], state_is_tuple=True) # stacked dropout cells (comment said 5; 2 are used)
    #
    X = tf.placeholder(dtype=tf.float32, shape=[None, seq_length, data_dim])
    y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
    #
    ## initialization #
    output, _state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    Y_pred = tf.contrib.layers.fully_connected(output[:, -1], output_dim, activation_fn=None) # last cell output --> 15 days ahead
    # # config #
    # config = tf.ConfigProto(log_device_placement=True)
    # config.gpu_options.allow_growth = True
    # Small fully-connected head (1 -> 100 -> 1) with batch norm + dropout.
    init = tf.contrib.layers.xavier_initializer(seed=77)
    W1 = tf.Variable(init([1, 100]), name='weight1')
    b1 = tf.Variable(init([100]), name='bias1')
    layer1 = tf.matmul(Y_pred, W1) + b1
    l1 = tf.contrib.layers.batch_norm(layer1, center=True, scale=True,
                                 is_training=is_training)
    L1 = tf.nn.relu(l1, name='relu1')
    L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
    W2 = tf.Variable(init([100, 1]), name='weight2')
    b2 = tf.Variable(init([1]), name='bias2')
    # NOTE(review): `hypothesis` is built but never used in the loss below;
    # the loss is taken on Y_pred directly -- confirm intent.
    hypothesis = tf.matmul(L1, W2) + b2
    # cost #
    cost = tf.reduce_sum(tf.square(Y_pred - y)) # sum of squares --> plain squared loss suffices for numeric prediction
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train = opt.minimize(cost)
    ## tf.trainable --> l2 norm ##
    var = tf.trainable_variables()
    # NOTE(review): `l2reg` is computed but never added to `cost` -- confirm.
    l2reg = tf.add_n([tf.nn.l2_loss(v) for v in var if 'bias' not in v.name]) * l2norm
    # cost #
    # NOTE(review): cost/opt are redefined here, shadowing the pair above;
    # only the `train` op created under control_dependencies is fetched later.
    cost = tf.reduce_sum(tf.square(Y_pred - y)) # sum of squares --> plain squared loss suffices for numeric prediction
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # batch_norm
    with tf.control_dependencies(update_ops):
        train = opt.minimize(cost)
    # MSE # --> mean squared error
    targets= tf.placeholder(tf.float32, [None, 1])
    predicts = tf.placeholder(tf.float32, [None, 1])
    # Despite the name, this is the root of the mean squared error (RMSE).
    MSE = tf.sqrt(tf.reduce_mean(tf.square(predicts - targets)))
    ## session ##
    # training#
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    sess.run(tf.global_variables_initializer())
    for i in range(iteration):
        cost_val, _, out= sess.run([cost, train, output], feed_dict={X: x_train, y: y_train, keep_prob:0.7})
        if i % 1000 == 0:
            print(cost_val)
    # predict #
    is_training = False
    y_hat_train = sess.run(Y_pred, feed_dict={X: x_train, keep_prob:1.0})
    y_hat = sess.run(Y_pred, feed_dict={X: x_test, keep_prob:1.0})
    # y_hat = mm1.inverse_transform(y_hat)
    # y_test = mm1.inverse_transform(y_test)
    y_hat = mm1.inverse_transform(y_hat)
    y_test = mm1.inverse_transform(y_test)
    # NOTE(review): RMSE_train is computed on scaled values while RMSE below
    # uses inverse-transformed (original-unit) values -- different scales.
    RMSE_train = sess.run(MSE, feed_dict={targets: y_train, predicts: y_hat_train, keep_prob:1.0})
    RMSE = sess.run(MSE, feed_dict={targets: y_test, predicts: y_hat, keep_prob:1.0})
    print("RMSE_train: ", RMSE_train)
    print("RMSE: ", RMSE)
    predict_hat = sess.run(Y_pred, feed_dict={X: predict_x, keep_prob:1.0})
    MSE_list.append(RMSE)
    predict_list.append(mm1.inverse_transform(predict_hat)[0,0])
    plt.figure(figsize=(8,3))
    plt.plot(y_train, 'r-')
    plt.plot(y_hat_train, 'b-')
    plt.xlabel("Time")
    plt.ylabel("Population")
    plt.show()
    plt.figure(figsize=(8,3))
    plt.plot(y_test, 'r-')
    plt.plot(y_hat, 'b-')
    plt.xlabel("Time")
    plt.ylabel("Population")
    plt.show()
    sess.close()
# NOTE(review): this second close targets the last iteration's session,
# which is already closed inside the loop above.
sess.close()
#plt.figure()
#plt.plot(y_train, 'r-')
#plt.plot(y_hat_train, 'b-')
#plt.show()
#
#plt.figure()
#plt.plot(y_test, 'r-')
#plt.plot(y_hat, 'b-')
#plt.show()
mse = pd.DataFrame(MSE_list)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.nn.dropout",
"tensorflow.set_random_seed",
"tensorflow.GPUOptions",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xla... | [((244, 277), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (267, 277), False, 'import warnings\n'), ((279, 302), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(777)'], {}), '(777)\n', (297, 302), True, 'import tensorflow as tf\n'), ((303, 327), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (325, 327), True, 'import tensorflow as tf\n'), ((749, 826), 'pandas.read_csv', 'pd.read_csv', (['"""d:/project_data/peopleDataAll01.csv"""'], {'sep': '""","""', 'encoding': '"""cp949"""'}), "('d:/project_data/peopleDataAll01.csv', sep=',', encoding='cp949')\n", (760, 826), True, 'import pandas as pd\n'), ((6389, 6411), 'pandas.DataFrame', 'pd.DataFrame', (['MSE_list'], {}), '(MSE_list)\n', (6401, 6411), True, 'import pandas as pd\n'), ((871, 895), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (893, 895), True, 'import tensorflow as tf\n'), ((912, 944), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (926, 944), True, 'import tensorflow as tf\n'), ((1034, 1050), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1048, 1050), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2662, 2715), 'tensorflow.contrib.rnn.MultiRNNCell', 'rnn.MultiRNNCell', (['[cell1, cell2]'], {'state_is_tuple': '(True)'}), '([cell1, cell2], state_is_tuple=True)\n', (2678, 2715), False, 'from tensorflow.contrib import rnn\n'), ((2748, 2816), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, seq_length, data_dim]'}), '(dtype=tf.float32, shape=[None, seq_length, data_dim])\n', (2762, 2816), True, 'import tensorflow as tf\n'), ((2825, 2874), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, 1]'}), '(dtype=tf.float32, shape=[None, 1])\n', (2839, 2874), True, 'import tensorflow as tf\n'), ((2915, 
2959), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'X'], {'dtype': 'tf.float32'}), '(cell, X, dtype=tf.float32)\n', (2932, 2959), True, 'import tensorflow as tf\n'), ((2974, 3059), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['output[:, -1]', 'output_dim'], {'activation_fn': 'None'}), '(output[:, -1], output_dim, activation_fn=None\n )\n', (3007, 3059), True, 'import tensorflow as tf\n'), ((3216, 3261), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': '(77)'}), '(seed=77)\n', (3252, 3261), True, 'import tensorflow as tf\n'), ((3412, 3503), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['layer1'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training'}), '(layer1, center=True, scale=True, is_training=\n is_training)\n', (3440, 3503), True, 'import tensorflow as tf\n'), ((3546, 3574), 'tensorflow.nn.relu', 'tf.nn.relu', (['l1'], {'name': '"""relu1"""'}), "(l1, name='relu1')\n", (3556, 3574), True, 'import tensorflow as tf\n'), ((3584, 3622), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['L1'], {'keep_prob': 'keep_prob'}), '(L1, keep_prob=keep_prob)\n', (3597, 3622), True, 'import tensorflow as tf\n'), ((3891, 3942), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3913, 3942), True, 'import tensorflow as tf\n'), ((4024, 4048), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4046, 4048), True, 'import tensorflow as tf\n'), ((4256, 4307), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4278, 4307), True, 'import tensorflow as tf\n'), ((4325, 4367), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4342, 4367), True, 'import tensorflow as tf\n'), ((4515, 
4552), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (4529, 4552), True, 'import tensorflow as tf\n'), ((4568, 4605), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (4582, 4605), True, 'import tensorflow as tf\n'), ((4728, 4778), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (4741, 4778), True, 'import tensorflow as tf\n'), ((5876, 5902), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (5886, 5902), True, 'import matplotlib.pyplot as plt\n'), ((5906, 5929), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train', '"""r-"""'], {}), "(y_train, 'r-')\n", (5914, 5929), True, 'import matplotlib.pyplot as plt\n'), ((5934, 5961), 'matplotlib.pyplot.plot', 'plt.plot', (['y_hat_train', '"""b-"""'], {}), "(y_hat_train, 'b-')\n", (5942, 5961), True, 'import matplotlib.pyplot as plt\n'), ((5966, 5984), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (5976, 5984), True, 'import matplotlib.pyplot as plt\n'), ((5989, 6013), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population"""'], {}), "('Population')\n", (5999, 6013), True, 'import matplotlib.pyplot as plt\n'), ((6018, 6028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6026, 6028), True, 'import matplotlib.pyplot as plt\n'), ((6038, 6064), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (6048, 6064), True, 'import matplotlib.pyplot as plt\n'), ((6068, 6090), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test', '"""r-"""'], {}), "(y_test, 'r-')\n", (6076, 6090), True, 'import matplotlib.pyplot as plt\n'), ((6095, 6116), 'matplotlib.pyplot.plot', 'plt.plot', (['y_hat', '"""b-"""'], {}), "(y_hat, 'b-')\n", (6103, 6116), True, 'import matplotlib.pyplot as plt\n'), ((6121, 6139), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (6131, 6139), True, 'import matplotlib.pyplot as plt\n'), ((6144, 6168), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population"""'], {}), "('Population')\n", (6154, 6168), True, 'import matplotlib.pyplot as plt\n'), ((6173, 6183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6181, 6183), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1981), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', ([], {'num_units': 'hidden_size', 'activation': 'tf.tanh'}), '(num_units=hidden_size, activation=tf.tanh)\n', (1938, 1981), True, 'import tensorflow as tf\n'), ((3376, 3397), 'tensorflow.matmul', 'tf.matmul', (['Y_pred', 'W1'], {}), '(Y_pred, W1)\n', (3385, 3397), True, 'import tensorflow as tf\n'), ((3744, 3761), 'tensorflow.matmul', 'tf.matmul', (['L1', 'W2'], {}), '(L1, W2)\n', (3753, 3761), True, 'import tensorflow as tf\n'), ((3814, 3835), 'tensorflow.square', 'tf.square', (['(Y_pred - y)'], {}), '(Y_pred - y)\n', (3823, 3835), True, 'import tensorflow as tf\n'), ((4179, 4200), 'tensorflow.square', 'tf.square', (['(Y_pred - y)'], {}), '(Y_pred - y)\n', (4188, 4200), True, 'import tensorflow as tf\n'), ((4390, 4425), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (4413, 4425), True, 'import tensorflow as tf\n'), ((4862, 4895), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4893, 4895), True, 'import tensorflow as tf\n'), ((1621, 1637), 'numpy.array', 'np.array', (['x_data'], {}), '(x_data)\n', (1629, 1637), True, 'import numpy as np\n'), ((1639, 1655), 'numpy.array', 'np.array', (['y_data'], {}), '(y_data)\n', (1647, 1655), True, 'import numpy as np\n'), ((4639, 4668), 'tensorflow.square', 'tf.square', (['(predicts - targets)'], {}), '(predicts - targets)\n', (4648, 4668), True, 'import tensorflow as tf\n'), ((4808, 4847), 'tensorflow.ConfigProto', 
'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (4822, 4847), True, 'import tensorflow as tf\n'), ((4071, 4087), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (4084, 4087), True, 'import tensorflow as tf\n')] |
from __future__ import absolute_import
from chainer import backend
from chainer import Variable
import numpy as np
class ReplayBuffer(object):
    """ Buffer for handling the experience replay.
    Args:
        size (int): buffer size
        p (float): probability to evoke the past experience
        return_variable (bool): if True, return chainer's variable
    See also:
        https://arxiv.org/pdf/1612.07828.pdf
        https://arxiv.org/pdf/1703.10593.pdf
    """
    def __init__(self, size, p=0.5, return_variable=True):
        self.size = size
        self.p = p
        self.return_variable = return_variable
        # Raw array storage; grows lazily as batches arrive.
        self._buffer = []
    @property
    def buffer(self):
        # Expose the stored samples, or None while still empty.
        if len(self._buffer) == 0:
            return None
        return self._buffer
    def _preprocess(self, x):
        # Unwrap chainer Variables so the buffer always stores raw arrays.
        if isinstance(x, Variable):
            x = x.array
        return x
    def _postprocess(self, x):
        # Re-wrap as a Variable only when requested at construction time.
        if not self.return_variable:
            return x
        return Variable(x)
    def __call__(self, samples):
        """Mix the incoming batch with previously seen samples.

        Until the buffer is full, incoming batches are appended as-is.
        Once full, each incoming sample is, with probability ``p``, swapped
        with a randomly chosen buffered sample: the old sample is returned
        in the batch and the new one is stored in its place.
        """
        samples = self._preprocess(samples)
        xp = backend.get_array_module(samples)
        n_samples = len(samples)
        if self.size == 0:
            # Replay disabled: pass the batch through untouched.
            pass
        elif len(self._buffer) == 0:
            self._buffer = samples
        elif len(self._buffer) < self.size:
            # NOTE(review): this appends the whole batch, so the buffer can
            # exceed `size` by up to one batch -- confirm intent.
            self._buffer = xp.vstack((self._buffer, samples))
        else:
            # evoke the memory
            random_bool = np.random.rand(n_samples) < self.p
            # NOTE(review): indices are drawn with replacement, so the same
            # buffer slot or batch sample may be selected twice per call.
            replay_indices = np.random.randint(0, len(self._buffer), size=n_samples)[random_bool]
            sample_indices = np.random.randint(0, n_samples, size=n_samples)[random_bool]
            self._buffer[replay_indices], samples[sample_indices] \
                = samples[sample_indices], self._buffer[replay_indices] # swap
        return self._postprocess(samples)
| [
"chainer.Variable",
"chainer.backend.get_array_module",
"numpy.random.rand",
"numpy.random.randint"
] | [((988, 999), 'chainer.Variable', 'Variable', (['x'], {}), '(x)\n', (996, 999), False, 'from chainer import Variable\n'), ((1093, 1126), 'chainer.backend.get_array_module', 'backend.get_array_module', (['samples'], {}), '(samples)\n', (1117, 1126), False, 'from chainer import backend\n'), ((1455, 1480), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (1469, 1480), True, 'import numpy as np\n'), ((1617, 1664), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_samples'], {'size': 'n_samples'}), '(0, n_samples, size=n_samples)\n', (1634, 1664), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Useful utilities
"""
import sys
import numpy as np
from numpy import array, zeros
import csv
from happyfuntokenizing import Tokenizer
# Module-level tokenizer shared by tokenize(); the duplicate
# import/instantiation pair that followed has been removed.
TOKENIZER = Tokenizer(preserve_case=True)
import itertools
BLUE = '\033[34;1m'
CLOSE = '\033[0m'
def log(msg, *args):
    """Format *msg* with *args* and write it to stderr in blue."""
    print(BLUE + str(msg).format(*args) + CLOSE, file=sys.stderr)
class Index(object):
    """Lazily assign consecutive integer labels to hashable keys.

    Unseen keys get the next free integer on first access; repeated access
    returns the same label.  When ``max_labels`` is set, exceeding it
    triggers an AssertionError.
    """
    def __init__(self, max_labels=None):
        self.index = {}
        self.max_labels = max_labels
    def __getitem__(self, key):
        try:
            return self.index[key]
        except KeyError:
            assert self.max_labels is None or len(self.index) < self.max_labels, "Too many unique labels"
            label = len(self.index)
            self.index[key] = label
            return label
# Emoji -> ASCII emoticon substitutions applied by to_ascii().
UNICODE_EMOJI_TO_ASCII_TABLE = [
    ('o/'     , '👋'),
    ('</3'    , '💔'),
    ('<3'     , '💗'),
    ('=D'     , '😁'),
    (r":\')"  , '😂'),
    (':)'     , '😄'),
    ('0:)'    , '😇'),
    ('3:)'    , '😈'),
    ('*)'     , '😉'),
    (':|'     , '😐'),
    (':('     , '😒'),
    ('%)'     , '😖'),
    (':P'     , '😜'),
    (':@'     , '😠'),
    (':/'     , '😡'),
    (r":\'("  , '😢'),
    ('^5'     , '😤'),
    ('|-O'    , '😫'),
    (':###..' , '😰'),
    ('D:'     , '😱'),
    (':O'     , '😲'),
    (':$'     , '😳'),
    ('#-)'    , '😵'),
    (':#'     , '😶'),
    (':-J'    , '😼'),
    (':*'     , '😽'),
]
def to_ascii(text):
    """Replace known emoji with ASCII emoticons and drop all other
    non-ASCII characters.

    Args:
        text (str): arbitrary unicode text.

    Returns:
        str: pure-ASCII text.
    """
    # Convert known emoji to their ASCII emoticon equivalents.
    for asci, unicod in UNICODE_EMOJI_TO_ASCII_TABLE:
        text = text.replace(unicod, asci)
    # Fix: the original called the Python 2 `unicode` builtin, which is a
    # NameError on Python 3.  Encode/decode achieves the same stripping.
    return text.encode('ascii', errors='ignore').decode('ascii')
def tokenize(text):
    """Split *text* into tokens via the module-level happy-fun tokenizer."""
    tokens = TOKENIZER.tokenize(text)
    return tokens
def ordc(c):
    """Map a character to a compact code in [0, 95].

    Printable ASCII (32..126) maps to 0..94; any other character maps
    to the shared bucket 95.
    """
    code = ord(c)
    if 32 <= code <= 126:
        return code - 32
    return 95
class RowObject(object):
    """
    Is an empty object that can be modified by the factory to have the required fields.
    """
    # Attributes are attached dynamically by RowObjectFactory.build().
    pass
class RowObjectFactory(object):
    """Builds RowObject instances that follow a fixed column schema."""
    def __init__(self, header):
        # Column names, in file order.
        self.header = header
    def build(self, row):
        """Attach each column value as an attribute on a fresh RowObject."""
        obj = RowObject()
        for name, value in zip(self.header, row):
            setattr(obj, name, value)
        return obj
    @staticmethod
    def from_stream(stream):
        """Consume the header row, then map the remaining rows to RowObjects."""
        header = next(stream)
        return map(RowObjectFactory(header).build, stream)
def make_one_hot(vec, n_classes):
    """Return a ``(len(vec), n_classes)`` one-hot float matrix for class ids."""
    mat = np.zeros((len(vec), n_classes))
    # Fancy indexing sets one cell per row in a single vectorized step.
    mat[np.arange(len(vec)), list(vec)] = 1
    return mat
class WordVectorModel(dict):
    """
    A wrapper around a word vector model.

    Maps tokens to numpy vectors of dimension ``dim``.  Lookups of unseen
    tokens fall back to the vector stored under ``unknown``.
    """
    def __init__(self, wvecs, dim, preserve_case=False, unknown='*UNKNOWN*'):
        # NOTE(review): dict.__init__ copies keys as-is, bypassing the
        # lowercasing in __setitem__; with preserve_case=False, mixed-case
        # keys in `wvecs` become unreachable -- confirm vector files are
        # already lowercase.
        dict.__init__(self, wvecs)
        self.dim = dim
        self.preserve_case = preserve_case
        self.unknown = unknown
    def __getitem__(self, key):
        # Case-insensitive lookup unless preserve_case; unseen tokens map
        # to the unknown-token vector.
        if not self.preserve_case:
            key = str.lower(key)
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return dict.__getitem__(self, self.unknown)
    def __setitem__(self, key, val):
        if not self.preserve_case:
            key = str.lower(key)
        return dict.__setitem__(self, key, val)
    @staticmethod
    def from_file(f, preserve_case, unknown):
        """
        Construct a word vector map from a file.

        Each line is a token followed by its whitespace-separated vector
        components; all vectors must share one dimension, and the file must
        define a vector for ``unknown``.
        """
        log("Reading word vectors")
        wvecs = {}
        dim = None
        for line in f:
            parts = line.split()
            tok = parts[0]
            vec = array([float(x) for x in parts[1:]])
            if dim is None:
                dim = len(vec)
            assert dim == len(vec), "Incorrectly sized vector"
            wvecs[tok] = vec
        assert unknown in wvecs, "Unknown token not defined in word vectors"
        log("Done. Loaded {} vectors.", len(wvecs))
        return WordVectorModel(wvecs, dim, preserve_case, unknown)
    @staticmethod
    def from_filename(fname, preserve_case, unknown):
        """
        Construct a word vector map from a file path.
        """
        with open(fname) as f:
            return WordVectorModel.from_file(f, preserve_case, unknown)
    def embed_sentence(self, toks, max_length=50):
        """
        Return the list of tokens embedded as a (max_length, dim) matrix.

        Sentences longer than ``max_length`` are truncated; shorter ones
        are zero-padded.
        """
        X = zeros((max_length, self.dim))
        for i, w in enumerate(toks[:max_length]):
            X[i, :] = self[w]
        return X
    def embed_sentences(self, sentences, max_length=50):
        """
        Embed each sentence; the extra list nesting yields shape
        (n_sentences, 1, max_length, dim).
        """
        return array([[self.embed_sentence(toks, max_length)] for toks in sentences])
def test_wvec_model():
    """
    Test that the WordVectorModel handles case and unknowns correctly.

    Requires a local copy of ./glove.6B.50d.txt; the two reference vectors
    below are copied verbatim from that file.
    """
    import os
    from numpy import allclose
    assert os.path.exists('./glove.6B.50d.txt'), "Can't find word vector file at"
    undiplomatically = array([-0.44594,-1.1723,0.058833,-0.99806,0.065609,0.059089,0.60732,1.2965,-0.32984,-0.045185,-0.30061,0.33055,-0.18897,0.82746,-0.28756,-0.31784,-0.091562,-0.42301,1.173,-0.65538,0.027537,0.1238,0.26689,0.39363,0.62385,0.847,1.0387,0.82124,-0.064256,0.043767,-0.97034,-0.4336,1.1662,0.24741,0.54262,0.58686,0.51069,0.67763,-0.78139,-0.21806,0.029529,0.11175,-0.43608,0.41791,-0.34094,-1.0393,0.64999,0.2285,-0.033636,0.23816])
    unk = array([-0.1292,-0.2887,-0.01225,-0.05677,-0.2021,-0.08389,0.3336,0.1605,0.03867,0.1783,0.04697,-0.002858,0.291,0.04614,-0.2092,-0.06613,-0.06822,0.07666,0.3134,0.1785,-0.1226,-0.09917,-0.07496,0.06413,0.1444,0.6089,0.1746,0.05335,-0.01274,0.03474,-0.8124,-0.04689,0.2019,0.2031,-0.03936,0.06968,-0.01554,-0.03405,-0.06528,0.1225,0.1399,-0.1745,-0.08012,0.08495,-0.01042,-0.137,0.2013,0.1007,0.00653,0.01685])
    # NOTE(review): from_file is documented to take an open stream, but a
    # path string is passed here -- confirm this test still runs.
    model = WordVectorModel.from_file('./glove.6B.50d.txt', False, '*UNKNOWN*')
    assert allclose(undiplomatically, model['undiplomatically']), "Vectors are not equal!"
    # Case-insensitive lookup must hit the same vector.
    assert allclose(undiplomatically, model['UndIplomAtically']), "Vectors are not equal!"
    # Unseen token falls back to the unknown vector.
    assert allclose(unk, model['undiplomaticallyy']), "Vectors are not equal!"
def grouper(n, iterable):
    """Yield successive tuples of up to ``n`` items from *iterable*.

    grouper(3, 'ABCDEFG') --> ('A','B','C'), ('D','E','F'), ('G',)
    """
    it = iter(iterable)
    # iter(callable, sentinel) stops as soon as a chunk comes back empty.
    yield from iter(lambda: tuple(itertools.islice(it, n)), ())
| [
"os.path.exists",
"itertools.islice",
"numpy.allclose",
"numpy.array",
"numpy.zeros",
"happyfuntokenizing.Tokenizer"
] | [((198, 227), 'happyfuntokenizing.Tokenizer', 'Tokenizer', ([], {'preserve_case': '(True)'}), '(preserve_case=True)\n', (207, 227), False, 'from happyfuntokenizing import Tokenizer\n'), ((282, 311), 'happyfuntokenizing.Tokenizer', 'Tokenizer', ([], {'preserve_case': '(True)'}), '(preserve_case=True)\n', (291, 311), False, 'from happyfuntokenizing import Tokenizer\n'), ((5341, 5377), 'os.path.exists', 'os.path.exists', (['"""./glove.6B.50d.txt"""'], {}), "('./glove.6B.50d.txt')\n", (5355, 5377), False, 'import os\n'), ((5435, 5940), 'numpy.array', 'array', (['[-0.44594, -1.1723, 0.058833, -0.99806, 0.065609, 0.059089, 0.60732, 1.2965,\n -0.32984, -0.045185, -0.30061, 0.33055, -0.18897, 0.82746, -0.28756, -\n 0.31784, -0.091562, -0.42301, 1.173, -0.65538, 0.027537, 0.1238, \n 0.26689, 0.39363, 0.62385, 0.847, 1.0387, 0.82124, -0.064256, 0.043767,\n -0.97034, -0.4336, 1.1662, 0.24741, 0.54262, 0.58686, 0.51069, 0.67763,\n -0.78139, -0.21806, 0.029529, 0.11175, -0.43608, 0.41791, -0.34094, -\n 1.0393, 0.64999, 0.2285, -0.033636, 0.23816]'], {}), '([-0.44594, -1.1723, 0.058833, -0.99806, 0.065609, 0.059089, 0.60732, \n 1.2965, -0.32984, -0.045185, -0.30061, 0.33055, -0.18897, 0.82746, -\n 0.28756, -0.31784, -0.091562, -0.42301, 1.173, -0.65538, 0.027537, \n 0.1238, 0.26689, 0.39363, 0.62385, 0.847, 1.0387, 0.82124, -0.064256, \n 0.043767, -0.97034, -0.4336, 1.1662, 0.24741, 0.54262, 0.58686, 0.51069,\n 0.67763, -0.78139, -0.21806, 0.029529, 0.11175, -0.43608, 0.41791, -\n 0.34094, -1.0393, 0.64999, 0.2285, -0.033636, 0.23816])\n', (5440, 5940), False, 'from numpy import array, zeros\n'), ((5873, 6359), 'numpy.array', 'array', (['[-0.1292, -0.2887, -0.01225, -0.05677, -0.2021, -0.08389, 0.3336, 0.1605, \n 0.03867, 0.1783, 0.04697, -0.002858, 0.291, 0.04614, -0.2092, -0.06613,\n -0.06822, 0.07666, 0.3134, 0.1785, -0.1226, -0.09917, -0.07496, 0.06413,\n 0.1444, 0.6089, 0.1746, 0.05335, -0.01274, 0.03474, -0.8124, -0.04689, \n 0.2019, 0.2031, -0.03936, 0.06968, 
-0.01554, -0.03405, -0.06528, 0.1225,\n 0.1399, -0.1745, -0.08012, 0.08495, -0.01042, -0.137, 0.2013, 0.1007, \n 0.00653, 0.01685]'], {}), '([-0.1292, -0.2887, -0.01225, -0.05677, -0.2021, -0.08389, 0.3336, \n 0.1605, 0.03867, 0.1783, 0.04697, -0.002858, 0.291, 0.04614, -0.2092, -\n 0.06613, -0.06822, 0.07666, 0.3134, 0.1785, -0.1226, -0.09917, -0.07496,\n 0.06413, 0.1444, 0.6089, 0.1746, 0.05335, -0.01274, 0.03474, -0.8124, -\n 0.04689, 0.2019, 0.2031, -0.03936, 0.06968, -0.01554, -0.03405, -\n 0.06528, 0.1225, 0.1399, -0.1745, -0.08012, 0.08495, -0.01042, -0.137, \n 0.2013, 0.1007, 0.00653, 0.01685])\n', (5878, 6359), False, 'from numpy import array, zeros\n'), ((6374, 6427), 'numpy.allclose', 'allclose', (['undiplomatically', "model['undiplomatically']"], {}), "(undiplomatically, model['undiplomatically'])\n", (6382, 6427), False, 'from numpy import allclose\n'), ((6465, 6518), 'numpy.allclose', 'allclose', (['undiplomatically', "model['UndIplomAtically']"], {}), "(undiplomatically, model['UndIplomAtically'])\n", (6473, 6518), False, 'from numpy import allclose\n'), ((6556, 6597), 'numpy.allclose', 'allclose', (['unk', "model['undiplomaticallyy']"], {}), "(unk, model['undiplomaticallyy'])\n", (6564, 6597), False, 'from numpy import allclose\n'), ((4822, 4851), 'numpy.zeros', 'zeros', (['(max_length, self.dim)'], {}), '((max_length, self.dim))\n', (4827, 4851), False, 'from numpy import array, zeros\n'), ((6778, 6801), 'itertools.islice', 'itertools.islice', (['it', 'n'], {}), '(it, n)\n', (6794, 6801), False, 'import itertools\n')] |
import numpy as np
import pickle
import glob
from collections import defaultdict
import os
import sys
splits = {1, 2, 3, 4}
data_name = {'f1'} # {"f1", "f2", "f3", "f4", "f5"}
data_per = 0.35
base_dir = sys.argv[1]
ground_truth_dir = base_dir + 'groundTruth/' #sys.argv[1] # "/mnt/ssd/all_users/dipika/ms_tcn/data/50salads/groundTruth/"
for split in splits:
traindataset = base_dir + '/splits/train.split{}.bundle'.format(split)
all_train_dataset = open(traindataset).read().split("\n")[0:-1]
for dn in data_name:
activity_with_vid_dict = defaultdict(list)
for filename in all_train_dataset:
video_id = filename.split(".txt")[0]
main_act = video_id.split("_")[-1]
activity_with_vid_dict[main_act].append(filename)
uniq_labels = []
selected_vids = []
total_data = 0
count = 0
while True:
for activity in activity_with_vid_dict.keys():
amt_data = int(data_per * len(activity_with_vid_dict[activity]))
vids = np.random.choice(activity_with_vid_dict[activity], size=amt_data)
total_data += amt_data
selected_vids.extend(vids)
temp_labels = []
for vid in vids:
labels = open(os.path.join(ground_truth_dir, vid)).read().split("\n")[0:-1]
temp_labels.extend(np.unique(labels))
uniq_labels.extend(np.unique(temp_labels))
uniq_labels = np.unique(uniq_labels).tolist()
if len(uniq_labels) == 48:
print(f"Completed selecting for {dn} and data per {data_per}, Number of videos selected = {total_data}")
break
else:
if count % 50 == 0:
print(f"Completed tryng {count} times with unique labels = {len(uniq_labels)}")
if count > 2000:
all_train_dataset = open(traindataset).read().split("\n")[0:-1]
activity_with_vid_dict = defaultdict(list)
for filename in all_train_dataset:
video_id = filename.split(".txt")[0]
main_act = video_id.split("_")[-1]
activity_with_vid_dict[main_act].append(filename)
total_data = 0
selected_vids = []
uniq_labels = []
count = count + 1
continue
# semi_supervised_train_dataset = base_dir + "/error_bars/train.split{}_errn{}_amt{}.bundle".format(split, dn, data_per)
# semi_supervised/train.split1_amt0.05.bundle
semi_supervised_train_dataset = base_dir + "/semi_supervised/train.split{}_amt{}.bundle".format(split, data_per)
with open(semi_supervised_train_dataset, "w") as wfp:
wfp.write("\n".join(selected_vids))
wfp.write("\n")
all_train_dataset = list(set(all_train_dataset) - set(selected_vids))
| [
"numpy.random.choice",
"collections.defaultdict",
"numpy.unique",
"os.path.join"
] | [((562, 579), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (573, 579), False, 'from collections import defaultdict\n'), ((1074, 1139), 'numpy.random.choice', 'np.random.choice', (['activity_with_vid_dict[activity]'], {'size': 'amt_data'}), '(activity_with_vid_dict[activity], size=amt_data)\n', (1090, 1139), True, 'import numpy as np\n'), ((1478, 1500), 'numpy.unique', 'np.unique', (['temp_labels'], {}), '(temp_labels)\n', (1487, 1500), True, 'import numpy as np\n'), ((2063, 2080), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2074, 2080), False, 'from collections import defaultdict\n'), ((1424, 1441), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1433, 1441), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.unique', 'np.unique', (['uniq_labels'], {}), '(uniq_labels)\n', (1541, 1554), True, 'import numpy as np\n'), ((1323, 1358), 'os.path.join', 'os.path.join', (['ground_truth_dir', 'vid'], {}), '(ground_truth_dir, vid)\n', (1335, 1358), False, 'import os\n')] |
import numpy as np
from scipy.signal import convolve2d
from dataclasses import dataclass, field
from copy import deepcopy
from typing import List, Tuple
import os
from environments.environment_abc import Environment, State, Action
@dataclass
class HomebrewConnect4State(State):
board: np.ndarray = field(default_factory=lambda: np.zeros((6, 7),
dtype=np.uint8))
current_player: int = 0
reward_list: List[int] = field(default_factory=list)
@property
def legal_actions(self):
if self.is_terminal():
return []
return list(np.where(self.board[0, :] == 0)[0])
def rewards(self):
return self.reward_list
def returns(self):
return self.rewards()
def player_reward(self, player_ID: int):
return self.reward_list[player_ID]
def is_terminal(self):
return self.terminal
def apply_action(self, action: int):
if action not in self.legal_actions:
raise ValueError("Given action not in legal actions(" +
str(self.legal_actions)+"): " + str(action))
# action == column_id. Get row ID
# get lowest index that has a 0 where column == action
row_ID = np.where(self.board[:, action] == 0)[0][-1]
# 0 == empty, 1 == playerID0, 2 == playerID1
self.board[row_ID, action] = self.current_player + 1
# check if playerID just won with this action
if self.__check_player_won(self.current_player):
self.terminal = True
# assert self.terminal == True and self.is_terminal() == True
self.reward_list[self.current_player] = 1
self.reward_list[(self.current_player+1) % 2] = -1
elif not self.legal_actions:
# else check if board is full(draw)
self.terminal = True
else:
# progress to next player's ply
self.current_player = (self.current_player + 1) % 2
def __post_init__(self):
self.reward_list.extend([0, 0])
# self.board.fill(0) # for some reason we need to
# solution taken from: https://stackoverflow.com/a/63991845/6301103
# create masks
self.__horizontal_kernel = np.array([[1, 1, 1, 1]],
dtype=np.uint8) # 1x4
self.__vertical_kernel = np.transpose(self.__horizontal_kernel) # 4x1
self.__diag1_kernel = np.eye(4, dtype=np.uint8)
self.__diag2_kernel = np.fliplr(self.__diag1_kernel)
self.__detection_kernels = [self.__horizontal_kernel,
self.__vertical_kernel,
self.__diag1_kernel,
self.__diag2_kernel]
def __check_player_won(self, player_ID: int):
# solution taken from: https://stackoverflow.com/a/63991845/6301103
# check via convolution if current_player has won
for kernel in self.__detection_kernels:
conv_result = convolve2d(
self.board == (player_ID+1),
kernel, mode="valid")
if (conv_result == 4).any():
# print("End of game", conv_result)
# print(self.board_repr)
# print(self.terminal)
return True
else:
return False
@property
def check_won(self):
return self.__check_player_won(self.current_player)
@property
def board_repr(self):
retval = str()
for row_index in range(self.board.shape[0]):
for col_index in range(self.board.shape[1]):
pos_state = self.board[row_index, col_index]
if pos_state == 0:
retval += '.'
elif pos_state == 1:
retval += 'x'
elif pos_state == 2:
retval += 'o'
else:
raise ValueError("Invalid Value in Board: "+str(pos_state))
retval += str(os.linesep)
return retval
def clone(self):
return deepcopy(self)
class HomebrewConnect4Environment(Environment):
def __init__(self, initial_state: State = None):
self.current_state: State = initial_state or self.get_initial()
self.is_done: bool = False
self.game_name: str = "Connect_Four"
def reset(self, initial_state=None):
self.current_state: State = initial_state or self.get_initial()
self.is_done: bool = False
def get_initial(self) -> State:
return HomebrewConnect4State()
def take_random_action(self, current_state: HomebrewConnect4State = None) -> int:
possible_actions = self.get_possible_actions(current_state)
return np.random.choice(possible_actions)
def step(self, action: Action) -> Tuple[float, State, bool]:
"""
Execute the given action and return new state, achieved reward
and whether the game is done or not.
Args:
action (Action): [description]
Returns:
Tuple[float, State, bool]: reward, state, is_done
"""
player_ID = self.current_state.current_player
self.current_state.apply_action(action)
self.is_done = self.current_state.is_terminal()
rewards = self.current_state.player_reward(player_ID)
state = self.current_state.clone()
# New state, reward, game over
return rewards, state, self.is_done
def get_possible_actions(self, current_state: HomebrewConnect4State = None) -> List[int]:
current_state = current_state or self.current_state
return current_state.legal_actions
def get_possible_next_states(self, current_state=None) -> List[State]:
current_state = current_state or self.current_state
possible_actions = self.get_possible_actions(
current_state=current_state)
possible_states = []
# duplicate current state
duplicate_state = current_state.clone()
for possible_action in possible_actions:
working_copy = duplicate_state.clone()
working_copy.apply_action(possible_action)
# get current state from duplicate state
possible_states.append(working_copy.clone())
# duplicate_state.undo_action(possible_action)
return possible_states, possible_actions
| [
"scipy.signal.convolve2d",
"numpy.eye",
"numpy.random.choice",
"numpy.fliplr",
"numpy.where",
"numpy.array",
"numpy.zeros",
"copy.deepcopy",
"numpy.transpose",
"dataclasses.field"
] | [((488, 515), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (493, 515), False, 'from dataclasses import dataclass, field\n'), ((2276, 2316), 'numpy.array', 'np.array', (['[[1, 1, 1, 1]]'], {'dtype': 'np.uint8'}), '([[1, 1, 1, 1]], dtype=np.uint8)\n', (2284, 2316), True, 'import numpy as np\n'), ((2401, 2439), 'numpy.transpose', 'np.transpose', (['self.__horizontal_kernel'], {}), '(self.__horizontal_kernel)\n', (2413, 2439), True, 'import numpy as np\n'), ((2477, 2502), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.uint8'}), '(4, dtype=np.uint8)\n', (2483, 2502), True, 'import numpy as np\n'), ((2533, 2563), 'numpy.fliplr', 'np.fliplr', (['self.__diag1_kernel'], {}), '(self.__diag1_kernel)\n', (2542, 2563), True, 'import numpy as np\n'), ((4141, 4155), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (4149, 4155), False, 'from copy import deepcopy\n'), ((4806, 4840), 'numpy.random.choice', 'np.random.choice', (['possible_actions'], {}), '(possible_actions)\n', (4822, 4840), True, 'import numpy as np\n'), ((3060, 3121), 'scipy.signal.convolve2d', 'convolve2d', (['(self.board == player_ID + 1)', 'kernel'], {'mode': '"""valid"""'}), "(self.board == player_ID + 1, kernel, mode='valid')\n", (3070, 3121), False, 'from scipy.signal import convolve2d\n'), ((334, 366), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {'dtype': 'np.uint8'}), '((6, 7), dtype=np.uint8)\n', (342, 366), True, 'import numpy as np\n'), ((634, 665), 'numpy.where', 'np.where', (['(self.board[0, :] == 0)'], {}), '(self.board[0, :] == 0)\n', (642, 665), True, 'import numpy as np\n'), ((1278, 1314), 'numpy.where', 'np.where', (['(self.board[:, action] == 0)'], {}), '(self.board[:, action] == 0)\n', (1286, 1314), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from daug.transforms import build_transformation_matrix
# TODO: test shear, flip, and translate
def run_rotation_scale_tests(n=10):
heights = np.random.randint(16, 512, size=n)
widths = np.random.randint(16, 512, size=n)
thetas = (2 * np.pi) * np.random.random(n)
scales = np.random.random(n)
for i in range(n):
theta, scale = thetas[i], scales[i]
height, width = heights[i], widths[i]
# getRotMat2D only supports isotropic scaling
L = cv2.getRotationMatrix2D(
(height / 2, width / 2), 180. / np.pi * theta, scale)
M = build_transformation_matrix(
(height, width), theta=theta, stretch=(scale, scale))
assert np.allclose(L, M[:2, :]), (
'OpenCV transformation matrix:\n'
'%r,\nbut Daug transformation matrix:\n%r' % (L, M[:2, :]))
print('All rotation/scale tests passed.')
def main():
run_rotation_scale_tests(n=10)
if __name__ == '__main__':
main()
| [
"numpy.allclose",
"numpy.random.random",
"daug.transforms.build_transformation_matrix",
"numpy.random.randint",
"cv2.getRotationMatrix2D"
] | [((179, 213), 'numpy.random.randint', 'np.random.randint', (['(16)', '(512)'], {'size': 'n'}), '(16, 512, size=n)\n', (196, 213), True, 'import numpy as np\n'), ((227, 261), 'numpy.random.randint', 'np.random.randint', (['(16)', '(512)'], {'size': 'n'}), '(16, 512, size=n)\n', (244, 261), True, 'import numpy as np\n'), ((323, 342), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (339, 342), True, 'import numpy as np\n'), ((290, 309), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (306, 309), True, 'import numpy as np\n'), ((523, 601), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(height / 2, width / 2)', '(180.0 / np.pi * theta)', 'scale'], {}), '((height / 2, width / 2), 180.0 / np.pi * theta, scale)\n', (546, 601), False, 'import cv2\n'), ((626, 711), 'daug.transforms.build_transformation_matrix', 'build_transformation_matrix', (['(height, width)'], {'theta': 'theta', 'stretch': '(scale, scale)'}), '((height, width), theta=theta, stretch=(scale,\n scale))\n', (653, 711), False, 'from daug.transforms import build_transformation_matrix\n'), ((737, 761), 'numpy.allclose', 'np.allclose', (['L', 'M[:2, :]'], {}), '(L, M[:2, :])\n', (748, 761), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
from .... import units as u
from ... import FK4NoETerms, FK4
from ....time import Time
from ....table import Table
from ...angle_utilities import angular_separation
# It looks as though SLALIB, which AST relies on, assumes a simplified version
# of the e-terms corretion, so we have to up the tolerance a bit to get things
# to agree.
TOLERANCE = 1.e-5 # arcseconds
ROOT = os.path.dirname(os.path.abspath(__file__))
def test_fk4_no_e_fk5():
t = Table.read(os.path.join(ROOT, 'fk4_no_e_fk4.csv'), format='ascii')
# FK4 to FK5
c1 = FK4(t['ra_in'], t['dec_in'],
unit=(u.degree, u.degree),
obstime=Time(t['obstime'], scale='utc'))
c2 = c1.transform_to(FK4NoETerms)
# Find difference
diff = angular_separation(c2.ra.radian, c2.dec.radian,
np.radians(t['ra_fk4ne']), np.radians(t['dec_fk4ne']))
assert np.all(np.degrees(diff) * 3600. < TOLERANCE)
# FK5 to FK4
c1 = FK4NoETerms(t['ra_in'], t['dec_in'],
unit=(u.degree, u.degree),
obstime=Time(t['obstime'], scale='utc'))
c2 = c1.transform_to(FK4)
# Find difference
diff = angular_separation(c2.ra.radian, c2.dec.radian,
np.radians(t['ra_fk4']), np.radians(t['dec_fk4']))
assert np.all(np.degrees(diff) * 3600. < TOLERANCE)
| [
"os.path.abspath",
"numpy.degrees",
"os.path.join",
"numpy.radians"
] | [((598, 623), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (613, 623), False, 'import os\n'), ((672, 710), 'os.path.join', 'os.path.join', (['ROOT', '"""fk4_no_e_fk4.csv"""'], {}), "(ROOT, 'fk4_no_e_fk4.csv')\n", (684, 710), False, 'import os\n'), ((1028, 1053), 'numpy.radians', 'np.radians', (["t['ra_fk4ne']"], {}), "(t['ra_fk4ne'])\n", (1038, 1053), True, 'import numpy as np\n'), ((1055, 1081), 'numpy.radians', 'np.radians', (["t['dec_fk4ne']"], {}), "(t['dec_fk4ne'])\n", (1065, 1081), True, 'import numpy as np\n'), ((1456, 1479), 'numpy.radians', 'np.radians', (["t['ra_fk4']"], {}), "(t['ra_fk4'])\n", (1466, 1479), True, 'import numpy as np\n'), ((1481, 1505), 'numpy.radians', 'np.radians', (["t['dec_fk4']"], {}), "(t['dec_fk4'])\n", (1491, 1505), True, 'import numpy as np\n'), ((1102, 1118), 'numpy.degrees', 'np.degrees', (['diff'], {}), '(diff)\n', (1112, 1118), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.degrees', 'np.degrees', (['diff'], {}), '(diff)\n', (1536, 1542), True, 'import numpy as np\n')] |
import os
import numpy as np
import utils.eval_metrics as em
import config as cfg
import torch
import pandas as pd
import torch.nn as nn
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, multilabel_confusion_matrix
def train(Model, Trainloader, Optimizer, Criterion, Epoch):
""" Defines a training structure that considers the model, optimizer
and criterion and robtains data from a dataloader one batch at a
time.
Args:
Model: PyTorch Model
Trainloader: Iterable dataloader
Optimizer: Optimizer to update model's parameters
Criterion: Loss function to compute loss and backpropogate
Epoch: Training epoch #
Returns:
(Model, Optimizer) : Returns the model and optimizer after one training cycle
"""
for batch_num, (features, labels, lengths) in enumerate(Trainloader):
# Send data to device
features, labels = features.to(cfg.DEVICE), labels.to(cfg.DEVICE)
Optimizer.zero_grad()
pred = Model(features)
# Compute loss and update optimizer
loss = Criterion(pred, labels)
loss.backward()
Optimizer.step()
if batch_num % 2000 == 1:
curr_loss = float(loss.item())
print("Epoch: ", Epoch, "Training Loss: ", curr_loss)
# Clear redundant variables
torch.cuda.empty_cache()
del features
del labels
del loss
return Model, Optimizer
def eval(Model, Evalloader, Criterion, Epoch, filehandler=None, classes=None):
""" Defines a evaluation structure that considers the model, optimizer
and criterion and obtains data from a dataloader one batch at a
time.
Args:
Model: PyTorch Model
Evalloader: Iterable dataloader
Optimizer: Optimizer to update model's parameters
Criterion: Loss function to compute loss and backpropogate
Epoch: Training epoch #
Returns:
(Accuracy) : Returns the accuracy
"""
accuracy = 0
tot_loss = 0
true_labels = []
preds = []
for batch_num, (features, labels, lengths) in enumerate(Evalloader):
# Send data to device
features, labels = features.to(cfg.DEVICE), labels.to(cfg.DEVICE)
pred = Model(features)
# Compute loss
loss = Criterion(pred, labels)
pred = torch.where(pred[0,:].cpu() > 0.5, torch.tensor([1.0]),torch.tensor([0.0]))
labels = labels[0,:]
pred = pred.detach().numpy()
labels = labels.cpu().detach().numpy()
preds.append(pred.tolist())
true_labels.append(labels.tolist())
tot_loss += float(loss.item())
if batch_num % 50 == 1:
curr_loss = float(loss.item())
print("Epoch: ", Epoch, "Validation Loss: ", curr_loss)
# Clear redundant variables
torch.cuda.empty_cache()
del features
del labels
del loss
# Compute final metrics
true_labels = np.vstack(true_labels)
preds = np.vstack(preds)
accuracy, precision, recall, misclass_rate, _ = em.print_multilabel_report(true_labels, preds, filehandler, classes)
if filehandler:
print("\n\n\nTotal Accuracy: ", accuracy,
"Total Misclassification Rate: ", misclass_rate,
"Total Recall: ", recall,
"Total Precision: ", precision,
file=filehandler)
else:
print("Accuracy: ", accuracy,
"Misclassification Rate: ", misclass_rate,
"Recall: ", recall,
"Precision: ", precision)
return accuracy, tot_loss/(batch_num+1), recall, precision | [
"torch.cuda.empty_cache",
"torch.tensor",
"numpy.vstack",
"utils.eval_metrics.print_multilabel_report"
] | [((3116, 3138), 'numpy.vstack', 'np.vstack', (['true_labels'], {}), '(true_labels)\n', (3125, 3138), True, 'import numpy as np\n'), ((3151, 3167), 'numpy.vstack', 'np.vstack', (['preds'], {}), '(preds)\n', (3160, 3167), True, 'import numpy as np\n'), ((3221, 3289), 'utils.eval_metrics.print_multilabel_report', 'em.print_multilabel_report', (['true_labels', 'preds', 'filehandler', 'classes'], {}), '(true_labels, preds, filehandler, classes)\n', (3247, 3289), True, 'import utils.eval_metrics as em\n'), ((1421, 1445), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1443, 1445), False, 'import torch\n'), ((2987, 3011), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3009, 3011), False, 'import torch\n'), ((2514, 2533), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (2526, 2533), False, 'import torch\n'), ((2534, 2553), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (2546, 2553), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ____________developed by <NAME>____________________
# _________collaboration with <NAME>____________
import threading
from ._client_robot import ClientRobot
import time
import Pyro4
import cv2
from urllib import request, parse, error
import numpy as np
def track(image):
blur = cv2.GaussianBlur(image, (5, 5), 0)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower_green = np.array([29, 86, 6])
upper_green = np.array([64, 255, 255])
mask = cv2.inRange(hsv, lower_green, upper_green)
bmask = cv2.GaussianBlur(mask, (5, 5), 0)
moments = cv2.moments(bmask)
m00 = moments['m00']
centroid_x, centroid_y = None, None
if m00 != 0:
centroid_x = int(moments['m10'] / m00)
centroid_y = int(moments['m01'] / m00)
ctr = (-1, -1)
if centroid_x is not None and centroid_y is not None:
ctr = (centroid_x, centroid_y)
cv2.circle(image, ctr, 10, (255, 0, 0))
return ctr, image
def run_camera(cam):
while True:
c = cam.image
centro = []
centro, img = track(c)
# print centro
cv2.imshow('learnbot', img)
if cv2.waitKey(1) == 27:
exit(0)
# nombre del bot en la name no el fichero json
bot = ClientRobot("learnbot1@192.168.1.40")
cam = threading.Thread(target=run_camera, args=(bot.camera,))
cam.setDaemon(True)
cam.start()
bot.base.set__vel(-600, -600)
time.sleep(5)
bot.base.set__vel(0, 0)
bot.pantilt.move(80, 160)
for i in range(1, 200):
laser = bot.laser.get_laser()
mx = max(laser)
ind = laser.index(mx)
print("laser:%s ind:%s" % (laser, ind))
if ind == 0:
# bot.base.Set_Vel(300,100)
print("izquierda")
if ind == 1:
# bot.base.Set_Vel(200,200)
print("centro")
if ind == 2:
# bot.base.Set_Vel(100,300)
print("derecha")
time.sleep(0.1)
bot.base.set__vel(0, 0)
while True:
pass
| [
"cv2.inRange",
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.circle",
"cv2.cvtColor",
"cv2.moments",
"threading.Thread",
"cv2.GaussianBlur",
"cv2.waitKey"
] | [((1318, 1373), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_camera', 'args': '(bot.camera,)'}), '(target=run_camera, args=(bot.camera,))\n', (1334, 1373), False, 'import threading\n'), ((1436, 1449), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1446, 1449), False, 'import time\n'), ((334, 368), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (350, 368), False, 'import cv2\n'), ((379, 416), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (391, 416), False, 'import cv2\n'), ((435, 456), 'numpy.array', 'np.array', (['[29, 86, 6]'], {}), '([29, 86, 6])\n', (443, 456), True, 'import numpy as np\n'), ((475, 499), 'numpy.array', 'np.array', (['[64, 255, 255]'], {}), '([64, 255, 255])\n', (483, 499), True, 'import numpy as np\n'), ((511, 553), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_green', 'upper_green'], {}), '(hsv, lower_green, upper_green)\n', (522, 553), False, 'import cv2\n'), ((566, 599), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mask', '(5, 5)', '(0)'], {}), '(mask, (5, 5), 0)\n', (582, 599), False, 'import cv2\n'), ((614, 632), 'cv2.moments', 'cv2.moments', (['bmask'], {}), '(bmask)\n', (625, 632), False, 'import cv2\n'), ((1888, 1903), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1898, 1903), False, 'import time\n'), ((933, 972), 'cv2.circle', 'cv2.circle', (['image', 'ctr', '(10)', '(255, 0, 0)'], {}), '(image, ctr, 10, (255, 0, 0))\n', (943, 972), False, 'import cv2\n'), ((1138, 1165), 'cv2.imshow', 'cv2.imshow', (['"""learnbot"""', 'img'], {}), "('learnbot', img)\n", (1148, 1165), False, 'import cv2\n'), ((1177, 1191), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1188, 1191), False, 'import cv2\n')] |
"""
Tests data reading and writing operation, along with condition generation
"""
import pytest
import numpy as np
import pandas as pd
import os
from shutil import rmtree
from numpy.testing import assert_equal, assert_allclose
from matplotlib.figure import Figure
from xsugar import Experiment, ureg
from sugarplot import assert_figures_equal, prettifyPlot
from ast import literal_eval
def testSavePSDFigureFilename(exp, path_data, convert_name):
"""
Tests that we successfully created and saved a single PSD figure when
our dataset is just a single item.
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
cond = {'wavelength': 1, 'temperature': 25, 'frequency': 8500}
condition_name = convert_name('TEST1~temperatures=25~wavelengths=1')
filename_desired = condition_name + '~PSD.png'
exp.data = {condition_name: raw_data}
exp.plotPSD(average_along=None)
file_found = os.path.isfile(path_data['figures_full_path'] + filename_desired)
assert_equal(file_found, True)
def testSavePSDFigureMultipleFilename(exp, path_data, convert_name):
"""
Tests that we successfully created and saved a single PSD figure when
our dataset is just a single item.
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
cond = {'wavelength': 1, 'temperature': 25, 'frequency': 8500}
condition_name_1 = convert_name('TEST1~replicate=0~temperatures=25~wavelengths=1')
condition_name_2 = convert_name('TEST1~replicate=1~temperatures=25~wavelengths=1')
filename_desired_1 = condition_name_1 + '~PSD.png'
filename_desired_2 = condition_name_2 + '~PSD.png'
exp.data = {condition_name_1: raw_data, condition_name_2: raw_data}
exp.plotPSD(average_along=None)
file1_found = os.path.isfile(path_data['figures_full_path'] +
filename_desired_1)
file2_found = os.path.isfile(path_data['figures_full_path'] +
filename_desired_2)
assert_equal(file1_found, True)
assert_equal(file2_found, True)
def testSavePSDFigureAverageFilename(exp, path_data, convert_name):
"""
Tests that we successfully created and saved a single PSD figure when
we want to create an averaged PSD plot
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
raw_data_2 = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [8,4.5,8]})
cond = {'wavelength': 1, 'temperature': 25, 'frequency': 8500}
condition_name_1 = convert_name('TEST1~replicate=1~temperatures=25~wavelengths=1')
condition_name_2 = convert_name('TEST1~replicate=1~temperatures=25~wavelengths=2')
filename_desired = convert_name('TEST1~temperatures=25~wavelengths=1~PSD~averaged.png')
exp.data = {condition_name_1: raw_data,
condition_name_2: raw_data_2}
exp.plotPSD(average_along='replicate')
file_found = os.path.isfile(path_data['figures_full_path'] + filename_desired)
assert_equal(file_found, True)
def testGenerateTimeDomainPlot(exp, path_data, convert_name):
"""
Tests that we successfully create a simple figure from a single pandas
array.
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
cond = {'wavelength': 1, 'temperature': 25, 'frequency': 8500}
condition_name = convert_name('TEST1~wavelengths-1~temperatures-25_replicate-1')
filename_desired = condition_name + '.png'
exp.data = {condition_name: raw_data}
exp.plot()
file_found = os.path.isfile(path_data['figures_full_path'] + filename_desired)
assert_equal(file_found, True)
def test_plot_time_domain_pdf(exp, path_data, convert_name):
"""
Tests that we successfully create a simple figure from a single pandas
array.
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
cond = {'wavelength': 1, 'temperature': 25, 'frequency': 8500}
condition_name = convert_name('TEST1~wavelengths-1~temperatures-25_replicate-1')
filename_desired = condition_name + '.pdf'
exp.data = {condition_name: raw_data}
exp.plot(save_kw={'format': 'pdf'})
file_found = os.path.isfile(path_data['figures_full_path'] + filename_desired)
assert_equal(file_found, True)
def testGenerateRepresentativePlot(exp, path_data, convert_name):
"""
Tests that we successfully create a single figure from a whole set of
replicate data, instead of a bunch of figures
"""
raw_data = pd.DataFrame({'Time (ms)': [1, 2, 3],
'Current (mV)': [4,4.5,6]})
cond_1 = {'wavelength': 1, 'temperature': 25, 'frequency': 8500,
'replicate': 1}
cond_2 = {'wavelength': 1, 'temperature': 25, 'frequency': 8500,
'replicate': 2}
condition_name_1 = convert_name('TEST1~replicate=1~temperatures=25~wavelengths=1')
condition_name_2 = convert_name('TEST1~replicate=2~temperatures=25~wavelengths=1')
filename_desired = convert_name('TEST1~temperatures=25~wavelengths=1~representative')
exp.data = {condition_name_1: raw_data,
condition_name_2 : raw_data}
exp.plot(representative='replicate')
files_found = os.listdir(path_data['figures_full_path'])
file_1_found = \
os.path.isfile(path_data['figures_full_path'] + filename_desired + '.png')
assert_equal(file_1_found, True)
assert_equal(len(files_found), 1)
def test_generate_plot_1var(exp, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~wavelength=1',
'TEST1~wavelength=2',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
}
actual_figs, actual_axes = exp.plot(data_dict)
actual_fig = actual_figs[0]
actual_ax = actual_axes[0]
desired_fig = Figure()
desired_ax = desired_fig.subplots(subplot_kw={'xlabel': 'wavelength', 'ylabel': 'Value'})
desired_ax.plot([1, 2], [1.0, 2.0])
prettifyPlot(fig=desired_fig, ax=desired_ax)
assert_figures_equal(actual_fig, desired_fig)
def test_generate_plot_1var_units(exp_units, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~wavelength=1nm',
'TEST1~wavelength=2nm',
]]
data_dict = {
names[0]: 1.0 * ureg.nA,
names[1]: 2.0 * ureg.nA,
}
actual_figs, actual_axes = exp_units.plot(data_dict)
actual_fig = actual_figs[0]
actual_ax = actual_axes[0]
desired_fig = Figure()
desired_ax = desired_fig.subplots(subplot_kw={'xlabel': 'wavelength (nm)', 'ylabel': 'current (nA)'})
desired_ax.plot([1, 2], [1.0, 2.0])
prettifyPlot(fig=desired_fig, ax=desired_ax)
assert_figures_equal(actual_fig, desired_fig)
def test_generate_plot_2var(exp, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~wavelength=1~temperature=25.0',
'TEST1~wavelength=2~temperature=25.0',
'TEST1~wavelength=1~temperature=35.0',
'TEST1~wavelength=2~temperature=35.0',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
names[2]: 3.0,
names[3]: 4.0,
}
actual_figs, actual_axes = exp.plot(data_dict)
assert_equal(len(actual_figs), 2)
assert_equal(len(actual_axes), 2)
desired_fig0 = Figure()
desired_ax0 = desired_fig0.subplots(subplot_kw={'xlabel': 'wavelength', 'ylabel': 'Value'})
desired_ax0.plot([1, 2], [1.0, 2.0])
desired_ax0.plot([1, 2], [3.0, 4.0])
prettifyPlot(fig=desired_fig0, ax=desired_ax0)
desired_fig1 = Figure()
desired_ax1 = desired_fig1.subplots(subplot_kw={'xlabel': 'temperature', 'ylabel': 'Value'})
desired_ax1.plot([25.0, 35.0], [1.0, 3.0])
desired_ax1.plot([25.0, 35.0], [2.0, 4.0])
prettifyPlot(fig=desired_fig1, ax=desired_ax1)
assert_figures_equal(actual_figs[0], desired_fig0)
assert_figures_equal(actual_figs[1], desired_fig1)
| [
"os.listdir",
"numpy.testing.assert_equal",
"matplotlib.figure.Figure",
"sugarplot.prettifyPlot",
"os.path.isfile",
"sugarplot.assert_figures_equal",
"pandas.DataFrame"
] | [((592, 659), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (604, 659), True, 'import pandas as pd\n'), ((973, 1038), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired)"], {}), "(path_data['figures_full_path'] + filename_desired)\n", (987, 1038), False, 'import os\n'), ((1043, 1073), 'numpy.testing.assert_equal', 'assert_equal', (['file_found', '(True)'], {}), '(file_found, True)\n', (1055, 1073), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1288, 1355), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (1300, 1355), True, 'import pandas as pd\n'), ((1860, 1927), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired_1)"], {}), "(path_data['figures_full_path'] + filename_desired_1)\n", (1874, 1927), False, 'import os\n'), ((1979, 2046), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired_2)"], {}), "(path_data['figures_full_path'] + filename_desired_2)\n", (1993, 2046), False, 'import os\n'), ((2084, 2115), 'numpy.testing.assert_equal', 'assert_equal', (['file1_found', '(True)'], {}), '(file1_found, True)\n', (2096, 2115), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2120, 2151), 'numpy.testing.assert_equal', 'assert_equal', (['file2_found', '(True)'], {}), '(file2_found, True)\n', (2132, 2151), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2369, 2436), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (2381, 2436), True, 'import pandas as pd\n'), ((2481, 2548), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': 
[8, 4.5, 8]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [8, 4.5, 8]})\n", (2493, 2548), True, 'import pandas as pd\n'), ((3061, 3126), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired)"], {}), "(path_data['figures_full_path'] + filename_desired)\n", (3075, 3126), False, 'import os\n'), ((3131, 3161), 'numpy.testing.assert_equal', 'assert_equal', (['file_found', '(True)'], {}), '(file_found, True)\n', (3143, 3161), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3342, 3409), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (3354, 3409), True, 'import pandas as pd\n'), ((3710, 3775), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired)"], {}), "(path_data['figures_full_path'] + filename_desired)\n", (3724, 3775), False, 'import os\n'), ((3780, 3810), 'numpy.testing.assert_equal', 'assert_equal', (['file_found', '(True)'], {}), '(file_found, True)\n', (3792, 3810), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3990, 4057), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (4002, 4057), True, 'import pandas as pd\n'), ((4383, 4448), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired)"], {}), "(path_data['figures_full_path'] + filename_desired)\n", (4397, 4448), False, 'import os\n'), ((4453, 4483), 'numpy.testing.assert_equal', 'assert_equal', (['file_found', '(True)'], {}), '(file_found, True)\n', (4465, 4483), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((4707, 4774), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]}"], {}), "({'Time (ms)': [1, 2, 3], 'Current (mV)': [4, 4.5, 6]})\n", (4719, 4774), True, 
'import pandas as pd\n'), ((5415, 5457), 'os.listdir', 'os.listdir', (["path_data['figures_full_path']"], {}), "(path_data['figures_full_path'])\n", (5425, 5457), False, 'import os\n'), ((5487, 5561), 'os.path.isfile', 'os.path.isfile', (["(path_data['figures_full_path'] + filename_desired + '.png')"], {}), "(path_data['figures_full_path'] + filename_desired + '.png')\n", (5501, 5561), False, 'import os\n'), ((5566, 5598), 'numpy.testing.assert_equal', 'assert_equal', (['file_1_found', '(True)'], {}), '(file_1_found, True)\n', (5578, 5598), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((6015, 6023), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (6021, 6023), False, 'from matplotlib.figure import Figure\n'), ((6162, 6206), 'sugarplot.prettifyPlot', 'prettifyPlot', ([], {'fig': 'desired_fig', 'ax': 'desired_ax'}), '(fig=desired_fig, ax=desired_ax)\n', (6174, 6206), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((6211, 6256), 'sugarplot.assert_figures_equal', 'assert_figures_equal', (['actual_fig', 'desired_fig'], {}), '(actual_fig, desired_fig)\n', (6231, 6256), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((6677, 6685), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (6683, 6685), False, 'from matplotlib.figure import Figure\n'), ((6836, 6880), 'sugarplot.prettifyPlot', 'prettifyPlot', ([], {'fig': 'desired_fig', 'ax': 'desired_ax'}), '(fig=desired_fig, ax=desired_ax)\n', (6848, 6880), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((6885, 6930), 'sugarplot.assert_figures_equal', 'assert_figures_equal', (['actual_fig', 'desired_fig'], {}), '(actual_fig, desired_fig)\n', (6905, 6930), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((7498, 7506), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (7504, 7506), False, 'from matplotlib.figure import Figure\n'), ((7689, 7735), 'sugarplot.prettifyPlot', 'prettifyPlot', ([], {'fig': 
'desired_fig0', 'ax': 'desired_ax0'}), '(fig=desired_fig0, ax=desired_ax0)\n', (7701, 7735), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((7756, 7764), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (7762, 7764), False, 'from matplotlib.figure import Figure\n'), ((7960, 8006), 'sugarplot.prettifyPlot', 'prettifyPlot', ([], {'fig': 'desired_fig1', 'ax': 'desired_ax1'}), '(fig=desired_fig1, ax=desired_ax1)\n', (7972, 8006), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((8012, 8062), 'sugarplot.assert_figures_equal', 'assert_figures_equal', (['actual_figs[0]', 'desired_fig0'], {}), '(actual_figs[0], desired_fig0)\n', (8032, 8062), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n'), ((8067, 8117), 'sugarplot.assert_figures_equal', 'assert_figures_equal', (['actual_figs[1]', 'desired_fig1'], {}), '(actual_figs[1], desired_fig1)\n', (8087, 8117), False, 'from sugarplot import assert_figures_equal, prettifyPlot\n')] |
import glob
import numpy as np

# Count how many daily GHRSST NetCDF files exist for each year 2003-2018
# (filenames are assumed to embed the 4-digit year).
YEARS = range(2003, 2019)

# One counter per year; the original over-allocated 17 slots for 16 years.
tmp = np.zeros(len(YEARS))
list_daily = glob.glob('/mnt/r01/data/goes-poes_ghrsst/daily/*.nc')
list_daily.sort()
i = 0
for y in YEARS:
    print("y = " + str(y))
    year_tag = str(y)
    # Scan the sorted file list once per year and tally the matches.
    for path in list_daily:
        if year_tag in path:
            tmp[i] += 1
    i += 1
| [
"numpy.zeros",
"glob.glob"
] | [((36, 48), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (44, 48), True, 'import numpy as np\n'), ((60, 114), 'glob.glob', 'glob.glob', (['"""/mnt/r01/data/goes-poes_ghrsst/daily/*.nc"""'], {}), "('/mnt/r01/data/goes-poes_ghrsst/daily/*.nc')\n", (69, 114), False, 'import glob\n')] |
'''
Pretty print: python3 -m json.tool < some.json
'''
import json
import argparse
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
def load_json(data_path, jsfile):
    """Read and parse the JSON file located at data_path/jsfile."""
    full_path = os.path.join(data_path, jsfile)
    with open(full_path, 'r') as f:
        return json.load(f)
def generate_dataset(args):
    """Convert VIA polygon annotations into per-image segmentation masks.

    Reads the VIA json named by args.json under args.data_path, rasterises
    every annotated polygon into a colour-coded class image plus a background
    channel, and saves the resulting {filename: mask_array} dict to
    args.save_filename as a .npy file.  With args.show set, input and
    ground-truth images are written under images/ instead of saving the dict.
    """
    data_dict = {}
    js = load_json(args.data_path, args.json)
    # VIA-2 project format: annotations live under "_via_img_metadata".
    js = js["_via_img_metadata"]
    keys = js.keys()
    # One fixed colour per class; class ids are 1-based indices into this list.
    rgb = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255)]
    images_no_objs = []
    for key in keys:
        entry = js[key]
        filename = entry["filename"]
        path = os.path.join(args.data_path, filename)
        regions = entry["regions"]
        masks = []
        for region in regions:
            # Polygon vertices and class label of one annotated region.
            shape = region["shape_attributes"]
            x = shape["all_points_x"]
            y = shape["all_points_y"]
            name = region["region_attributes"]
            class_id = name["Name"]
            fmt = "%s,%s,%s,%s"
            line = fmt % (filename, x, y, class_id)
            # print(line)
            # Reshape the vertex lists into the (1, n_points, 2) int32
            # layout expected by cv2.fillPoly.
            xy = np.array([x, y], dtype=np.int32)
            xy = np.transpose(xy)
            xy = np.reshape(xy, [1, -1, 2])
            mask = { class_id : xy }
            masks.append(mask)
        image = plt.imread(path)
        if args.show:
            # Save the raw input image for visual inspection.
            plt.xlabel('x')
            plt.ylabel('y')
            plt.title('Input image', fontsize=14)
            fname = os.path.splitext(filename)[0]
            fname = fname + "-input.png"
            path = os.path.join("images", fname)
            plt.imshow(image)
            plt.savefig(path)
            #plt.show()
        else:
            image = np.zeros_like(image)
        shape = image.shape
        shape = (shape[0], shape[1])
        # Background channel: 255 everywhere, zeroed inside every polygon.
        bg = np.ones(shape, dtype="uint8")
        bg.fill(255)
        #i = 0
        image = np.zeros_like(image)
        #image[:] = [128, 0, 128]
        for mask in masks:
            name = list(mask)[0]
            mask = mask[name]
            # Class ids are 1-based, hence the -1 when indexing colours.
            cv2.fillPoly(image, mask, rgb[int(name)-1])
            # cv2.fillPoly(image, mask, rgb[i])
            #i += 1
            cv2.fillPoly(bg, mask, 0)
        if args.show:
            name = os.path.splitext(filename)[0]
            plt.xlabel('x')
            plt.ylabel('y')
            plt.title('Ground truth semantic segmentation', fontsize=14)
            fname = name + "-semantic.png"
            path = os.path.join("images", fname)
            plt.imshow(image)
            plt.savefig(path)
            #plt.show()
            #plt.xlabel('x')
            #plt.ylabel('y')
            #plt.title('Background segmentation', fontsize=14)
            #fname = name + "-bg.png"
            #path = os.path.join("images", fname)
            #plt.imshow(bg, cmap='gray', vmin=0, vmax=255)
            #plt.savefig(path)
            #plt.show()
        # Prepend the background as its own channel -> (H, W, 1 + 3).
        shape = (*shape, 1)
        bg = np.reshape(bg, shape)
        #print(bg.shape)
        data = np.concatenate((bg, image), axis=-1)
        # Dividing by 255 then casting back to uint8 collapses every channel
        # to {0, 1}, i.e. binary per-class masks.
        data = data.astype('float32') / 255
        data = data.astype('uint8')
        data_dict[filename] = data
        print(filename, len(masks))
        if len(masks) == 0:
            images_no_objs.append(filename)
    if not args.show:
        np.save(args.save_filename, data_dict)
if __name__ == '__main__':
    # Command-line entry point: assemble the option parser, then hand the
    # parsed namespace straight to the dataset generator.
    parser = argparse.ArgumentParser()
    parser.add_argument("-j", "--json",
                        default='segmentation_train.json',
                        help='Json filename')
    parser.add_argument("-p", "--data-path",
                        default='../dataset/drinks',
                        help='Path to dataset')
    parser.add_argument("--save-filename",
                        default="segmentation_train.npy",
                        help='Path to dataset')
    parser.add_argument("--show",
                        default=False,
                        action='store_true',
                        help="Show and save images")
    generate_dataset(parser.parse_args())
| [
"matplotlib.pyplot.imshow",
"cv2.fillPoly",
"numpy.reshape",
"numpy.ones",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.imread",
"os.path.join",
"os.path.splitext",
"numpy.array",
"numpy.concatenate",
"js... | [((3383, 3408), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3406, 3408), False, 'import argparse\n'), ((266, 278), 'json.load', 'json.load', (['f'], {}), '(f)\n', (275, 278), False, 'import json\n'), ((659, 697), 'os.path.join', 'os.path.join', (['args.data_path', 'filename'], {}), '(args.data_path, filename)\n', (671, 697), False, 'import os\n'), ((1312, 1328), 'matplotlib.pyplot.imread', 'plt.imread', (['path'], {}), '(path)\n', (1322, 1328), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1844), 'numpy.ones', 'np.ones', (['shape'], {'dtype': '"""uint8"""'}), "(shape, dtype='uint8')\n", (1822, 1844), True, 'import numpy as np\n'), ((1897, 1917), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1910, 1917), True, 'import numpy as np\n'), ((2949, 2970), 'numpy.reshape', 'np.reshape', (['bg', 'shape'], {}), '(bg, shape)\n', (2959, 2970), True, 'import numpy as np\n'), ((3011, 3047), 'numpy.concatenate', 'np.concatenate', (['(bg, image)'], {'axis': '(-1)'}), '((bg, image), axis=-1)\n', (3025, 3047), True, 'import numpy as np\n'), ((3302, 3340), 'numpy.save', 'np.save', (['args.save_filename', 'data_dict'], {}), '(args.save_filename, data_dict)\n', (3309, 3340), True, 'import numpy as np\n'), ((209, 240), 'os.path.join', 'os.path.join', (['data_path', 'jsfile'], {}), '(data_path, jsfile)\n', (221, 240), False, 'import os\n'), ((1116, 1148), 'numpy.array', 'np.array', (['[x, y]'], {'dtype': 'np.int32'}), '([x, y], dtype=np.int32)\n', (1124, 1148), True, 'import numpy as np\n'), ((1166, 1182), 'numpy.transpose', 'np.transpose', (['xy'], {}), '(xy)\n', (1178, 1182), True, 'import numpy as np\n'), ((1200, 1226), 'numpy.reshape', 'np.reshape', (['xy', '[1, -1, 2]'], {}), '(xy, [1, -1, 2])\n', (1210, 1226), True, 'import numpy as np\n'), ((1363, 1378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1373, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1391, 1406), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1401, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1456), 'matplotlib.pyplot.title', 'plt.title', (['"""Input image"""'], {'fontsize': '(14)'}), "('Input image', fontsize=14)\n", (1428, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1596), 'os.path.join', 'os.path.join', (['"""images"""', 'fname'], {}), "('images', fname)\n", (1579, 1596), False, 'import os\n'), ((1609, 1626), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1619, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1656), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (1650, 1656), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1735), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1728, 1735), True, 'import numpy as np\n'), ((2178, 2203), 'cv2.fillPoly', 'cv2.fillPoly', (['bg', 'mask', '(0)'], {}), '(bg, mask, 0)\n', (2190, 2203), False, 'import cv2\n'), ((2289, 2304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2299, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2317, 2332), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2327, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2405), 'matplotlib.pyplot.title', 'plt.title', (['"""Ground truth semantic segmentation"""'], {'fontsize': '(14)'}), "('Ground truth semantic segmentation', fontsize=14)\n", (2354, 2405), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2497), 'os.path.join', 'os.path.join', (['"""images"""', 'fname'], {}), "('images', fname)\n", (2480, 2497), False, 'import os\n'), ((2510, 2527), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2520, 2527), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2557), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2551, 2557), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1503), 
'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1493, 1503), False, 'import os\n'), ((2246, 2272), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2262, 2272), False, 'import os\n')] |
# Ant Colony Optimization for a 30-city travelling-salesman problem.
# Reads city coordinates from 'city_location.txt' and prints the best
# tour and its length after a fixed number of iterations.
import random
import numpy as np
import math
location=np.loadtxt('city_location.txt')
num_ant=200  # number of ants
num_city=30  # number of cities
alpha=1  # pheromone influence factor
beta=1  # heuristic (expected-value) influence factor
info=0.1  # pheromone evaporation rate
Q=1  # pheromone deposit constant
count_iter = 0
iter_max = 500
#dis_new=1000
#==========================================
# Symmetric matrix of Euclidean distances between every pair of cities.
def distance_p2p_mat():
    dis_mat=[]
    for i in range(num_city):
        dis_mat_each=[]
        for j in range(num_city):
            dis=math.sqrt(pow(location[i][0]-location[j][0],2)+pow(location[i][1]-location[j][1],2))
            dis_mat_each.append(dis)
        dis_mat.append(dis_mat_each)
    # print(dis_mat)
    return dis_mat
# Compute the total tour length of every path in path_new.
def cal_newpath(dis_mat,path_new):
    dis_list=[]
    for each in path_new:
        dis=0
        for j in range(num_city-1):
            dis=dis_mat[each[j]][each[j+1]]+dis
        dis=dis_mat[each[num_city-1]][each[0]]+dis  # return to the start city
        dis_list.append(dis)
    return dis_list
#==========================================
# Point-to-point distance matrix.
dis_list=distance_p2p_mat()
dis_mat=np.array(dis_list)  # convert to a numpy matrix
# Heuristic (desirability) matrix: inverse distance.
e_mat_init=1.0/(dis_mat+np.diag([10000]*num_city))  # diagonal added to avoid division by zero
diag=np.diag([1.0/10000]*num_city)
e_mat=e_mat_init-diag  # reset the diagonal entries back to 0
# Initialise the pheromone concentration of every edge to 1.
pheromone_mat=np.ones((num_city,num_city))
# Initialise each ant's path; every ant starts from city 0.
path_mat=np.zeros((num_ant,num_city)).astype(int)
#while dis_new>400:
while count_iter < iter_max:
    for ant in range(num_ant):
        visit=0  # every ant starts from city 0
        unvisit_list=list(range(1,30))  # cities not yet visited
        for j in range(1,num_city):
            # Roulette-wheel selection of the next city.
            trans_list=[]
            tran_sum=0
            trans=0
            for k in range(len(unvisit_list)):
                trans +=np.power(pheromone_mat[visit][unvisit_list[k]],alpha)*np.power(e_mat[visit][unvisit_list[k]],beta)
                trans_list.append(trans)
                tran_sum =trans
            rand=random.uniform(0,tran_sum)  # draw a random number
            for t in range(len(trans_list)):
                if(rand <= trans_list[t]):
                    visit_next=unvisit_list[t]
                    break
                else:
                    continue
            path_mat[ant,j]=visit_next  # record in the path matrix
            unvisit_list.remove(visit_next)  # update unvisited set
            visit=visit_next  # move to the chosen city
    # Once every ant's path is complete, compute each ant's tour length.
    dis_allant_list=cal_newpath(dis_mat,path_mat)
    # Update the best distance and best path at every iteration.
    if count_iter == 0:
        dis_new=min(dis_allant_list)
        path_new=path_mat[dis_allant_list.index(dis_new)].copy()
    else:
        if min(dis_allant_list) < dis_new:
            dis_new=min(dis_allant_list)
            path_new=path_mat[dis_allant_list.index(dis_new)].copy()
    # Update the pheromone matrix: evaporation plus deposits Q/d per edge.
    pheromone_change=np.zeros((num_city,num_city))
    for i in range(num_ant):
        for j in range(num_city-1):
            pheromone_change[path_mat[i,j]][path_mat[i,j+1]] += Q/dis_mat[path_mat[i,j]][path_mat[i,j+1]]
        pheromone_change[path_mat[i,num_city-1]][path_mat[i,0]] += Q/dis_mat[path_mat[i,num_city-1]][path_mat[i,0]]
    pheromone_mat=(1-info)*pheromone_mat+pheromone_change
    count_iter += 1  # next iteration
print('最短距离:',dis_new)
print('最短路径:',path_new)
| [
"random.uniform",
"numpy.ones",
"numpy.power",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
] | [((61, 92), 'numpy.loadtxt', 'np.loadtxt', (['"""city_location.txt"""'], {}), "('city_location.txt')\n", (71, 92), True, 'import numpy as np\n'), ((1054, 1072), 'numpy.array', 'np.array', (['dis_list'], {}), '(dis_list)\n', (1062, 1072), True, 'import numpy as np\n'), ((1157, 1190), 'numpy.diag', 'np.diag', (['([1.0 / 10000] * num_city)'], {}), '([1.0 / 10000] * num_city)\n', (1164, 1190), True, 'import numpy as np\n'), ((1256, 1285), 'numpy.ones', 'np.ones', (['(num_city, num_city)'], {}), '((num_city, num_city))\n', (1263, 1285), True, 'import numpy as np\n'), ((2770, 2800), 'numpy.zeros', 'np.zeros', (['(num_city, num_city)'], {}), '((num_city, num_city))\n', (2778, 2800), True, 'import numpy as np\n'), ((1110, 1137), 'numpy.diag', 'np.diag', (['([10000] * num_city)'], {}), '([10000] * num_city)\n', (1117, 1137), True, 'import numpy as np\n'), ((1315, 1344), 'numpy.zeros', 'np.zeros', (['(num_ant, num_city)'], {}), '((num_ant, num_city))\n', (1323, 1344), True, 'import numpy as np\n'), ((1928, 1955), 'random.uniform', 'random.uniform', (['(0)', 'tran_sum'], {}), '(0, tran_sum)\n', (1942, 1955), False, 'import random\n'), ((1722, 1776), 'numpy.power', 'np.power', (['pheromone_mat[visit][unvisit_list[k]]', 'alpha'], {}), '(pheromone_mat[visit][unvisit_list[k]], alpha)\n', (1730, 1776), True, 'import numpy as np\n'), ((1776, 1821), 'numpy.power', 'np.power', (['e_mat[visit][unvisit_list[k]]', 'beta'], {}), '(e_mat[visit][unvisit_list[k]], beta)\n', (1784, 1821), True, 'import numpy as np\n')] |
from Modules.Utils.ApplyFunctions import *
from Modules.ModelSelection.CrossValidation import CrossValidation
from Modules.DataAugmentations.DataAugmentationDefault import *
from Modules.Embeddings.EmbeddingDefault import *
from Modules.Kernels.KernelDefault import *
import itertools
import numpy as np
import pandas as pd
from tqdm import tqdm
def dictToCartesianProduct(dct):
    """Return every combination of the dict's list values as a list of tuples."""
    # Gather the per-key value lists (in insertion order), then expand
    # them into their Cartesian product.
    value_lists = list(dct.values())
    return list(itertools.product(*value_lists))
def BuildHpTuples(df_dct, hyperparameters_data_augmentation_dict,
                  hyperparameters_embeddings_dict,
                  hyperparameters_models_dict,
                  hyperparameters_kernels_dict):
    """Build all the possible combination of hyper parameters given as arg.

    Each returned tuple packs (data_aug, hp_data_aug_dct, embedding,
    hp_embedding_dct, kernel, hp_kernel_dct, model, hp_model_dct,
    computed_df_dct).  The dataset is pre-computed once per
    (data augmentation, embedding) pair so model/kernel sweeps reuse it.
    """
    # Grid of hp for all model and kernel functions
    tuples = []
    # Loop over different embedding and data augmentation
    for data_aug_func in tqdm(hyperparameters_data_augmentation_dict):
        # Extract the hp of the data augmentation as a dict
        dct_data = hyperparameters_data_augmentation_dict[data_aug_func]
        grid_data_hp_l = dictToCartesianProduct(dct_data)
        for hp_data in grid_data_hp_l:
            for embedding_func in hyperparameters_embeddings_dict:
                # Extract the hp of the embedding as a dict
                dct_embedding = hyperparameters_embeddings_dict[embedding_func]
                grid_embedding_hp_l = dictToCartesianProduct(dct_embedding)
                for hp_embedding in tqdm(grid_embedding_hp_l):
                    # Convert hp of the data aug. as a dict
                    keys_list = list(dct_data.keys())
                    hp_data_aug_dct = {keys_list[i]: elt for i, elt in enumerate(hp_data)}
                    # Convert hp of the embedding as a dict
                    keys_list = list(dct_embedding.keys())
                    hp_embedding_dct = {keys_list[i]: elt for i, elt in enumerate(hp_embedding)}
                    # Definition of the embedding
                    # (a class is instantiated directly, a plain function is
                    # wrapped in EmbeddingDefault)
                    if type(embedding_func) == type:
                        embedding = embedding_func(**hp_embedding_dct)
                    else:
                        embedding = EmbeddingDefault(embedding_func,
                                                     hp_embedding_dct)
                    # Definition of the data augmentation
                    data_aug = DataAugmentationDefault(data_aug_func,
                                                       hp_data_aug_dct)
                    # Compute the dataset for these functions
                    computed_df_dct = ApplyFunctions(df_dct, data_aug,
                                                    embedding)
                    # Loop over different models
                    for model_func in hyperparameters_models_dict.keys():
                        # Compute all possible combinations
                        dct_model = hyperparameters_models_dict[model_func]
                        grid_model_hp_l = dictToCartesianProduct(dct_model)
                        for hp_model in grid_model_hp_l:
                            for kernel_func in hyperparameters_kernels_dict.keys():
                                # Extract the hp of the kernel as a dict
                                dct_kernel = hyperparameters_kernels_dict[kernel_func]
                                grid_kernel_hp_l = dictToCartesianProduct(dct_kernel)
                                for hp_kernel in grid_kernel_hp_l:
                                    # Convert the hp of the kernel as a dict
                                    keys_list = list(dct_kernel.keys())
                                    hp_kernel_dct = {keys_list[i]: elt for i, elt in enumerate(hp_kernel)}
                                    # Convert hp of the model as a dict
                                    keys_list = list(dct_model.keys())
                                    hp_model_dct = {keys_list[i]: elt for i, elt in enumerate(hp_model)}
                                    # Definition of the kernel
                                    kernel = KernelDefault(kernel_func,
                                                          hp_kernel_dct)
                                    # Definition of the model with the current hyperparameters
                                    model = model_func(kernel, **hp_model_dct)
                                    # Add this combination to tuples
                                    tuples.append((data_aug, hp_data_aug_dct,
                                                   embedding, hp_embedding_dct,
                                                   kernel, hp_kernel_dct,
                                                   model, hp_model_dct,
                                                   computed_df_dct))
    return tuples
def subGridSearch(df_dct, tuple_i, res_df, cv=5, n_jobs=-1):
    """Execute the CrossValidation on tuple_i of hyperparameters.

    Parameters
    ----------
    df_dct : dict of DataFrames holding the raw datasets (kept for interface
        symmetry; the pre-computed dataset travels inside tuple_i).
    tuple_i : tuple packing the data augmentation, embedding, kernel and
        model objects together with their hyperparameter dicts and the
        pre-computed dataset.
    res_df : DataFrame accumulating one row of results per evaluated tuple.
    cv : number of cross-validation folds.
    n_jobs : number of parallel jobs forwarded to CrossValidation.

    Returns
    -------
    (score, parameter-name dict, parameter-object dict, updated res_df)
    """
    # Extract the relevant objects from tuple_i
    [data_aug, hp_data_aug_dct,
     embedding, hp_embedding_dct,
     kernel, hp_kernel_dct,
     model, hp_model_dct, computed_df_dct] = tuple_i
    # Computation of the score through a Cross Validation
    scores = CrossValidation(computed_df_dct, model, cv=cv, n_jobs=n_jobs,
                            return_int=False)
    # Compute the mean scores over the three datasets
    scores_1, scores_2, scores_3 = scores
    scores_1_mean = np.mean(scores_1)
    scores_2_mean = np.mean(scores_2)
    scores_3_mean = np.mean(scores_3)
    score = (scores_1_mean + scores_2_mean + scores_3_mean) / 3.0
    # Save the result in the DataFrame res_df
    results = {
        'scores_1': scores_1,
        'scores_2': scores_2,
        'scores_3': scores_3,
        'scores_1_mean': scores_1_mean,
        'scores_2_mean': scores_2_mean,
        'scores_3_mean': scores_3_mean,
        'score': score,
        'data_aug_type': data_aug.name,
        'data_aug_hp': hp_data_aug_dct,
        'embedding_type': embedding.name,
        'embedding_hp': hp_embedding_dct,
        'kernel_type': kernel.name,
        'kernel_hp': hp_kernel_dct,
        'model_type': model.name,
        'model_hp': hp_model_dct,
    }
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent (same ignore_index semantics).
    res_df = pd.concat([res_df, pd.DataFrame([results])], ignore_index=True)
    # Persisted after every evaluation so the grid search can be stopped early.
    res_df.to_csv('./Resultats/grid_search_res.csv', sep='\t')
    # Concatenate the hyperparameters for returning them
    best_score = score
    best_parameters_names = {"Data Augmentation": {"Function Name": data_aug.name,
                                                   "Best Parameters": hp_data_aug_dct},
                             "Embedding": {"Function Name": embedding.name,
                                           "Best Parameters": hp_embedding_dct},
                             "Kernel": {"Function Name": kernel.name,
                                        "Best Parameters": hp_kernel_dct},
                             "Model": {"Function Name": model.name,
                                       "Best Parameters": hp_model_dct}}
    best_parameters_values = {"Data Augmentation": {"Function": data_aug},
                              "Embedding": {"Function": embedding},
                              "Kernel": {"Function": kernel},
                              "Model": {"Function": model}}
    # Display score and Parameters
    print("Score: {}".format(score))
    print("Best Parameters\n--------")
    print("DataAugmentation: ", data_aug.name, ", hp: ", hp_data_aug_dct)
    print("Embedding: ", embedding.name, ", hp: ", hp_embedding_dct)
    print("Kernel: ", kernel.name, ", hp: ", hp_kernel_dct)
    print("Model: ", model.name, ", hp: ", hp_model_dct)
    print("\n\n")
    return best_score, best_parameters_names, best_parameters_values, res_df
def GridSearch(df_dct, hyperparameters_data_augmentation_dict,
               hyperparameters_embeddings_dict,
               hyperparameters_models_dict,
               hyperparameters_kernels_dict,
               cv=5, n_jobs=-1, randomise=True):
    """Run a grid search over every hyperparameter combination.

    Each combination is scored through cross-validation; the highest
    (strictly positive) score and its parameters are returned.
    """
    # Enumerate every admissible hyperparameter combination.
    all_tuples = BuildHpTuples(df_dct, hyperparameters_data_augmentation_dict,
                               hyperparameters_embeddings_dict,
                               hyperparameters_models_dict,
                               hyperparameters_kernels_dict)
    # Results dataframe, persisted by subGridSearch after every evaluation
    # (allows early stopping of the grid search).
    results_df = pd.DataFrame()
    # Optional shuffle so an interrupted search still samples the whole grid.
    if randomise:
        np.random.shuffle(all_tuples)
    # Evaluate every combination through cross-validation.
    evaluations = []
    for combination in tqdm(all_tuples):
        score_i, names_i, values_i, results_df = subGridSearch(
            df_dct, combination, results_df, cv=cv, n_jobs=n_jobs)
        evaluations.append((score_i, names_i, values_i))
    # Keep the combination with the highest strictly positive score.
    maxi = 0
    best_params_names = 0
    best_params_values = 0
    for score_i, names_i, values_i in evaluations:
        if score_i > maxi:
            maxi = score_i
            best_params_names = names_i
            best_params_values = values_i
    return maxi, best_params_names, best_params_values
| [
"numpy.mean",
"tqdm.tqdm",
"itertools.product",
"Modules.ModelSelection.CrossValidation.CrossValidation",
"pandas.DataFrame",
"numpy.random.shuffle"
] | [((1160, 1204), 'tqdm.tqdm', 'tqdm', (['hyperparameters_data_augmentation_dict'], {}), '(hyperparameters_data_augmentation_dict)\n', (1164, 1204), False, 'from tqdm import tqdm\n'), ((5480, 5559), 'Modules.ModelSelection.CrossValidation.CrossValidation', 'CrossValidation', (['computed_df_dct', 'model'], {'cv': 'cv', 'n_jobs': 'n_jobs', 'return_int': '(False)'}), '(computed_df_dct, model, cv=cv, n_jobs=n_jobs, return_int=False)\n', (5495, 5559), False, 'from Modules.ModelSelection.CrossValidation import CrossValidation\n'), ((5682, 5699), 'numpy.mean', 'np.mean', (['scores_1'], {}), '(scores_1)\n', (5689, 5699), True, 'import numpy as np\n'), ((5720, 5737), 'numpy.mean', 'np.mean', (['scores_2'], {}), '(scores_2)\n', (5727, 5737), True, 'import numpy as np\n'), ((5758, 5775), 'numpy.mean', 'np.mean', (['scores_3'], {}), '(scores_3)\n', (5765, 5775), True, 'import numpy as np\n'), ((8745, 8759), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8757, 8759), True, 'import pandas as pd\n'), ((8951, 8966), 'tqdm.tqdm', 'tqdm', (['tuples_hp'], {}), '(tuples_hp)\n', (8955, 8966), False, 'from tqdm import tqdm\n'), ((8902, 8930), 'numpy.random.shuffle', 'np.random.shuffle', (['tuples_hp'], {}), '(tuples_hp)\n', (8919, 8930), True, 'import numpy as np\n'), ((663, 688), 'itertools.product', 'itertools.product', (['*dct_l'], {}), '(*dct_l)\n', (680, 688), False, 'import itertools\n'), ((1745, 1770), 'tqdm.tqdm', 'tqdm', (['grid_embedding_hp_l'], {}), '(grid_embedding_hp_l)\n', (1749, 1770), False, 'from tqdm import tqdm\n')] |
# -*- coding: utf-8 -*-
"""
This script saves the input file for the horseshoe problem.
"""
__version__ = '1.0'
__author__ = '<NAME>'
import sys
import numpy as np
import numpy.matlib
sys.path.append(r'C:\BELLA')
from src.divers.excel import autofit_column_widths
from src.divers.excel import delete_file
from src.BELLA.save_set_up import save_constraints_BELLA
from src.BELLA.save_set_up import save_objective_function_BELLA
from src.BELLA.save_set_up import save_multipanel
from src.BELLA.save_set_up import save_materials
from src.BELLA.panels import Panel
from src.BELLA.multipanels import MultiPanel
from src.BELLA.constraints import Constraints
from src.BELLA.obj_function import ObjFunction
from src.BELLA.materials import Material
# Name of the Excel input file produced by this script.
filename = 'input_file_horseshoe.xlsx'
# check for authorisation before overwriting
delete_file(filename)
# Number of panels in the horseshoe layout.
# NOTE(review): n_panels is recomputed later as len(ID); this early value is
# presumably a placeholder — confirm the two stay consistent.
n_panels = 18
### Design guidelines ---------------------------------------------------------
# Two pre-defined guideline sets; the second assignment deliberately
# overrides the first — comment one out to switch sets.
constraints_set = 'C0'
constraints_set = 'C1'
# constraints_set == 'C0' ->
# - ply-drop spacing rule enforced with a minimum of
# constraints.min_drop plies between ply drops at panel boundaries
# - covering rule enforced by preventing the drop of the
# constraints.n_covering outermost plies on each laminate surface
# - symmetry rule enforced, no other lay-up rules
#
# constraints_set == 'C1' ->
# - ply-drop spacing rule enforced with a minimum of
# constraints.min_drop plies between ply drops at panel boundaries
# - covering enforrced by preventing the drop of the
# constraints.n_covering outermost plies on each laminate surface
# - symmetry rule enforced
# - 10% rule enforced
# if rule_10_Abdalla == True rule applied by restricting LPs instead of
# ply percentages and percent_Abdalla is the percentage limit of the
# rule
# otherwise:
# if combined_45_135 == True the restrictions are:
# - a maximum percentage of constraints.percent_0 0 deg plies
# - a maximum percentage of constraints.percent_90 90 deg plies
# - a maximum percentage of constraints.percent_45_135 +-45 deg plies
# if combined_45_135 == False the restrictions are:
# - a maximum percentage of constraints.percent_0 0 deg plies
# - a maximum percentage of constraints.percent_90 90 deg plies
# - a maximum percentage of constraints.percent_45 45 deg plies
# - a maximum percentage of constraints.percent_135 -45 deg plies
# - disorientation rule enforced with variation of fibre angle between
# adacent plies limited to a maximum value of constraints.delta_angle
# degrees
# - contiguity rule enforced with no more than constraints.n_contig
# adajacent plies with same fibre angle
# - damage tolerance rule enforced
# if constraints.dam_tol_rule == 1 the restrictions are:
# - one outer ply at + or -45 deg at the laminate surfaces
# (2 plies intotal)
# if constraints.dam_tol_rule == 2 the restrictions are:
# - [+45, -45] or [-45, +45] at the laminate surfaces
# (4 plies in total)
# if constraints.dam_tol_rule == 3 the restrictions are:
# - [+45,-45] [-45,+45] [+45,+45] or [-45,-45] at the laminate
# surfaces (4 plies in total)
# - out-of-plane orthotropy rule enforced to have small absolutes values
# of LP_11 and LP_12 such that the values of D16 and D26 are small too
## lay-up rules
# set of admissible fibre orientations
# NOTE(review): the 4-angle set below is immediately overridden by the
# 12-angle set — comment one of the two assignments out to switch.
set_of_angles = np.array([-45, 0, 45, 90], dtype=int)
set_of_angles = np.array([
    -45, 0, 45, 90, +30, -30, +60, -60, 15, -15, 75, -75], dtype=int)
sym = True # symmetry rule
oopo = False # out-of-plane orthotropy requirements
# Toggle the lay-up rules according to the selected guideline set
# ('C0' = ply-drop rules only, 'C1' = full rule set; see comments above).
if constraints_set == 'C0':
    bal = False # balance rule
    rule_10_percent = False # 10% rule
    diso = False # disorientation rule
    contig = False # contiguity rule
    dam_tol = False # damage-tolerance rule
else:
    bal = True
    rule_10_percent = True
    diso = True
    contig = True
    dam_tol = True
rule_10_Abdalla = True # 10% rule restricting LPs instead of ply percentages
percent_Abdalla = 10 # percentage limit for the 10% rule applied on LPs
combine_45_135 = True # True if restriction on +-45 plies combined for 10% rule
percent_0 = 10 # percentage used in the 10% rule for 0 deg plies
percent_45 = 0 # percentage used in the 10% rule for +45 deg plies
percent_90 = 10 # percentage used in the 10% rule for 90 deg plies
percent_135 = 0 # percentage used in the 10% rule for -45 deg plies
percent_45_135 =10 # percentage used in the 10% rule for +-45 deg plies
delta_angle = 45 # maximum angle difference for adjacent plies
n_contig = 5 # maximum number of adjacent plies with the same fibre orientation
dam_tol_rule = 1 # type of damage tolerance rule
## ply-drop rules
covering = True # covering rule
n_covering = 1 # number of plies ruled by covering rule at laminate surfaces
pdl_spacing = True # ply drop spacing rule
min_drop = 2 # Minimum number of continuous plies between ply drops
# Bundle every guideline flag and limit into the Constraints object
# consumed by the BELLA optimiser.
constraints = Constraints(
    sym=sym,
    bal=bal,
    oopo=oopo,
    dam_tol=dam_tol,
    dam_tol_rule=dam_tol_rule,
    covering=covering,
    n_covering=n_covering,
    rule_10_percent=rule_10_percent,
    rule_10_Abdalla=rule_10_Abdalla,
    percent_Abdalla=percent_Abdalla,
    percent_0=percent_0,
    percent_45=percent_45,
    percent_90=percent_90,
    percent_135=percent_135,
    percent_45_135=percent_45_135,
    combine_45_135=combine_45_135,
    diso=diso,
    contig=contig,
    n_contig=n_contig,
    delta_angle=delta_angle,
    set_of_angles=set_of_angles,
    min_drop=min_drop,
    pdl_spacing=pdl_spacing)
### Objective function parameters ---------------------------------------------
# Coefficient for the 10% rule penalty
coeff_10 = 1
# Coefficient for the contiguity constraint penalty
coeff_contig = 1
# Coefficient for the disorientation constraint penalty
coeff_diso = 10
# Coefficient for the out-of-plane orthotropy penalty
coeff_oopo = 1
# Lamination-parameter weightings in panel objective functions
# (In practice these weightings can be different for each panel)
# Only LP9 and LP10 are weighted here (out-of-plane stiffness terms).
lampam_weightings = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0])
## Multi-panel objective function
# Weightings of the panels in the multi-panel objective function
# (all panels weighted equally here).
panel_weightings = np.ones((n_panels,), float)
# Coefficient for the ply drop spacing guideline penalty
coeff_spacing = 1
# Bundle the penalty coefficients into the objective-function parameters.
obj_func_param = ObjFunction(
    constraints=constraints,
    coeff_contig=coeff_contig,
    coeff_diso=coeff_diso,
    coeff_10=coeff_10,
    coeff_oopo=coeff_oopo,
    coeff_spacing=coeff_spacing)
### Multi-panel composite laminate layout -------------------------------------
# panel IDs
ID = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
# number of panels
n_panels = len(ID)
# panel number of plies
n_plies = [32, 28, 20, 18, 16, 22, 18, 24, 38,
           34, 30, 28, 22, 18, 24, 30, 18, 22]
# panels adjacency: panel ID -> list of neighbouring panel IDs
neighbour_panels = {
    1 : [2, 9],
    2 : [1, 3, 6, 10],
    3 : [2, 4, 6],
    4 : [3, 5, 7],
    5 : [4, 8],
    6 : [2, 3, 7],
    7 : [4, 6, 8],
    8 : [5, 7],
    9 : [1, 10, 11],
    10 : [2, 9, 12],
    11 : [9, 12],
    12 : [10, 11, 13, 16],
    13 : [12, 14, 16],
    14 : [13, 15, 17],
    15 : [14, 18],
    16 : [12, 13, 17],
    17 : [14, 16, 18],
    18 : [15, 17]}
# boundary weights: (panel ID, panel ID) -> weight of the shared boundary
boundary_weights = {(1, 2) : 0.610,
                    (1, 9) : 0.457,
                    (2, 3) : 0.305,
                    (2, 6) : 0.305,
                    (2, 10) : 0.457,
                    (3, 4) : 0.305,
                    (3, 6) : 0.508,
                    (4, 5) : 0.305,
                    (4, 7) : 0.508,
                    (5, 8) : 0.508,
                    (6, 7) : 0.305,
                    (7, 8) : 0.305,
                    (9, 10) : 0.610,
                    (9, 11) : 0.457,
                    (10, 12) : 0.457,
                    (11, 12) : 0.610,
                    (12, 13) : 0.305,
                    (12, 16) : 0.305,
                    (13, 14) : 0.305,
                    (13, 16) : 0.508,
                    (14, 15) : 0.305,
                    (14, 17) : 0.508,
                    (15, 18) : 0.508,
                    (16, 17) : 0.305,
                    (17, 18) : 0.305}
# panel length in the x-direction (m) -- 25.40/1000 converts inches to metres
length_x = (25.40/1000)*np.array([18, 18, 20, 20, 20, 20, 20, 20,
                                  18, 18, 18, 18, 20, 20, 20, 20, 20, 20])
# panel length in the y-direction (m)
length_y = (25.40/1000)*np.array([24, 24, 12, 12, 12, 12, 12, 12,
                                  24, 24, 24, 24, 12, 12, 12, 12, 12, 12])
# 1 lbf/in = 0.175127 N/mm
# panel loading per unit width in the x-direction in N/m
N_x = 175.127*np.array([700, 375, 270, 250, 210, 305, 290, 600,
                        1100, 900, 375, 400, 330, 190, 300, 815, 320, 300])
# panel loading per unit width in the y-direction in N/m
N_y = 175.127*np.array([400, 360, 325, 200, 100, 360, 195, 480,
                        600, 400, 525, 320, 330, 205, 610, 1000, 180, 410])
# panel amination parameters targets (one 12-entry vector per panel;
# only the out-of-plane entries 9 and 10 are non-zero here)
lampam_targets = [np.array([0, 0, 0, 0, 0, 0, 0, 0, 0.208, -0.843, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.722, 0.054, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.477, -0.235, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.288, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, 0.192, -0.657, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, 0.308, -0.776, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]),
                  np.array([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0])]
# Build one Panel object per panel and assemble them into the multi-panel model
panels = []
for ind_panel in range(n_panels):
    panels.append(Panel(
        ID=ID[ind_panel],
        lampam_target=lampam_targets[ind_panel],
        lampam_weightings=lampam_weightings,
        n_plies=n_plies[ind_panel],
        length_x=length_x[ind_panel],
        length_y=length_y[ind_panel],
        N_x=N_x[ind_panel],
        N_y=N_y[ind_panel],
        weighting=panel_weightings[ind_panel],
        neighbour_panels=neighbour_panels[ID[ind_panel]],
        constraints=constraints))
multipanel = MultiPanel(panels, boundary_weights)
# drop the lamination-parameter targets/weightings that the active design
# guidelines make irrelevant
multipanel.filter_target_lampams(constraints, obj_func_param)
multipanel.filter_lampam_weightings(constraints, obj_func_param)
### Objective function parameters ---------------------------------------------
# NOTE(review): obj_func_param is rebuilt here and overwrites the earlier
# instance; only coeff_diso differs (1 here instead of 10) -- confirm that the
# duplication is intentional and that the earlier instance is only needed for
# the filter_* calls above.
# Coefficient for the 10% rule penalty
coeff_10 = 1
# Coefficient for the contiguity constraint penalty
coeff_contig = 1
# Coefficient for the disorientation constraint penalty
coeff_diso = 1
# Coefficient for the out-of-plane orthotropy penalty
coeff_oopo = 1
# Coefficient for the ply drop spacing guideline penalty
coeff_spacing = 1
obj_func_param = ObjFunction(
    constraints=constraints,
    coeff_contig=coeff_contig,
    coeff_diso=coeff_diso,
    coeff_10=coeff_10,
    coeff_oopo=coeff_oopo,
    coeff_spacing=coeff_spacing)
### Material properties -------------------------------------------------------
# (values converted from imperial units: 1 Pa = 1.45038e-10 Msi)
# Elastic modulus in the fibre direction in Pa
E11 = 20.5/1.45038e-10 # 141 GPa
# Elastic modulus in the transverse direction in Pa
E22 = 1.31/1.45038e-10 # 9.03 GPa
# Poisson's ratio relating transverse deformation and axial loading (-)
nu12 = 0.32
# In-plane shear modulus in Pa
G12 = 0.62/1.45038e-10 # 4.27 GPa
# Density in g/m2
density_area = 300.5
# Ply thickness in m
ply_t = (25.40/1000)*0.0075 # 0.191 mmm
materials = Material(E11=E11, E22=E22, G12=G12, nu12=nu12,
                     density_area=density_area, ply_t=ply_t)
### Saving everything on an excel file ----------------------------------------
# NOTE(review): `filename` is not assigned in this section -- it must be
# defined earlier in the script.
save_multipanel(filename, multipanel, obj_func_param, calc_penalties=False,
                constraints=constraints, mat=materials, save_buckling=True)
save_constraints_BELLA(filename, constraints)
save_objective_function_BELLA(filename, obj_func_param)
save_materials(filename, materials)
autofit_column_widths(filename)
| [
"src.divers.excel.autofit_column_widths",
"src.BELLA.save_set_up.save_multipanel",
"src.BELLA.save_set_up.save_materials",
"src.BELLA.obj_function.ObjFunction",
"numpy.ones",
"src.BELLA.constraints.Constraints",
"src.BELLA.panels.Panel",
"src.BELLA.materials.Material",
"numpy.array",
"src.divers.e... | [((197, 225), 'sys.path.append', 'sys.path.append', (['"""C:\\\\BELLA"""'], {}), "('C:\\\\BELLA')\n", (212, 225), False, 'import sys\n'), ((854, 875), 'src.divers.excel.delete_file', 'delete_file', (['filename'], {}), '(filename)\n', (865, 875), False, 'from src.divers.excel import delete_file\n'), ((3573, 3610), 'numpy.array', 'np.array', (['[-45, 0, 45, 90]'], {'dtype': 'int'}), '([-45, 0, 45, 90], dtype=int)\n', (3581, 3610), True, 'import numpy as np\n'), ((3628, 3703), 'numpy.array', 'np.array', (['[-45, 0, 45, 90, +30, -30, +60, -60, 15, -15, 75, -75]'], {'dtype': 'int'}), '([-45, 0, 45, 90, +30, -30, +60, -60, 15, -15, 75, -75], dtype=int)\n', (3636, 3703), True, 'import numpy as np\n'), ((5163, 5718), 'src.BELLA.constraints.Constraints', 'Constraints', ([], {'sym': 'sym', 'bal': 'bal', 'oopo': 'oopo', 'dam_tol': 'dam_tol', 'dam_tol_rule': 'dam_tol_rule', 'covering': 'covering', 'n_covering': 'n_covering', 'rule_10_percent': 'rule_10_percent', 'rule_10_Abdalla': 'rule_10_Abdalla', 'percent_Abdalla': 'percent_Abdalla', 'percent_0': 'percent_0', 'percent_45': 'percent_45', 'percent_90': 'percent_90', 'percent_135': 'percent_135', 'percent_45_135': 'percent_45_135', 'combine_45_135': 'combine_45_135', 'diso': 'diso', 'contig': 'contig', 'n_contig': 'n_contig', 'delta_angle': 'delta_angle', 'set_of_angles': 'set_of_angles', 'min_drop': 'min_drop', 'pdl_spacing': 'pdl_spacing'}), '(sym=sym, bal=bal, oopo=oopo, dam_tol=dam_tol, dam_tol_rule=\n dam_tol_rule, covering=covering, n_covering=n_covering, rule_10_percent\n =rule_10_percent, rule_10_Abdalla=rule_10_Abdalla, percent_Abdalla=\n percent_Abdalla, percent_0=percent_0, percent_45=percent_45, percent_90\n =percent_90, percent_135=percent_135, percent_45_135=percent_45_135,\n combine_45_135=combine_45_135, diso=diso, contig=contig, n_contig=\n n_contig, delta_angle=delta_angle, set_of_angles=set_of_angles,\n min_drop=min_drop, pdl_spacing=pdl_spacing)\n', (5174, 5718), False, 'from 
src.BELLA.constraints import Constraints\n'), ((6310, 6356), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0])\n', (6318, 6356), True, 'import numpy as np\n'), ((6481, 6508), 'numpy.ones', 'np.ones', (['(n_panels,)', 'float'], {}), '((n_panels,), float)\n', (6488, 6508), True, 'import numpy as np\n'), ((6608, 6775), 'src.BELLA.obj_function.ObjFunction', 'ObjFunction', ([], {'constraints': 'constraints', 'coeff_contig': 'coeff_contig', 'coeff_diso': 'coeff_diso', 'coeff_10': 'coeff_10', 'coeff_oopo': 'coeff_oopo', 'coeff_spacing': 'coeff_spacing'}), '(constraints=constraints, coeff_contig=coeff_contig, coeff_diso=\n coeff_diso, coeff_10=coeff_10, coeff_oopo=coeff_oopo, coeff_spacing=\n coeff_spacing)\n', (6619, 6775), False, 'from src.BELLA.obj_function import ObjFunction\n'), ((11222, 11258), 'src.BELLA.multipanels.MultiPanel', 'MultiPanel', (['panels', 'boundary_weights'], {}), '(panels, boundary_weights)\n', (11232, 11258), False, 'from src.BELLA.multipanels import MultiPanel\n'), ((11839, 12006), 'src.BELLA.obj_function.ObjFunction', 'ObjFunction', ([], {'constraints': 'constraints', 'coeff_contig': 'coeff_contig', 'coeff_diso': 'coeff_diso', 'coeff_10': 'coeff_10', 'coeff_oopo': 'coeff_oopo', 'coeff_spacing': 'coeff_spacing'}), '(constraints=constraints, coeff_contig=coeff_contig, coeff_diso=\n coeff_diso, coeff_10=coeff_10, coeff_oopo=coeff_oopo, coeff_spacing=\n coeff_spacing)\n', (11850, 12006), False, 'from src.BELLA.obj_function import ObjFunction\n'), ((12557, 12647), 'src.BELLA.materials.Material', 'Material', ([], {'E11': 'E11', 'E22': 'E22', 'G12': 'G12', 'nu12': 'nu12', 'density_area': 'density_area', 'ply_t': 'ply_t'}), '(E11=E11, E22=E22, G12=G12, nu12=nu12, density_area=density_area,\n ply_t=ply_t)\n', (12565, 12647), False, 'from src.BELLA.materials import Material\n'), ((12746, 12885), 'src.BELLA.save_set_up.save_multipanel', 'save_multipanel', (['filename', 'multipanel', 
'obj_func_param'], {'calc_penalties': '(False)', 'constraints': 'constraints', 'mat': 'materials', 'save_buckling': '(True)'}), '(filename, multipanel, obj_func_param, calc_penalties=False,\n constraints=constraints, mat=materials, save_buckling=True)\n', (12761, 12885), False, 'from src.BELLA.save_set_up import save_multipanel\n'), ((12900, 12945), 'src.BELLA.save_set_up.save_constraints_BELLA', 'save_constraints_BELLA', (['filename', 'constraints'], {}), '(filename, constraints)\n', (12922, 12945), False, 'from src.BELLA.save_set_up import save_constraints_BELLA\n'), ((12947, 13002), 'src.BELLA.save_set_up.save_objective_function_BELLA', 'save_objective_function_BELLA', (['filename', 'obj_func_param'], {}), '(filename, obj_func_param)\n', (12976, 13002), False, 'from src.BELLA.save_set_up import save_objective_function_BELLA\n'), ((13004, 13039), 'src.BELLA.save_set_up.save_materials', 'save_materials', (['filename', 'materials'], {}), '(filename, materials)\n', (13018, 13039), False, 'from src.BELLA.save_set_up import save_materials\n'), ((13041, 13072), 'src.divers.excel.autofit_column_widths', 'autofit_column_widths', (['filename'], {}), '(filename)\n', (13062, 13072), False, 'from src.divers.excel import autofit_column_widths\n'), ((8594, 8681), 'numpy.array', 'np.array', (['[18, 18, 20, 20, 20, 20, 20, 20, 18, 18, 18, 18, 20, 20, 20, 20, 20, 20]'], {}), '([18, 18, 20, 20, 20, 20, 20, 20, 18, 18, 18, 18, 20, 20, 20, 20, \n 20, 20])\n', (8602, 8681), True, 'import numpy as np\n'), ((8748, 8835), 'numpy.array', 'np.array', (['[24, 24, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 12, 12, 12, 12, 12, 12]'], {}), '([24, 24, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 12, 12, 12, 12, \n 12, 12])\n', (8756, 8835), True, 'import numpy as np\n'), ((8939, 9044), 'numpy.array', 'np.array', (['[700, 375, 270, 250, 210, 305, 290, 600, 1100, 900, 375, 400, 330, 190, 300,\n 815, 320, 300]'], {}), '([700, 375, 270, 250, 210, 305, 290, 600, 1100, 900, 375, 400, 330,\n 190, 300, 815, 
320, 300])\n', (8947, 9044), True, 'import numpy as np\n'), ((9141, 9247), 'numpy.array', 'np.array', (['[400, 360, 325, 200, 100, 360, 195, 480, 600, 400, 525, 320, 330, 205, 610,\n 1000, 180, 410]'], {}), '([400, 360, 325, 200, 100, 360, 195, 480, 600, 400, 525, 320, 330, \n 205, 610, 1000, 180, 410])\n', (9149, 9247), True, 'import numpy as np\n'), ((9329, 9384), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0.208, -0.843, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0.208, -0.843, 0, 0])\n', (9337, 9384), True, 'import numpy as np\n'), ((9405, 9460), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0])\n', (9413, 9460), True, 'import numpy as np\n'), ((9481, 9536), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.722, 0.054, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.722, 0.054, 0, 0])\n', (9489, 9536), True, 'import numpy as np\n'), ((9557, 9613), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0])\n', (9565, 9613), True, 'import numpy as np\n'), ((9634, 9690), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.477, -0.235, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.477, -0.235, 0, 0])\n', (9642, 9690), True, 'import numpy as np\n'), ((9711, 9767), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0])\n', (9719, 9767), True, 'import numpy as np\n'), ((9788, 9844), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.288, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.288, 0, 0])\n', (9796, 9844), True, 'import numpy as np\n'), ((9865, 9921), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0])\n', (9873, 9921), True, 'import numpy as np\n'), ((9942, 9997), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 
0, 0, 0, 0, 0.192, -0.657, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0.192, -0.657, 0, 0])\n', (9950, 9997), True, 'import numpy as np\n'), ((10018, 10073), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0.308, -0.776, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0.308, -0.776, 0, 0])\n', (10026, 10073), True, 'import numpy as np\n'), ((10094, 10150), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0])\n', (10102, 10150), True, 'import numpy as np\n'), ((10171, 10226), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0.092, -0.714, 0, 0])\n', (10179, 10226), True, 'import numpy as np\n'), ((10247, 10303), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0])\n', (10255, 10303), True, 'import numpy as np\n'), ((10324, 10380), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0])\n', (10332, 10380), True, 'import numpy as np\n'), ((10401, 10457), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.597, -0.252, 0, 0])\n', (10409, 10457), True, 'import numpy as np\n'), ((10478, 10534), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.241, -0.816, 0, 0])\n', (10486, 10534), True, 'import numpy as np\n'), ((10555, 10611), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.582, -0.228, 0, 0])\n', (10563, 10611), True, 'import numpy as np\n'), ((10632, 10688), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, -0.469, -0.335, 0, 0])\n', (10640, 10688), True, 'import numpy as np\n'), ((10759, 11119), 
'src.BELLA.panels.Panel', 'Panel', ([], {'ID': 'ID[ind_panel]', 'lampam_target': 'lampam_targets[ind_panel]', 'lampam_weightings': 'lampam_weightings', 'n_plies': 'n_plies[ind_panel]', 'length_x': 'length_x[ind_panel]', 'length_y': 'length_y[ind_panel]', 'N_x': 'N_x[ind_panel]', 'N_y': 'N_y[ind_panel]', 'weighting': 'panel_weightings[ind_panel]', 'neighbour_panels': 'neighbour_panels[ID[ind_panel]]', 'constraints': 'constraints'}), '(ID=ID[ind_panel], lampam_target=lampam_targets[ind_panel],\n lampam_weightings=lampam_weightings, n_plies=n_plies[ind_panel],\n length_x=length_x[ind_panel], length_y=length_y[ind_panel], N_x=N_x[\n ind_panel], N_y=N_y[ind_panel], weighting=panel_weightings[ind_panel],\n neighbour_panels=neighbour_panels[ID[ind_panel]], constraints=constraints)\n', (10764, 11119), False, 'from src.BELLA.panels import Panel\n')] |
# @Author : <NAME>
# @Email : <EMAIL>
from shapely.geometry.polygon import Polygon, Point, LineString
import CoreFiles.GeneralFunctions as GrlFct
from geomeppy.geom.polygons import Polygon2D, Polygon3D,break_polygons
from geomeppy import IDF
from geomeppy.geom import core_perim
import os
import shutil
import BuildObject.DB_Data as DB_Data
import re
import CoreFiles.ProbGenerator as ProbGenerator
import itertools
import matplotlib.pyplot as plt
#this class defines the building characteristics regarding available data in the geojson file
#function that checks if value is out of limits
def checkLim(val, ll, ul):
    """Clamp *val* into the interval [ll, ul].

    Values below ``ll`` are raised to ``ll``.  Values above ``ul`` are first
    divided by 10 and rounded (assuming a unit/typo error in the database);
    if the rescaled value is still above ``ul`` it is capped at ``ul``.
    """
    if val < ll:
        return ll
    if val > ul:
        rescaled = round(val / 10)
        return rescaled if rescaled <= ul else ul
    return val
#get the value from the correct key
def getDBValue(DB, Keys):
    """Return the first value found in *DB* for the given key(s).

    DB   : dict-like object (e.g. a geojson ``properties`` mapping)
    Keys : a single key, or a list of candidate keys tried in order
    Returns '' when none of the keys is present.
    """
    Val = ''
    if isinstance(Keys, list):
        for key in Keys:
            # bare ``except`` replaced: only lookup failures should make us
            # fall through to the next candidate key
            try:
                Val = DB[key]
                break
            except (KeyError, TypeError, IndexError):
                pass
    else:
        try:
            Val = DB[Keys]
        except (KeyError, TypeError, IndexError):
            pass
    return Val
#find the wall id for the shading surfaces from surrounding buildings
def findWallId(Id, Shadingsfile, ref, GE):
    """Return the shading-wall description whose shading id contains *Id*.

    Id           : wall identifier searched as a substring of the shading id
    Shadingsfile : iterable of geojson features (``.properties`` mapping and
                   ``.geometry.coordinates``)
    ref          : (x, y) reference point subtracted from each vertex
    GE           : dict of geometry key names (ShadingIdKey, BuildingIdKey,
                   VertexKey)
    Returns an empty dict when the id is not found.

    Fix: the original while-loop indexed ``Shadingsfile[ii]`` with
    ``ii == len(Shadingsfile)`` before its (off-by-one) bound check, raising
    IndexError whenever the id was absent.
    """
    ShadeWall = {}
    for shading in Shadingsfile:
        if Id in shading.properties[GE['ShadingIdKey']]:
            ShadeWall[GE['BuildingIdKey']] = shading.properties[GE['BuildingIdKey']]
            ShadeWall[GE['ShadingIdKey']] = shading.properties[GE['ShadingIdKey']]
            # zmin/zmax may be missing or non-numeric; height is optional
            try:
                ShadeWall['height'] = float(shading.properties['zmax']) - float(shading.properties['zmin'])
            except (KeyError, TypeError, ValueError):
                pass
            # express every vertex relative to the reference point
            ShadeWall[GE['VertexKey']] = []
            for vertex in shading.geometry.coordinates:
                ShadeWall[GE['VertexKey']].append(tuple([vertex[0] - ref[0], vertex[1] - ref[1]]))
            return ShadeWall
    print('No finded Wall Id ....')
    return ShadeWall
#find the height the building's ID
def findBuildId(Id, Buildingsfile, GE):
    """Return the height of the building whose id equals *Id*.

    Id            : building identifier to match exactly
    Buildingsfile : iterable of geojson features with a ``.properties`` mapping
    GE            : dict of geometry key names (uses BuildingIdKey)
    Returns 0 when the id is not found.

    Fix: the original while-loop indexed ``Buildingsfile[ii]`` with
    ``ii == len(Buildingsfile)`` before its (off-by-one) bound check, raising
    IndexError whenever the id was absent.
    """
    for build in Buildingsfile:
        if Id == build.properties[GE['BuildingIdKey']]:
            return build.properties['height']
    print('No finded Build Id ....')
    return 0
class BuildingList:
    """Container pairing each Building data object with its EnergyPlus IDF."""

    def __init__(self):
        # list of {'BuildData': Building, 'BuildIDF': IDF} dicts, one per case
        self.building = []

    def addBuilding(self, name, DataBaseInput, nbcase, MainPath, epluspath, LogFile, PlotOnly):
        """Create the IDF object and the Building object for one case and store them."""
        # point geomeppy at the EnergyPlus installation and start from the
        # minimal example file shipped with EnergyPlus
        IDF.setiddname(os.path.join(epluspath, "Energy+.idd"))
        minimal_idf = os.path.normcase(os.path.join(epluspath, "ExampleFiles/Minimal.idf"))
        idf = IDF(minimal_idf)
        idf.idfname = name
        # gather all the geojson-derived characteristics for this case
        bld = Building(name, DataBaseInput, nbcase, MainPath, LogFile, PlotOnly)
        # both objects are appended as one dict in the global studied-case list
        self.building.append({'BuildData': bld, 'BuildIDF': idf})
class Building:
    def __init__(self,name,DataBaseInput,nbcase,MainPath,LogFile,PlotOnly):
        """Extract every available characteristic of building *nbcase*.

        name          : identifier of the studied case
        DataBaseInput : dict with 'Build' and 'Shades' geojson collections
        nbcase        : index of the building in the 'Build' collection
        MainPath      : working directory of the study
        LogFile       : log-file handle (or []) passed to GrlFct.Write2LogFile
        PlotOnly      : when True only the geometric attributes are computed
        """
        Buildingsfile = DataBaseInput['Build']
        Shadingsfile = DataBaseInput['Shades']
        DB = Buildingsfile[nbcase]
        # shortcuts to the configuration dictionaries of DB_Data
        DBL = DB_Data.DBLimits
        BE = DB_Data.BasisElement
        GE = DB_Data.GeomElement
        EPC = DB_Data.EPCMeters
        SD = DB_Data.SimuData
        ExEn = DB_Data.ExtraEnergy
        try:
            self.CRS = Buildingsfile.crs['properties']['name'] #this is the coordinates reference system for the polygons
        except:
            self.CRS = 'Null'
        self.getBEData(BE)
        self.getSimData(SD)
        self.name = name
        self.BuildID = self.getBuildID(DB, GE,LogFile)
        self.Multipolygon = self.getMultipolygon(DB)
        # NOTE: the attribute assignments below are order-dependent -- several
        # getters read attributes set by the previous lines.
        self.nbfloor = self.getnbfloor(DB, DBL,LogFile)
        self.nbBasefloor = self.getnbBasefloor(DB, DBL)
        self.height = self.getheight(DB, DBL)
        self.DistTol = GE['DistanceTolerance']
        self.footprint, self.BlocHeight, self.BlocNbFloor = self.getfootprint(DB,LogFile,self.nbfloor)
        self.AggregFootprint = self.getAggregatedFootprint()
        self.RefCoord = self.getRefCoord()
        self.ATemp = self.getsurface(DB, DBL,LogFile)
        self.SharedBld, self.VolumeCorRatio = self.IsSameFormularIdBuilding(Buildingsfile, nbcase, LogFile, DBL)
        self.BlocHeight, self.BlocNbFloor, self.StoreyHeigth = self.EvenFloorCorrection(self.BlocHeight, self.nbfloor, self.BlocNbFloor, self.footprint, LogFile)
        self.EPHeatedArea = self.getEPHeatedArea(LogFile)
        self.MaxShadingDist = GE['MaxShadingDist']
        self.AdjacentWalls = [] #this will be appended in the getshade function if any present
        self.shades = self.getshade(DB,Shadingsfile,Buildingsfile,GE,LogFile)
        self.Materials = DB_Data.BaseMaterial
        self.InternalMass = DB_Data.InternalMass
        self.MakeRelativeCoord() # we need to convert into local coordinate in order to compute adjacencies with more precision than keeping thousand of km for x and y
        if not PlotOnly:
            #the attributres above are needed in all case, the one below are needed only if energy simulation is asked for
            self.VentSyst = self.getVentSyst(DB, LogFile)
            self.AreaBasedFlowRate = self.getAreaBasedFlowRate(DB, DBL, BE)
            self.OccupType = self.getOccupType(DB, LogFile)
            self.nbStairwell = self.getnbStairwell(DB, DBL)
            self.WeatherDataFile = DB_Data.WeatherFile['Loc']
            self.year = self.getyear(DB, DBL)
            self.EPCMeters = self.getEPCMeters(DB, EPC, LogFile)
            if len(self.SharedBld) > 0:
                # several geojson entries can share one EPC declaration
                self.CheckAndCorrEPCs(Buildingsfile, LogFile, nbcase, EPC)
            self.nbAppartments = self.getnbAppartments(DB, DBL)
            #we define the internal load only if it's not for making picture
            self.IntLoad = self.getIntLoad(MainPath,LogFile)
            self.DHWInfos = self.getExtraEnergy(ExEn, MainPath)
        #if there are no cooling comsumption, lets considerer a set point at 50deg max
        # for key in self.EPCMeters['Cooling']:
        #     if self.EPCMeters['Cooling'][key]>0:
        #         self.setTempUpL = BE['setTempUpL']
        #         self.intT_freecool = 50
        #     else:
        #         self.setTempUpL = [50]*len(BE['setTempUpL'])
def MakeRelativeCoord(self):
# we need to convert change the reference coordinate because precision is needed for boundary conditions definition:
newfoot = []
roundfactor = 4
for foot in self.footprint:
newfoot.append([(round(node[0] - self.RefCoord[0],roundfactor), round(node[1] - self.RefCoord[1],roundfactor)) for node in foot])
self.footprint = newfoot
for shade in self.shades.keys():
newcoord = [(round(node[0] - self.RefCoord[0],roundfactor), round(node[1] - self.RefCoord[1],roundfactor)) for node in
self.shades[shade]['Vertex']]
self.shades[shade]['Vertex'] = newcoord
newwalls = []
for Wall in self.AdjacentWalls:
newcoord = [(round(node[0] - self.RefCoord[0],roundfactor), round(node[1] - self.RefCoord[1],roundfactor)) for node in Wall['geometries']]
Wall['geometries'] = newcoord
    def CheckAndCorrEPCs(self,Buildingsfile,LogFile,nbcase,EPC):
        """Correct the EPC meters when several geojson entries share one EPC.

        If every building sharing this FormularId reports the same total heat
        need (i.e. one EPC was duplicated over all of them), each meter of
        this building is scaled by ``self.VolumeCorRatio`` (this building's
        volume share), mirroring the ATemp correction done earlier.
        Mutates ``self.EPCMeters`` in place and logs the correction.
        """
        totHeat = []
        tocheck = [nbcase]+self.SharedBld
        for share in tocheck:
            val = 0
            Meas = self.getEPCMeters(Buildingsfile[share],EPC,[])
            for key in Meas['Heating'].keys():
                val += Meas['Heating'][key]
            totHeat.append(val)
        # correction on the ATemp if it is the same on all (should be)
        HeatDiff = [totHeat[idx + 1] - A for idx, A in enumerate(totHeat[:-1])]
        if all(v == 0 for v in HeatDiff):
            newval = 0
            for keyType in self.EPCMeters.keys():
                for key in self.EPCMeters[keyType].keys():
                    # non-numeric meter entries are left untouched
                    try:
                        self.EPCMeters[keyType][key] *= self.VolumeCorRatio
                    except:
                        pass
                    if 'Heating' == keyType:
                        newval += self.EPCMeters['Heating'][key]
            msg = '[EPCs correction] The EPCs total heat needs for the each shared buildings is :'+str(totHeat)+'\n'
            GrlFct.Write2LogFile(msg, LogFile)
            msg = '[EPCs correction] All EPCs metrix will be modified by the Volume ratio as for ATemp\n'
            GrlFct.Write2LogFile(msg, LogFile)
            msg = '[EPCs correction] For example, the Heat needs is corrected from : '+ str(totHeat[0])+ ' to : '+ str(newval)+'\n'
            GrlFct.Write2LogFile(msg, LogFile)
    def IsSameFormularIdBuilding(self,Buildingsfile,nbcase,LogFile,DBL):
        """Detect other geojson entries sharing this building's FormularId.

        When several entries share one FormularId they share one EPC, so the
        declared number of floors and heated area (ATemp) must be split among
        them.  Returns (SharedBld, VolumeCorRatio) where SharedBld is the list
        of sharing entry indexes and VolumeCorRatio is this building's volume
        divided by the total shared volume (1 when nothing is shared).
        Side effects: may correct ``self.nbfloor`` and ``self.ATemp``.
        """
        SharedBld = []
        VolumeCorRatio = 1
        Correction = False
        for nb,Build in enumerate(Buildingsfile):
            # entries without a FormularId raise and are simply skipped
            try:
                if Build.properties['FormularId'] == self.BuildID['FormularId'] and nb != nbcase:
                    SharedBld.append(nb)
                    Correction = True
            except:
                pass
        maxHeight=[max(self.BlocHeight)]
        floors = [self.nbfloor]
        ATemp = [self.ATemp]
        # volume of each sharing building = sum of (bloc footprint area x bloc height)
        Volume = [sum([Polygon(foot).area * self.BlocHeight[idx] for idx,foot in enumerate(self.footprint)])]
        for nb in SharedBld:
            ATemp.append(self.getsurface(Buildingsfile[nb], DBL,[]))
            floors.append(self.getnbfloor(Buildingsfile[nb],DBL,[]))
            Bldfootprint, BldBlocHeight, BldBlocNbFloor = self.getfootprint(Buildingsfile[nb],[],floors[-1])
            maxHeight.append(max(BldBlocHeight))
            Volume.append(sum([Polygon(foot).area * BldBlocHeight[idx] for idx,foot in enumerate(Bldfootprint)]))
        if Correction:
            #some correction is needed on the nb of floor because a higher one, with the same FormularId is higher
            newfloor = max(int(floors[maxHeight.index(max(maxHeight))] / (max(maxHeight) / maxHeight[0])),1)
            msg = '[Shared EPC] Buildings are found with same FormularId: '+str(SharedBld)+'\n'
            GrlFct.Write2LogFile(msg, LogFile)
            msg = '[Nb Floor Cor] The nb of floors will be corrected by the height ratio of this building with the highests one with same FormularId (but cannot be lower than 1)\n'
            GrlFct.Write2LogFile(msg, LogFile)
            msg = '[Nb Floor Cor] nb of floors is thus corrected from : '+ str(self.nbfloor)+ ' to : '+ str(newfloor)+'\n'
            GrlFct.Write2LogFile(msg, LogFile)
            self.nbfloor = newfloor
            #correction on the ATemp if it is the same on all (should be)
            Adiff = [ATemp[idx+1]-A for idx,A in enumerate(ATemp[:-1])]
            if all(v == 0 for v in Adiff):
                VolumeCorRatio = Volume[0] / sum(Volume)
                newATemp = self.ATemp * VolumeCorRatio
                msg = '[ATemp Cor] The ATemp will also be modified by the volume ratio of this building over the volume sum of all concerned building \n'
                GrlFct.Write2LogFile(msg, LogFile)
                msg = '[ATemp Cor] The ATemp is thus corrected from : '+ str(self.ATemp)+ ' to : '+ str(newATemp)+'\n'
                GrlFct.Write2LogFile(msg, LogFile)
                self.ATemp = newATemp
        return SharedBld, VolumeCorRatio
def getBEData(self,BE):
for key in BE.keys():
setattr(self, key, BE[key])
    def getExtraEnergy(self,ExEn,MaintPath):
        """Resolve the ExtraEnergy (DHW) configuration entries.

        Entries that point to an existing file (path relative to the parent of
        *MaintPath*) are copied into the case input directory and replaced by
        their local relative path; all other entries are kept as-is.
        Returns the resolved dict.
        """
        output={}
        for key in ExEn.keys():
            # any failure (non-path value, copy error, ...) falls back to the
            # raw configuration value
            try:
                ifFile = os.path.join(os.path.dirname(MaintPath),os.path.normcase(ExEn[key]))
                if os.path.isfile(ifFile):
                    # isInstance of the input dir helper -- defined elsewhere in this class
                    AbsInputFileDir,InputFileDir = self.isInputDir()
                    iflocFile = os.path.join(AbsInputFileDir,os.path.basename(ifFile))
                    if not os.path.isfile(iflocFile):
                        shutil.copy(ifFile,iflocFile)
                    output[key] = os.path.join(InputFileDir,os.path.basename(ifFile))
                else:
                    output[key] = ExEn[key]
            except:
                output[key] = ExEn[key]
        return output
def getSimData(self,SD):
for key in SD.keys():
setattr(self, key, SD[key])
def getBuildID(self,DB,GE,LogFile):
BuildID={}
for key in GE['BuildIDKey']:
try:
BuildID[key] = DB.properties[key]
except:
pass #BuildID[key] = None
if BuildID:
for key in BuildID:
msg = '[Bld ID] '+ key+' : ' + str(BuildID[key]) + '\n'
GrlFct.Write2LogFile(msg, LogFile)
return BuildID
def getMultipolygon(self,DB):
test = DB.geometry.coordinates[0][0][0]
if type(test) is list:
Multipolygon = True
else:
Multipolygon = False
return Multipolygon
    def getRefCoord(self):
        """Return the (x, y) reference coordinates used for visualisation.

        The reference point is the mean footprint centroid shifted down-left
        by an offset derived from the aggregated footprint area.
        """
        #check for Multipolygon first
        if self.Multipolygon:
            # mean of the centroids of every bloc footprint
            centroide = [list(Polygon(foot).centroid.coords) for foot in self.footprint] #same reason than below, foot print is computed before now [list(Polygon(DB.geometry.coordinates[i][0]).centroid.coords) for i in range(len(DB.geometry.coordinates))]
            x = sum([centroide[i][0][0] for i in range(len(centroide))])/len(centroide)
            y = sum([centroide[i][0][1] for i in range(len(centroide))])/len(centroide)
        else:
            centroide = list(Polygon(self.footprint[0]).centroid.coords)# now the foot print is computed nbefore the reference. before it was defined with list(Polygon(DB.geometry.coordinates[0]).centroid.coords)
            x = centroide[0][0]
            y = centroide[0][1]
        # offset scales with the square root of the aggregated footprint area
        offset = ((2*Polygon(self.AggregFootprint).area)**0.5)/8
        ref = (x-offset, y-offset) #there might be not a true need for suche precision....
        return ref
    def getfootprint(self,DB,LogFile=[],nbfloor=0):
        """Return (coord, BlocHeight, BlocNbFloor) for each building bloc.

        coord       : list of footprints, one list of (x, y) tuples per bloc
        BlocHeight  : list of bloc heights (multipolygon case only here)
        BlocNbFloor : list of floor counts (2D-file case only here)

        NOTE(review): ``LogFile=[]`` is a mutable default argument -- it is
        only passed through to logging helpers here, but confirm it is never
        mutated by a callee.
        """
        DistTol = self.DistTol
        coord = []
        node2remove =[]
        BlocHeight = []
        BlocNbFloor = []
        #we first need to check if it is Multipolygon
        if self.Multipolygon:
            #then we append all the floor and roof fottprints into one with associate height
            # blocs are detected as pairs of identical polygons at two
            # different z levels (floor + roof); the bloc height is the
            # difference of their third coordinates
            for idx1,poly1 in enumerate(DB.geometry.coordinates[:-1]):
                for idx2,poly2 in enumerate(DB.geometry.coordinates[idx1+1:]):
                    if poly1 == poly2:
                        polycoor = []
                        for j in poly1[0]:
                            new = (j[0], j[1])
                            new_coor = new#[]
                            # for ii in range(len(RefCoord)):
                            #     new_coor.append((new[ii] - RefCoord[ii]))
                            polycoor.append(tuple(new_coor))
                        # drop the closing vertex when the polygon is closed
                        if polycoor[0]==polycoor[-1]:
                            polycoor = polycoor[:-1]
                        #even before skewed angle, we need to check for tiny edge below the tolerance onsdered aftward (0.5m)
                        pt2remove = []
                        for edge in Polygon2D(polycoor).edges:
                            if edge.length < DistTol:
                                pt2remove.append(edge.p2)
                        for pt in pt2remove:
                            if len(polycoor)>3:
                                polycoor.remove(pt)
                        # flag near-collinear nodes (5 deg threshold) for later removal
                        newpolycoor, node = core_perim.CheckFootprintNodes(polycoor,5)
                        node2remove.append(node)
                        #polycoor.reverse()
                        coord.append(polycoor)
                        BlocHeight.append(round(abs(DB.geometry.poly3rdcoord[idx1]-DB.geometry.poly3rdcoord[idx2+idx1+1]),1))
            #these following lines are here to highlight holes in footprint and split it into two blocs...
            #it may appear some errors for other building with several blocs and some with holes (these cases havn't been checked)
            poly2merge = []
            for idx, coor in enumerate(coord):
                for i in range(len(coord)-idx-1):
                    if Polygon(coor).contains(Polygon(coord[idx+i+1])):
                        poly2merge.append([idx,idx+i+1])
            try:
                for i,idx in enumerate(poly2merge):
                    # split the outer polygon along the hole into two blocs
                    new_surfaces = break_polygons(Polygon3D(coord[idx[0]]), Polygon3D(coord[idx[1]]))
                    xs,ys,zs = zip(*list(new_surfaces[0]))
                    coord[idx[0]] = [(xs[nbv],ys[nbv]) for nbv in range(len(xs))]
                    xs,ys,zs = zip(*list(new_surfaces[1]))
                    coord[idx[1]] = [(xs[nbv],ys[nbv]) for nbv in range(len(xs))]
                    BlocHeight[idx[1]] = BlocHeight[idx[0]]
                    msg ='[Geom Cor] There is a hole that will split the main surface in two blocs \n'
                    GrlFct.Write2LogFile(msg, LogFile)
            except:
                msg = '[Poly Error] Some error are present in the polygon parts. Some are identified as being inside others...\n'
                print(msg[:-1])
                GrlFct.Write2LogFile(msg, LogFile)
                import matplotlib.pyplot as plt
                fig = plt.figure(0)
                for i in coord:
                    xs,ys = zip(*i)
                    plt.plot(xs,ys,'-.')
                #titre = 'FormularId : '+str(DB.properties['FormularId'])+'\n 50A_UUID : '+str(DB.properties['50A_UUID'])
                # plt.title(titre)
                # plt.savefig(self.name+ '.png')
                # plt.close(fig)
            #we need to clean the footprint from the node2remove but not if there are part of another bloc
            newbloccoor= []
            for idx,coor in enumerate(coord):
                newcoor = []
                FilteredNode2remove = []
                single = False
                for node in node2remove[idx]:
                    single = True
                    for idx1,coor1 in enumerate(coord):
                        if idx!=idx1:
                            if coor[node] in coor1 and coor[node] not in [n for i,n in enumerate(coor1[idx1]) if i in node2remove[idx1]]:
                                single =False
                    if single:
                        FilteredNode2remove.append(node)
                for nodeIdx,node in enumerate(coor):
                    if not nodeIdx in FilteredNode2remove:
                        newcoor.append(node)
                newbloccoor.append(newcoor)
            coord = newbloccoor
        else:
            #for dealing with 2D files
            for j in DB.geometry.coordinates[0]:
                new = (j[0], j[1])
                new_coor = new#[]
                # for ii in range(len(self.RefCoord)):
                #     new_coor.append((new[ii] - self.RefCoord[ii]))
                coord.append(tuple(new_coor))
            BlocNbFloor.append(nbfloor)
            BlocHeight.append(self.height)
            newpolycoor, node = core_perim.CheckFootprintNodes(coord, 5)
            coord= [newpolycoor]
        #before submitting the full coordinates, we need to check correspondance in case of multibloc
        # NOTE(review): CheckMultiBlocFootprint is not defined in this part of
        # the module -- presumably imported or defined elsewhere; verify.
        coord, validFootprint = CheckMultiBlocFootprint(coord,tol = DistTol)
        if not validFootprint:
            msg = '[Poly Error] The different bloc are not adjacent...\n'
            #print(msg[:-1])
            GrlFct.Write2LogFile(msg, LogFile)
            return
        # multibloc should share at least one edge and not a polygon as bld:a3848e24-d29e-44bc-a395-a25b5fd26598 in area : 0180C3170 of Sodermalm_V4
        SmallEdge = False
        for bloc in coord:
            if [val for val in Polygon2D(bloc).edges_length if val < 2]:
                SmallEdge = True
        if SmallEdge:
            msg = '[Geom Warning] This building has at least one edge length below 2m\n'
            #print(msg[:-1])
            GrlFct.Write2LogFile(msg, LogFile)
        return coord, BlocHeight, BlocNbFloor
    def EvenFloorCorrection(self,BlocHeight,nbfloor,BlocNbFloor,coord,LogFile):
        """Rescale the bloc heights so each bloc holds a whole number of 3 m storeys.

        BlocHeight : list of bloc heights in meters, rescaled in place and returned
        nbfloor : number of floors declared in the EPC (0 means unknown)
        BlocNbFloor : ignored on entry, rebuilt from scratch below
        coord : footprint vertex list per bloc (used for logging only)
        LogFile : log-file handle forwarded to GrlFct.Write2LogFile
        Returns (BlocHeight, BlocNbFloor, StoreyHeigth).
        """
        # we compute a storey height as well to choose the one that corresponds to the highest part of the building afterward
        BlocNbFloor=[] #the number of blocks is reset to comply with the old 2D geojson files; it is anyway empty for multipolygon files
        StoreyHeigth = 3
        if nbfloor !=0:
            # ratio stretching the declared storey height to exactly 3 m; left at 1
            # when the implied storey height is 0.5 m or below (suspicious data)
            storeyRatio = StoreyHeigth / (max(BlocHeight) / nbfloor) if (max(BlocHeight) / nbfloor) > 0.5 else 1
            msg = '[Geom Info] The max bloc height is : ' + str(round(max(BlocHeight), 2)) + ' for ' + str(
                nbfloor) + ' floors declared in the EPC \n'
        else:
            # no floor count declared : infer it from the max bloc height and a 3 m storey
            nbfloor= round(max(BlocHeight)/StoreyHeigth)
            try:
                storeyRatio = StoreyHeigth / (max(BlocHeight) / nbfloor) if (max(BlocHeight) / nbfloor) > 0.5 else 1
            except:
                # nbfloor can round down to 0 (max height < 1.5 m); the division then fails
                storeyRatio = 0
            msg = '[Geom Info] The max bloc height is : ' + str(round(max(BlocHeight), 2)) + ' for ' + str(
                nbfloor) + ' floors computed from max bloc height\n'
        GrlFct.Write2LogFile(msg, LogFile)
        msg = '[Geom Cor] A ratio of ' + str(storeyRatio) + ' will be applied on each bloc height\n'
        GrlFct.Write2LogFile(msg, LogFile)
        for height in range(len(BlocHeight)):
            BlocHeight[height] *= storeyRatio
        for idx, Height in enumerate(BlocHeight):
            val = int(round(Height, 1) / StoreyHeigth)
            BlocNbFloor.append(max(1, val)) # the height is rounded to the closest 10cm; at least one floor is always kept
            BlocHeight[idx] = BlocNbFloor[-1] * StoreyHeigth
            msg = '[Geom Info] Bloc height : ' + str(BlocHeight[idx]) + ' with ' + str(BlocNbFloor[-1]) + ' nb of floors\n'
            GrlFct.Write2LogFile(msg, LogFile)
            msg = '[Geom Info] This bloc has a footprint with : ' + str(len(coord[idx])) + ' vertexes\n'
            GrlFct.Write2LogFile(msg, LogFile)
            if val == 0:
                # bloc was below 3 m and forced to one storey : warn (best effort)
                try:
                    LogFile.write(
                        '[WARNING] /!\ This bloc as a height below 3m, it has been raized to 3m to enable construction /!\ \n')
                except:
                    pass
        return BlocHeight, BlocNbFloor, StoreyHeigth
def getEPHeatedArea(self,LogFile):
"get the heated area based on the footprint and the number of floors"
self.BlocFootprintArea=[]
EPHeatedArea = 0
for i,foot in enumerate(self.footprint):
EPHeatedArea += Polygon(foot).area*self.BlocNbFloor[i]
self.BlocFootprintArea.append(Polygon(foot).area)
msg = '[Geom Info] Blocs footprint areas : '+ str(self.BlocFootprintArea)+'\n'
GrlFct.Write2LogFile(msg, LogFile)
msg = '[Geom Info] The total heated area is : ' + str(EPHeatedArea)+' for a declared ATemp of : '+str(self.ATemp)+' --> discrepancy of : '+str(round((self.ATemp-EPHeatedArea)/self.ATemp*100,2))+'\n'
GrlFct.Write2LogFile(msg, LogFile)
return EPHeatedArea
def getsurface(self,DB, DBL,LogFile):
"Get the surface from the input file, ATemp"
try: ATemp = int(getDBValue(DB.properties, DBL['surface_key']))
except: ATemp = 1
if ATemp == 1:
msg = '[Geom Error] Atemp not recognized as number, fixed to 1\n'
GrlFct.Write2LogFile(msg, LogFile)
ATemp = checkLim(ATemp,DBL['surface_lim'][0],DBL['surface_lim'][1])
self.ATempOr= ATemp #this is to keep the original value as some correction might done afterward if more then 1 bld is present in 1 Id
return ATemp
def getnbfloor(self,DB, DBL,LogFile):
"Get the number of floor above ground"
try: nbfloor=int(getDBValue(DB.properties, DBL['nbfloor_key']))
except: nbfloor = 0
if nbfloor == 0:
msg = '[EPCs Warning] The nb of floors is 0. It will be defined using the max bloc height and a storey height of 3m\n'
GrlFct.Write2LogFile(msg, LogFile)
nbfloor = checkLim(nbfloor,DBL['nbfloor_lim'][0],DBL['nbfloor_lim'][1])
return nbfloor
def getnbStairwell(self,DB, DBL):
"Get the number of stariwell, need for natural stack effect on infiltration"
try: nbStairwell = int(getDBValue(DB.properties, DBL['nbStairwell_key']))
except: nbStairwell=0
nbStairwell = checkLim(nbStairwell,DBL['nbStairwell_lim'][0],DBL['nbStairwell_lim'][1])
return nbStairwell
def getnbBasefloor(self,DB, DBL):
"Get the number of floor below ground"
try: nbBasefloor = int(getDBValue(DB.properties, DBL['nbBasefloor_key']))
except: nbBasefloor = 0
nbBasefloor = checkLim(nbBasefloor,DBL['nbBasefloor_lim'][0],DBL['nbBasefloor_lim'][1])
return nbBasefloor
def getyear(self,DB, DBL):
"Get the year of construction in the input file"
try: year = int(getDBValue(DB.properties, DBL['year_key']))
except: year = 1900
year = checkLim(year,DBL['year_lim'][0],DBL['year_lim'][1])
return year
def getEPCMeters(self,DB,EPC,LogFile):
"Get the EPC meters values"
Meters = {}
for key1 in EPC:
Meters[key1] = {}
for key2 in EPC[key1]:
if '_key' in key2:
try:
Meters[key1][key2[:-4]] = DB.properties[EPC[key1][key2]]
Meters[key1][key2[:-4]] = int(DB.properties[EPC[key1][key2]])*EPC[key1][key2[:-4]+'COP']
except:
pass
return Meters
def getnbAppartments(self, DB, DBL):
"Get the number of appartment in the building"
try: nbApp = int(getDBValue(DB.properties, DBL['nbAppartments_key']))
except: nbApp = 0
nbApp = checkLim(nbApp,DBL['nbAppartments_lim'][0],DBL['nbAppartments_lim'][1])
return nbApp
def getheight(self, DB, DBL):
"Get the building height from the input file, but not used if 3D coordinates in the footprints"
try: height = int(getDBValue(DB.properties, DBL['height_key']))
except: height = 0
height = checkLim(height,DBL['height_lim'][0],DBL['height_lim'][1])
return height
    def getAggregatedFootprint(self):
        """Merge the footprints of all blocs into one aggregated outer footprint.

        Starts from the first bloc and repeatedly unions any remaining bloc whose
        intersection with the aggregate is non-empty and not exactly a shapely
        Point (i.e. they share more than a single corner), keeping only the
        exterior ring of the union. Returns the closed list of (x, y) vertexes.
        NOTE(review): assumes every bloc eventually touches the aggregate
        (checked upstream by the multi-bloc adjacency test); otherwise idx would
        run past the end of RemainingBlocs.
        """
        # lets compute the aggregaded external footprint of the different blocs
        # starting with the first one
        AggregFootprint = self.footprint[0]
        RemainingBlocs = self.footprint[1:]
        idx = 0
        while RemainingBlocs:
            Intersectionline = Polygon(AggregFootprint).intersection(Polygon(RemainingBlocs[idx]))
            # merge only when the contact is more than a single point
            if Intersectionline and type(Intersectionline) != Point:
                AggregFootprint = list(Polygon(AggregFootprint).union(Polygon(RemainingBlocs[idx])).exterior.coords)
                RemainingBlocs.remove(RemainingBlocs[idx])
                idx = 0
            else:
                idx += 1
        # in order to close the loop if not already done
        if AggregFootprint[0] != AggregFootprint[-1]:
            AggregFootprint.append(AggregFootprint[0])
        return AggregFootprint
    def getshade(self, DB,Shadingsfile,Buildingsfile,GE,LogFile,PlotOnly = True):
        "Get all the shading surfaces to be build for surrounding building effect"
        # Returns a dict keyed by shading-wall id with 'Vertex', 'height' and
        # 'distance' entries. Walls found to overlap a footprint edge are also
        # registered in self.AdjacentWalls with distance 0.
        # NOTE(review): PlotOnly is currently unused in this method.
        shades = {}
        try:
            shadesID = DB.properties[GE['ShadingIdKey']]
        except:
            # no shading-id property for this building : nothing to build
            return shades
        ModifiedShadeVertexes ={'ShadeId' : [], 'OldCoord': [], 'NewCoord' : []} #this dict will log the changes in the vertex coordinate to adjust other shading if necesseray afterward
        # NOTE(review): ModifiedShadeVertexes is filled below but never read back here
        # footprint expressed relative to the building's reference coordinate
        RelativeAgregFootprint = [(node[0] - self.RefCoord[0], node[1] - self.RefCoord[1]) for node in self.AggregFootprint]
        Meancoordx = list(Polygon(RelativeAgregFootprint).centroid.coords)[0][0]
        Meancoordy = list(Polygon(RelativeAgregFootprint).centroid.coords)[0][1]
        currentRef = self.getRefCoord()
        ref = (0, 0) if currentRef==self.RefCoord else self.RefCoord
        # shadesID is a ';'-separated list of wall ids : collect separator positions
        idlist = [-1]
        for m in re.finditer(';', shadesID):
            idlist.append(m.start())
        for ii, sh in enumerate(idlist):
            # slice out each wall id between two consecutive separators
            if ii == len(idlist) - 1:
                wallId = shadesID[idlist[ii] + 1:]
            else:
                wallId = shadesID[idlist[ii] + 1:idlist[ii + 1]]
            ShadeWall = findWallId(wallId, Shadingsfile, ref, GE)
            if not 'height' in ShadeWall.keys():
                # fall back on the casting building's height, then on our own height
                ShadeWall['height'] = findBuildId(ShadeWall[GE['BuildingIdKey']], Buildingsfile,GE)
                if ShadeWall['height']==None:
                    ShadeWall['height'] = self.height
            currentShadingElement = [(node[0]-self.RefCoord[0],node[1]-self.RefCoord[1]) for node in ShadeWall[GE['VertexKey']]]
            meanPx = (currentShadingElement[0][0] + currentShadingElement[1][0]) / 2
            meanPy = (currentShadingElement[0][1] + currentShadingElement[1][1]) / 2
            edgelength = LineString(currentShadingElement).length
            if edgelength<2:
                msg = '[Shading Info] This one is dropped, less than 2m wide ('+str(round(edgelength,2))+'m), shading Id : '+ ShadeWall[GE['ShadingIdKey']] +'\n'
                GrlFct.Write2LogFile(msg, LogFile)
                #print(msg[:-1])
                continue
            if ShadeWall[GE['ShadingIdKey']] =='V67656-3':
                a=1 # NOTE(review): leftover debugging breakpoint anchor, no effect
            confirmed,currentShadingElement,OverlapCode = checkShadeWithFootprint(RelativeAgregFootprint,currentShadingElement,ShadeWall[GE['ShadingIdKey']],tol = self.DistTol)
            if confirmed:
                # the wall overlaps a footprint edge : treat it as an adjacent wall
                if ShadeWall['height']<=(max(self.BlocHeight)+self.StoreyHeigth):
                    OverlapCode +=1
                # round the wall height to a whole number of storeys
                ShadeWall['height'] = self.StoreyHeigth*round(ShadeWall['height'] / self.StoreyHeigth) #max(self.BlocHeight)#
                self.AdjacentWalls.append(ShadeWall)
                shades[wallId] = {}
                shades[wallId]['Vertex'] = [(node[0]+self.RefCoord[0],node[1]+self.RefCoord[1]) for node in currentShadingElement]
                shades[wallId]['height'] = ShadeWall['height']
                shades[wallId]['distance'] = 0
                ModifiedShadeVertexes['ShadeId'].append(ShadeWall[GE['ShadingIdKey']])
                ModifiedShadeVertexes['OldCoord'].append(ShadeWall[GE['VertexKey']])
                ModifiedShadeVertexes['NewCoord'].append(shades[wallId]['Vertex'])
                msg = '[Adjacent Wall] This Shading wall is considered as adjacent with an overlap code of '+str(OverlapCode)+', shading Id : ' + ShadeWall[
                    GE['ShadingIdKey']] + '\n'
                #print(msg[:-1])
                GrlFct.Write2LogFile(msg, LogFile)
                continue
            if OverlapCode== 999:
                # code 999 : the wall lies inside the footprint, drop it
                msg = '[Shading Error] This Shading wall goes inside the building...It is dropped, shading Id : ' + ShadeWall[
                    GE['ShadingIdKey']] + '\n'
                #print(msg[:-1])
                GrlFct.Write2LogFile(msg, LogFile)
                continue
            # plain remote shading surface : keep it with its distance to the centroid
            dist = (abs(meanPx - Meancoordx) ** 2 + abs(meanPy - Meancoordy) ** 2) ** 0.5
            shades[wallId] = {}
            shades[wallId]['Vertex'] = ShadeWall[GE['VertexKey']]
            shades[wallId]['height'] = round(ShadeWall['height'],2)
            shades[wallId]['distance'] = dist
        return shades
def getVentSyst(self, DB,LogFile):
"Get ventilation system type"
VentSyst = {}
for key in DB_Data.VentSyst:
try:
VentSyst[key] = True if 'Ja' in DB.properties[DB_Data.VentSyst[key]] else False
except:
VentSyst[key] = False
nbVentSyst = [idx for idx, key in enumerate(VentSyst) if VentSyst[key]]
nbVentSystWithHR = [idx for idx, key in enumerate(VentSyst) if
VentSyst[key] and key[-1] == 'X']
if len(nbVentSyst) > 1:
msg = '[Vent Warning] This building has '+str(len(nbVentSyst))+' ventilation systems declared\n'
GrlFct.Write2LogFile(msg, LogFile)
if len(nbVentSystWithHR)>1:
msg = '[Vent Warning] This building has '+str(len(nbVentSystWithHR))+' ventilation systems with heat recovery\n'
GrlFct.Write2LogFile(msg, LogFile)
return VentSyst
def getAreaBasedFlowRate(self, DB, DBL, BE):
"Get the airflow rates based on the floor area"
try: AreaBasedFlowRate = float(getDBValue(DB.properties, DBL['AreaBasedFlowRate_key']))
except : AreaBasedFlowRate = BE['AreaBasedFlowRate']
AreaBasedFlowRate = checkLim(AreaBasedFlowRate,DBL['AreaBasedFlowRate_lim'][0],DBL['AreaBasedFlowRate_lim'][1])
return AreaBasedFlowRate
def getOccupType(self,DB,LogFile):
"get the occupency type of the building"
OccupType = {}
self.OccupRate = {}
for key in DB_Data.OccupType:
if '_key' in key:
try:
OccupType[key[:-4]] = int(DB.properties[DB_Data.OccupType[key]])/100
except:
OccupType[key[:-4]] = 0
if '_Rate' in key:
self.OccupRate[key[:-5]] = DB_Data.OccupType[key]
msg = '[Usage Info] This building has ' + str(1 - OccupType['Residential']) + ' % of none residential occupancy type\n'
GrlFct.Write2LogFile(msg, LogFile)
return OccupType
def isInputDir(self):
InputFileDir = 'InputFiles'
AbsInputFileDir = os.path.join(os.getcwd(),InputFileDir)
if not os.path.exists(AbsInputFileDir):
os.mkdir(AbsInputFileDir)
return AbsInputFileDir,AbsInputFileDir #both values are identicial since the relative path was still creating issues with FMUs afterward...
    def getIntLoad(self, MainPath,LogFile):
        "get the internal load profil or value"
        # Returns either a constant internal load (W/m2) or the path of a
        # generated hourly profile file, depending on self.IntLoadType
        # ('Cste', 'winter', 'summer').
        # NOTE(review): MainPath is currently unused; the local name 'type'
        # shadows the builtin.
        #we should integrate the loads depending on the number of appartemnent in the building
        type = self.IntLoadType
        # Input_path = os.path.join(MainPath,'InputFiles')
        # #lets used StROBE package by defaults (average over 10 profile
        # IntLoad = os.path.join(Input_path, 'P_Mean_over_10.txt')
        try:
            IntLoad = self.ElecYearlyLoad #this value is in W\m2 prescies in DB_Data
        except:
            IntLoad = 0
        #now we compute power time series in order to match the measures form EPCs
        eleval = 0
        try :
            # sum every non-empty electric meter (kW) into a single value in W
            for x in self.EPCMeters['ElecLoad']:
                if self.EPCMeters['ElecLoad'][x]:
                    eleval += self.EPCMeters['ElecLoad'][x]*1000 #to convert kW in W
        except:
            pass
        if eleval>0:
            try:
                if 'Cste' in type:
                    IntLoad = eleval/self.EPHeatedArea/8760 #this value is thus in W/m2 #division by number of hours to convert Wh into W
                else:
                    # time-series load : write a sigmoid profile file and return its path
                    AbsInputFileDir,InputFileDir = self.isInputDir()
                    if 'winter' in type:
                        IntLoad = os.path.join(InputFileDir, self.name + '_winter.txt')
                        ProbGenerator.SigmoFile('winter', self.IntLoadCurveShape, eleval/self.EPHeatedArea * 100, IntLoad) #the *100 is because we have considered 100m2 for the previous file
                    if 'summer' in type:
                        IntLoad = os.path.join(InputFileDir, self.name + '_summer.txt')
                        ProbGenerator.SigmoFile('summer', self.IntLoadCurveShape, eleval / self.EPHeatedArea * 100,IntLoad) # the *100 is because we have considered 100m2 for the previous file
            except:
                msg = '[Int Load Error] Unable to write the internal load file...\n'
                #print(msg[:-1])
                GrlFct.Write2LogFile(msg, LogFile)
        return IntLoad
def CheckMultiBlocFootprint(blocs,tol =1):
    """Check that the blocs of a multi-bloc footprint are mutually adjacent and
    stitch their shared edges together.

    blocs : list of blocs, each a list of (x, y) vertexes (modified in place:
        matched vertexes are projected/snapped onto the partner edge, and
        missing intermediate vertexes are inserted into the partner bloc)
    tol : distance tolerance in meters for edge/vertex matching
    Returns (blocs, validMultibloc); validMultibloc is True for single-bloc
    input, or when at least one pair of parallel edges matches within tol.
    """
    validMultibloc = True
    if len(blocs)>1:
        validMultibloc = False
        # compare every ordered pair of distinct blocs
        for bloc1,bloc2 in itertools.product(blocs,repeat = 2):
            if bloc1 != bloc2: # NOTE(review): value comparison — identical duplicate blocs would be skipped
                for ptidx,pt in enumerate(bloc1):
                    # current edge of bloc1 (wraps around at the last vertex)
                    edge = [bloc1[ptidx],bloc1[(ptidx+1)%len(bloc1)]]
                    comEdges = []
                    for ptidx1,pt1 in enumerate(bloc2):
                        edge1 = [bloc2[ptidx1], bloc2[(ptidx1+1)%len(bloc2)]]
                        if is_parallel(edge,edge1,10) and confirmMatch(edge, edge1, tol):
                            validMultibloc = True
                            # NOTE(review): pt1 rebinds the inner loop variable as a flag here
                            pt1 = False
                            pt2 = False
                            # snap each end of 'edge' onto edge1 when close enough
                            if LineString(edge1).distance(Point(edge[0])) < tol:
                                edge[0] = point_on_line(edge1[0], edge1[1],edge[0])
                                edge[0],conf= CoordAdjustement(edge1, edge[0], tol)
                                pt1 = True
                            if LineString(edge1).distance(Point(edge[1])) < tol:
                                edge[1] = point_on_line(edge1[0], edge1[1],edge[1])
                                edge[1],conf = CoordAdjustement(edge1, edge[1], tol)
                                pt2 = True
                            if pt1 and pt2:
                                # keep common edges oriented consistently (flip anti-parallel ones)
                                if abs(getAngle(edge1, edge) -180) < 5:
                                    comEdges.append([edge[1],edge[0]])
                                else:
                                    comEdges.append(edge)
                            bloc1[ptidx] = edge[0]
                            bloc1[(ptidx + 1) % len(bloc1)] = edge[1]
                    #lets check if these nodes are also on bloc2
                    #first which bloc is concerned
                    for comEdge in comEdges:
                        if comEdge[0] in bloc2 and comEdge[1] not in bloc2:
                            index = bloc2.index(comEdge[0])+1
                            bloc2.insert(index,comEdge[1])
                            #bloc2 = bloc2[:index]+[comEdge[1]]+bloc2[index:]
                        if comEdge[1] in bloc2 and comEdge[0] not in bloc2:
                            index = bloc2.index(comEdge[1])
                            bloc2.insert(index,comEdge[0])
                            #bloc2 = bloc2[:index]+[comEdge[0]]+bloc2[index:]
    return blocs,validMultibloc
def point_on_line(a, b, p):
    """Orthogonally project point p onto the infinite line through a and b.

    a, b : two (x, y) points defining the line
    p : the (x, y) point to project
    Returns the projection as a tuple of floats.
    """
    import numpy as np
    start = np.array(a)
    end = np.array(b)
    target = np.array(p)
    direction = end - start
    offset = target - start
    # scalar coefficient of the offset projected onto the line direction
    scale = np.dot(offset, direction) / np.dot(direction, direction)
    return tuple(start + scale * direction)
def getAngle(line1,line2):
    """Return the unsigned angle, in degrees (0..180), between two 2D segments.

    line1, line2 : each a pair of (x, y) points.
    The cosine is rounded to 4 decimals before arccos, absorbing floating-point
    noise that could push it slightly outside [-1, 1].
    """
    import numpy as np
    v = np.array(line1[1][:2]) - np.array(line1[0][:2])
    w = np.array(line2[1][:2]) - np.array(line2[0][:2])
    cosine = round(v.dot(w) / (np.linalg.norm(v) * np.linalg.norm(w)), 4)
    return abs(np.rad2deg(np.arccos(cosine)))
def is_parallel(line1, line2, tol = 5):
    """Return True when the two segments are parallel or anti-parallel within
    tol degrees (angle close to 0 or to 180)."""
    angledeg = getAngle(line1, line2)
    return angledeg < tol or abs(angledeg - 180) < tol
def CoordAdjustement(edge,pt,tol):
    """Snap pt onto an end point of edge when it lies closer than tol.

    edge : pair of (x, y) vertexes; pt : candidate (x, y) point.
    Returns (point, snapped) where point is the matching edge end point when a
    snap occurred (first end point wins), otherwise pt unchanged.
    """
    candidate = Point(pt)
    for endpoint in (edge[0], edge[1]):
        if candidate.distance(Point(endpoint)) < tol:
            return endpoint, True
    return pt, False
def confirmMatch(Edge, Edge1,tol):
    """Return True when two (already parallel) edges face each other within tol.

    Edge, Edge1 : two-vertex segments; tol : distance tolerance in meters.
    A close pair is rejected when one segment merely starts on a vertex of the
    other (e.g. a shading wall leaving the building edge outward); a pair is
    also accepted when both vertexes of one segment lie on the other.
    """
    #this should be enough if both edges are already checked being //
    dist1 = min(LineString(Edge).distance(Point(Edge1[0])),LineString(Edge).distance(Point(Edge1[1])))
    dist2 = min(LineString(Edge1).distance(Point(Edge[0])), LineString(Edge1).distance(Point(Edge[1])))
    if dist1 <tol or dist2 < tol:
        #we want to avoid cases with exactly the same vertexes (shading going from the building edge to the outside)
        # NOTE(review): CoordAdjustement returns a non-empty (point, flag) tuple,
        # which is always truthy, so the 'or' chain below always yields its first
        # operand and the last two calls never contribute — four independent
        # checks may have been intended here.
        checkNode = [CoordAdjustement(Edge,Edge1[0],1),CoordAdjustement(Edge,Edge1[1],1) or CoordAdjustement(Edge1,Edge[0],1) or CoordAdjustement(Edge1,Edge[1],1)]
        if True not in [val[1] for val in checkNode]:
            return True
    #both shade vertex are on the edge
    dist1 = LineString(Edge).distance(Point(Edge1[0]))
    dist2 = LineString(Edge).distance(Point(Edge1[1]))
    if dist1 <tol and dist2 < tol:
        return True
    # both shade vertex are on the edge
    dist1 = LineString(Edge1).distance(Point(Edge[0]))
    dist2 = LineString(Edge1).distance(Point(Edge[1]))
    if dist1 < tol and dist2 < tol:
        return True
    return False
def checkShadeWithFootprint(AggregFootprint, ShadeWall,ShadeId,tol = 2):
    """Check whether a shading wall overlaps an edge of the aggregated footprint.

    AggregFootprint : closed list of (x, y) vertexes of the building footprint
    ShadeWall : two-vertex shading segment; its vertexes may be projected and
        snapped onto the matching footprint edge before being returned
    ShadeId : identifier of the shading element (debugging only)
    tol : distance tolerance (m) between points and edges
    Returns (confirmed, ShadeWall, OverlapCode) where OverlapCode is 0 for a
    full edge-to-edge overlap, 2 for a partial overlap with one common vertex,
    4 for a partial overlap with no common vertex (each raised by 1 by the
    caller when the wall is lower than the building), and 999 when the segment
    lies inside the footprint and must be dropped.
    Fix: removed trailing extraction residue that corrupted the final return
    statement (syntax error); logic is otherwise unchanged.
    """
    if ShadeId == 'V69467-8':
        a=1 # NOTE(review): leftover debugging breakpoint anchor, no effect
    # check if some shadingssurfaces are too close to the building
    # we consider the middle coordinate point fo the shading surface
    # if less than 1m than lets consider that the boundary conditions should be adiabatique instead of outdoor conditions (adjacent buildings)
    ShadeMidCoord = ((ShadeWall[0][0] + ShadeWall[1][0]) / 2,
                     (ShadeWall[0][1] + ShadeWall[1][1]) / 2)
    #the footprint is closed in order to enable a full loop around all edges (including the last one between first and last veretx
    #closedFootprint.append(AggregFootprint[0])
    confirmed = False
    OverlapCode = 0
    # this code is 0 for fulloverlap from edge to edge,
    # 2 for partial overlap with one commun edge and longer shading element,
    # 4 for partial overlap with no commun edge,
    # it is further increased by one if the height is below the building
    if min([Point(ShadeWall[0]).distance(Polygon(AggregFootprint)), Point(ShadeWall[1]).distance(Polygon(AggregFootprint))])< tol:
        for idx, node in enumerate(AggregFootprint[:-1]):
            #dist1 = LineString([AggregFootprint[idx], AggregFootprint[idx + 1]]).distance(LineString(ShadeWall))
            if is_parallel([AggregFootprint[idx], AggregFootprint[idx + 1]], ShadeWall):#if dist1 < 0.1:
                #first the segment direction shall be compute for the closest point if not equal
                Edge = [AggregFootprint[idx], AggregFootprint[idx + 1]]
                if confirmMatch(Edge, ShadeWall,tol): #the tolerance is between points and edge (either from the footprint or the
                    OverlapCode = 4
                    confirmed = True
                    # project both shade vertexes onto the footprint edge, then
                    # snap them to the edge end points when close enough
                    ShadeWall[0] = point_on_line(Edge[0], Edge[1],ShadeWall[0])
                    ShadeWall[0],CoorPt1 = CoordAdjustement(Edge, ShadeWall[0],tol) #the tol is about distance bewteen 2 vertexes
                    if CoorPt1:
                        OverlapCode = 2
                    ShadeWall[1] = point_on_line(Edge[0], Edge[1],ShadeWall[1])
                    ShadeWall[1], CoorPt2 = CoordAdjustement(Edge,ShadeWall[1],tol) #the tol is about distance bewteen 2 vertexes
                    if CoorPt2:
                        OverlapCode = 2
                    if CoorPt1 and CoorPt2: #it means that the shade's edge is exactly on a footprint's edge, no need to go further
                        OverlapCode = 0
                    return confirmed,ShadeWall,OverlapCode
    #if the middle point is inside the polygon (with a buffer zone of 1m), lets drop it
    reduceInsideArea = Polygon(AggregFootprint).buffer(distance = -1, join_style=2)
    if reduceInsideArea.contains(Point(ShadeMidCoord)):
        return False, ShadeWall, 999
    return confirmed,ShadeWall,OverlapCode
((20226, 20241), 'geomeppy.geom.polygons.Polygon2D', 'Polygon2D', (['bloc'], {}), '(bloc)\n', (20235, 20241), False, 'from geomeppy.geom.polygons import Polygon2D, Polygon3D, break_polygons\n'), ((35605, 35658), 'os.path.join', 'os.path.join', (['InputFileDir', "(self.name + '_winter.txt')"], {}), "(InputFileDir, self.name + '_winter.txt')\n", (35617, 35658), False, 'import os\n'), ((35683, 35788), 'CoreFiles.ProbGenerator.SigmoFile', 'ProbGenerator.SigmoFile', (['"""winter"""', 'self.IntLoadCurveShape', '(eleval / self.EPHeatedArea * 100)', 'IntLoad'], {}), "('winter', self.IntLoadCurveShape, eleval / self.\n EPHeatedArea * 100, IntLoad)\n", (35706, 35788), True, 'import CoreFiles.ProbGenerator as ProbGenerator\n'), ((35925, 35978), 'os.path.join', 'os.path.join', (['InputFileDir', "(self.name + '_summer.txt')"], {}), "(InputFileDir, self.name + '_summer.txt')\n", (35937, 35978), False, 'import os\n'), ((36003, 36108), 'CoreFiles.ProbGenerator.SigmoFile', 'ProbGenerator.SigmoFile', (['"""summer"""', 'self.IntLoadCurveShape', '(eleval / self.EPHeatedArea * 100)', 'IntLoad'], {}), "('summer', self.IntLoadCurveShape, eleval / self.\n EPHeatedArea * 100, IntLoad)\n", (36026, 36108), True, 'import CoreFiles.ProbGenerator as ProbGenerator\n'), ((42141, 42160), 'shapely.geometry.polygon.Point', 'Point', (['ShadeWall[0]'], {}), '(ShadeWall[0])\n', (42146, 42160), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((42197, 42216), 'shapely.geometry.polygon.Point', 'Point', (['ShadeWall[1]'], {}), '(ShadeWall[1])\n', (42202, 42216), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((9596, 9609), 'shapely.geometry.polygon.Polygon', 'Polygon', (['foot'], {}), '(foot)\n', (9603, 9609), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((13490, 13503), 'shapely.geometry.polygon.Polygon', 'Polygon', (['foot'], {}), '(foot)\n', (13497, 13503), False, 'from shapely.geometry.polygon import Polygon, 
Point, LineString\n'), ((15657, 15676), 'geomeppy.geom.polygons.Polygon2D', 'Polygon2D', (['polycoor'], {}), '(polycoor)\n', (15666, 15676), False, 'from geomeppy.geom.polygons import Polygon2D, Polygon3D, break_polygons\n'), ((16680, 16693), 'shapely.geometry.polygon.Polygon', 'Polygon', (['coor'], {}), '(coor)\n', (16687, 16693), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((28316, 28347), 'shapely.geometry.polygon.Polygon', 'Polygon', (['RelativeAgregFootprint'], {}), '(RelativeAgregFootprint)\n', (28323, 28347), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((28397, 28428), 'shapely.geometry.polygon.Polygon', 'Polygon', (['RelativeAgregFootprint'], {}), '(RelativeAgregFootprint)\n', (28404, 28428), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((39410, 39427), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (39424, 39427), True, 'import numpy as np\n'), ((39430, 39447), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (39444, 39447), True, 'import numpy as np\n'), ((10041, 10054), 'shapely.geometry.polygon.Polygon', 'Polygon', (['foot'], {}), '(foot)\n', (10048, 10054), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((27310, 27338), 'shapely.geometry.polygon.Polygon', 'Polygon', (['RemainingBlocs[idx]'], {}), '(RemainingBlocs[idx])\n', (27317, 27338), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((27279, 27303), 'shapely.geometry.polygon.Polygon', 'Polygon', (['AggregFootprint'], {}), '(AggregFootprint)\n', (27286, 27303), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((37169, 37183), 'shapely.geometry.polygon.Point', 'Point', (['edge[0]'], {}), '(edge[0])\n', (37174, 37183), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((37461, 37475), 'shapely.geometry.polygon.Point', 'Point', (['edge[1]'], {}), '(edge[1])\n', (37466, 
37475), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((37142, 37159), 'shapely.geometry.polygon.LineString', 'LineString', (['edge1'], {}), '(edge1)\n', (37152, 37159), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n'), ((37434, 37451), 'shapely.geometry.polygon.LineString', 'LineString', (['edge1'], {}), '(edge1)\n', (37444, 37451), False, 'from shapely.geometry.polygon import Polygon, Point, LineString\n')] |
'''
Visualization for RGB results.
'''
import sys, os
cur_file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_file_path, '..'))
import importlib, time, math, shutil, csv, random
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from utils.config import SplitLineParser
from utils.transforms import rotation_matrix_to_angle_axis, batch_rodrigues
from utils.torch import load_state
from utils.logging import mkdir
from fitting.fitting_utils import load_res, prep_res, run_smpl
from fitting.eval_utils import SMPL_SIZES
from body_model.body_model import BodyModel
from body_model.utils import SMPL_PATH, SMPLH_PATH, SMPL_JOINTS, SMPLX_PATH
from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections
from viz.mesh_viewer import COMPRESS_PARAMS
# Number of non-root SMPL body joints (root joint excluded).
J_BODY = len(SMPL_JOINTS)-1 # no root
# Stem names of the .npz result files produced by the fitting pipeline;
# each is loaded from a per-sequence result directory via load_res().
GT_RES_NAME = 'gt_results'
PRED_RES_NAME = 'stage3_results'
PRED_PRIOR_RES_NAME = 'stage3_results_prior'
STAGES_RES_NAMES = ['stage1_results', 'stage2_results', 'stage3_init_results'] # results in camera frame
STAGES_PRIOR_RES_NAMES = ['stage2_results_prior', 'stage3_init_results_prior'] # results in prior frame (w.r.t final floor fit)
FINAL_RES_NAME = 'final_results'
FINAL_PRIOR_RES_NAME = 'final_results_prior'
OBS_NAME = 'observations'
# Frame rate used for all rendered output videos.
FPS = 30
# visualization options
GROUND_ALPHA = 1.0
BODY_ALPHA = None # use to make body mesh translucent
IM_EXTN = 'jpg' # png # to use for rendering jpg saves a lot of space
def parse_args(argv):
    """Parse command-line options for the visualization script.

    Args:
        argv: list of argument strings, typically ``sys.argv[1:]``.

    Returns:
        argparse.Namespace holding the recognized options; any
        unrecognized arguments are silently ignored (parse_known_args).
    """
    parser = SplitLineParser(fromfile_prefix_chars='@', allow_abbrev=False)

    parser.add_argument('--results', type=str, required=True, help='Path to the results_out directory from fitting to run viz on.')
    parser.add_argument('--out', type=str, required=True, help='Path to save visualizations to.')

    # visualization options
    parser.add_argument('--viz-final-only', dest='viz_final_only', action='store_true', help="If given only visualize the final full sequence result and not the subsequences.")
    parser.set_defaults(viz_final_only=False)
    parser.add_argument('--viz-stages', dest='viz_stages', action='store_true', help="If given, visualizes intermediate optimization stages and comparison to final pred.")
    parser.set_defaults(viz_stages=False)
    parser.add_argument('--viz-prior-frame', dest='viz_prior_frame', action='store_true', help="If given, also visualizes results in the HuMoR canonical coordinate frame.")
    parser.set_defaults(viz_prior_frame=False)
    parser.add_argument('--viz-obs-2d', dest='viz_obs_2d', action='store_true', help="If given, visualizes 2D joint observations on top of og video")
    parser.set_defaults(viz_obs_2d=False)
    parser.add_argument('--viz-no-render-cam-body', dest='viz_render_cam_body', action='store_false', help="If given, does not render body mesh from camera view")
    parser.set_defaults(viz_render_cam_body=True)
    parser.add_argument('--viz-pred-floor', dest='viz_pred_floor', action='store_true', help="Render the predicted floor from the camera view.")
    parser.set_defaults(viz_pred_floor=False)
    parser.add_argument('--viz-contacts', dest='viz_contacts', action='store_true', help="Render predicted contacts on the joints")
    parser.set_defaults(viz_contacts=False)
    parser.add_argument('--viz-wireframe', dest='viz_wireframe', action='store_true', help="Render body and floor in wireframe")
    parser.set_defaults(viz_wireframe=False)
    # BUGFIX: help text previously read "...at this given frame interval interval."
    parser.add_argument('--viz-bodies-static', type=int, default=None, help="If given, renders all body predictions at once at this given frame interval.")
    parser.add_argument('--viz-no-bg', dest='viz_bg', action='store_false', help="If given will not overlay the rendering on top of OG video.")
    parser.set_defaults(viz_bg=True)
    parser.add_argument('--viz-render-width', type=int, default=1280, help="Width of rendered output images")
    # BUGFIX: help text previously said "Width" for the height option.
    parser.add_argument('--viz-render-height', type=int, default=720, help="Height of rendered output images")

    parser.add_argument('--shuffle', dest='shuffle', action='store_true', help="Shuffles viz ordering")
    parser.set_defaults(shuffle=False)
    parser.add_argument('--flip-img', dest='flip_img', action='store_true', help="Flips the loaded image about y-axis. This is useful for PROX result.")

    known_args, unknown_args = parser.parse_known_args(argv)

    return known_args
def main(args):
    """Render qualitative visualizations for every fitting result under args.results.

    For each per-sequence result directory this loads the stage-3 HuMoR
    prediction (and, if requested, intermediate optimization stages and
    prior-frame results), runs the parameters through the SMPL body model,
    and renders per-frame images plus videos from the camera view (and
    optionally the canonical prior frame), along with side-by-side
    comparison videos, all saved under args.out.
    """
    print(args)
    mkdir(args.out)
    qual_out_path = args.out
    D_IMW, D_IMH = args.viz_render_width, args.viz_render_height
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    # collect our results directories (skip hidden entries)
    all_result_dirs = [os.path.join(args.results, f) for f in sorted(os.listdir(args.results)) if f[0] != '.']
    all_result_dirs = [f for f in all_result_dirs if os.path.isdir(f)]
    if args.shuffle:
        random.seed(0)
        random.shuffle(all_result_dirs)
    print(all_result_dirs)
    seq_name_list = []
    body_model_dict = dict()
    for residx, result_dir in enumerate(all_result_dirs):
        seq_name = result_dir.split('/')[-1]
        is_final_res = seq_name == 'final_results'
        if not is_final_res:
            if args.viz_final_only:
                continue
            seq_name = '_'.join(result_dir.split('/')[-1].split('_')[:-1])
        print('Visualizing %s %d / %d...' % (seq_name, residx, len(all_result_dirs)))
        obs_dict = load_res(result_dir, OBS_NAME + '.npz')
        cur_img_paths = obs_dict['img_paths'] # used to load in results from baselines
        cur_frame_names = ['.'.join(f.split('/')[-1].split('.')[:-1]) for f in cur_img_paths]
        # load in humor prediction
        pred_res = load_res(result_dir, PRED_RES_NAME + '.npz')
        if pred_res is None:
            print('Could not find final pred (stage 3) results for %s, skipping...' % (seq_name))
            continue
        T = pred_res['trans'].shape[0]
        # check if have any nans valid
        for smpk in SMPL_SIZES.keys():
            cur_valid = (torch.sum(torch.logical_not(torch.isfinite(torch.Tensor(pred_res[smpk])))).item() == 0)
            if not cur_valid:
                print('Found NaNs in prediction for %s, filling with zeros...' % (smpk))
                # print(pred_res[smpk].shape)
                # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in
                # 1.24 -- use the builtin float (same dtype: float64).
                if smpk == 'betas':
                    pred_res[smpk] = np.zeros((pred_res[smpk].shape[0]), dtype=float)
                else:
                    pred_res[smpk] = np.zeros((T, pred_res[smpk].shape[1]), dtype=float)
        floor_valid = (torch.sum(torch.logical_not(torch.isfinite(torch.Tensor(pred_res['floor_plane'])))).item() == 0)
        if not floor_valid:
            print('Predicted floor is NaN, replacing with up.')
            pred_res['floor_plane'] = np.array([0.0, -1.0, 0.0, 0.0])
        pred_res = prep_res(pred_res, device, T)
        num_pred_betas = pred_res['betas'].size(1)
        pred_floor_plane = torch.Tensor(pred_res['floor_plane']).to(device)
        # humor prediction in prior frame
        pred_res_prior = None
        if args.viz_prior_frame:
            pred_res_prior = load_res(result_dir, PRED_PRIOR_RES_NAME + '.npz')
            if pred_res_prior is None:
                print('Could not find final prior pred (stage 3) results for %s, skipping...' % (seq_name))
                continue
            pred_res_prior = prep_res(pred_res_prior, device, T)
        # load stages results if needed
        cur_viz_stages = args.viz_stages and not is_final_res
        cur_stages_res = None
        if cur_viz_stages:
            cur_stages_res = dict()
            for stage_name in STAGES_RES_NAMES:
                stage_res = load_res(result_dir, stage_name + '.npz')
                if stage_res is None:
                    print('Could not find results for stage %s of %s, skipping...' % (stage_name, seq_name))
                    continue
                cur_stages_res[stage_name] = prep_res(stage_res, device, T)
        # load prior stages results if needed
        cur_stages_prior_res = None
        if args.viz_prior_frame and cur_viz_stages:
            cur_stages_prior_res = dict()
            for stage_name in STAGES_PRIOR_RES_NAMES:
                stage_res = load_res(result_dir, stage_name + '.npz')
                if stage_res is None:
                    print('Could not find results for stage %s of %s, skipping...' % (stage_name, seq_name))
                    continue
                cur_stages_prior_res[stage_name] = prep_res(stage_res, device, T)
        #
        # create body models for each
        #
        meta_path = os.path.join(result_dir, 'meta.txt')
        if not os.path.exists(meta_path):
            print('Could not find metadata for %s, skipping...' % (seq_name))
            continue
        optim_bm_path = gt_bm_path = None
        with open(meta_path, 'r') as f:
            optim_bm_str = f.readline().strip()
            optim_bm_path = optim_bm_str.split(' ')[1]
            gt_bm_str = f.readline().strip()
            gt_bm_path = gt_bm_str.split(' ')[1]
        # humor model -- cache one BodyModel per SMPL file path so subsequences reuse it
        pred_bm = None
        if optim_bm_path not in body_model_dict:
            pred_bm = BodyModel(bm_path=optim_bm_path,
                            num_betas=num_pred_betas,
                            batch_size=T).to(device)
            if not is_final_res:
                # final results will be different length, so want to re-load for subsequences
                body_model_dict[optim_bm_path] = pred_bm
        if not is_final_res:
            pred_bm = body_model_dict[optim_bm_path]
        # we are using this sequence for sure
        seq_name_list.append(seq_name)
        # run through SMPL
        pred_body = run_smpl(pred_res, pred_bm)
        stages_body = None
        if cur_stages_res is not None:
            stages_body = dict()
            for k, v in cur_stages_res.items():
                stages_body[k] = run_smpl(v, pred_bm)
                # get body smpl joints
                stage_body_joints = stages_body[k].Jtr[:, :len(SMPL_JOINTS)]
                cur_stages_res[k]['joints3d_smpl'] = stage_body_joints
        # prior frame through SMPL
        pred_prior_body = None
        if pred_res_prior is not None:
            pred_prior_body = run_smpl(pred_res_prior, pred_bm)
        stages_prior_body = None
        if cur_stages_prior_res is not None:
            stages_prior_body = dict()
            for k, v in cur_stages_prior_res.items():
                stages_prior_body[k] = run_smpl(v, pred_bm)
        # load in image frames (BGR -> RGB, resized to render resolution, in [0, 1])
        IMW, IMH = None, None
        img_arr = np.zeros((T, D_IMH, D_IMW, 3), dtype=np.float32)
        for imidx, img_path in enumerate(cur_img_paths):
            img = cv2.imread(img_path)
            if args.flip_img:
                img = cv2.flip(img, 1)
            IMH, IMW, _ = img.shape
            img = cv2.resize(img, (D_IMW, D_IMH), interpolation=cv2.INTER_LINEAR)
            img = img.astype(np.float32)[:, :, ::-1] / 255.0
            img_arr[imidx] = img
        # load in camera info
        gt_res = None
        gt_res = load_res(result_dir, GT_RES_NAME + '.npz')
        if gt_res is None:
            print('Could not find GT data for %s, skipping...' % (seq_name))
            continue
        # get camera intrinsics
        cam_fx = gt_res['cam_mtx'][0, 0]
        cam_fy = gt_res['cam_mtx'][1, 1]
        cam_cx = gt_res['cam_mtx'][0, 2]
        cam_cy = gt_res['cam_mtx'][1, 2]
        cam_intrins = (cam_fx, cam_fy, cam_cx, cam_cy)
        # print(cam_intrins)
        # scale intrinsics to the downsampled render resolution
        x_frac = float(D_IMW) / IMW
        y_frac = float(D_IMH) / IMH
        cam_intrins_down = (cam_fx*x_frac, cam_fy*y_frac, cam_cx*x_frac, cam_cy*y_frac)
        #
        # Qualitative evaluation
        #
        cur_qual_out_path = os.path.join(qual_out_path, seq_name)
        mkdir(cur_qual_out_path)
        # always use final fit ground plane for visualization
        viz_ground_plane = None
        if args.viz_pred_floor:
            viz_ground_plane = pred_res['floor_plane'] if args.viz_pred_floor else None
        render_ground_plane = viz_ground_plane is not None
        viz_points = None
        viz_point_color = [0.0, 1.0, 0.0]
        if args.viz_obs_2d:
            viz_joints2d = obs_dict['joints2d']
            # scale 2D joint observations to the render resolution
            viz_joints2d[:,:,0] = viz_joints2d[:,:,0]*x_frac
            viz_joints2d[:,:,1] = viz_joints2d[:,:,1]*y_frac
        # use contacts if desired
        viz_contacts = pred_res['contacts'] if args.viz_contacts else None
        # always render OG video
        og_out_path = os.path.join(cur_qual_out_path, 'og_video')
        mkdir(og_out_path)
        for fidx, img in enumerate(img_arr):
            img = (img*255.0).astype(np.uint8)
            img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imwrite(os.path.join(og_out_path, 'frame_%08d.%s' % (fidx, IM_EXTN)), img_bgr, COMPRESS_PARAMS)
        create_video(og_out_path + '/frame_%08d.' + '%s' % (IM_EXTN), og_out_path + '.mp4', FPS)
        mask_viz = None
        scene_viz = None
        img_viz = img_arr if args.viz_bg else None
        if args.viz_obs_2d:
            # visualize Openpose (with really high confidence) on top of video
            og_2d_obs_out_path = os.path.join(cur_qual_out_path, 'og_video_2d_obs')
            mkdir(og_2d_obs_out_path)
            import matplotlib.pyplot as plt
            fig = plt.figure(figsize=(float(D_IMW)*0.01, float(D_IMH)*0.01), dpi=100, frameon=False)
            for fidx, img in enumerate(img_arr):
                plt.imshow(img, aspect='auto')
                valid_mask = viz_joints2d[fidx, :, 2] > 0.7
                plt.scatter(viz_joints2d[fidx, valid_mask, 0], viz_joints2d[fidx, valid_mask, 1], c='lime', s=100)
                # (removed unused `ax = plt.gca()` -- the axes were never touched)
                plt.axis('off')
                cur_joint2d_out = os.path.join(og_2d_obs_out_path, 'frame_%08d.png' % (fidx))
                plt.savefig(cur_joint2d_out, bbox_inches='tight', pad_inches=0)
                plt.clf()
            plt.close(fig)
            create_video(og_2d_obs_out_path + '/frame_%08d.' + '%s' % ('png'), og_2d_obs_out_path + '.mp4', FPS)
        # render camera-view prediction
        pred_out_path = os.path.join(cur_qual_out_path, 'final_pred')
        viz_smpl_seq(pred_body, imw=D_IMW, imh=D_IMH, fps=FPS,
                    render_body=args.viz_render_cam_body,
                    render_bodies_static=args.viz_bodies_static,
                    render_points_static=args.viz_bodies_static,
                    render_joints=(args.viz_wireframe or BODY_ALPHA is not None),
                    render_skeleton=BODY_ALPHA is not None,
                    skel_color=[0.5, 0.5, 0.5],
                    joint_rad=0.02,
                    render_ground=render_ground_plane,
                    ground_plane=viz_ground_plane,
                    ground_alpha=GROUND_ALPHA,
                    body_alpha=BODY_ALPHA,
                    static_meshes=scene_viz,
                    points_seq=viz_points,
                    point_color=viz_point_color,
                    contacts=viz_contacts,
                    use_offscreen=True,
                    out_path=pred_out_path,
                    wireframe=args.viz_wireframe,
                    RGBA=True,
                    point_rad=0.004,
                    follow_camera=False,
                    camera_intrinsics=cam_intrins_down,
                    img_seq=img_viz,
                    mask_seq=mask_viz,
                    img_extn=IM_EXTN)
        create_video(pred_out_path + '/frame_%08d.' + '%s' % (IM_EXTN), pred_out_path + '.mp4', FPS)
        # always comparison of prediction and OG video
        og_pred_comp_path = os.path.join(cur_qual_out_path, 'comp_og_final_pred')
        create_multi_comparison_images([og_out_path, pred_out_path],
                                        og_pred_comp_path,
                                        ['Input', 'Final'],
                                        extn=IM_EXTN)
        create_video(og_pred_comp_path + '/frame_%08d.' + '%s' % (IM_EXTN), og_pred_comp_path + '.mp4', FPS)
        # render all stages then comparison of og vid, stage2 and final
        if cur_viz_stages:
            for k, stage_body in stages_body.items():
                if not cur_viz_stages and k != STAGES_RES_NAMES[1]:
                    continue # only want stage 2 in this case
                stage_out_path = os.path.join(cur_qual_out_path, k)
                viz_smpl_seq(stage_body, imw=D_IMW, imh=D_IMH, fps=FPS,
                            render_body=True,
                            render_bodies_static=args.viz_bodies_static,
                            render_joints=args.viz_wireframe,
                            render_skeleton=False,
                            render_ground=render_ground_plane,
                            ground_plane=viz_ground_plane,
                            ground_alpha=GROUND_ALPHA,
                            body_alpha=BODY_ALPHA,
                            static_meshes=scene_viz,
                            points_seq=viz_points,
                            point_color=viz_point_color,
                            use_offscreen=True,
                            out_path=stage_out_path,
                            wireframe=args.viz_wireframe,
                            RGBA=True,
                            point_rad=0.004,
                            follow_camera=False,
                            camera_intrinsics=cam_intrins_down,
                            img_seq=img_viz,
                            mask_seq=mask_viz,
                            img_extn=IM_EXTN)
                create_video(stage_out_path + '/frame_%08d.' + '%s' % (IM_EXTN), stage_out_path + '.mp4', FPS)
        # create comparison
        stage2_out_path = os.path.join(cur_qual_out_path, STAGES_RES_NAMES[1])
        if cur_viz_stages:
            pred_stage_comp_path = os.path.join(cur_qual_out_path, 'comp_final_pred_stages')
            create_multi_comparison_images([og_out_path, stage2_out_path, pred_out_path],
                                            pred_stage_comp_path,
                                            ['Input', 'Stage2', 'Final'],
                                            extn=IM_EXTN)
            create_video(pred_stage_comp_path + '/frame_%08d.' + '%s' % (IM_EXTN), pred_stage_comp_path + '.mp4', FPS)
        del img_viz
        del img_arr
        # repeat in prior frame for everything
        if args.viz_prior_frame:
            cam_rot = None
            prior_cam_offset = [0.0, 2.2, 0.9]
            prior_frame_use_follow = True
            # final prediction
            # NOTE(review): prior-frame renders are square (imw=D_IMH) -- presumably
            # intentional for the canonical-frame follow camera; confirm if changed.
            pred_prior_out_path = os.path.join(cur_qual_out_path, 'final_pred_prior')
            viz_smpl_seq(pred_prior_body, imw=D_IMH, imh=D_IMH, fps=FPS,
                        render_body=True,
                        render_bodies_static=args.viz_bodies_static,
                        render_joints=(args.viz_wireframe or BODY_ALPHA is not None),
                        render_skeleton=BODY_ALPHA is not None,
                        body_alpha=BODY_ALPHA,
                        skel_color=[0.5, 0.5, 0.5],
                        joint_rad=0.02,
                        render_ground=True,
                        contacts=viz_contacts,
                        use_offscreen=True,
                        out_path=pred_prior_out_path,
                        wireframe=args.viz_wireframe,
                        RGBA=True,
                        follow_camera=prior_frame_use_follow,
                        cam_offset=prior_cam_offset,
                        cam_rot=cam_rot,
                        img_extn=IM_EXTN)
            create_video(pred_prior_out_path + '/frame_%08d.' + '%s' % (IM_EXTN), pred_prior_out_path + '.mp4', FPS)
            # always comparison of prediction and OG video
            og_pred_prior_comp_path = os.path.join(cur_qual_out_path, 'comp_og_final_pred_prior')
            create_multi_comparison_images([og_out_path, pred_out_path, pred_prior_out_path],
                                            og_pred_prior_comp_path,
                                            ['Input', 'FinalCam', 'FinalPrior'],
                                            extn=IM_EXTN)
            create_video(og_pred_prior_comp_path + '/frame_%08d.' + '%s' % (IM_EXTN), og_pred_prior_comp_path + '.mp4', FPS)
            # render all stages then comparison of og vid, stage2 and final
            if cur_viz_stages:
                for k, stage_prior_body in stages_prior_body.items():
                    if not cur_viz_stages and k != STAGES_PRIOR_RES_NAMES[0]:
                        continue # only want stage 2 in this case
                    stage_prior_out_path = os.path.join(cur_qual_out_path, k)
                    viz_smpl_seq(stage_prior_body, imw=D_IMH, imh=D_IMH, fps=FPS,
                                render_body=True,
                                render_bodies_static=args.viz_bodies_static,
                                render_joints=args.viz_wireframe,
                                render_skeleton=False,
                                render_ground=True,
                                use_offscreen=True,
                                out_path=stage_prior_out_path,
                                wireframe=args.viz_wireframe,
                                RGBA=True,
                                follow_camera=prior_frame_use_follow,
                                cam_offset=prior_cam_offset,
                                cam_rot=cam_rot,
                                img_extn=IM_EXTN)
                    create_video(stage_prior_out_path + '/frame_%08d.' + '%s' % (IM_EXTN), stage_prior_out_path + '.mp4', FPS)
            # create comparison
            stage2_prior_out_path = os.path.join(cur_qual_out_path, STAGES_PRIOR_RES_NAMES[0])
            if cur_viz_stages:
                pred_stage_prior_comp_path = os.path.join(cur_qual_out_path, 'comp_final_pred_prior_stages')
                create_multi_comparison_images([og_out_path, stage2_prior_out_path, pred_prior_out_path],
                                                pred_stage_prior_comp_path,
                                                ['Input', 'Stage2Prior', 'FinalPrior'],
                                                extn=IM_EXTN)
                create_video(pred_stage_prior_comp_path + '/frame_%08d.' + '%s' % (IM_EXTN), pred_stage_prior_comp_path + '.mp4', FPS)
        del pred_bm
if __name__ == '__main__':
    # Script entry point: parse CLI flags, then run the visualization loop.
    main(parse_args(sys.argv[1:]))
"viz.utils.create_multi_comparison_images",
"numpy.array",
"torch.cuda.is_available",
"viz.utils.create_video",
"os.path.exists",
"matplotlib.pyplot.imshow",
"os.listdir",
"matplotlib.pyplot.close",
"os.path.isdir",
"utils.config.SplitLineParser",
"matplotlib.pyplot.scatter",
"matplotlib.pyplo... | [((87, 113), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import sys, os\n'), ((131, 164), 'os.path.join', 'os.path.join', (['cur_file_path', '""".."""'], {}), "(cur_file_path, '..')\n", (143, 164), False, 'import sys, os\n'), ((1648, 1710), 'utils.config.SplitLineParser', 'SplitLineParser', ([], {'fromfile_prefix_chars': '"""@"""', 'allow_abbrev': '(False)'}), "(fromfile_prefix_chars='@', allow_abbrev=False)\n", (1663, 1710), False, 'from utils.config import SplitLineParser\n'), ((4602, 4617), 'utils.logging.mkdir', 'mkdir', (['args.out'], {}), '(args.out)\n', (4607, 4617), False, 'from utils.logging import mkdir\n'), ((4752, 4777), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4775, 4777), False, 'import torch\n'), ((4726, 4748), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4738, 4748), False, 'import torch\n'), ((4783, 4802), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4795, 4802), False, 'import torch\n'), ((4865, 4894), 'os.path.join', 'os.path.join', (['args.results', 'f'], {}), '(args.results, f)\n', (4877, 4894), False, 'import sys, os\n'), ((5053, 5067), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (5064, 5067), False, 'import importlib, time, math, shutil, csv, random\n'), ((5076, 5107), 'random.shuffle', 'random.shuffle', (['all_result_dirs'], {}), '(all_result_dirs)\n', (5090, 5107), False, 'import importlib, time, math, shutil, csv, random\n'), ((5613, 5652), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(OBS_NAME + '.npz')"], {}), "(result_dir, OBS_NAME + '.npz')\n", (5621, 5652), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((5889, 5933), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(PRED_RES_NAME + '.npz')"], {}), "(result_dir, PRED_RES_NAME + '.npz')\n", (5897, 5933), False, 'from fitting.fitting_utils import load_res, 
prep_res, run_smpl\n'), ((6180, 6197), 'fitting.eval_utils.SMPL_SIZES.keys', 'SMPL_SIZES.keys', ([], {}), '()\n', (6195, 6197), False, 'from fitting.eval_utils import SMPL_SIZES\n'), ((7018, 7047), 'fitting.fitting_utils.prep_res', 'prep_res', (['pred_res', 'device', 'T'], {}), '(pred_res, device, T)\n', (7026, 7047), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((8815, 8851), 'os.path.join', 'os.path.join', (['result_dir', '"""meta.txt"""'], {}), "(result_dir, 'meta.txt')\n", (8827, 8851), False, 'import sys, os\n'), ((9929, 9956), 'fitting.fitting_utils.run_smpl', 'run_smpl', (['pred_res', 'pred_bm'], {}), '(pred_res, pred_bm)\n', (9937, 9956), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((10836, 10884), 'numpy.zeros', 'np.zeros', (['(T, D_IMH, D_IMW, 3)'], {'dtype': 'np.float32'}), '((T, D_IMH, D_IMW, 3), dtype=np.float32)\n', (10844, 10884), True, 'import numpy as np\n'), ((11332, 11374), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(GT_RES_NAME + '.npz')"], {}), "(result_dir, GT_RES_NAME + '.npz')\n", (11340, 11374), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((12031, 12068), 'os.path.join', 'os.path.join', (['qual_out_path', 'seq_name'], {}), '(qual_out_path, seq_name)\n', (12043, 12068), False, 'import sys, os\n'), ((12077, 12101), 'utils.logging.mkdir', 'mkdir', (['cur_qual_out_path'], {}), '(cur_qual_out_path)\n', (12082, 12101), False, 'from utils.logging import mkdir\n'), ((12807, 12850), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""og_video"""'], {}), "(cur_qual_out_path, 'og_video')\n", (12819, 12850), False, 'import sys, os\n'), ((12859, 12877), 'utils.logging.mkdir', 'mkdir', (['og_out_path'], {}), '(og_out_path)\n', (12864, 12877), False, 'from utils.logging import mkdir\n'), ((13149, 13239), 'viz.utils.create_video', 'create_video', (["(og_out_path + '/frame_%08d.' 
+ '%s' % IM_EXTN)", "(og_out_path + '.mp4')", 'FPS'], {}), "(og_out_path + '/frame_%08d.' + '%s' % IM_EXTN, og_out_path +\n '.mp4', FPS)\n", (13161, 13239), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((14471, 14516), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""final_pred"""'], {}), "(cur_qual_out_path, 'final_pred')\n", (14483, 14516), False, 'import sys, os\n'), ((14525, 15330), 'viz.utils.viz_smpl_seq', 'viz_smpl_seq', (['pred_body'], {'imw': 'D_IMW', 'imh': 'D_IMH', 'fps': 'FPS', 'render_body': 'args.viz_render_cam_body', 'render_bodies_static': 'args.viz_bodies_static', 'render_points_static': 'args.viz_bodies_static', 'render_joints': '(args.viz_wireframe or BODY_ALPHA is not None)', 'render_skeleton': '(BODY_ALPHA is not None)', 'skel_color': '[0.5, 0.5, 0.5]', 'joint_rad': '(0.02)', 'render_ground': 'render_ground_plane', 'ground_plane': 'viz_ground_plane', 'ground_alpha': 'GROUND_ALPHA', 'body_alpha': 'BODY_ALPHA', 'static_meshes': 'scene_viz', 'points_seq': 'viz_points', 'point_color': 'viz_point_color', 'contacts': 'viz_contacts', 'use_offscreen': '(True)', 'out_path': 'pred_out_path', 'wireframe': 'args.viz_wireframe', 'RGBA': '(True)', 'point_rad': '(0.004)', 'follow_camera': '(False)', 'camera_intrinsics': 'cam_intrins_down', 'img_seq': 'img_viz', 'mask_seq': 'mask_viz', 'img_extn': 'IM_EXTN'}), '(pred_body, imw=D_IMW, imh=D_IMH, fps=FPS, render_body=args.\n viz_render_cam_body, render_bodies_static=args.viz_bodies_static,\n render_points_static=args.viz_bodies_static, render_joints=args.\n viz_wireframe or BODY_ALPHA is not None, render_skeleton=BODY_ALPHA is not\n None, skel_color=[0.5, 0.5, 0.5], joint_rad=0.02, render_ground=\n render_ground_plane, ground_plane=viz_ground_plane, ground_alpha=\n GROUND_ALPHA, body_alpha=BODY_ALPHA, static_meshes=scene_viz,\n points_seq=viz_points, 
point_color=viz_point_color, contacts=\n viz_contacts, use_offscreen=True, out_path=pred_out_path, wireframe=\n args.viz_wireframe, RGBA=True, point_rad=0.004, follow_camera=False,\n camera_intrinsics=cam_intrins_down, img_seq=img_viz, mask_seq=mask_viz,\n img_extn=IM_EXTN)\n', (14537, 15330), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((15891, 15985), 'viz.utils.create_video', 'create_video', (["(pred_out_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(pred_out_path + '.mp4')", 'FPS'], {}), "(pred_out_path + '/frame_%08d.' + '%s' % IM_EXTN, pred_out_path +\n '.mp4', FPS)\n", (15903, 15985), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((16068, 16121), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""comp_og_final_pred"""'], {}), "(cur_qual_out_path, 'comp_og_final_pred')\n", (16080, 16121), False, 'import sys, os\n'), ((16130, 16247), 'viz.utils.create_multi_comparison_images', 'create_multi_comparison_images', (['[og_out_path, pred_out_path]', 'og_pred_comp_path', "['Input', 'Final']"], {'extn': 'IM_EXTN'}), "([og_out_path, pred_out_path],\n og_pred_comp_path, ['Input', 'Final'], extn=IM_EXTN)\n", (16160, 16247), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((16349, 16452), 'viz.utils.create_video', 'create_video', (["(og_pred_comp_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(og_pred_comp_path + '.mp4')", 'FPS'], {}), "(og_pred_comp_path + '/frame_%08d.' 
+ '%s' % IM_EXTN, \n og_pred_comp_path + '.mp4', FPS)\n", (16361, 16452), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((5006, 5022), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (5019, 5022), False, 'import sys, os\n'), ((6966, 6997), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0, 0.0]'], {}), '([0.0, -1.0, 0.0, 0.0])\n', (6974, 6997), True, 'import numpy as np\n'), ((7311, 7361), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(PRED_PRIOR_RES_NAME + '.npz')"], {}), "(result_dir, PRED_PRIOR_RES_NAME + '.npz')\n", (7319, 7361), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((7571, 7606), 'fitting.fitting_utils.prep_res', 'prep_res', (['pred_res_prior', 'device', 'T'], {}), '(pred_res_prior, device, T)\n', (7579, 7606), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((8867, 8892), 'os.path.exists', 'os.path.exists', (['meta_path'], {}), '(meta_path)\n', (8881, 8892), False, 'import sys, os\n'), ((10490, 10523), 'fitting.fitting_utils.run_smpl', 'run_smpl', (['pred_res_prior', 'pred_bm'], {}), '(pred_res_prior, pred_bm)\n', (10498, 10523), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((10960, 10980), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (10970, 10980), False, 'import cv2\n'), ((11104, 11167), 'cv2.resize', 'cv2.resize', (['img', '(D_IMW, D_IMH)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (D_IMW, D_IMH), interpolation=cv2.INTER_LINEAR)\n', (11114, 11167), False, 'import cv2\n'), ((12992, 13028), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (13004, 13028), False, 'import cv2\n'), ((13481, 13531), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""og_video_2d_obs"""'], {}), "(cur_qual_out_path, 'og_video_2d_obs')\n", (13493, 13531), 
False, 'import sys, os\n'), ((13544, 13569), 'utils.logging.mkdir', 'mkdir', (['og_2d_obs_out_path'], {}), '(og_2d_obs_out_path)\n', (13549, 13569), False, 'from utils.logging import mkdir\n'), ((14261, 14275), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14270, 14275), True, 'import matplotlib.pyplot as plt\n'), ((14297, 14400), 'viz.utils.create_video', 'create_video', (["(og_2d_obs_out_path + '/frame_%08d.' + '%s' % 'png')", "(og_2d_obs_out_path + '.mp4')", 'FPS'], {}), "(og_2d_obs_out_path + '/frame_%08d.' + '%s' % 'png', \n og_2d_obs_out_path + '.mp4', FPS)\n", (14309, 14400), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((18163, 18215), 'os.path.join', 'os.path.join', (['cur_qual_out_path', 'STAGES_RES_NAMES[1]'], {}), '(cur_qual_out_path, STAGES_RES_NAMES[1])\n', (18175, 18215), False, 'import sys, os\n'), ((19038, 19089), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""final_pred_prior"""'], {}), "(cur_qual_out_path, 'final_pred_prior')\n", (19050, 19089), False, 'import sys, os\n'), ((19102, 19659), 'viz.utils.viz_smpl_seq', 'viz_smpl_seq', (['pred_prior_body'], {'imw': 'D_IMH', 'imh': 'D_IMH', 'fps': 'FPS', 'render_body': '(True)', 'render_bodies_static': 'args.viz_bodies_static', 'render_joints': '(args.viz_wireframe or BODY_ALPHA is not None)', 'render_skeleton': '(BODY_ALPHA is not None)', 'body_alpha': 'BODY_ALPHA', 'skel_color': '[0.5, 0.5, 0.5]', 'joint_rad': '(0.02)', 'render_ground': '(True)', 'contacts': 'viz_contacts', 'use_offscreen': '(True)', 'out_path': 'pred_prior_out_path', 'wireframe': 'args.viz_wireframe', 'RGBA': '(True)', 'follow_camera': 'prior_frame_use_follow', 'cam_offset': 'prior_cam_offset', 'cam_rot': 'cam_rot', 'img_extn': 'IM_EXTN'}), '(pred_prior_body, imw=D_IMH, imh=D_IMH, fps=FPS, render_body=\n True, render_bodies_static=args.viz_bodies_static, 
render_joints=args.\n viz_wireframe or BODY_ALPHA is not None, render_skeleton=BODY_ALPHA is not\n None, body_alpha=BODY_ALPHA, skel_color=[0.5, 0.5, 0.5], joint_rad=0.02,\n render_ground=True, contacts=viz_contacts, use_offscreen=True, out_path\n =pred_prior_out_path, wireframe=args.viz_wireframe, RGBA=True,\n follow_camera=prior_frame_use_follow, cam_offset=prior_cam_offset,\n cam_rot=cam_rot, img_extn=IM_EXTN)\n', (19114, 19659), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((20119, 20226), 'viz.utils.create_video', 'create_video', (["(pred_prior_out_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(pred_prior_out_path + '.mp4')", 'FPS'], {}), "(pred_prior_out_path + '/frame_%08d.' + '%s' % IM_EXTN, \n pred_prior_out_path + '.mp4', FPS)\n", (20131, 20226), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((20322, 20381), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""comp_og_final_pred_prior"""'], {}), "(cur_qual_out_path, 'comp_og_final_pred_prior')\n", (20334, 20381), False, 'import sys, os\n'), ((20394, 20559), 'viz.utils.create_multi_comparison_images', 'create_multi_comparison_images', (['[og_out_path, pred_out_path, pred_prior_out_path]', 'og_pred_prior_comp_path', "['Input', 'FinalCam', 'FinalPrior']"], {'extn': 'IM_EXTN'}), "([og_out_path, pred_out_path,\n pred_prior_out_path], og_pred_prior_comp_path, ['Input', 'FinalCam',\n 'FinalPrior'], extn=IM_EXTN)\n", (20424, 20559), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((20661, 20776), 'viz.utils.create_video', 'create_video', (["(og_pred_prior_comp_path + '/frame_%08d.' 
+ '%s' % IM_EXTN)", "(og_pred_prior_comp_path + '.mp4')", 'FPS'], {}), "(og_pred_prior_comp_path + '/frame_%08d.' + '%s' % IM_EXTN, \n og_pred_prior_comp_path + '.mp4', FPS)\n", (20673, 20776), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((4911, 4935), 'os.listdir', 'os.listdir', (['args.results'], {}), '(args.results)\n', (4921, 4935), False, 'import sys, os\n'), ((7127, 7164), 'torch.Tensor', 'torch.Tensor', (["pred_res['floor_plane']"], {}), "(pred_res['floor_plane'])\n", (7139, 7164), False, 'import torch\n'), ((7880, 7921), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(stage_name + '.npz')"], {}), "(result_dir, stage_name + '.npz')\n", (7888, 7921), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((8143, 8173), 'fitting.fitting_utils.prep_res', 'prep_res', (['stage_res', 'device', 'T'], {}), '(stage_res, device, T)\n', (8151, 8173), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((8433, 8474), 'fitting.fitting_utils.load_res', 'load_res', (['result_dir', "(stage_name + '.npz')"], {}), "(result_dir, stage_name + '.npz')\n", (8441, 8474), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((8702, 8732), 'fitting.fitting_utils.prep_res', 'prep_res', (['stage_res', 'device', 'T'], {}), '(stage_res, device, T)\n', (8710, 8732), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((10146, 10166), 'fitting.fitting_utils.run_smpl', 'run_smpl', (['v', 'pred_bm'], {}), '(v, pred_bm)\n', (10154, 10166), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((10735, 10755), 'fitting.fitting_utils.run_smpl', 'run_smpl', (['v', 'pred_bm'], {}), '(v, pred_bm)\n', (10743, 10755), False, 'from fitting.fitting_utils import load_res, prep_res, run_smpl\n'), ((11033, 11049), 'cv2.flip', 'cv2.flip', (['img', 
'(1)'], {}), '(img, 1)\n', (11041, 11049), False, 'import cv2\n'), ((13053, 13113), 'os.path.join', 'os.path.join', (['og_out_path', "('frame_%08d.%s' % (fidx, IM_EXTN))"], {}), "(og_out_path, 'frame_%08d.%s' % (fidx, IM_EXTN))\n", (13065, 13113), False, 'import sys, os\n'), ((13780, 13810), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'aspect': '"""auto"""'}), "(img, aspect='auto')\n", (13790, 13810), True, 'import matplotlib.pyplot as plt\n'), ((13887, 13989), 'matplotlib.pyplot.scatter', 'plt.scatter', (['viz_joints2d[fidx, valid_mask, 0]', 'viz_joints2d[fidx, valid_mask, 1]'], {'c': '"""lime"""', 's': '(100)'}), "(viz_joints2d[fidx, valid_mask, 0], viz_joints2d[fidx,\n valid_mask, 1], c='lime', s=100)\n", (13898, 13989), True, 'import matplotlib.pyplot as plt\n'), ((14007, 14016), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14014, 14016), True, 'import matplotlib.pyplot as plt\n'), ((14033, 14048), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (14041, 14048), True, 'import matplotlib.pyplot as plt\n'), ((14083, 14140), 'os.path.join', 'os.path.join', (['og_2d_obs_out_path', "('frame_%08d.png' % fidx)"], {}), "(og_2d_obs_out_path, 'frame_%08d.png' % fidx)\n", (14095, 14140), False, 'import sys, os\n'), ((14159, 14222), 'matplotlib.pyplot.savefig', 'plt.savefig', (['cur_joint2d_out'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(cur_joint2d_out, bbox_inches='tight', pad_inches=0)\n", (14170, 14222), True, 'import matplotlib.pyplot as plt\n'), ((14239, 14248), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (14246, 14248), True, 'import matplotlib.pyplot as plt\n'), ((16767, 16801), 'os.path.join', 'os.path.join', (['cur_qual_out_path', 'k'], {}), '(cur_qual_out_path, k)\n', (16779, 16801), False, 'import sys, os\n'), ((16818, 17437), 'viz.utils.viz_smpl_seq', 'viz_smpl_seq', (['stage_body'], {'imw': 'D_IMW', 'imh': 'D_IMH', 'fps': 'FPS', 'render_body': '(True)', 'render_bodies_static': 
'args.viz_bodies_static', 'render_joints': 'args.viz_wireframe', 'render_skeleton': '(False)', 'render_ground': 'render_ground_plane', 'ground_plane': 'viz_ground_plane', 'ground_alpha': 'GROUND_ALPHA', 'body_alpha': 'BODY_ALPHA', 'static_meshes': 'scene_viz', 'points_seq': 'viz_points', 'point_color': 'viz_point_color', 'use_offscreen': '(True)', 'out_path': 'stage_out_path', 'wireframe': 'args.viz_wireframe', 'RGBA': '(True)', 'point_rad': '(0.004)', 'follow_camera': '(False)', 'camera_intrinsics': 'cam_intrins_down', 'img_seq': 'img_viz', 'mask_seq': 'mask_viz', 'img_extn': 'IM_EXTN'}), '(stage_body, imw=D_IMW, imh=D_IMH, fps=FPS, render_body=True,\n render_bodies_static=args.viz_bodies_static, render_joints=args.\n viz_wireframe, render_skeleton=False, render_ground=render_ground_plane,\n ground_plane=viz_ground_plane, ground_alpha=GROUND_ALPHA, body_alpha=\n BODY_ALPHA, static_meshes=scene_viz, points_seq=viz_points, point_color\n =viz_point_color, use_offscreen=True, out_path=stage_out_path,\n wireframe=args.viz_wireframe, RGBA=True, point_rad=0.004, follow_camera\n =False, camera_intrinsics=cam_intrins_down, img_seq=img_viz, mask_seq=\n mask_viz, img_extn=IM_EXTN)\n', (16830, 17437), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((18005, 18102), 'viz.utils.create_video', 'create_video', (["(stage_out_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(stage_out_path + '.mp4')", 'FPS'], {}), "(stage_out_path + '/frame_%08d.' 
+ '%s' % IM_EXTN, \n stage_out_path + '.mp4', FPS)\n", (18017, 18102), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((18286, 18343), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""comp_final_pred_stages"""'], {}), "(cur_qual_out_path, 'comp_final_pred_stages')\n", (18298, 18343), False, 'import sys, os\n'), ((18360, 18508), 'viz.utils.create_multi_comparison_images', 'create_multi_comparison_images', (['[og_out_path, stage2_out_path, pred_out_path]', 'pred_stage_comp_path', "['Input', 'Stage2', 'Final']"], {'extn': 'IM_EXTN'}), "([og_out_path, stage2_out_path, pred_out_path\n ], pred_stage_comp_path, ['Input', 'Stage2', 'Final'], extn=IM_EXTN)\n", (18390, 18508), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((18628, 18737), 'viz.utils.create_video', 'create_video', (["(pred_stage_comp_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(pred_stage_comp_path + '.mp4')", 'FPS'], {}), "(pred_stage_comp_path + '/frame_%08d.' 
+ '%s' % IM_EXTN, \n pred_stage_comp_path + '.mp4', FPS)\n", (18640, 18737), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((22210, 22268), 'os.path.join', 'os.path.join', (['cur_qual_out_path', 'STAGES_PRIOR_RES_NAMES[0]'], {}), '(cur_qual_out_path, STAGES_PRIOR_RES_NAMES[0])\n', (22222, 22268), False, 'import sys, os\n'), ((6550, 6599), 'numpy.zeros', 'np.zeros', (['pred_res[smpk].shape[0]'], {'dtype': 'np.float'}), '(pred_res[smpk].shape[0], dtype=np.float)\n', (6558, 6599), True, 'import numpy as np\n'), ((6661, 6715), 'numpy.zeros', 'np.zeros', (['(T, pred_res[smpk].shape[1])'], {'dtype': 'np.float'}), '((T, pred_res[smpk].shape[1]), dtype=np.float)\n', (6669, 6715), True, 'import numpy as np\n'), ((9389, 9461), 'body_model.body_model.BodyModel', 'BodyModel', ([], {'bm_path': 'optim_bm_path', 'num_betas': 'num_pred_betas', 'batch_size': 'T'}), '(bm_path=optim_bm_path, num_betas=num_pred_betas, batch_size=T)\n', (9398, 9461), False, 'from body_model.body_model import BodyModel\n'), ((21139, 21173), 'os.path.join', 'os.path.join', (['cur_qual_out_path', 'k'], {}), '(cur_qual_out_path, k)\n', (21151, 21173), False, 'import sys, os\n'), ((21194, 21613), 'viz.utils.viz_smpl_seq', 'viz_smpl_seq', (['stage_prior_body'], {'imw': 'D_IMH', 'imh': 'D_IMH', 'fps': 'FPS', 'render_body': '(True)', 'render_bodies_static': 'args.viz_bodies_static', 'render_joints': 'args.viz_wireframe', 'render_skeleton': '(False)', 'render_ground': '(True)', 'use_offscreen': '(True)', 'out_path': 'stage_prior_out_path', 'wireframe': 'args.viz_wireframe', 'RGBA': '(True)', 'follow_camera': 'prior_frame_use_follow', 'cam_offset': 'prior_cam_offset', 'cam_rot': 'cam_rot', 'img_extn': 'IM_EXTN'}), '(stage_prior_body, imw=D_IMH, imh=D_IMH, fps=FPS, render_body=\n True, render_bodies_static=args.viz_bodies_static, render_joints=args.\n viz_wireframe, 
render_skeleton=False, render_ground=True, use_offscreen\n =True, out_path=stage_prior_out_path, wireframe=args.viz_wireframe,\n RGBA=True, follow_camera=prior_frame_use_follow, cam_offset=\n prior_cam_offset, cam_rot=cam_rot, img_extn=IM_EXTN)\n', (21206, 21613), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((22026, 22135), 'viz.utils.create_video', 'create_video', (["(stage_prior_out_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(stage_prior_out_path + '.mp4')", 'FPS'], {}), "(stage_prior_out_path + '/frame_%08d.' + '%s' % IM_EXTN, \n stage_prior_out_path + '.mp4', FPS)\n", (22038, 22135), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((22353, 22416), 'os.path.join', 'os.path.join', (['cur_qual_out_path', '"""comp_final_pred_prior_stages"""'], {}), "(cur_qual_out_path, 'comp_final_pred_prior_stages')\n", (22365, 22416), False, 'import sys, os\n'), ((22437, 22616), 'viz.utils.create_multi_comparison_images', 'create_multi_comparison_images', (['[og_out_path, stage2_prior_out_path, pred_prior_out_path]', 'pred_stage_prior_comp_path', "['Input', 'Stage2Prior', 'FinalPrior']"], {'extn': 'IM_EXTN'}), "([og_out_path, stage2_prior_out_path,\n pred_prior_out_path], pred_stage_prior_comp_path, ['Input',\n 'Stage2Prior', 'FinalPrior'], extn=IM_EXTN)\n", (22467, 22616), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((22749, 22870), 'viz.utils.create_video', 'create_video', (["(pred_stage_prior_comp_path + '/frame_%08d.' + '%s' % IM_EXTN)", "(pred_stage_prior_comp_path + '.mp4')", 'FPS'], {}), "(pred_stage_prior_comp_path + '/frame_%08d.' 
+ '%s' % IM_EXTN, \n pred_stage_prior_comp_path + '.mp4', FPS)\n", (22761, 22870), False, 'from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections\n'), ((6782, 6819), 'torch.Tensor', 'torch.Tensor', (["pred_res['floor_plane']"], {}), "(pred_res['floor_plane'])\n", (6794, 6819), False, 'import torch\n'), ((6267, 6295), 'torch.Tensor', 'torch.Tensor', (['pred_res[smpk]'], {}), '(pred_res[smpk])\n', (6279, 6295), False, 'import torch\n')] |
from pathlib import Path
import numpy as np
import torch.nn as nn
class FeatureExtractor(object):
    """Trainer callback that captures intermediate activations of every
    Conv2d/Linear layer via forward hooks and periodically flushes them
    to per-layer ``.npy`` files under the trainer's log directory."""
    def __init__(self):
        # NOTE(review): `super(FeatureExtractor).__init__()` omits `self`, so the
        # base initializer is never actually invoked (likely `super().__init__()`
        # was intended) — harmless for `object`, but confirm.
        super(FeatureExtractor).__init__()
    def initialize(self, trainer):
        """Create the feature output directory, reset the dump counter and
        install forward hooks on the trainer's model.

        Assumes `trainer` exposes `logger.log_path` (a pathlib.Path),
        `logger.print` and `model` (an nn.Module) — TODO confirm against the
        trainer implementation.
        """
        self.feature_path = trainer.logger.log_path / 'features'
        if not self.feature_path.exists():
            self.feature_path.mkdir()
        # index of the next dump file written by save_feature
        self.k = 0
        trainer.logger.print('Extract features after convolution and linear layers')
        trainer.logger.print(f'The features are saved at:{self.feature_path}')
        self.register_module_hook(trainer)
    def keep_feature(self, module, input, output):
        """Forward hook: append this batch's output to `module.extracted`.

        4-D conv outputs are spatially averaged (axes 2,3) so conv and linear
        features stack uniformly as (samples, channels) rows.
        """
        mat = output.cpu().numpy()
        if mat.ndim == 4:
            # collapse spatial dims -> one value per channel
            mat = np.mean(mat, axis=(2,3))
        if module.extracted is None:
            module.extracted = mat
        else:
            # grow the per-module buffer batch by batch
            module.extracted = np.vstack((module.extracted, mat))
    def register_module_hook(self, trainer):
        """Attach `keep_feature` to every Conv2d/Linear module and create one
        output subdirectory per hooked layer."""
        self.first_module = None
        for name, module in trainer.model.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                # remember the first hooked module; its buffer size drives flushing
                # NOTE(review): prefer `is None` over `== None`
                if self.first_module == None:
                    self.first_module = module
                module.extracted = None
                module.register_forward_hook(self.keep_feature)
                temp_path = self.feature_path / name
                if not temp_path.exists():
                    temp_path.mkdir()
    def save_feature(self, trainer):
        """Dump each hooked module's accumulated features to
        <feature_path>/<layer>/<k>.npy and clear the buffers."""
        for name, module in trainer.model.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                # NOTE(review): zfill(1) never pads, so names won't sort
                # lexicographically past k=9 — presumably a wider pad was intended.
                np.save(self.feature_path / name / (str(self.k).zfill(1) + '.npy'), module.extracted)
                module.extracted = None
        self.k += 1
    def check_feature(self, trainer):
        """Flush buffers once the first hooked module holds > 10000 rows."""
        if self.first_module.extracted.shape[0] > 10000:
            self.save_feature(trainer) | [
"numpy.mean",
"numpy.vstack"
] | [((714, 739), 'numpy.mean', 'np.mean', (['mat'], {'axis': '(2, 3)'}), '(mat, axis=(2, 3))\n', (721, 739), True, 'import numpy as np\n'), ((869, 903), 'numpy.vstack', 'np.vstack', (['(module.extracted, mat)'], {}), '((module.extracted, mat))\n', (878, 903), True, 'import numpy as np\n')] |
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, \
f1_score, auc, mean_squared_error
from sklearn.linear_model import RidgeCV, LassoCV, ElasticNet, SGDClassifier, SGDRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr, spearmanr
from collections import defaultdict
from functools import partial
from data import DataProvider
import random
import json
import numpy as np
import numpy.ma as ma
import pandas as pd
def auprc(y_true, y_score):
    """Area under the precision-recall curve (AUPRC) for binary labels."""
    precision, recall, _thresholds = precision_recall_curve(y_true=y_true, probas_pred=y_score)
    return auc(x=recall, y=precision)
def pearson(y_true, y_pred):
    """Pearson correlation coefficient between truth and prediction
    (the p-value reported by scipy is discarded)."""
    return pearsonr(x=y_true, y=y_pred)[0]
def spearman(y_true, y_pred):
    """Spearman rank correlation coefficient between truth and prediction
    (the p-value reported by scipy is discarded)."""
    rho, _p = spearmanr(a=y_true, b=y_pred)
    return rho
# Scorers shared by every classification grid search in this module;
# probability-based metrics are flagged with needs_proba=True.
classify_scoring = dict(
    auroc='roc_auc',
    auprc=make_scorer(auprc, needs_proba=True),
    acc=make_scorer(accuracy_score),
    f1=make_scorer(f1_score),
    aps=make_scorer(average_precision_score, needs_proba=True),
)
# Scorers shared by every regression grid search in this module.
regress_scoring = dict(
    mse=make_scorer(mean_squared_error),
    pearsonr=make_scorer(pearson),
    spearmanr=make_scorer(spearman),
)
def classify_with_rf(train_features, y_train, cv_split_rf, metric='auroc'):
    """Grid-search a random-forest classifier over tree count and depth.

    Features are standardized before fitting; callers must apply the returned
    scaler to any data they later predict on.

    Returns:
        (GridSearchCV, StandardScaler): the fitted search (refit on `metric`)
        and the scaler fitted on `train_features`.
    """
    try:
        # n_estimators: number of trees; max_depth: per-tree depth cap
        param_grid = [{
            'n_estimators': [10, 50, 200, 500, 1000],
            'max_depth': [10, 50, 100, 200, 500],
        }]
        search = GridSearchCV(RandomForestClassifier(), param_grid, n_jobs=-1,
                              cv=cv_split_rf, verbose=2,
                              scoring=classify_scoring, refit=metric)
        feature_scaler = StandardScaler()
        search.fit(feature_scaler.fit_transform(train_features), y_train)
        return search, feature_scaler
    except Exception as e:
        raise e
def regress_with_rf(train_features, y_train, cv_split_rf, metric='pearsonr'):
    """Grid-search a random-forest regressor over tree count and depth.

    Features are standardized before fitting; callers must apply the returned
    scaler to any data they later predict on.

    Returns:
        (GridSearchCV, StandardScaler): the fitted search (refit on `metric`)
        and the scaler fitted on `train_features`.
    """
    try:
        # n_estimators: number of trees; max_depth: per-tree depth cap
        param_grid = [{
            'n_estimators': [10, 50, 200, 500, 1000],
            'max_depth': [10, 50, 100, 200, 500],
        }]
        search = GridSearchCV(RandomForestRegressor(), param_grid, n_jobs=-1,
                              cv=cv_split_rf, verbose=2,
                              scoring=regress_scoring, refit=metric)
        feature_scaler = StandardScaler()
        search.fit(feature_scaler.fit_transform(train_features), y_train)
        return search, feature_scaler
    except Exception as e:
        raise e
def classify_with_enet(train_features, y_train, cv_split_enet, metric='auroc'):
    """Grid-search an elastic-net logistic classifier (SGD) over alpha/l1_ratio.

    Features are standardized before fitting; callers must apply the returned
    scaler to any data they later predict on.

    Returns:
        (GridSearchCV, StandardScaler): the fitted search (refit on `metric`)
        and the scaler fitted on `train_features`.
    """
    # NOTE(review): loss='log' was renamed to 'log_loss' in scikit-learn 1.1
    # and removed in 1.3 — confirm the pinned scikit-learn version supports it.
    try:
        param_grid = {
            'alpha': [0.1, 0.01, 0.001, 0.0001, 0.00001],
            'l1_ratio': [0.0, 0.25, 0.5, 0.75, 1.0],
        }
        estimator = SGDClassifier(loss='log', penalty='elasticnet', random_state=12345)
        search = GridSearchCV(estimator=estimator, param_grid=param_grid,
                              n_jobs=-1, cv=cv_split_enet, verbose=2,
                              scoring=classify_scoring, refit=metric)
        feature_scaler = StandardScaler()
        search.fit(feature_scaler.fit_transform(train_features), y_train)
        return search, feature_scaler
    except Exception as e:
        raise e
def regress_with_enet(train_features, y_train, cv_split_enet, metric='pearsonr'):
    """Grid-search an ElasticNet regressor over alpha/l1_ratio.

    Features are standardized before fitting; callers must apply the returned
    scaler to any data they later predict on.

    Returns:
        (GridSearchCV, StandardScaler): the fitted search (refit on `metric`)
        and the scaler fitted on `train_features`.
    """
    try:
        param_grid = {
            'alpha': [0.1, 0.01, 0.001, 0.0001, 0.00001],
            'l1_ratio': [0.0, 0.25, 0.5, 0.75, 1.0],
        }
        estimator = ElasticNet(random_state=12345, max_iter=5000)
        search = GridSearchCV(estimator=estimator, param_grid=param_grid,
                              n_jobs=-1, cv=cv_split_enet, verbose=2,
                              scoring=regress_scoring, refit=metric)
        feature_scaler = StandardScaler()
        search.fit(feature_scaler.fit_transform(train_features), y_train)
        return search, feature_scaler
    except Exception as e:
        raise e
def n_time_cv_classify(train_data, n=10, model_fn=classify_with_enet, test_data=None, random_state=2020,
                       metric='auroc'):
    """Repeat 5-fold stratified CV `n` times with different shuffle seeds.

    Args:
        train_data: (features, labels) pair unpacked into `model_fn`.
        n: number of repetitions (each with its own StratifiedKFold seed).
        model_fn: trainer returning (fitted GridSearchCV, fitted scaler).
        test_data: optional (features, labels) pair evaluated after each fit.
        random_state: seed for drawing the n repetition seeds.
        metric: GridSearchCV refit metric passed through to `model_fn`.

    Returns:
        (train_history, models) when test_data is None, else
        (train_history, test_history, models); histories map metric name to a
        list of n values.
    """
    metric_list = ['auroc', 'acc', 'aps', 'f1', 'auprc']
    random.seed(random_state)
    seeds = random.sample(range(100000), k=n)
    train_history = defaultdict(list)
    test_history = defaultdict(list)
    models = []
    for seed in seeds:
        kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
        cv_split = kfold.split(*train_data)
        trained_model, scaler = model_fn(*train_data, list(cv_split), metric=metric)
        # BUG FIX: the loop variable used to be named `metric`, clobbering the
        # function parameter — every seed after the first refit on 'auprc'
        # (the last list element) instead of the requested metric.
        for m in metric_list:
            train_history[m].append(trained_model.cv_results_[f'mean_test_{m}'][trained_model.best_index_])
        if test_data is not None:
            # scale the held-out features once and reuse for both predictions
            test_features = scaler.transform(test_data[0])
            preds = trained_model.predict(test_features)
            pred_scores = trained_model.predict_proba(test_features)[:, 1]
            test_history['auroc'].append(roc_auc_score(y_true=test_data[1], y_score=pred_scores))
            test_history['acc'].append(accuracy_score(y_true=test_data[1], y_pred=preds))
            test_history['aps'].append(average_precision_score(y_true=test_data[1], y_score=pred_scores))
            test_history['f1'].append(f1_score(y_true=test_data[1], y_pred=preds))
            test_history['auprc'].append(auprc(y_true=test_data[1], y_score=pred_scores))
        models.append(trained_model)
    return (train_history, models) if test_data is None else (train_history, test_history, models)
def multi_regress(train_data, output_file_name, model_fn=regress_with_enet, test_data=None, random_state=2020):
    """Per-drug nested-CV regression over a (features_df, targets_df) pair.

    For each drug column: drop NaN targets, run an outer 5-fold split and fill
    out-of-fold predictions into train_pred_df; if `test_data` is given, also
    refit on all labeled samples for that drug and predict the test features.
    Predictions are persisted to ./predictions/<output_file_name>.csv (and
    test_<output_file_name>.csv). Returns numpy arrays of truths/predictions;
    cells that never receive a prediction keep the -1 placeholder.
    """
    train_feature_df = train_data[0]
    train_target_df = train_data[1]
    # -1 marks (sample, drug) cells that never get a prediction
    train_pred_df = pd.DataFrame(np.full_like(train_target_df, fill_value=-1),
                                 index=train_target_df.index,
                                 columns=train_target_df.columns)
    if test_data is not None:
        test_feature_df = test_data[0]
        test_target_df = test_data[1]
        test_pred_df = pd.DataFrame(np.full_like(test_target_df, fill_value=-1),
                                    index=test_target_df.index,
                                    columns=test_target_df.columns)
        assert all(train_pred_df.columns == test_pred_df.columns)
    for drug in train_pred_df.columns:
        print("Training: {}".format(drug))
        # keep only samples with an observed target for this drug,
        # restricted to samples that also have features
        y = train_target_df.loc[~train_target_df[drug].isna(), drug]
        sample_ids = train_feature_df.index.intersection(y.index)
        X = train_feature_df.loc[sample_ids]
        # print(X.columns)
        y = y.reindex(sample_ids)
        outer_kfold = KFold(n_splits=5, shuffle=True, random_state=random_state)
        for train_index, test_index in outer_kfold.split(X, y):
            X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
            # NOTE(review): y[train_index] indexes a Series by position —
            # unambiguous only while the index is non-integer; prefer y.iloc[...].
            y_train, y_test = y[train_index], y[test_index]
            # inner 5-fold split drives the grid search inside model_fn
            kfold = KFold(n_splits=5, shuffle=True, random_state=random_state)
            cv_split = kfold.split(X_train)
            try:
                trained_model, scaler = model_fn(X_train, y_train, list(cv_split))
                # write out-of-fold predictions back by sample id
                train_pred_df.loc[y.index[test_index], drug] = trained_model.predict(scaler.transform(X_test))
            except Exception as e:
                # best-effort: a failed fold leaves -1 placeholders for this drug
                print(e)
        assert all(train_pred_df.index==train_target_df.index)
        # rewritten after every drug so partial results survive interruption
        train_pred_df.to_csv(f'./predictions/{output_file_name}.csv', index_label='Sample')
        if test_data is not None:
            # refit on all labeled samples of this drug, then predict test set
            outer_kfold = KFold(n_splits=5, shuffle=True, random_state=random_state)
            cv_split = outer_kfold.split(X)
            try:
                trained_model, scaler = model_fn(X, y, list(cv_split))
                test_pred_df.loc[test_feature_df.index, drug] = trained_model.predict(scaler.transform(test_feature_df))
            except Exception as e:
                print(e)
            assert all(test_pred_df.index == test_target_df.index)
            test_pred_df.to_csv(f'./predictions/test_{output_file_name}.csv', index_label='Sample')
    return (train_target_df.values, train_pred_df.values) if test_data is None else (
        train_target_df.values, train_pred_df.values, test_target_df.values, test_pred_df.values)
def _masked_corr_mean(y_truths, y_preds, corr_fn, per_drug):
    """Mean corr_fn statistic across columns (per_drug=True) or rows, keeping
    only positions where the ground truth is finite."""
    count = y_truths.shape[1] if per_drug else y_truths.shape[0]
    stats = []
    for i in range(count):
        truth = y_truths[:, i] if per_drug else y_truths[i, :]
        pred = y_preds[:, i] if per_drug else y_preds[i, :]
        keep = ~ma.masked_invalid(truth).mask
        stats.append(corr_fn(truth[keep], pred[keep])[0])
    return np.mean(stats).item()


def _masked_mse_mean(y_truths, y_preds, axis):
    """Mean of the NaN-aware per-column (axis=0) or per-row (axis=1) squared
    error. NOTE: no square root is taken despite the 'rmse' history keys."""
    return np.mean(np.nanmean(np.square(y_truths - y_preds), axis=axis)).item()


def _record_regress_metrics(history, y_truths, y_preds):
    """Append the six summary metrics for one repetition to *history*.
    'd*' keys average per drug (column); 'c*' keys per sample (row)."""
    history['dpearsonr'].append(_masked_corr_mean(y_truths, y_preds, pearsonr, True))
    history['cpearsonr'].append(_masked_corr_mean(y_truths, y_preds, pearsonr, False))
    history['dspearman'].append(_masked_corr_mean(y_truths, y_preds, spearmanr, True))
    history['cspearman'].append(_masked_corr_mean(y_truths, y_preds, spearmanr, False))
    history['drmse'].append(_masked_mse_mean(y_truths, y_preds, axis=0))
    history['crmse'].append(_masked_mse_mean(y_truths, y_preds, axis=1))


def n_time_cv_regress(train_data, output_file_name, n=5, model_fn=regress_with_enet, test_data=None, random_state=2020):
    """Repeat multi_regress `n` times with different shuffle seeds and collect
    per-repetition correlation / error summaries.

    Args:
        train_data: (features_df, targets_df) pair forwarded to multi_regress.
        output_file_name: csv name stem; each repetition appends its seed.
        n: number of repetitions.
        model_fn: per-drug trainer forwarded to multi_regress.
        test_data: optional (features_df, targets_df) held-out pair.
        random_state: seed for drawing the n repetition seeds.

    Returns:
        (train_history, test_history): metric name -> list of n values;
        test_history stays empty when test_data is None.
    """
    random.seed(random_state)
    seeds = random.sample(range(100000), k=int(n))
    train_history = defaultdict(list)
    test_history = defaultdict(list)
    for seed in seeds:
        if test_data is None:
            train_y_truths, train_y_preds = multi_regress(
                train_data, f'{output_file_name}_{seed}',
                model_fn=model_fn, test_data=test_data, random_state=seed)
        else:
            # BUG FIX: this branch previously passed random_state=random_state
            # (the constant) instead of the per-repetition seed, so all n
            # repetitions used identical splits and produced identical results.
            train_y_truths, train_y_preds, test_y_truths, test_y_preds = multi_regress(
                train_data, f'{output_file_name}_{seed}',
                model_fn=model_fn, test_data=test_data, random_state=seed)
            _record_regress_metrics(test_history, test_y_truths, test_y_preds)
        _record_regress_metrics(train_history, train_y_truths, train_y_preds)
    return (train_history, test_history)
if __name__ == '__main__':
    # Assemble the labeled expression/mutation splits from the data provider.
    provider = DataProvider()
    gex_samples, mut_only_samples = provider.get_labeled_samples()

    target_df = provider.target_df.loc[gex_samples]
    mut_only_target_df = provider.target_df.loc[mut_only_samples]
    gex_features = provider.gex_dat.loc[gex_samples]
    mut_features = provider.mut_dat.loc[gex_samples]
    mut_only_features = provider.mut_dat.loc[mut_only_samples]

    # Expression-based model: repeated CV on the labeled samples only.
    gex_history, _ = n_time_cv_regress((gex_features, target_df), 'gex_pred', n=5,
                                       model_fn=regress_with_enet, test_data=None,
                                       random_state=2020)
    with open('./predictions/gex_pred.json', 'w') as f:
        json.dump(gex_history, f)

    # Mutation-based model: repeated CV plus evaluation on mutation-only samples.
    mut_history, mut_test_history = n_time_cv_regress(
        (mut_features, target_df), 'mut_pred', n=5, model_fn=regress_with_enet,
        test_data=(mut_only_features, mut_only_target_df), random_state=2020)
    with open('./predictions/mut_pred.json', 'w') as f:
        json.dump(mut_history, f)
    with open('./predictions/test_mut_pred.json', 'w') as f:
        json.dump(mut_test_history, f)
| [
"sklearn.model_selection.GridSearchCV",
"data.DataProvider",
"sklearn.metrics.auc",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"scipy.stats.pearsonr",
"sklearn.model_selection.KFold",
"sklearn.linear_model.SGDClassifier",
"sklearn.ensemble.RandomForestRegressor",
"... | [((820, 878), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', ([], {'y_true': 'y_true', 'probas_pred': 'y_score'}), '(y_true=y_true, probas_pred=y_score)\n', (842, 878), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((890, 918), 'sklearn.metrics.auc', 'auc', (['lr_recall', 'lr_precision'], {}), '(lr_recall, lr_precision)\n', (893, 918), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((969, 997), 'scipy.stats.pearsonr', 'pearsonr', ([], {'x': 'y_true', 'y': 'y_pred'}), '(x=y_true, y=y_pred)\n', (977, 997), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1066, 1095), 'scipy.stats.spearmanr', 'spearmanr', ([], {'a': 'y_true', 'b': 'y_pred'}), '(a=y_true, b=y_pred)\n', (1075, 1095), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1173, 1209), 'sklearn.metrics.make_scorer', 'make_scorer', (['auprc'], {'needs_proba': '(True)'}), '(auprc, needs_proba=True)\n', (1184, 1209), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1222, 1249), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (1233, 1249), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1261, 1282), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {}), '(f1_score)\n', (1272, 1282), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1295, 1349), 'sklearn.metrics.make_scorer', 'make_scorer', 
(['average_precision_score'], {'needs_proba': '(True)'}), '(average_precision_score, needs_proba=True)\n', (1306, 1349), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1384, 1415), 'sklearn.metrics.make_scorer', 'make_scorer', (['mean_squared_error'], {}), '(mean_squared_error)\n', (1395, 1415), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1433, 1453), 'sklearn.metrics.make_scorer', 'make_scorer', (['pearson'], {}), '(pearson)\n', (1444, 1453), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((1472, 1493), 'sklearn.metrics.make_scorer', 'make_scorer', (['spearman'], {}), '(spearman)\n', (1483, 1493), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((5742, 5767), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (5753, 5767), False, 'import random\n'), ((5834, 5851), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5845, 5851), False, 'from collections import defaultdict\n'), ((5871, 5888), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5882, 5888), False, 'from collections import defaultdict\n'), ((10136, 10161), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (10147, 10161), False, 'import random\n'), ((10233, 10250), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10244, 10250), False, 'from collections import defaultdict\n'), ((10270, 10287), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10281, 10287), False, 'from collections import 
defaultdict\n'), ((14564, 14578), 'data.DataProvider', 'DataProvider', ([], {}), '()\n', (14576, 14578), False, 'from data import DataProvider\n'), ((2168, 2184), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2182, 2184), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3195, 3211), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3209, 3211), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3881, 3948), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'penalty': '"""elasticnet"""', 'random_state': '(12345)'}), "(loss='log', penalty='elasticnet', random_state=12345)\n", (3894, 3948), False, 'from sklearn.linear_model import RidgeCV, LassoCV, ElasticNet, SGDClassifier, SGDRegressor\n'), ((4029, 4175), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'base_enet', 'param_grid': 'enet_param_grid', 'n_jobs': '(-1)', 'cv': 'cv_split_enet', 'verbose': '(2)', 'scoring': 'classify_scoring', 'refit': 'metric'}), '(estimator=base_enet, param_grid=enet_param_grid, n_jobs=-1, cv\n =cv_split_enet, verbose=2, scoring=classify_scoring, refit=metric)\n', (4041, 4175), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4216, 4232), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4230, 4232), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4898, 4943), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'random_state': '(12345)', 'max_iter': '(5000)'}), '(random_state=12345, max_iter=5000)\n', (4908, 4943), False, 'from sklearn.linear_model import RidgeCV, LassoCV, ElasticNet, SGDClassifier, SGDRegressor\n'), ((5024, 5169), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'base_enet', 'param_grid': 'enet_param_grid', 'n_jobs': '(-1)', 'cv': 'cv_split_enet', 'verbose': '(2)', 'scoring': 'regress_scoring', 'refit': 'metric'}), '(estimator=base_enet, 
param_grid=enet_param_grid, n_jobs=-1, cv\n =cv_split_enet, verbose=2, scoring=regress_scoring, refit=metric)\n', (5036, 5169), False, 'from sklearn.model_selection import GridSearchCV\n'), ((5210, 5226), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5224, 5226), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5945, 6005), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=5, shuffle=True, random_state=seed)\n', (5960, 6005), False, 'from sklearn.model_selection import StratifiedKFold, KFold, train_test_split\n'), ((7508, 7552), 'numpy.full_like', 'np.full_like', (['train_target_df'], {'fill_value': '(-1)'}), '(train_target_df, fill_value=-1)\n', (7520, 7552), True, 'import numpy as np\n'), ((8415, 8473), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': 'random_state'}), '(n_splits=5, shuffle=True, random_state=random_state)\n', (8420, 8473), False, 'from sklearn.model_selection import StratifiedKFold, KFold, train_test_split\n'), ((15446, 15477), 'json.dump', 'json.dump', (['gex_train_history', 'f'], {}), '(gex_train_history, f)\n', (15455, 15477), False, 'import json\n'), ((15765, 15796), 'json.dump', 'json.dump', (['mut_train_history', 'f'], {}), '(mut_train_history, f)\n', (15774, 15796), False, 'import json\n'), ((15867, 15897), 'json.dump', 'json.dump', (['mut_test_history', 'f'], {}), '(mut_test_history, f)\n', (15876, 15897), False, 'import json\n'), ((1999, 2023), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2021, 2023), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier\n'), ((3028, 3051), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (3049, 3051), False, 'from sklearn.ensemble import RandomForestRegressor, 
GradientBoostingRegressor, RandomForestClassifier\n'), ((7825, 7868), 'numpy.full_like', 'np.full_like', (['test_target_df'], {'fill_value': '(-1)'}), '(test_target_df, fill_value=-1)\n', (7837, 7868), True, 'import numpy as np\n'), ((8694, 8752), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': 'random_state'}), '(n_splits=5, shuffle=True, random_state=random_state)\n', (8699, 8752), False, 'from sklearn.model_selection import StratifiedKFold, KFold, train_test_split\n'), ((9284, 9342), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': 'random_state'}), '(n_splits=5, shuffle=True, random_state=random_state)\n', (9289, 9342), False, 'from sklearn.model_selection import StratifiedKFold, KFold, train_test_split\n'), ((6724, 6779), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'test_data[1]', 'y_score': 'pred_scores'}), '(y_true=test_data[1], y_score=pred_scores)\n', (6737, 6779), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((6820, 6869), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'test_data[1]', 'y_pred': 'preds'}), '(y_true=test_data[1], y_pred=preds)\n', (6834, 6869), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((6910, 6975), 'sklearn.metrics.average_precision_score', 'average_precision_score', ([], {'y_true': 'test_data[1]', 'y_score': 'pred_scores'}), '(y_true=test_data[1], y_score=pred_scores)\n', (6933, 6975), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((7015, 7058), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'test_data[1]', 
'y_pred': 'preds'}), '(y_true=test_data[1], y_pred=preds)\n', (7023, 7058), False, 'from sklearn.metrics import roc_auc_score, make_scorer, average_precision_score, precision_recall_curve, accuracy_score, f1_score, auc, mean_squared_error\n'), ((14292, 14333), 'numpy.square', 'np.square', (['(train_y_truths - train_y_preds)'], {}), '(train_y_truths - train_y_preds)\n', (14301, 14333), True, 'import numpy as np\n'), ((14411, 14452), 'numpy.square', 'np.square', (['(train_y_truths - train_y_preds)'], {}), '(train_y_truths - train_y_preds)\n', (14420, 14452), True, 'import numpy as np\n'), ((12673, 12712), 'numpy.square', 'np.square', (['(test_y_truths - test_y_preds)'], {}), '(test_y_truths - test_y_preds)\n', (12682, 12712), True, 'import numpy as np\n'), ((12810, 12849), 'numpy.square', 'np.square', (['(test_y_truths - test_y_preds)'], {}), '(test_y_truths - test_y_preds)\n', (12819, 12849), True, 'import numpy as np\n'), ((12966, 13005), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[:, i]'], {}), '(train_y_truths[:, i])\n', (12983, 13005), True, 'import numpy.ma as ma\n'), ((13064, 13103), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[:, i]'], {}), '(train_y_truths[:, i])\n', (13081, 13103), True, 'import numpy.ma as ma\n'), ((13306, 13345), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[i, :]'], {}), '(train_y_truths[i, :])\n', (13323, 13345), True, 'import numpy.ma as ma\n'), ((13404, 13443), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[i, :]'], {}), '(train_y_truths[i, :])\n', (13421, 13443), True, 'import numpy.ma as ma\n'), ((13647, 13686), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[:, i]'], {}), '(train_y_truths[:, i])\n', (13664, 13686), True, 'import numpy.ma as ma\n'), ((13746, 13785), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[:, i]'], {}), '(train_y_truths[:, i])\n', (13763, 13785), True, 'import numpy.ma as ma\n'), ((13989, 
14028), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[i, :]'], {}), '(train_y_truths[i, :])\n', (14006, 14028), True, 'import numpy.ma as ma\n'), ((14088, 14127), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['train_y_truths[i, :]'], {}), '(train_y_truths[i, :])\n', (14105, 14127), True, 'import numpy.ma as ma\n'), ((11277, 11315), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[:, i]'], {}), '(test_y_truths[:, i])\n', (11294, 11315), True, 'import numpy.ma as ma\n'), ((11377, 11415), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[:, i]'], {}), '(test_y_truths[:, i])\n', (11394, 11415), True, 'import numpy.ma as ma\n'), ((11631, 11669), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[i, :]'], {}), '(test_y_truths[i, :])\n', (11648, 11669), True, 'import numpy.ma as ma\n'), ((11731, 11769), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[i, :]'], {}), '(test_y_truths[i, :])\n', (11748, 11769), True, 'import numpy.ma as ma\n'), ((11986, 12024), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[:, i]'], {}), '(test_y_truths[:, i])\n', (12003, 12024), True, 'import numpy.ma as ma\n'), ((12087, 12125), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[:, i]'], {}), '(test_y_truths[:, i])\n', (12104, 12125), True, 'import numpy.ma as ma\n'), ((12342, 12380), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[i, :]'], {}), '(test_y_truths[i, :])\n', (12359, 12380), True, 'import numpy.ma as ma\n'), ((12443, 12481), 'numpy.ma.masked_invalid', 'ma.masked_invalid', (['test_y_truths[i, :]'], {}), '(test_y_truths[i, :])\n', (12460, 12481), True, 'import numpy.ma as ma\n')] |
#! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from visualization_msgs.msg import Marker
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import Pose, Point, Vector3, Quaternion
import matplotlib.pyplot as plt
import numpy as np
POS_SCALE = 0.01
MSE_PLOT_MAX_TIME = 120 # in secs
class TrajectoryMarkers:
    """Publishes RViz sphere markers for a filtered odometry trajectory and a
    ground-truth (KITTI oxts) trajectory, and saves an MSE-vs-steps plot once
    MSE_PLOT_MAX_TIME seconds have elapsed since construction."""

    def __init__(self):
        # Next marker id to publish for each trajectory. The filtered counter
        # starts at 1 because row 0 of filtered_pos is a dummy origin row.
        self.filtered_marker_counter = 1
        self.ground_truth_marker_counter = 0
        # First ground-truth position; subtracted so ground truth starts at the origin.
        self.initial_ground_truth_pos = (0, 0, 0)
        # Row i holds the i-th received (scaled) position; row 0 is a dummy origin.
        self.filtered_pos = np.array([0, 0, 0]).astype(np.float64)
        self.ground_truth_pos = np.array([0, 0, 0]).astype(np.float64)
        self.start_time = rospy.get_time()
        self.saved_plot = False
        rospy.Subscriber("odometry/filtered", Odometry, self.plot_filtered_pos)
        rospy.Subscriber("kitti/oxts/odom", Odometry, self.plot_ground_truth_pos)
        self.filtered_odom_pub = rospy.Publisher('trajectory_markers/filtered', Marker, queue_size = 10)
        self.ground_truth_odom_pub = rospy.Publisher('trajectory_markers/ground_truth', Marker, queue_size = 10)

    def plot_filtered_pos(self, msg):
        """Callback for 'odometry/filtered': record the scaled position and publish a green sphere marker."""
        self.filtered_pos = np.vstack((self.filtered_pos, [
            msg.pose.pose.position.x * POS_SCALE,
            msg.pose.pose.position.y * POS_SCALE,
            msg.pose.pose.position.z * POS_SCALE
        ]))
        # Create marker
        marker = Marker(type = Marker.SPHERE,
                lifetime = rospy.Duration(0),
                pose = Pose(Point
                            (
                            self.filtered_pos[self.filtered_marker_counter][0],
                            self.filtered_pos[self.filtered_marker_counter][1],
                            self.filtered_pos[self.filtered_marker_counter][2]),
                            Quaternion(0.0, 0.0, 0.0, 1.0)
                    ),
                scale = Vector3(0.1, 0.1, 0.1),
                header = Header(frame_id = 'odom'),
                color = ColorRGBA(0.0, 1.0, 0.0, 1.0), # Green
                id = self.filtered_marker_counter)
        # Publish marker
        self.filtered_odom_pub.publish(marker)
        self.filtered_marker_counter += 1

    def plot_ground_truth_pos(self, msg):
        """Callback for 'kitti/oxts/odom': record the origin-shifted, scaled position and
        publish a yellow sphere marker. The very first message only fixes the origin."""
        if self.ground_truth_marker_counter == 0:
            self.initial_ground_truth_pos = (msg.pose.pose.position.x, msg.pose.pose.position.y, msg.pose.pose.position.z)
            self.ground_truth_marker_counter += 1
            return
        self.ground_truth_pos = np.vstack((self.ground_truth_pos, [
            (msg.pose.pose.position.x - self.initial_ground_truth_pos[0]) * POS_SCALE,
            (msg.pose.pose.position.y - self.initial_ground_truth_pos[1]) * POS_SCALE,
            (msg.pose.pose.position.z - self.initial_ground_truth_pos[2]) * POS_SCALE
        ]))
        # Create marker
        marker = Marker(type = Marker.SPHERE,
                lifetime = rospy.Duration(0),
                pose = Pose(Point
                            (
                            self.ground_truth_pos[self.ground_truth_marker_counter][0],
                            self.ground_truth_pos[self.ground_truth_marker_counter][1],
                            self.ground_truth_pos[self.ground_truth_marker_counter][2]),
                            Quaternion(0.0, 0.0, 0.0, 1.0)
                    ),
                scale = Vector3(0.1, 0.1, 0.1),
                header = Header(frame_id = 'odom'),
                color = ColorRGBA(1.0, 1.0, 0.0, 1.0), # Yellow
                id = self.ground_truth_marker_counter)
        # Publish marker
        self.ground_truth_odom_pub.publish(marker)
        self.ground_truth_marker_counter += 1
        if rospy.get_time() - self.start_time > MSE_PLOT_MAX_TIME and not self.saved_plot:
            self.calc_and_save_mse_plot()
            self.saved_plot = True

    def calc_and_save_mse_plot(self):
        """Plot and save the cumulative mean squared error (x and y separately)
        between the filtered and ground-truth trajectories."""
        fig = plt.figure(figsize = (12,6))
        ax_x = plt.subplot(121)
        ax_x.set_title("MSE X")
        ax_x.set_xlabel("steps")
        ax_x.set_ylabel("mse")
        ax_y = plt.subplot(122)
        ax_y.set_title("MSE Y")
        ax_y.set_xlabel("steps")
        ax_y.set_ylabel("mse")
        # Calculate mean squared error
        se_x = 0.0 # sum of squared error for x
        se_y = 0.0 # sum of squared error for y
        mse_x = [] # mean squared error for x at each step
        mse_y = [] # mean squared error for y at each step
        print("Calculating mse for x and y both...")
        # BUGFIX: the two topics are not guaranteed to deliver the same number of
        # messages. Only compare steps present in BOTH arrays; indexing past the
        # shorter one previously raised an IndexError.
        steps = min(len(self.ground_truth_pos), len(self.filtered_pos))
        for i in range(1, steps):
            se_x += (self.ground_truth_pos[i][0] - self.filtered_pos[i][0]) * (self.ground_truth_pos[i][0] - self.filtered_pos[i][0])
            se_y += (self.ground_truth_pos[i][1] - self.filtered_pos[i][1]) * (self.ground_truth_pos[i][1] - self.filtered_pos[i][1])
            mse_x.append(se_x / i)
            mse_y.append(se_y / i)
        ax_x.plot(mse_x)
        ax_y.plot(mse_y)
        # Save BEFORE plt.show(): show() blocks until the window is closed, so
        # the figure was previously never written when running headless.
        fig.savefig("fig") # path $HOME/.ros/fig.png
        print("SAVED PLOT")
        plt.show()
if __name__ == '__main__':
    rospy.init_node("trajectory_markers_node", anonymous = True)
    trajectory_interactive_markers = TrajectoryMarkers()
    # BUGFIX: rospy.spin() blocks until shutdown, so the original
    # `while not rospy.is_shutdown(): rospy.spin(); rate.sleep()` loop never
    # executed rate.sleep() and never iterated — a single spin() is equivalent
    # and intentional: all work happens in the subscriber callbacks.
    rospy.spin()
| [
"geometry_msgs.msg.Vector3",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"std_msgs.msg.ColorRGBA",
"numpy.array",
"matplotlib.pyplot.figure",
"std_msgs.msg.Header",
"rospy.Rate",
"numpy.vstack",
"rospy.spin",
"geometry_msgs.msg.Point",
"rospy.Duration",
... | [((5491, 5549), 'rospy.init_node', 'rospy.init_node', (['"""trajectory_markers_node"""'], {'anonymous': '(True)'}), "('trajectory_markers_node', anonymous=True)\n", (5506, 5549), False, 'import rospy\n'), ((5620, 5634), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (5630, 5634), False, 'import rospy\n'), ((710, 726), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (724, 726), False, 'import rospy\n'), ((768, 839), 'rospy.Subscriber', 'rospy.Subscriber', (['"""odometry/filtered"""', 'Odometry', 'self.plot_filtered_pos'], {}), "('odometry/filtered', Odometry, self.plot_filtered_pos)\n", (784, 839), False, 'import rospy\n'), ((848, 921), 'rospy.Subscriber', 'rospy.Subscriber', (['"""kitti/oxts/odom"""', 'Odometry', 'self.plot_ground_truth_pos'], {}), "('kitti/oxts/odom', Odometry, self.plot_ground_truth_pos)\n", (864, 921), False, 'import rospy\n'), ((956, 1025), 'rospy.Publisher', 'rospy.Publisher', (['"""trajectory_markers/filtered"""', 'Marker'], {'queue_size': '(10)'}), "('trajectory_markers/filtered', Marker, queue_size=10)\n", (971, 1025), False, 'import rospy\n'), ((1065, 1138), 'rospy.Publisher', 'rospy.Publisher', (['"""trajectory_markers/ground_truth"""', 'Marker'], {'queue_size': '(10)'}), "('trajectory_markers/ground_truth', Marker, queue_size=10)\n", (1080, 1138), False, 'import rospy\n'), ((1210, 1361), 'numpy.vstack', 'np.vstack', (['(self.filtered_pos, [msg.pose.pose.position.x * POS_SCALE, msg.pose.pose.\n position.y * POS_SCALE, msg.pose.pose.position.z * POS_SCALE])'], {}), '((self.filtered_pos, [msg.pose.pose.position.x * POS_SCALE, msg.\n pose.pose.position.y * POS_SCALE, msg.pose.pose.position.z * POS_SCALE]))\n', (1219, 1361), True, 'import numpy as np\n'), ((2736, 3011), 'numpy.vstack', 'np.vstack', (['(self.ground_truth_pos, [(msg.pose.pose.position.x - self.\n initial_ground_truth_pos[0]) * POS_SCALE, (msg.pose.pose.position.y -\n self.initial_ground_truth_pos[1]) * POS_SCALE, (msg.pose.pose.position.\n z - 
self.initial_ground_truth_pos[2]) * POS_SCALE])'], {}), '((self.ground_truth_pos, [(msg.pose.pose.position.x - self.\n initial_ground_truth_pos[0]) * POS_SCALE, (msg.pose.pose.position.y -\n self.initial_ground_truth_pos[1]) * POS_SCALE, (msg.pose.pose.position.\n z - self.initial_ground_truth_pos[2]) * POS_SCALE]))\n', (2745, 3011), True, 'import numpy as np\n'), ((4287, 4314), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (4297, 4314), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4348), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (4343, 4348), True, 'import matplotlib.pyplot as plt\n'), ((4461, 4477), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (4472, 4477), True, 'import matplotlib.pyplot as plt\n'), ((5366, 5376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5374, 5376), True, 'import matplotlib.pyplot as plt\n'), ((5654, 5673), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5671, 5673), False, 'import rospy\n'), ((5683, 5695), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5693, 5695), False, 'import rospy\n'), ((562, 581), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (570, 581), True, 'import numpy as np\n'), ((644, 663), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (652, 663), True, 'import numpy as np\n'), ((1515, 1532), 'rospy.Duration', 'rospy.Duration', (['(0)'], {}), '(0)\n', (1529, 1532), False, 'import rospy\n'), ((2067, 2089), 'geometry_msgs.msg.Vector3', 'Vector3', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (2074, 2089), False, 'from geometry_msgs.msg import Pose, Point, Vector3, Quaternion\n'), ((2127, 2150), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""odom"""'}), "(frame_id='odom')\n", (2133, 2150), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((2189, 2218), 'std_msgs.msg.ColorRGBA', 'ColorRGBA', (['(0.0)', '(1.0)', '(0.0)', '(1.0)'], 
{}), '(0.0, 1.0, 0.0, 1.0)\n', (2198, 2218), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((3157, 3174), 'rospy.Duration', 'rospy.Duration', (['(0)'], {}), '(0)\n', (3171, 3174), False, 'import rospy\n'), ((3709, 3731), 'geometry_msgs.msg.Vector3', 'Vector3', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (3716, 3731), False, 'from geometry_msgs.msg import Pose, Point, Vector3, Quaternion\n'), ((3769, 3792), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""odom"""'}), "(frame_id='odom')\n", (3775, 3792), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((3831, 3860), 'std_msgs.msg.ColorRGBA', 'ColorRGBA', (['(1.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(1.0, 1.0, 0.0, 1.0)\n', (3840, 3860), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((1574, 1745), 'geometry_msgs.msg.Point', 'Point', (['self.filtered_pos[self.filtered_marker_counter][0]', 'self.filtered_pos[self.filtered_marker_counter][1]', 'self.filtered_pos[self.filtered_marker_counter][2]'], {}), '(self.filtered_pos[self.filtered_marker_counter][0], self.filtered_pos\n [self.filtered_marker_counter][1], self.filtered_pos[self.\n filtered_marker_counter][2])\n', (1579, 1745), False, 'from geometry_msgs.msg import Pose, Point, Vector3, Quaternion\n'), ((1958, 1988), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (1968, 1988), False, 'from geometry_msgs.msg import Pose, Point, Vector3, Quaternion\n'), ((3216, 3411), 'geometry_msgs.msg.Point', 'Point', (['self.ground_truth_pos[self.ground_truth_marker_counter][0]', 'self.ground_truth_pos[self.ground_truth_marker_counter][1]', 'self.ground_truth_pos[self.ground_truth_marker_counter][2]'], {}), '(self.ground_truth_pos[self.ground_truth_marker_counter][0], self.\n ground_truth_pos[self.ground_truth_marker_counter][1], self.\n ground_truth_pos[self.ground_truth_marker_counter][2])\n', (3221, 3411), False, 'from geometry_msgs.msg import Pose, Point, Vector3, 
Quaternion\n'), ((3604, 3634), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (3614, 3634), False, 'from geometry_msgs.msg import Pose, Point, Vector3, Quaternion\n'), ((4076, 4092), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (4090, 4092), False, 'import rospy\n')] |
import numpy as np
import scipy as sp
import logging
import doctest
import unittest
import os.path
import time
from pysnptools.pstreader import PstData, PstNpz, PstHdf5
from pysnptools.util import create_directory_if_necessary
from pysnptools.kernelreader.test import _fortesting_JustCheckExists
class TestLoader(unittest.TestCase):
    """Unit tests for PstData construction/indexing and PstNpz/PstHdf5 round-trips."""

    def test_big_npz(self):
        """Round-trip a large matrix through PstNpz in both C and F memory order."""
        logging.info("in test_big_npz")
        n = 1000
        pstdata = PstData(row=range(n-1),col=range(n+1),val=np.zeros([n-1,n+1]))
        output = "tempdir/pstreader/big.npz"
        create_directory_if_necessary(output)
        PstNpz.write(output,pstdata)
        pstnpz = PstNpz(output)
        pstdata1 = pstnpz[::2,::4].read()
        pstdata2 = pstnpz.read(order='A')
        assert pstdata2.val.flags['C_CONTIGUOUS']

        pstdata = PstData(row=range(n-1),col=range(n+1),val=np.zeros([n-1,n+1],order='F'))
        PstNpz.write(output,pstdata)
        pstnpz = PstNpz(output)
        pstdata2 = pstnpz.read(order='A')
        # BUGFIX: this line was a bare expression (no 'assert'), so the
        # F-contiguity of a round-tripped Fortran-order matrix was never checked.
        assert pstdata2.val.flags['F_CONTIGUOUS']
        print("done")

    def test_writes(self):
        """Write PstData with every combination of shapes, id styles and property
        styles using PstNpz and PstHdf5, then read back (whole and subset) and compare."""
        #===================================
        # Defining sub functions
        #===================================
        def _oned_int(c):
            return list(range(c))
        def _oned_str(c):
            return [str(i) for i in range(c)]
        def _twooned_int(c):
            return [[i] for i in range(c)]
        def _twooned_str(c):
            return [[str(i)] for i in range(c)]
        def _twotwod_int(c):
            return [[i,i] for i in range(c)]
        def _twotwod_str(c):
            return [[str(i),"hello"] for i in range(c)]
        def _none(c):
            return None
        def _zero(c):
            return np.empty([c,0])
        #===================================
        # Starting main function
        #===================================
        logging.info("starting 'test_writes'")
        np.random.seed(0)
        output_template = "tempdir/pstreader/writes.{0}.{1}"
        create_directory_if_necessary(output_template.format(0,"npz"))
        i = 0
        for row_count in [5,2,1,0]:
            for col_count in [4,2,1,0]:
                val = np.random.normal(.5,2,size=(row_count,col_count))
                for row_or_col_gen in [_oned_int,_oned_str,_twooned_int,_twooned_str,_twotwod_int,_twotwod_str]:
                    row = row_or_col_gen(row_count)
                    col = row_or_col_gen(col_count)
                    for prop_gen in [_oned_int,_oned_str,_twooned_int,_twooned_str,_twotwod_int,_twotwod_str,_none,_zero]:
                        row_prop = prop_gen(row_count)
                        col_prop = prop_gen(col_count)
                        pstdata = PstData(row,col,val,row_prop,col_prop,str(i))
                        for the_class,suffix in [(PstNpz,"npz"),(PstHdf5,"hdf5")]:
                            filename = output_template.format(i,suffix)
                            logging.info(filename)
                            i += 1
                            the_class.write(filename,pstdata)
                            # np.s_ replaces sp.s_: scipy's re-export of the
                            # NumPy namespace is deprecated/removed in modern SciPy.
                            for subsetter in [None, np.s_[::2,::3]]:
                                reader = the_class(filename)
                                _fortesting_JustCheckExists().input(reader)
                                subreader = reader if subsetter is None else reader[subsetter[0],subsetter[1]]
                                readdata = subreader.read(order='C')
                                expected = pstdata if subsetter is None else pstdata[subsetter[0],subsetter[1]].read()
                                assert np.array_equal(readdata.val,expected.val)
                                assert np.array_equal(readdata.row,expected.row)
                                assert np.array_equal(readdata.col,expected.col)
                                assert np.array_equal(readdata.row_property,expected.row_property)
                                assert np.array_equal(readdata.col_property,expected.col_property)
                            try:
                                os.remove(filename)
                            except OSError: # best-effort cleanup; was a bare 'except:' that hid real bugs
                                pass
        logging.info("done with 'test_writes'")

    def test_repr_test(self):
        """PstData with tuple column ids must be indexable by id and printable."""
        np.random.seed(0)
        row_property=np.array([[1.0,2,2.5],[3,4,4.5],[5,6,6.5]])
        col_property=np.array([[1.0,2,2.5,1],[3,4,4.5,3]])
        pstdata = PstData(row=np.array([[1.0,2],[3,4],[5,6]]),
                          col=np.array([("A","a"),("B","b")]),
                          val = np.random.normal(.5,2,size=(3,2)),
                          row_property=row_property,
                          col_property=col_property)
        assert pstdata.col_to_index([("B","b")])[0] == 1
        s = str(pstdata)

    def test_read(self):
        """Exercise indexing, property slicing, and every read() order/dtype combination."""
        np.random.seed(0)
        row_property=np.array([[1.0,2,2.5],[3,4,4.5],[5,6,6.5]])
        col_property=np.array([[1.0,2,2.5,1],[3,4,4.5,3]])
        pstdata = PstData(row=np.array([[1.0,2],[3,4],[5,6]]),
                          col=np.array([["A","a"],["B","b"]]),
                          val = np.random.normal(.5,2,size=(3,2)),
                          row_property=row_property,
                          col_property=col_property,
                          name="test_read")
        assert pstdata.row_to_index([np.array([3.0,4])])[0] == 1
        assert pstdata.col_to_index([np.array(["A","a"])])[0] == 0
        assert np.array_equal(pstdata[1:,:2].row_property,row_property[1:])
        assert np.array_equal(pstdata[1:,:2].col_property,col_property[:2])
        pstdata2 = pstdata[:2,:2].read()

        from pysnptools.kernelreader.test import _fortesting_JustCheckExists
        _fortesting_JustCheckExists().input(pstdata)
        _fortesting_JustCheckExists().input(pstdata2)

        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)
        pstdata3 = pstdata[[],:].read()
        assert pstdata3.val.shape[0] == 0 and pstdata3.val.shape[1]==2
        # Repeat the reads with a Fortran-order source array.
        pstdata.val = pstdata.val.copy(order='F')
        pstdata2 = pstdata[:2,:2].read()
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)
        pstdata2 = pstdata[:2,:2].read(order='F')
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)
        pstdata2 = pstdata[:2,:2].read(order='A')
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)
        pstdata2 = pstdata[:2,:2].read(force_python_only=True,dtype=None,order='C')
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)
        pstdata2 = pstdata[:2,:2].read(force_python_only=True,dtype='float32',order='C')
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2].astype(dtype='float32'), decimal=10)
        pstdata2 = pstdata[:2,:2].read(force_python_only=True,dtype='float32',order=None)
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2].astype(dtype='float32'), decimal=10)
        pstdata2 = pstdata[:2,:2].read(force_python_only=True,dtype=None,order='F')
        np.testing.assert_array_almost_equal(pstdata2.val, pstdata.val[:2,:2], decimal=10)

        pstdata4 = pstdata[::,::].read(force_python_only=True)
        np.testing.assert_array_almost_equal(pstdata4.val, pstdata.val, decimal=10)

        logging.info("done with test")

    def test_inputs(self):
        """1-D row/col ids with 1-D property arrays."""
        from pysnptools.pstreader import PstData
        np.random.seed(0)
        row_property=np.array([1.0,2,2.5])
        col_property=np.array([1,2])
        pstdata = PstData(row=np.array([1.0,3,6]),
                          col=np.array(["Aa","Bb"]),
                          val = np.random.normal(.5,2,size=(3,2)),
                          row_property=row_property,
                          col_property=col_property,
                          name="test_read")
        assert pstdata.row_to_index([3])[0] == 1
        assert pstdata.col_to_index(["Aa"])[0] == 0
        assert np.array_equal(pstdata[1:,:2].row_property,row_property[1:])
        assert np.array_equal(pstdata[1:,:2].col_property,col_property[:2])
        logging.info("done with test")

    def test_inputs2(self):
        """Properties default sensibly when given as None."""
        from pysnptools.pstreader import PstData
        np.random.seed(0)
        row_property=None
        col_property=None
        pstdata = PstData(row=np.array([1.0,3,6]),
                          col=np.array(["Aa","Bb"]),
                          val = np.random.normal(.5,2,size=(3,2)),
                          row_property=row_property,
                          col_property=col_property,
                          name="test_read")
        assert pstdata.row_to_index([3])[0] == 1
        assert pstdata.col_to_index(["Aa"])[0] == 0
        assert np.array_equal(pstdata[1:,:2].row_property,pstdata.row_property[1:])
        assert np.array_equal(pstdata[1:,:2].col_property,pstdata.col_property[:2])
        logging.info("done with test")

    def test_inputs3(self):
        """Zero-column PstData built from nested-list rows and an empty col array."""
        from pysnptools.pstreader import PstData
        np.random.seed(0)
        row_property=None
        col_property=None
        pstdata = PstData(row=[[1.0,2.0],[3,4],[6,7]],
                          col=np.array([]),
                          val = [[],[],[]],
                          row_property=row_property,
                          col_property=col_property,
                          name="test_read")
        assert pstdata.row_to_index([[3,4]])[0] == 1
        assert np.array_equal(pstdata[1:,:2].row_property,pstdata.row_property[1:])
        assert np.array_equal(pstdata[1:,:2].col_property,pstdata.col_property[:2])
        logging.info("done with test")

    def test_inputs4(self):
        """All-None inputs produce an empty (0 x 0) PstData."""
        from pysnptools.pstreader import PstData
        pstdata = PstData(row=None,
                          col=None,
                          val = None,
                          row_property=None,
                          col_property=None,
                          name="test_read")
        assert pstdata.row_count == 0 and pstdata.col_count == 0 and pstdata.val.shape[0] == 0 and pstdata.val.shape[1]==0 and len(pstdata.row_property)==0 and len(pstdata.col_property)==0
        logging.info("done with test")
# We do it this way instead of using doctest.DocTestSuite because doctest.DocTestSuite requires modules to be pickled, which python doesn't allow.
# We need tests to be pickleable so that they can be run on a cluster.
class TestDocStrings(unittest.TestCase):
    """Runs the doctests of each pstreader module as unittest cases."""

    def _run_module_doctests(self, module):
        # Doctests use paths relative to this file, so run from its directory
        # and always restore the original working directory afterwards.
        old_dir = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        try:
            result = doctest.testmod(module)
        finally:
            os.chdir(old_dir)
        assert result.failed == 0, "failed doc test: " + __file__

    def test_pstreader(self):
        import pysnptools.pstreader.pstreader
        self._run_module_doctests(pysnptools.pstreader.pstreader)

    def test_pstdata(self):
        import pysnptools.pstreader.pstdata
        self._run_module_doctests(pysnptools.pstreader.pstdata)

    # BUGFIX: the original class defined 'test_snpdata' THREE times, so the
    # pstdata and pstnpz doctests were silently shadowed and never ran.
    def test_pstnpz(self):
        import pysnptools.pstreader.pstnpz
        self._run_module_doctests(pysnptools.pstreader.pstnpz)

    def test_psthdf5(self):
        import pysnptools.pstreader.psthdf5
        self._run_module_doctests(pysnptools.pstreader.psthdf5)
def getTestSuite():
    """Build and return the composite suite of all test cases in this module."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case_class in (TestDocStrings, TestLoader):
        suite.addTests(loader.loadTestsFromTestCase(case_class))
    return suite
if __name__ == '__main__':
    # Configure logging first, then run the whole suite without stopping at
    # the first failing test.
    logging.basicConfig(level=logging.INFO)
    runner = unittest.TextTestRunner(failfast=False)
    runner.run(getTestSuite())
| [
"logging.basicConfig",
"unittest.TestSuite",
"numpy.random.normal",
"numpy.testing.assert_array_almost_equal",
"pysnptools.pstreader.PstNpz",
"pysnptools.pstreader.PstData",
"pysnptools.util.create_directory_if_necessary",
"pysnptools.pstreader.PstNpz.write",
"pysnptools.kernelreader.test._fortestin... | [((11941, 11963), 'unittest.TestSuite', 'unittest.TestSuite', (['[]'], {}), '([])\n', (11959, 11963), False, 'import unittest\n'), ((12184, 12223), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (12203, 12223), False, 'import logging\n'), ((12261, 12300), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'failfast': '(False)'}), '(failfast=False)\n', (12284, 12300), False, 'import unittest\n'), ((376, 407), 'logging.info', 'logging.info', (['"""in test_big_npz"""'], {}), "('in test_big_npz')\n", (388, 407), False, 'import logging\n'), ((559, 596), 'pysnptools.util.create_directory_if_necessary', 'create_directory_if_necessary', (['output'], {}), '(output)\n', (588, 596), False, 'from pysnptools.util import create_directory_if_necessary\n'), ((605, 634), 'pysnptools.pstreader.PstNpz.write', 'PstNpz.write', (['output', 'pstdata'], {}), '(output, pstdata)\n', (617, 634), False, 'from pysnptools.pstreader import PstData, PstNpz, PstHdf5\n'), ((651, 665), 'pysnptools.pstreader.PstNpz', 'PstNpz', (['output'], {}), '(output)\n', (657, 665), False, 'from pysnptools.pstreader import PstData, PstNpz, PstHdf5\n'), ((900, 929), 'pysnptools.pstreader.PstNpz.write', 'PstNpz.write', (['output', 'pstdata'], {}), '(output, pstdata)\n', (912, 929), False, 'from pysnptools.pstreader import PstData, PstNpz, PstHdf5\n'), ((946, 960), 'pysnptools.pstreader.PstNpz', 'PstNpz', (['output'], {}), '(output)\n', (952, 960), False, 'from pysnptools.pstreader import PstData, PstNpz, PstHdf5\n'), ((1899, 1937), 'logging.info', 'logging.info', (['"""starting \'test_writes\'"""'], {}), '("starting \'test_writes\'")\n', (1911, 1937), False, 'import logging\n'), ((1946, 1963), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1960, 1963), True, 'import numpy as np\n'), ((4203, 4242), 'logging.info', 'logging.info', (['"""done with \'test_writes\'"""'], {}), '("done with 
\'test_writes\'")\n', (4215, 4242), False, 'import logging\n'), ((4282, 4299), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4296, 4299), True, 'import numpy as np\n'), ((4321, 4372), 'numpy.array', 'np.array', (['[[1.0, 2, 2.5], [3, 4, 4.5], [5, 6, 6.5]]'], {}), '([[1.0, 2, 2.5], [3, 4, 4.5], [5, 6, 6.5]])\n', (4329, 4372), True, 'import numpy as np\n'), ((4386, 4430), 'numpy.array', 'np.array', (['[[1.0, 2, 2.5, 1], [3, 4, 4.5, 3]]'], {}), '([[1.0, 2, 2.5, 1], [3, 4, 4.5, 3]])\n', (4394, 4430), True, 'import numpy as np\n'), ((4839, 4856), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4853, 4856), True, 'import numpy as np\n'), ((4878, 4929), 'numpy.array', 'np.array', (['[[1.0, 2, 2.5], [3, 4, 4.5], [5, 6, 6.5]]'], {}), '([[1.0, 2, 2.5], [3, 4, 4.5], [5, 6, 6.5]])\n', (4886, 4929), True, 'import numpy as np\n'), ((4943, 4987), 'numpy.array', 'np.array', (['[[1.0, 2, 2.5, 1], [3, 4, 4.5, 3]]'], {}), '([[1.0, 2, 2.5, 1], [3, 4, 4.5, 3]])\n', (4951, 4987), True, 'import numpy as np\n'), ((5472, 5534), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].row_property', 'row_property[1:]'], {}), '(pstdata[1:, :2].row_property, row_property[1:])\n', (5486, 5534), True, 'import numpy as np\n'), ((5548, 5610), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].col_property', 'col_property[:2]'], {}), '(pstdata[1:, :2].col_property, col_property[:2])\n', (5562, 5610), True, 'import numpy as np\n'), ((5845, 5932), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (5881, 5932), True, 'import numpy as np\n'), ((6138, 6225), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (6174, 6225), True, 'import numpy as np\n'), ((6279, 
6366), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (6315, 6366), True, 'import numpy as np\n'), ((6420, 6507), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (6456, 6507), True, 'import numpy as np\n'), ((6595, 6682), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (6631, 6682), True, 'import numpy as np\n'), ((7179, 7266), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata2.val', 'pstdata.val[:2, :2]'], {'decimal': '(10)'}), '(pstdata2.val, pstdata.val[:2, :2],\n decimal=10)\n', (7215, 7266), True, 'import numpy as np\n'), ((7333, 7408), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['pstdata4.val', 'pstdata.val'], {'decimal': '(10)'}), '(pstdata4.val, pstdata.val, decimal=10)\n', (7369, 7408), True, 'import numpy as np\n'), ((7419, 7449), 'logging.info', 'logging.info', (['"""done with test"""'], {}), "('done with test')\n", (7431, 7449), False, 'import logging\n'), ((7536, 7553), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7550, 7553), True, 'import numpy as np\n'), ((7575, 7598), 'numpy.array', 'np.array', (['[1.0, 2, 2.5]'], {}), '([1.0, 2, 2.5])\n', (7583, 7598), True, 'import numpy as np\n'), ((7618, 7634), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (7626, 7634), True, 'import numpy as np\n'), ((8072, 8134), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].row_property', 'row_property[1:]'], {}), '(pstdata[1:, :2].row_property, row_property[1:])\n', (8086, 8134), True, 'import 
numpy as np\n'), ((8148, 8210), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].col_property', 'col_property[:2]'], {}), '(pstdata[1:, :2].col_property, col_property[:2])\n', (8162, 8210), True, 'import numpy as np\n'), ((8217, 8247), 'logging.info', 'logging.info', (['"""done with test"""'], {}), "('done with test')\n", (8229, 8247), False, 'import logging\n'), ((8335, 8352), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (8349, 8352), True, 'import numpy as np\n'), ((8843, 8913), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].row_property', 'pstdata.row_property[1:]'], {}), '(pstdata[1:, :2].row_property, pstdata.row_property[1:])\n', (8857, 8913), True, 'import numpy as np\n'), ((8927, 8997), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].col_property', 'pstdata.col_property[:2]'], {}), '(pstdata[1:, :2].col_property, pstdata.col_property[:2])\n', (8941, 8997), True, 'import numpy as np\n'), ((9004, 9034), 'logging.info', 'logging.info', (['"""done with test"""'], {}), "('done with test')\n", (9016, 9034), False, 'import logging\n'), ((9121, 9138), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (9135, 9138), True, 'import numpy as np\n'), ((9553, 9623), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].row_property', 'pstdata.row_property[1:]'], {}), '(pstdata[1:, :2].row_property, pstdata.row_property[1:])\n', (9567, 9623), True, 'import numpy as np\n'), ((9637, 9707), 'numpy.array_equal', 'np.array_equal', (['pstdata[1:, :2].col_property', 'pstdata.col_property[:2]'], {}), '(pstdata[1:, :2].col_property, pstdata.col_property[:2])\n', (9651, 9707), True, 'import numpy as np\n'), ((9714, 9744), 'logging.info', 'logging.info', (['"""done with test"""'], {}), "('done with test')\n", (9726, 9744), False, 'import logging\n'), ((9841, 9938), 'pysnptools.pstreader.PstData', 'PstData', ([], {'row': 'None', 'col': 'None', 'val': 'None', 'row_property': 'None', 'col_property': 'None', 'name': 
'"""test_read"""'}), "(row=None, col=None, val=None, row_property=None, col_property=None,\n name='test_read')\n", (9848, 9938), False, 'from pysnptools.pstreader import PstData\n'), ((10266, 10296), 'logging.info', 'logging.info', (['"""done with test"""'], {}), "('done with test')\n", (10278, 10296), False, 'import logging\n'), ((10752, 10799), 'doctest.testmod', 'doctest.testmod', (['pysnptools.pstreader.pstreader'], {}), '(pysnptools.pstreader.pstreader)\n', (10767, 10799), False, 'import doctest\n'), ((11074, 11119), 'doctest.testmod', 'doctest.testmod', (['pysnptools.pstreader.pstdata'], {}), '(pysnptools.pstreader.pstdata)\n', (11089, 11119), False, 'import doctest\n'), ((11393, 11437), 'doctest.testmod', 'doctest.testmod', (['pysnptools.pstreader.pstnpz'], {}), '(pysnptools.pstreader.pstnpz)\n', (11408, 11437), False, 'import doctest\n'), ((11712, 11757), 'doctest.testmod', 'doctest.testmod', (['pysnptools.pstreader.psthdf5'], {}), '(pysnptools.pstreader.psthdf5)\n', (11727, 11757), False, 'import doctest\n'), ((1750, 1766), 'numpy.empty', 'np.empty', (['[c, 0]'], {}), '([c, 0])\n', (1758, 1766), True, 'import numpy as np\n'), ((485, 509), 'numpy.zeros', 'np.zeros', (['[n - 1, n + 1]'], {}), '([n - 1, n + 1])\n', (493, 509), True, 'import numpy as np\n'), ((861, 896), 'numpy.zeros', 'np.zeros', (['[n - 1, n + 1]'], {'order': '"""F"""'}), "([n - 1, n + 1], order='F')\n", (869, 896), True, 'import numpy as np\n'), ((2208, 2261), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(2)'], {'size': '(row_count, col_count)'}), '(0.5, 2, size=(row_count, col_count))\n', (2224, 2261), True, 'import numpy as np\n'), ((4454, 4490), 'numpy.array', 'np.array', (['[[1.0, 2], [3, 4], [5, 6]]'], {}), '([[1.0, 2], [3, 4], [5, 6]])\n', (4462, 4490), True, 'import numpy as np\n'), ((4517, 4551), 'numpy.array', 'np.array', (["[('A', 'a'), ('B', 'b')]"], {}), "([('A', 'a'), ('B', 'b')])\n", (4525, 4551), True, 'import numpy as np\n'), ((4582, 4619), 'numpy.random.normal', 
'np.random.normal', (['(0.5)', '(2)'], {'size': '(3, 2)'}), '(0.5, 2, size=(3, 2))\n', (4598, 4619), True, 'import numpy as np\n'), ((5011, 5047), 'numpy.array', 'np.array', (['[[1.0, 2], [3, 4], [5, 6]]'], {}), '([[1.0, 2], [3, 4], [5, 6]])\n', (5019, 5047), True, 'import numpy as np\n'), ((5074, 5108), 'numpy.array', 'np.array', (["[['A', 'a'], ['B', 'b']]"], {}), "([['A', 'a'], ['B', 'b']])\n", (5082, 5108), True, 'import numpy as np\n'), ((5139, 5176), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(2)'], {'size': '(3, 2)'}), '(0.5, 2, size=(3, 2))\n', (5155, 5176), True, 'import numpy as np\n'), ((5737, 5766), 'pysnptools.kernelreader.test._fortesting_JustCheckExists', '_fortesting_JustCheckExists', ([], {}), '()\n', (5764, 5766), False, 'from pysnptools.kernelreader.test import _fortesting_JustCheckExists\n'), ((5790, 5819), 'pysnptools.kernelreader.test._fortesting_JustCheckExists', '_fortesting_JustCheckExists', ([], {}), '()\n', (5817, 5819), False, 'from pysnptools.kernelreader.test import _fortesting_JustCheckExists\n'), ((7664, 7685), 'numpy.array', 'np.array', (['[1.0, 3, 6]'], {}), '([1.0, 3, 6])\n', (7672, 7685), True, 'import numpy as np\n'), ((7715, 7737), 'numpy.array', 'np.array', (["['Aa', 'Bb']"], {}), "(['Aa', 'Bb'])\n", (7723, 7737), True, 'import numpy as np\n'), ((7770, 7807), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(2)'], {'size': '(3, 2)'}), '(0.5, 2, size=(3, 2))\n', (7786, 7807), True, 'import numpy as np\n'), ((8435, 8456), 'numpy.array', 'np.array', (['[1.0, 3, 6]'], {}), '([1.0, 3, 6])\n', (8443, 8456), True, 'import numpy as np\n'), ((8486, 8508), 'numpy.array', 'np.array', (["['Aa', 'Bb']"], {}), "(['Aa', 'Bb'])\n", (8494, 8508), True, 'import numpy as np\n'), ((8541, 8578), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(2)'], {'size': '(3, 2)'}), '(0.5, 2, size=(3, 2))\n', (8557, 8578), True, 'import numpy as np\n'), ((9276, 9288), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9284, 9288), 
True, 'import numpy as np\n'), ((11988, 12009), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (12007, 12009), False, 'import unittest\n'), ((12073, 12094), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (12092, 12094), False, 'import unittest\n'), ((5362, 5380), 'numpy.array', 'np.array', (['[3.0, 4]'], {}), '([3.0, 4])\n', (5370, 5380), True, 'import numpy as np\n'), ((5427, 5447), 'numpy.array', 'np.array', (["['A', 'a']"], {}), "(['A', 'a'])\n", (5435, 5447), True, 'import numpy as np\n'), ((2971, 2993), 'logging.info', 'logging.info', (['filename'], {}), '(filename)\n', (2983, 2993), False, 'import logging\n'), ((3635, 3677), 'numpy.array_equal', 'np.array_equal', (['readdata.val', 'expected.val'], {}), '(readdata.val, expected.val)\n', (3649, 3677), True, 'import numpy as np\n'), ((3716, 3758), 'numpy.array_equal', 'np.array_equal', (['readdata.row', 'expected.row'], {}), '(readdata.row, expected.row)\n', (3730, 3758), True, 'import numpy as np\n'), ((3797, 3839), 'numpy.array_equal', 'np.array_equal', (['readdata.col', 'expected.col'], {}), '(readdata.col, expected.col)\n', (3811, 3839), True, 'import numpy as np\n'), ((3878, 3938), 'numpy.array_equal', 'np.array_equal', (['readdata.row_property', 'expected.row_property'], {}), '(readdata.row_property, expected.row_property)\n', (3892, 3938), True, 'import numpy as np\n'), ((3977, 4037), 'numpy.array_equal', 'np.array_equal', (['readdata.col_property', 'expected.col_property'], {}), '(readdata.col_property, expected.col_property)\n', (3991, 4037), True, 'import numpy as np\n'), ((3253, 3282), 'pysnptools.kernelreader.test._fortesting_JustCheckExists', '_fortesting_JustCheckExists', ([], {}), '()\n', (3280, 3282), False, 'from pysnptools.kernelreader.test import _fortesting_JustCheckExists\n')] |
#! /usr/bin/env python
import rpy2.robjects as robjects
import numpy as np
import matplotlib.pyplot as plt
import sys
import csv
import time
import os
import math
def compute_rsquared(baseline, prediction, total_points):
    """Return the coefficient of determination (R^2).

    baseline     -- observed values (indexable, length >= total_points)
    prediction   -- predicted values, aligned with *baseline*
    total_points -- number of leading points to compare

    R^2 = 1 - SS_res / SS_tot, where SS_res is the residual sum of
    squares and SS_tot the total sum of squares about the mean.
    Raises ZeroDivisionError when all baseline values are equal
    (SS_tot == 0), matching the original behavior.
    """
    mean_y = sum(baseline[i] for i in range(total_points)) / float(total_points)
    ss_res = sum((baseline[i] - prediction[i]) ** 2 for i in range(total_points))
    ss_tot = sum((baseline[i] - mean_y) ** 2 for i in range(total_points))
    return 1.0 - ss_res / ss_tot
def compute_arsquared(baseline, prediction, total_points):
    """Return the adjusted R^2 for a model with a single predictor (p = 1).

    baseline     -- observed values (indexable, length >= total_points)
    prediction   -- predicted values, aligned with *baseline*
    total_points -- number of leading points to compare

    Adjusted = 1 - (1 - R^2) * (n - 1) / (n - p - 1) with n = total_points.
    Raises ZeroDivisionError when SS_tot == 0 or total_points <= 2,
    matching the original behavior.
    """
    n = total_points
    mean_y = sum(baseline[i] for i in range(n)) / float(n)
    ss_res = sum((baseline[i] - prediction[i]) ** 2 for i in range(n))
    ss_tot = sum((baseline[i] - mean_y) ** 2 for i in range(n))
    rsquared = 1.0 - ss_res / ss_tot
    p = 1  # number of predictors; fixed at 1 as in the original
    return 1.0 - (1.0 - rsquared) * float(n - 1) / float(n - p - 1)
def run_grid_search(dataset_name, size, X_train, Y_train, X_test, Y_test):
    """Grid-search R's smooth.spline hyperparameters (nknots, spar) via rpy2.

    For every (k, lambda1) pair, fit smooth.spline on the training data and
    record fit time, MSE, R^2 and adjusted R^2 on both the training ("in")
    and held-out ("out") sets.  One CSV row per pair is appended to
    grid-search.db/<dataset_name>.log (append mode, so re-runs add a
    duplicate header line).

    dataset_name -- label used for the log-file name and progress output
    size         -- training-set size; knot counts sweep range(size/50, size, size/50)
    X_train, Y_train -- training points (1-D sequences of floats)
    X_test, Y_test   -- held-out points (1-D sequences of floats)
    """
    print ("dataset_name")
    print (dataset_name)
    # In-memory history of every metric, only dumped via print() at the end.
    stats = {
        "regression_time":[],
        "in_test_time":[],
        "out_test_time":[],
        "in_mse":[],
        "in_r2":[],
        "in_ar2":[],
        "out_mse":[],
        "out_r2":[],
        "out_ar2":[]
    }
    os.system("mkdir -p grid-search.db")
    with open("grid-search.db/"+dataset_name+".log", "a") as f_out:
        f_out.write("NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n")
        # Sweep 49 knot counts: size/50, 2*size/50, ..., up to (but excluding) size.
        k_unit = int(size/50)
        for k in range(k_unit, size, k_unit):
            for lambda1 in [0,0.1,0.01,10**-3,10**-4,10**-5,10**-6,10**-7,10**-8,10**-9,10**-10,1]:
                print ("Grid-search Dataset: "+str(dataset_name)+" K: "+str(k)+" L:"+str(lambda1))
                f_out.write(str(k)+","+str(lambda1)+",")
                # Marshal the Python lists into R vectors for smooth.spline.
                r_y = robjects.FloatVector(Y_train)
                r_x = robjects.FloatVector(X_train)
                try:
                    t1 = time.time_ns()
                    r_smooth_spline = robjects.r['smooth.spline']
                    spline1 = r_smooth_spline(x=r_x, y=r_y, spar=lambda1, nknots=k)
                    t2 = time.time_ns()
                    r_time = (t2-t1)/1e9  # nanoseconds -> seconds
                    stats["regression_time"].append(r_time)
                    f_out.write(str(r_time)+",")
                except:
                    # The R fit can fail for some (nknots, spar) combinations;
                    # record "-" and truncate this CSV row, then move on.
                    stats["regression_time"].append("-")
                    f_out.write("\n")
                    continue
                # In-sample evaluation: predict on the training x's and time it.
                t1 = time.time_ns()
                Y_train_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_train)).rx2('y'))
                in_mse = np.mean((np.array(Y_train)-np.array(Y_train_spline))**2)
                in_R2 = compute_rsquared(Y_train, Y_train_spline, len(Y_train))
                in_AR2 = compute_arsquared(Y_train, Y_train_spline, len(Y_train))
                t2 = time.time_ns()
                in_test_time = (t2-t1)/1e9
                stats["in_test_time"].append(in_test_time)
                stats["in_mse"].append(in_mse)
                stats["in_r2"].append(in_R2)
                stats["in_ar2"].append(in_AR2)
                # Out-of-sample evaluation on the held-out test points.
                t1 = time.time_ns()
                Y_test_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_test)).rx2('y'))
                out_mse = np.mean((np.array(Y_test)-np.array(Y_test_spline))**2)
                out_R2 = compute_rsquared(Y_test, Y_test_spline, len(Y_test))
                out_AR2 = compute_arsquared(Y_test, Y_test_spline, len(Y_test))
                t2 = time.time_ns()
                out_test_time = (t2-t1)/1e9
                stats["out_test_time"].append(out_test_time)
                stats["out_mse"].append(out_mse)
                stats["out_r2"].append(out_R2)
                stats["out_ar2"].append(out_AR2)
                f_out.write(str(in_test_time)+","+str(in_mse)+","+str(in_R2)+","+str(in_AR2)+","+str(out_test_time)+","+str(out_mse)+","+str(out_R2)+","+str(out_AR2)+"\n")
    # Dump the accumulated metrics for quick inspection.
    print (stats)
    return
def run_synthetic_test():
    """Run the spline grid search over every synthetic gptune-demo dataset.

    Dataset files live under datagen/ and are named
    gptune-demo-<t>-<size>-<v>-{train,test}; each line is "x,y".
    """
    def _read_xy(path):
        # Parse one comma-separated "x,y" pair per line into two float lists.
        xs, ys = [], []
        with open(path, "r") as f_in:
            for dataline in f_in.readlines():
                data = dataline.split(",")
                xs.append(float(data[0]))
                ys.append(float(data[1]))
        return xs, ys

    for t in [0.5, 1.0, 2.0, 3.0, 4.0, 5.0]:
        for size in [10000, 100000]:
            for v in [0.01, 0.05, 0.1]:
                dataset = "gptune-demo-" + str(t) + "-" + str(size) + "-" + str(v)
                X_train, Y_train = _read_xy("datagen/" + dataset + "-train")
                X_test, Y_test = _read_xy("datagen/" + dataset + "-test")
                run_grid_search(dataset, size, X_train, Y_train, X_test, Y_test)
    return
def run_grid_search_winequality(dataset_name, size, X_train, Y_train, X_test, Y_test):
    """Grid-search smooth.spline hyperparameters for the wine-quality data.

    Same procedure as run_grid_search, but with a small fixed knot sweep
    (4..13) suited to this dataset instead of one derived from *size*.
    One CSV row per (k, lambda1) pair is appended to
    grid-search.db/<dataset_name>.log (append mode, so re-runs add a
    duplicate header line).

    dataset_name -- label used for the log-file name and progress output
    size         -- unused here beyond the shared signature (knot sweep is fixed)
    X_train, Y_train -- training points (1-D sequences of floats)
    X_test, Y_test   -- held-out points (1-D sequences of floats)
    """
    print ("dataset_name")
    print (dataset_name)
    # In-memory history of every metric, only dumped via print() at the end.
    stats = {
        "regression_time":[],
        "in_test_time":[],
        "out_test_time":[],
        "in_mse":[],
        "in_r2":[],
        "in_ar2":[],
        "out_mse":[],
        "out_r2":[],
        "out_ar2":[]
    }
    os.system("mkdir -p grid-search.db")
    with open("grid-search.db/"+dataset_name+".log", "a") as f_out:
        f_out.write("NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n")
        #k_unit = int(size/50)
        #for k in range(k_unit, size, k_unit):
        for k in [4,5,6,7,8,9,10,11,12,13]:
            for lambda1 in [0,0.1,0.01,10**-3,10**-4,10**-5,10**-6,10**-7,10**-8,10**-9,10**-10,1]:
                print ("Grid-search Dataset: "+str(dataset_name)+" K: "+str(k)+" L:"+str(lambda1))
                f_out.write(str(k)+","+str(lambda1)+",")
                # Marshal the Python lists into R vectors for smooth.spline.
                r_y = robjects.FloatVector(Y_train)
                r_x = robjects.FloatVector(X_train)
                try:
                    t1 = time.time_ns()
                    r_smooth_spline = robjects.r['smooth.spline']
                    spline1 = r_smooth_spline(x=r_x, y=r_y, spar=lambda1, nknots=k)
                    t2 = time.time_ns()
                    r_time = (t2-t1)/1e9  # nanoseconds -> seconds
                    stats["regression_time"].append(r_time)
                    f_out.write(str(r_time)+",")
                except:
                    # The R fit can fail for some (nknots, spar) combinations;
                    # record "-" and truncate this CSV row, then move on.
                    stats["regression_time"].append("-")
                    f_out.write("\n")
                    continue
                # In-sample evaluation: predict on the training x's and time it.
                t1 = time.time_ns()
                Y_train_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_train)).rx2('y'))
                in_mse = np.mean((np.array(Y_train)-np.array(Y_train_spline))**2)
                in_R2 = compute_rsquared(Y_train, Y_train_spline, len(Y_train))
                in_AR2 = compute_arsquared(Y_train, Y_train_spline, len(Y_train))
                t2 = time.time_ns()
                in_test_time = (t2-t1)/1e9
                stats["in_test_time"].append(in_test_time)
                stats["in_mse"].append(in_mse)
                stats["in_r2"].append(in_R2)
                stats["in_ar2"].append(in_AR2)
                # Out-of-sample evaluation on the held-out test points.
                t1 = time.time_ns()
                Y_test_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_test)).rx2('y'))
                out_mse = np.mean((np.array(Y_test)-np.array(Y_test_spline))**2)
                out_R2 = compute_rsquared(Y_test, Y_test_spline, len(Y_test))
                out_AR2 = compute_arsquared(Y_test, Y_test_spline, len(Y_test))
                t2 = time.time_ns()
                out_test_time = (t2-t1)/1e9
                stats["out_test_time"].append(out_test_time)
                stats["out_mse"].append(out_mse)
                stats["out_r2"].append(out_R2)
                stats["out_ar2"].append(out_AR2)
                f_out.write(str(in_test_time)+","+str(in_mse)+","+str(in_R2)+","+str(in_AR2)+","+str(out_test_time)+","+str(out_mse)+","+str(out_R2)+","+str(out_AR2)+"\n")
    # Dump the accumulated metrics for quick inspection.
    print (stats)
    return
def run_winequality_test():
    """Grid-search one spline per wine attribute against the quality score.

    Expects space-separated files with a one-line header under
    winequality/: wine_{train,test}.txt hold the attribute columns and
    score_{train,test}.txt hold the target score in column 0.
    """
    attributes = ["fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates", "alcohol"]

    def _read_column(path, col):
        # Skip the header line, then pull column *col* of every row as a float.
        values = []
        with open(path, "r") as f_in:
            f_in.readline()
            for dataline in f_in.readlines():
                data = dataline.split(" ")
                values.append(float(data[col]))
        return values

    for attribute in attributes:
        dataset = "winequality-" + attribute
        idx = attributes.index(attribute)
        X_train = _read_column("winequality/wine_train.txt", idx)
        Y_train = _read_column("winequality/score_train.txt", 0)
        X_test = _read_column("winequality/wine_test.txt", idx)
        Y_test = _read_column("winequality/score_test.txt", 0)
        run_grid_search_winequality(dataset, len(X_test), X_train, Y_train, X_test, Y_test)
    return
def run_grid_search_household(dataset_name, size, X_train, Y_train, X_test, Y_test):
    """Grid-search smooth.spline hyperparameters for the household data.

    Same procedure as run_grid_search, but sweeps knots 5..100 (step 5)
    and a uniform 0..1 lambda grid (step 0.05) instead of a size-derived
    sweep.  One CSV row per (k, lambda1) pair is appended to
    grid-search.db/<dataset_name>.log (append mode, so re-runs add a
    duplicate header line).

    dataset_name -- label used for the log-file name and progress output
    size         -- unused here beyond the shared signature (knot sweep is fixed)
    X_train, Y_train -- training points (1-D sequences of floats)
    X_test, Y_test   -- held-out points (1-D sequences of floats)
    """
    print ("dataset_name")
    print (dataset_name)
    # In-memory history of every metric, only dumped via print() at the end.
    stats = {
        "regression_time":[],
        "in_test_time":[],
        "out_test_time":[],
        "in_mse":[],
        "in_r2":[],
        "in_ar2":[],
        "out_mse":[],
        "out_r2":[],
        "out_ar2":[]
    }
    os.system("mkdir -p grid-search.db")
    with open("grid-search.db/"+dataset_name+".log", "a") as f_out:
        f_out.write("NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n")
        #k_unit = int(size/50)
        #for k in range(k_unit, size, k_unit):
        #for k in [4,5,6,7,8,9,10,11,12,13]:
        for k in range(5,105,5):
            for lambda1 in [0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1.0]:
                print ("Grid-search Dataset: "+str(dataset_name)+" K: "+str(k)+" L:"+str(lambda1))
                f_out.write(str(k)+","+str(lambda1)+",")
                # Marshal the Python lists into R vectors for smooth.spline.
                r_y = robjects.FloatVector(Y_train)
                r_x = robjects.FloatVector(X_train)
                try:
                    t1 = time.time_ns()
                    r_smooth_spline = robjects.r['smooth.spline']
                    spline1 = r_smooth_spline(x=r_x, y=r_y, spar=lambda1, nknots=k)
                    t2 = time.time_ns()
                    r_time = (t2-t1)/1e9  # nanoseconds -> seconds
                    stats["regression_time"].append(r_time)
                    f_out.write(str(r_time)+",")
                except:
                    # The R fit can fail for some (nknots, spar) combinations;
                    # record "-" and truncate this CSV row, then move on.
                    stats["regression_time"].append("-")
                    f_out.write("\n")
                    continue
                # In-sample evaluation: predict on the training x's and time it.
                t1 = time.time_ns()
                Y_train_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_train)).rx2('y'))
                in_mse = np.mean((np.array(Y_train)-np.array(Y_train_spline))**2)
                in_R2 = compute_rsquared(Y_train, Y_train_spline, len(Y_train))
                in_AR2 = compute_arsquared(Y_train, Y_train_spline, len(Y_train))
                t2 = time.time_ns()
                in_test_time = (t2-t1)/1e9
                stats["in_test_time"].append(in_test_time)
                stats["in_mse"].append(in_mse)
                stats["in_r2"].append(in_R2)
                stats["in_ar2"].append(in_AR2)
                # Out-of-sample evaluation on the held-out test points.
                t1 = time.time_ns()
                Y_test_spline = np.array(robjects.r['predict'](spline1,robjects.FloatVector(X_test)).rx2('y'))
                out_mse = np.mean((np.array(Y_test)-np.array(Y_test_spline))**2)
                out_R2 = compute_rsquared(Y_test, Y_test_spline, len(Y_test))
                out_AR2 = compute_arsquared(Y_test, Y_test_spline, len(Y_test))
                t2 = time.time_ns()
                out_test_time = (t2-t1)/1e9
                stats["out_test_time"].append(out_test_time)
                stats["out_mse"].append(out_mse)
                stats["out_r2"].append(out_R2)
                stats["out_ar2"].append(out_AR2)
                f_out.write(str(in_test_time)+","+str(in_mse)+","+str(in_R2)+","+str(in_AR2)+","+str(out_test_time)+","+str(out_mse)+","+str(out_R2)+","+str(out_AR2)+"\n")
    # Dump the accumulated metrics for quick inspection.
    print (stats)
    return
def run_household_test():
    """Grid-search splines on the UCI household power consumption data.

    Trains on the first 1,000,000 data rows and tests on the next
    100,000.  x = minutes since the dataset's first timestamp
    (2006-12-16 17:24:00), y = Sub_metering_3 plus a small epsilon so no
    value is exactly zero.  Malformed rows (e.g. '?' placeholders) are
    counted and skipped.

    BUGFIX: the test loop previously parsed ``traindata`` — the leftover
    loop variable from the training pass — instead of ``testdata``, so
    the test set was 100,000 copies of the last training row.
    """
    from datetime import datetime

    attribute = "Time"
    dataset = "household-" + attribute
    # Reference epoch: the first measurement in the dataset.
    start_dt = datetime(2006, 12, 16, 17, 24, 0)
    start_time_val = int(round(start_dt.timestamp()))

    def _parse(line):
        # Row format: "d/m/yyyy;hh:mm:ss;...;Sub_metering_3".
        # Returns (minutes since start, Sub_metering_3 + eps); raises on bad rows.
        data = line.split(";")
        date_split = data[0].split("/")
        time_split = data[1].split(":")
        dt = datetime(int(date_split[2]), int(date_split[1]), int(date_split[0]),
                      int(time_split[0]), int(time_split[1]), int(time_split[2]))
        time_val = (int(round(dt.timestamp())) - start_time_val) / 60
        return time_val, float(data[-1]) + 0.00001

    with open("household/household_power_consumption.txt", "r") as f_in:
        f_in.readline()  # skip the header line
        datalines = f_in.readlines()

    X_train = []
    Y_train = []
    wrong_data_cnt = 0
    for traindata in datalines[0:1000000]:
        try:
            time_val, y = _parse(traindata)
            X_train.append(time_val)
            Y_train.append(y)
        except (ValueError, IndexError):
            # Missing values ('?') or truncated rows fail to parse; skip them.
            wrong_data_cnt += 1
    print ("wrong_data_cnt (train): ", wrong_data_cnt)

    X_test = []
    Y_test = []
    wrong_data_cnt = 0
    for testdata in datalines[1000000:1100000]:
        try:
            time_val, y = _parse(testdata)
            X_test.append(time_val)
            Y_test.append(y)
        except (ValueError, IndexError):
            wrong_data_cnt += 1
    print ("wrong_data_cnt (test): ", wrong_data_cnt)

    run_grid_search_household(dataset, len(X_test), X_train, Y_train, X_test, Y_test)
    return
def main():
    """Entry point: run the household benchmark.

    The synthetic and wine-quality sweeps are kept below, disabled, so
    they can be re-enabled by uncommenting a single line.
    """
    # run_synthetic_test()
    # run_winequality_test()
    run_household_test()
    return
# Run the benchmark only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| [
"datetime.datetime",
"time.split",
"math.pow",
"rpy2.robjects.FloatVector",
"time.time_ns",
"numpy.array",
"os.system"
] | [((1557, 1593), 'os.system', 'os.system', (['"""mkdir -p grid-search.db"""'], {}), "('mkdir -p grid-search.db')\n", (1566, 1593), False, 'import os\n'), ((5797, 5833), 'os.system', 'os.system', (['"""mkdir -p grid-search.db"""'], {}), "('mkdir -p grid-search.db')\n", (5806, 5833), False, 'import os\n'), ((10416, 10452), 'os.system', 'os.system', (['"""mkdir -p grid-search.db"""'], {}), "('mkdir -p grid-search.db')\n", (10425, 10452), False, 'import os\n'), ((425, 465), 'math.pow', 'math.pow', (['(baseline[i] - prediction[i])', '(2)'], {}), '(baseline[i] - prediction[i], 2)\n', (433, 465), False, 'import math\n'), ((538, 571), 'math.pow', 'math.pow', (['(baseline[i] - mean_y)', '(2)'], {}), '(baseline[i] - mean_y, 2)\n', (546, 571), False, 'import math\n'), ((893, 933), 'math.pow', 'math.pow', (['(baseline[i] - prediction[i])', '(2)'], {}), '(baseline[i] - prediction[i], 2)\n', (901, 933), False, 'import math\n'), ((1006, 1039), 'math.pow', 'math.pow', (['(baseline[i] - mean_y)', '(2)'], {}), '(baseline[i] - mean_y, 2)\n', (1014, 1039), False, 'import math\n'), ((13772, 13805), 'datetime.datetime', 'datetime', (['(2006)', '(12)', '(16)', '(17)', '(24)', '(0)'], {}), '(2006, 12, 16, 17, 24, 0)\n', (13780, 13805), False, 'from datetime import datetime\n'), ((14061, 14076), 'time.split', 'time.split', (['""":"""'], {}), "(':')\n", (14071, 14076), False, 'import time\n'), ((14803, 14818), 'time.split', 'time.split', (['""":"""'], {}), "(':')\n", (14813, 14818), False, 'import time\n'), ((2133, 2162), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['Y_train'], {}), '(Y_train)\n', (2153, 2162), True, 'import rpy2.robjects as robjects\n'), ((2185, 2214), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (2205, 2214), True, 'import rpy2.robjects as robjects\n'), ((2788, 2802), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (2800, 2802), False, 'import time\n'), ((3181, 3195), 'time.time_ns', 'time.time_ns', ([], {}), 
'()\n', (3193, 3195), False, 'import time\n'), ((3460, 3474), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (3472, 3474), False, 'import time\n'), ((3846, 3860), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (3858, 3860), False, 'import time\n'), ((6419, 6448), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['Y_train'], {}), '(Y_train)\n', (6439, 6448), True, 'import rpy2.robjects as robjects\n'), ((6471, 6500), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (6491, 6500), True, 'import rpy2.robjects as robjects\n'), ((7074, 7088), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (7086, 7088), False, 'import time\n'), ((7467, 7481), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (7479, 7481), False, 'import time\n'), ((7746, 7760), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (7758, 7760), False, 'import time\n'), ((8132, 8146), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (8144, 8146), False, 'import time\n'), ((11094, 11123), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['Y_train'], {}), '(Y_train)\n', (11114, 11123), True, 'import rpy2.robjects as robjects\n'), ((11146, 11175), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (11166, 11175), True, 'import rpy2.robjects as robjects\n'), ((11749, 11763), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (11761, 11763), False, 'import time\n'), ((12142, 12156), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (12154, 12156), False, 'import time\n'), ((12421, 12435), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (12433, 12435), False, 'import time\n'), ((12807, 12821), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (12819, 12821), False, 'import time\n'), ((2262, 2276), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (2274, 2276), False, 'import time\n'), ((2452, 2466), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (2464, 2466), False, 'import time\n'), ((6548, 6562), 'time.time_ns', 
'time.time_ns', ([], {}), '()\n', (6560, 6562), False, 'import time\n'), ((6738, 6752), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (6750, 6752), False, 'import time\n'), ((11223, 11237), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (11235, 11237), False, 'import time\n'), ((11413, 11427), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (11425, 11427), False, 'import time\n'), ((2950, 2967), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (2958, 2967), True, 'import numpy as np\n'), ((2968, 2992), 'numpy.array', 'np.array', (['Y_train_spline'], {}), '(Y_train_spline)\n', (2976, 2992), True, 'import numpy as np\n'), ((3621, 3637), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (3629, 3637), True, 'import numpy as np\n'), ((3638, 3661), 'numpy.array', 'np.array', (['Y_test_spline'], {}), '(Y_test_spline)\n', (3646, 3661), True, 'import numpy as np\n'), ((7236, 7253), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (7244, 7253), True, 'import numpy as np\n'), ((7254, 7278), 'numpy.array', 'np.array', (['Y_train_spline'], {}), '(Y_train_spline)\n', (7262, 7278), True, 'import numpy as np\n'), ((7907, 7923), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (7915, 7923), True, 'import numpy as np\n'), ((7924, 7947), 'numpy.array', 'np.array', (['Y_test_spline'], {}), '(Y_test_spline)\n', (7932, 7947), True, 'import numpy as np\n'), ((11911, 11928), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (11919, 11928), True, 'import numpy as np\n'), ((11929, 11953), 'numpy.array', 'np.array', (['Y_train_spline'], {}), '(Y_train_spline)\n', (11937, 11953), True, 'import numpy as np\n'), ((12582, 12598), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (12590, 12598), True, 'import numpy as np\n'), ((12599, 12622), 'numpy.array', 'np.array', (['Y_test_spline'], {}), '(Y_test_spline)\n', (12607, 12622), True, 'import numpy as np\n'), ((2875, 2904), 'rpy2.robjects.FloatVector', 
'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (2895, 2904), True, 'import rpy2.robjects as robjects\n'), ((3546, 3574), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_test'], {}), '(X_test)\n', (3566, 3574), True, 'import rpy2.robjects as robjects\n'), ((7161, 7190), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (7181, 7190), True, 'import rpy2.robjects as robjects\n'), ((7832, 7860), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_test'], {}), '(X_test)\n', (7852, 7860), True, 'import rpy2.robjects as robjects\n'), ((11836, 11865), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_train'], {}), '(X_train)\n', (11856, 11865), True, 'import rpy2.robjects as robjects\n'), ((12507, 12535), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_test'], {}), '(X_test)\n', (12527, 12535), True, 'import rpy2.robjects as robjects\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as layers
from tensorflow.keras.layers import Layer, Conv1D, Conv2D, MaxPooling2D, Dense, Flatten, Reshape, UpSampling2D, Concatenate, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.callbacks import EarlyStopping
import deeptrack as dt # Used to construct dataset of particle movement
from itertools import islice
import tensorboard
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
# Global plotting setup: drop any stale figures, apply a clean theme, and
# switch on LaTeX text rendering with thin lines at print-quality DPI.
plt.close('all')
plt.style.use('seaborn-deep')
_rc_overrides = {
    'text.usetex': True,
    'pgf.rcfonts': False,
    'lines.linewidth': 1,
    'figure.dpi': 300,
}
mpl.rcParams.update(_rc_overrides)
from models import ConvolutionalAutoencoder
from callbacks import LogImageCallback
from data import create_autoencoder_generator
import json
# Training configuration: frame resolution, batch size, the latent-code
# sizes to sweep over, and the encoder/decoder depth.
image_size = 64
batch_size = 128
code_dims = [8, 16, 32, 64, 128]
depth = 4

# Train one convolutional autoencoder per latent-code size.
for code_dim in code_dims:
    frame_dataset = create_autoencoder_generator(image_size, batch_size)
    autoencoder = ConvolutionalAutoencoder(image_size, code_dim, depth)
    autoencoder.compile(optimizer='adam', loss='mse')
    # Tag the log directory with the hyper-parameters and a timestamp so
    # repeated runs do not overwrite each other in TensorBoard.
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = f'logs/auto_{autoencoder.code_dim}_depth_{autoencoder.depth}_{timestamp}'
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        update_freq='batch',
    )
    # Grab the first frame of five batches as fixed references for image logging.
    sample_frames = [batch[0][0] for _, batch in zip(range(5), frame_dataset)]
    reference_images = tf.convert_to_tensor(np.array(sample_frames))
    image_callback = LogImageCallback(log_dir, reference_images)
    early_stop = EarlyStopping(monitor='loss',
                               mode='min',
                               patience=10,
                               restore_best_weights=True)
    auto_history = autoencoder.fit(
        frame_dataset,
        epochs=20,
        steps_per_epoch=50,
        shuffle=True,
        callbacks=[tensorboard_callback, image_callback, early_stop],
    )
    # Persist the trained model, its weights, and the training history.
    autoencoder.save(f'trained_models/autoencoder_dim_{code_dim}/model')
    autoencoder.save_weights(f'trained_models/autoencoder_dim_{code_dim}/weights')
    with open(f'trained_models/autoencoder_dim_{code_dim}/history.json', "w") as f:
        f.write(json.dumps(auto_history.history))

# Visual check for the last trained model: originals on the top row,
# reconstructions on the bottom row.
fig, axes = plt.subplots(2, 5, figsize=(10, 5), dpi=200, sharex=True, sharey=True)
for ax_top, ax_bottom in axes.T:
    original = next(frame_dataset)[0][0]
    reconstructed = autoencoder(np.expand_dims(original, 0))
    ax_top.imshow(original, cmap='gray')
    ax_top.set_title('Original')
    ax_bottom.imshow(reconstructed[0], cmap='gray')
    ax_bottom.set_title('Reconstruction')
fig.tight_layout()
# Report the latent representation shape of the final sample.
code = autoencoder.encoder(np.expand_dims(original, 0))
print(code.shape)
| [
"tensorflow.keras.callbacks.TensorBoard",
"matplotlib.rcParams.update",
"json.dumps",
"callbacks.LogImageCallback",
"matplotlib.pyplot.style.use",
"models.ConvolutionalAutoencoder",
"matplotlib.pyplot.close",
"tensorflow.keras.callbacks.EarlyStopping",
"datetime.datetime.now",
"numpy.expand_dims",... | [((590, 606), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (599, 606), True, 'import matplotlib.pyplot as plt\n'), ((607, 636), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-deep"""'], {}), "('seaborn-deep')\n", (620, 636), True, 'import matplotlib.pyplot as plt\n'), ((637, 746), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'text.usetex': True, 'pgf.rcfonts': False, 'lines.linewidth': 1,\n 'figure.dpi': 300}"], {}), "({'text.usetex': True, 'pgf.rcfonts': False,\n 'lines.linewidth': 1, 'figure.dpi': 300})\n", (656, 746), True, 'import matplotlib as mpl\n'), ((2400, 2470), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'figsize': '(10, 5)', 'dpi': '(200)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 5, figsize=(10, 5), dpi=200, sharex=True, sharey=True)\n', (2412, 2470), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1077), 'data.create_autoencoder_generator', 'create_autoencoder_generator', (['image_size', 'batch_size'], {}), '(image_size, batch_size)\n', (1053, 1077), False, 'from data import create_autoencoder_generator\n'), ((1096, 1149), 'models.ConvolutionalAutoencoder', 'ConvolutionalAutoencoder', (['image_size', 'code_dim', 'depth'], {}), '(image_size, code_dim, depth)\n', (1120, 1149), False, 'from models import ConvolutionalAutoencoder\n'), ((1360, 1450), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)', 'update_freq': '"""batch"""'}), "(log_dir=log_dir, histogram_freq=1,\n update_freq='batch')\n", (1390, 1450), True, 'import tensorflow as tf\n'), ((1606, 1649), 'callbacks.LogImageCallback', 'LogImageCallback', (['log_dir', 'reference_images'], {}), '(log_dir, reference_images)\n', (1622, 1649), False, 'from callbacks import LogImageCallback\n'), ((2257, 2281), 'json.dumps', 'json.dumps', (['history_dict'], {}), '(history_dict)\n', (2267, 2281), False, 'import json\n'), 
((2793, 2820), 'numpy.expand_dims', 'np.expand_dims', (['original', '(0)'], {}), '(original, 0)\n', (2807, 2820), True, 'import numpy as np\n'), ((2566, 2593), 'numpy.expand_dims', 'np.expand_dims', (['original', '(0)'], {}), '(original, 0)\n', (2580, 2593), True, 'import numpy as np\n'), ((1873, 1959), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'mode': '"""min"""', 'patience': '(10)', 'restore_best_weights': '(True)'}), "(monitor='loss', mode='min', patience=10, restore_best_weights\n =True)\n", (1886, 1959), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((1281, 1304), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1302, 1304), False, 'import datetime\n')] |
"""
<NAME>
2014 August 21
Various utilities for calculating angular separations.
"""
import matplotlib.pyplot as plt
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
def rmSingles(fluxcomponent, targetstring='target'):
    """
    Filter out targets in fluxcomponent that have only one ALMA source.

    Rows whose target identifier occurs exactly once in the catalog are
    dropped; rows belonging to multi-component targets are kept in their
    original order.

    Parameters
    ----------
    fluxcomponent : table-like (e.g. astropy Table or numpy structured array)
        Catalog with one row per source component; must support column
        access by name and boolean-array row indexing.
    targetstring : str
        Name of the column holding the target identifier.

    Returns
    -------
    Same type as `fluxcomponent`, restricted to multi-component targets.
    """
    targets = fluxcomponent[targetstring]
    # Count each target once with numpy.unique instead of re-scanning the
    # whole column for every row (the previous loop was O(n^2)).
    uniq, counts = numpy.unique(targets, return_counts=True)
    multiples = set(uniq[counts > 1])
    goodflag = numpy.array([t in multiples for t in targets], dtype=bool)
    return fluxcomponent[goodflag]
def setThresh(fluxcomponent, thresh, fluxstring='f880'):
    """
    Keep only rows of `fluxcomponent` whose flux exceeds `thresh`.

    When `fluxstring` is 'peakflux' the threshold is divided by 1e3 first,
    i.e. it is interpreted in units a factor of 1000 larger than the column.
    """
    cutoff = thresh / 1e3 if fluxstring == 'peakflux' else thresh
    bright = fluxcomponent[fluxstring] > cutoff
    return fluxcomponent[bright]
def getSeparation(fluxcomponent, rastring='offra', decstring='offdec',
        targetstring='target', fluxstring='f880', degrees=False):
    """
    Compute each component's angular offset from its target's mean position.

    For every row, all rows sharing the same target are gathered and two
    reference positions are formed: the plain average and the flux-weighted
    average of the group.  The row's offset from each reference (scaled by
    3600, i.e. arcseconds if the positions end up in degrees) is returned.

    Returns a 4-tuple:
        separation   : ndarray, offsets from the unweighted mean position
        fluxweighted : ndarray, offsets from the flux-weighted mean position
        ra_vector    : list, per-row RA offset component from the unweighted mean
        dec_vector   : list, per-row Dec offset component from the unweighted mean
    """
    nindiv = len(fluxcomponent)
    separation = []
    ra_vector = []
    dec_vector = []
    fluxweighted = []
    for icomp in range(nindiv):
        # Select every component belonging to the same target as this row.
        target = fluxcomponent[targetstring][icomp]
        match = fluxcomponent[targetstring] == target
        ras = fluxcomponent[rastring][match]
        decs = fluxcomponent[decstring][match]
        fluxs = fluxcomponent[fluxstring][match]
        nmatch = fluxcomponent[fluxstring][match].size
        try:
            # Numeric coordinate columns: rescale by 3600 unless the caller
            # says the values are already in degrees.
            if degrees:
                factor = 1.
            else:
                factor = 3600.
            avgra = ras.mean() / factor
            avgdec = decs.mean() / factor
            ra = fluxcomponent[rastring][icomp] / factor
            dec = fluxcomponent[decstring][icomp] / factor
            newra = ras / factor
            newdec = decs / factor
        except:
            # Division failed, so the columns are presumably sexagesimal
            # strings; parse them with astropy instead.
            # NOTE(review): a bare except also hides unrelated errors --
            # consider catching TypeError explicitly.
            newra = numpy.zeros(nmatch)
            newdec = numpy.zeros(nmatch)
            for imatch in range(nmatch):
                ira = ras[imatch]
                idec = decs[imatch]
                sc = SkyCoord(ira, idec, "icrs", unit=(u.hourangle, u.degree))
                newra[imatch] = sc.ra.deg
                newdec[imatch] = sc.dec.deg
            ra = fluxcomponent[rastring][icomp]
            dec = fluxcomponent[decstring][icomp]
            sc = SkyCoord(ra, dec, "icrs", unit=(u.hourangle, u.degree))
            ra = sc.ra.deg
            dec = sc.dec.deg
            avgra = newra.mean()
            avgdec = newdec.mean()
        # Offset from the unweighted mean position.
        # NOTE(review): numpy.cos expects radians, but `dec` here looks like
        # degrees -- confirm whether this projection factor is intended.
        raoffset = (ra - avgra) * numpy.cos(dec)
        decoffset = dec - avgdec
        offset = numpy.sqrt(raoffset ** 2 + decoffset ** 2) * 3600
        separation.append(offset)
        ra_vector.append(raoffset)
        dec_vector.append(decoffset)
        # Flux-weighted mean position of the group.
        wmeanra_num = newra * fluxs
        wmeanra_den = fluxs
        wmeanra = wmeanra_num.sum() / wmeanra_den.sum()
        wmeandec_num = newdec * fluxs
        wmeandec_den = fluxs
        wmeandec = wmeandec_num.sum() / wmeandec_den.sum()
        # Offset from the flux-weighted mean position.
        raoffset = (ra - wmeanra) * numpy.cos(dec)
        decoffset = dec - wmeandec
        offset = numpy.sqrt(raoffset ** 2 + decoffset ** 2) * 3600
        fluxweighted.append(offset)
    return numpy.array(separation), numpy.array(fluxweighted), ra_vector, \
        dec_vector
def simPosition(fluxcomponent, distance=8.5, rastring='offra',
        decstring='offdec', targetstring='target'):
    """
    Replace the RA and Dec values for each target in fluxcomponent with random
    positions that are forced to be within "distance" arcseconds of the average
    position of the sources in that object. Default distance is 8.5", aka one
    ALMA FOV.

    The input table is modified in place and also returned.
    """
    from numpy.random import uniform
    nindiv = len(fluxcomponent)
    for icomp in range(nindiv):
        # Gather all components of the same target as this row.
        target = fluxcomponent[targetstring][icomp]
        match = fluxcomponent[targetstring] == target
        ras = fluxcomponent[rastring][match]
        decs = fluxcomponent[decstring][match]
        nmatch = fluxcomponent[rastring][match].size
        try:
            # Numeric coordinate columns (assumed arcseconds): convert to
            # degree-like units by dividing by 3600.
            avgra = ras.mean() / 3600
            avgdec = decs.mean() / 3600
            ra = fluxcomponent[rastring][icomp] / 3600
            dec = fluxcomponent[decstring][icomp] / 3600
            newra = ras / 3600
            newdec = decs / 3600
            degflag = True
        except:
            # Division failed, so the columns are presumably sexagesimal
            # strings; parse them with astropy instead.
            degflag = False
            newra = numpy.zeros(nmatch)
            newdec = numpy.zeros(nmatch)
            for imatch in range(nmatch):
                ira = ras[imatch]
                idec = decs[imatch]
                sc = SkyCoord(ira, idec, "icrs", unit=(u.hourangle, u.degree))
                newra[imatch] = sc.ra.deg
                newdec[imatch] = sc.dec.deg
            ra = fluxcomponent[rastring][icomp]
            dec = fluxcomponent[decstring][icomp]
            sc = SkyCoord(ra, dec, "icrs", unit=(u.hourangle, u.degree))
            ra = sc.ra.deg
            dec = sc.dec.deg
            avgra = newra.mean()
            avgdec = newdec.mean()
        # Bounding box of allowed positions around the average location;
        # the RA half-width is stretched by 1/cos(dec) to keep the on-sky
        # extent roughly constant.
        # NOTE(review): numpy.cos expects radians but avgdec appears to be
        # in degrees -- confirm this scale factor.
        lowra = avgra - distance / 3600 / numpy.cos(avgdec)
        highra = avgra + distance / 3600 / numpy.cos(avgdec)
        lowdec = avgdec - distance / 3600
        highdec = avgdec + distance / 3600
        # Clamp Dec to +/- 0.025 (units as above); presumably a survey-field
        # boundary -- TODO confirm.
        if lowdec < -0.025:
            lowdec = -0.025
        if highdec > 0.025:
            highdec = 0.025
        # Draw a uniform random position; convert back to arcseconds when
        # the inputs were numeric.
        if degflag:
            randomra = uniform(low=lowra, high=highra) * 3600
            randomdec = uniform(low=lowdec, high=highdec) * 3600
        else:
            randomra = uniform(low=lowra, high=highra)
            randomdec = uniform(low=lowdec, high=highdec)
        #print(randomra, randomdec)
        #c = SkyCoord(ra=randomra * u.degree, dec=randomdec * u.degree)
        fluxcomponent[rastring][icomp] = randomra
        fluxcomponent[decstring][icomp] = randomdec
        #fluxcomponent[rastring][icomp] = c.ra.to_string('hourangle', sep=':')
        #fluxcomponent[decstring][icomp] = c.dec.to_string(sep=':')
    return fluxcomponent
def simArea(fluxcomponent, nsim, bin_edges, targetstring='target',
        edgecolor='black', facecolor='none', hatch='', fluxstring='f880',
        norm=1.0, label=''):
    """
    Monte-Carlo surface-density profile from randomized source positions.

    Runs `nsim` realizations in which the sources are scattered to random
    positions (simPosition), histograms the resulting separations in the
    annuli defined by `bin_edges`, and plots the median profile with a
    +/- 1 sigma band.

    Returns (median, std, upper, lower) per-bin surface densities.
    """
    nbins = bin_edges.size - 1
    density_per_sim = numpy.zeros([nsim, nbins])
    for isim in range(nsim):
        randomized = simPosition(fluxcomponent, targetstring=targetstring)
        avgsep, _wmeansep, _ra, _dec = getSeparation(randomized,
                targetstring=targetstring, fluxstring=fluxstring)
        counts, bin_edges = numpy.histogram(avgsep, bins=bin_edges)
        # Area of the annulus between consecutive bin edges; the rolled
        # pairing makes the last entry meaningless, so drop it.
        inner = numpy.pi * bin_edges ** 2
        outer = numpy.pi * numpy.roll(bin_edges, -1) ** 2
        area = (outer - inner)[0:-1]
        xhist = ((bin_edges + numpy.roll(bin_edges, -1)) / 2.)[0:-1]
        density_per_sim[isim, :] = counts / area / norm
    mediansep = numpy.median(density_per_sim, axis=0)
    stdsep = numpy.std(density_per_sim, axis=0)
    uppersep = mediansep + stdsep
    lowersep = mediansep - stdsep
    plt.plot(xhist, mediansep, linestyle='--', color=edgecolor, label=label)
    plt.fill_between(xhist, lowersep, y2=uppersep, hatch=hatch,
            facecolor=facecolor, edgecolor=edgecolor)
    return mediansep, stdsep, uppersep, lowersep
def histArea(values, nbins, color='', norm=1.0, fmt='s', ms=6, linestyle='-',
        drawstyle='default', showerror=True, mew=0.5, label=''):
    """
    Plot an annular surface-density profile of `values`.

    Histogram counts are divided by the area of the annulus spanned by each
    pair of consecutive bin edges, then by `norm`, and plotted against the
    bin centres.  Poisson error bars are drawn when `showerror` is True.
    """
    counts, edges = numpy.histogram(values, bins=nbins)
    shifted = numpy.roll(edges, -1)
    # Annulus area between consecutive radii; the last element pairs the
    # final edge with the rolled-around first edge, so discard it.
    area = (numpy.pi * shifted ** 2 - numpy.pi * edges ** 2)[0:-1]
    centers = ((edges + shifted) / 2.)[0:-1]
    density = counts / area / norm
    plt.plot(centers, density, fmt, color=color, linestyle=linestyle,
            linewidth=2, drawstyle=drawstyle, ms=ms, mew=mew, label=label)
    if showerror:
        plt.errorbar(centers, density, yerr=numpy.sqrt(counts) / area / norm,
                fmt=fmt, ms=ms, color=color, ecolor='gray', capsize=0,
                mew=mew)
| [
"numpy.histogram",
"numpy.median",
"numpy.sqrt",
"numpy.roll",
"matplotlib.pyplot.plot",
"astropy.coordinates.SkyCoord",
"matplotlib.pyplot.fill_between",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"numpy.cos",
"numpy.std"
] | [((402, 421), 'numpy.zeros', 'numpy.zeros', (['nindiv'], {}), '(nindiv)\n', (413, 421), False, 'import numpy\n'), ((6468, 6494), 'numpy.zeros', 'numpy.zeros', (['[nsim, nbins]'], {}), '([nsim, nbins])\n', (6479, 6494), False, 'import numpy\n'), ((7093, 7123), 'numpy.median', 'numpy.median', (['supersep'], {'axis': '(0)'}), '(supersep, axis=0)\n', (7105, 7123), False, 'import numpy\n'), ((7137, 7164), 'numpy.std', 'numpy.std', (['supersep'], {'axis': '(0)'}), '(supersep, axis=0)\n', (7146, 7164), False, 'import numpy\n'), ((7238, 7310), 'matplotlib.pyplot.plot', 'plt.plot', (['xhist', 'mediansep'], {'linestyle': '"""--"""', 'color': 'edgecolor', 'label': 'label'}), "(xhist, mediansep, linestyle='--', color=edgecolor, label=label)\n", (7246, 7310), True, 'import matplotlib.pyplot as plt\n'), ((7315, 7421), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xhist', 'lowersep'], {'y2': 'uppersep', 'hatch': 'hatch', 'facecolor': 'facecolor', 'edgecolor': 'edgecolor'}), '(xhist, lowersep, y2=uppersep, hatch=hatch, facecolor=\n facecolor, edgecolor=edgecolor)\n', (7331, 7421), True, 'import matplotlib.pyplot as plt\n'), ((7645, 7680), 'numpy.histogram', 'numpy.histogram', (['values'], {'bins': 'nbins'}), '(values, bins=nbins)\n', (7660, 7680), False, 'import numpy\n'), ((7905, 8046), 'matplotlib.pyplot.plot', 'plt.plot', (['xhist', '(hist / area / norm)', 'fmt'], {'color': 'color', 'linestyle': 'linestyle', 'linewidth': '(2)', 'drawstyle': 'drawstyle', 'ms': 'ms', 'mew': 'mew', 'label': 'label'}), '(xhist, hist / area / norm, fmt, color=color, linestyle=linestyle,\n linewidth=2, drawstyle=drawstyle, ms=ms, mew=mew, label=label)\n', (7913, 8046), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3475), 'numpy.array', 'numpy.array', (['separation'], {}), '(separation)\n', (3463, 3475), False, 'import numpy\n'), ((3477, 3502), 'numpy.array', 'numpy.array', (['fluxweighted'], {}), '(fluxweighted)\n', (3488, 3502), False, 'import numpy\n'), ((6745, 6784), 
'numpy.histogram', 'numpy.histogram', (['avgsep'], {'bins': 'bin_edges'}), '(avgsep, bins=bin_edges)\n', (6760, 6784), False, 'import numpy\n'), ((2783, 2797), 'numpy.cos', 'numpy.cos', (['dec'], {}), '(dec)\n', (2792, 2797), False, 'import numpy\n'), ((2848, 2890), 'numpy.sqrt', 'numpy.sqrt', (['(raoffset ** 2 + decoffset ** 2)'], {}), '(raoffset ** 2 + decoffset ** 2)\n', (2858, 2890), False, 'import numpy\n'), ((3287, 3301), 'numpy.cos', 'numpy.cos', (['dec'], {}), '(dec)\n', (3296, 3301), False, 'import numpy\n'), ((3354, 3396), 'numpy.sqrt', 'numpy.sqrt', (['(raoffset ** 2 + decoffset ** 2)'], {}), '(raoffset ** 2 + decoffset ** 2)\n', (3364, 3396), False, 'import numpy\n'), ((5776, 5807), 'numpy.random.uniform', 'uniform', ([], {'low': 'lowra', 'high': 'highra'}), '(low=lowra, high=highra)\n', (5783, 5807), False, 'from numpy.random import uniform\n'), ((5832, 5865), 'numpy.random.uniform', 'uniform', ([], {'low': 'lowdec', 'high': 'highdec'}), '(low=lowdec, high=highdec)\n', (5839, 5865), False, 'from numpy.random import uniform\n'), ((7742, 7767), 'numpy.roll', 'numpy.roll', (['bin_edges', '(-1)'], {}), '(bin_edges, -1)\n', (7752, 7767), False, 'import numpy\n'), ((7845, 7870), 'numpy.roll', 'numpy.roll', (['bin_edges', '(-1)'], {}), '(bin_edges, -1)\n', (7855, 7870), False, 'import numpy\n'), ((2107, 2126), 'numpy.zeros', 'numpy.zeros', (['nmatch'], {}), '(nmatch)\n', (2118, 2126), False, 'import numpy\n'), ((2148, 2167), 'numpy.zeros', 'numpy.zeros', (['nmatch'], {}), '(nmatch)\n', (2159, 2167), False, 'import numpy\n'), ((2560, 2615), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec', '"""icrs"""'], {'unit': '(u.hourangle, u.degree)'}), "(ra, dec, 'icrs', unit=(u.hourangle, u.degree))\n", (2568, 2615), False, 'from astropy.coordinates import SkyCoord\n'), ((4640, 4659), 'numpy.zeros', 'numpy.zeros', (['nmatch'], {}), '(nmatch)\n', (4651, 4659), False, 'import numpy\n'), ((4681, 4700), 'numpy.zeros', 'numpy.zeros', (['nmatch'], {}), '(nmatch)\n', 
(4692, 4700), False, 'import numpy\n'), ((5093, 5148), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec', '"""icrs"""'], {'unit': '(u.hourangle, u.degree)'}), "(ra, dec, 'icrs', unit=(u.hourangle, u.degree))\n", (5101, 5148), False, 'from astropy.coordinates import SkyCoord\n'), ((5316, 5333), 'numpy.cos', 'numpy.cos', (['avgdec'], {}), '(avgdec)\n', (5325, 5333), False, 'import numpy\n'), ((5377, 5394), 'numpy.cos', 'numpy.cos', (['avgdec'], {}), '(avgdec)\n', (5386, 5394), False, 'import numpy\n'), ((5635, 5666), 'numpy.random.uniform', 'uniform', ([], {'low': 'lowra', 'high': 'highra'}), '(low=lowra, high=highra)\n', (5642, 5666), False, 'from numpy.random import uniform\n'), ((5698, 5731), 'numpy.random.uniform', 'uniform', ([], {'low': 'lowdec', 'high': 'highdec'}), '(low=lowdec, high=highdec)\n', (5705, 5731), False, 'from numpy.random import uniform\n'), ((6854, 6879), 'numpy.roll', 'numpy.roll', (['bin_edges', '(-1)'], {}), '(bin_edges, -1)\n', (6864, 6879), False, 'import numpy\n'), ((6969, 6994), 'numpy.roll', 'numpy.roll', (['bin_edges', '(-1)'], {}), '(bin_edges, -1)\n', (6979, 6994), False, 'import numpy\n'), ((2300, 2357), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ira', 'idec', '"""icrs"""'], {'unit': '(u.hourangle, u.degree)'}), "(ira, idec, 'icrs', unit=(u.hourangle, u.degree))\n", (2308, 2357), False, 'from astropy.coordinates import SkyCoord\n'), ((4833, 4890), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ira', 'idec', '"""icrs"""'], {'unit': '(u.hourangle, u.degree)'}), "(ira, idec, 'icrs', unit=(u.hourangle, u.degree))\n", (4841, 4890), False, 'from astropy.coordinates import SkyCoord\n'), ((8278, 8294), 'numpy.sqrt', 'numpy.sqrt', (['hist'], {}), '(hist)\n', (8288, 8294), False, 'import numpy\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import json
from random import randint
from matplotlib.lines import Line2D
from sklearn.cluster import KMeans
from scipy.spatial import distance
from sklearn.externals import joblib
# Load the expression matrix and the co-expression network edge list; both
# are transposed so they can be addressed column-first below.
data = pd.read_csv('Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.GEM.txt', sep='\t')
data = data.transpose()
data_edge = pd.read_csv('Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.sc.mcs30.md5.mmINF.th0.860100.coexpnet.edges.txt', sep='\t')
data_edge = data_edge.transpose()

# Pre-trained k-means model (32 clusters).
clf = joblib.load('new_model_32')
centroid = clf.cluster_centers_
num_clusters = 32

print("Here")
pair_features = []
cluster_counts = np.zeros((32,), dtype=int)
# Build one feature vector per gene pair: the two genes' expression values
# concatenated.  The pair count (14908) and sample count (2016) are
# hard-coded for this dataset.
for ipair in range(0, 14908):
    if ipair % 100 == 0:
        print(ipair)
    gene_a = ''.join(list(data_edge.iloc[0, ipair]))
    gene_b = ''.join(list(data_edge.iloc[1, ipair]))
    profile = [data[gene_a][j] for j in range(0, 2016)]
    profile += [data[gene_b][k] for k in range(0, 2016)]
    profile = np.asarray(profile)
    # Replace NaNs in place, then clip negative values to zero.
    np.nan_to_num(profile, copy=False)
    profile[profile < 0] = 0
    pair_features.append(np.ndarray.tolist(profile))
print("Now Here")
label = clf.predict(pair_features)
print("Now Now Here")
# Tally how many gene pairs fall into each cluster.
for assigned in label:
    cluster_counts[assigned] = cluster_counts[assigned] + 1
print(cluster_counts)
| [
"pandas.read_csv",
"sklearn.externals.joblib.load",
"numpy.asarray",
"numpy.zeros",
"numpy.ndarray.tolist",
"numpy.nan_to_num"
] | [((261, 365), 'pandas.read_csv', 'pd.read_csv', (['"""Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.GEM.txt"""'], {'sep': '"""\t"""'}), "(\n 'Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.GEM.txt',\n sep='\\t')\n", (272, 365), True, 'import pandas as pd\n'), ((393, 539), 'pandas.read_csv', 'pd.read_csv', (['"""Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.sc.mcs30.md5.mmINF.th0.860100.coexpnet.edges.txt"""'], {'sep': '"""\t"""'}), "(\n 'Hsapiens-9606-201603-2016-RNASeq-Quantile-CancerGenomeAtlas-v1.sc.mcs30.md5.mmINF.th0.860100.coexpnet.edges.txt'\n , sep='\\t')\n", (404, 539), True, 'import pandas as pd\n'), ((570, 597), 'sklearn.externals.joblib.load', 'joblib.load', (['"""new_model_32"""'], {}), "('new_model_32')\n", (581, 597), False, 'from sklearn.externals import joblib\n'), ((687, 713), 'numpy.zeros', 'np.zeros', (['(32,)'], {'dtype': 'int'}), '((32,), dtype=int)\n', (695, 713), True, 'import numpy as np\n'), ((1045, 1062), 'numpy.asarray', 'np.asarray', (['glist'], {}), '(glist)\n', (1055, 1062), True, 'import numpy as np\n'), ((1067, 1099), 'numpy.nan_to_num', 'np.nan_to_num', (['glist'], {'copy': '(False)'}), '(glist, copy=False)\n', (1080, 1099), True, 'import numpy as np\n'), ((1137, 1161), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['glist'], {}), '(glist)\n', (1154, 1161), True, 'import numpy as np\n')] |
from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend, JaggedStruct
import uproot
import numpy
import numpy as np
import unittest
import os
from uproot_methods.classes.TH1 import from_numpy
USE_CUDA = bool(int(os.environ.get("HEPACCELERATE_CUDA", 0)))
class TestJaggedStruct(unittest.TestCase):
    """Construction smoke test for JaggedStruct."""
    def test_jaggedstruct(self):
        """Build a JaggedStruct from explicit offsets and a flat content array."""
        field_specs = [("Muon_pt", "float32")]
        content = {"pt": np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)}
        offsets = [0, 2, 3]
        js = JaggedStruct(offsets, content, "Muon_", np, field_specs)
class TestHistogram(unittest.TestCase):
    """Tests for the backend histogramming kernels (CPU or CUDA)."""
    # Backend selection happens once, at class-creation time.
    NUMPY_LIB, ha = choose_backend(use_cuda=USE_CUDA)
    def test_histogram(self):
        """Fill histograms from vectors and write them to a ROOT file."""
        np = TestHistogram.NUMPY_LIB
        data = np.array([2,3,4,5,6,7], dtype=np.float32)
        data[data<2] = 0
        weights = np.ones_like(data, dtype=np.float32)
        w, w2, e = self.ha.histogram_from_vector(data, weights, np.array([0,1,2,3,4,5], dtype=np.float32))
        # NOTE(review): this numpy reference histogram is computed but never
        # compared against the backend result.
        npw, npe = np.histogram(data, np.array([0,1,2,3,4,5]))
        hr = from_numpy((w, e))
        f = uproot.recreate("test.root")
        f["hist"] = hr
        # Second histogram: 10k normally distributed values on a fine binning.
        data = np.random.normal(size=10000)
        data = np.array(data, dtype=np.float32)
        weights = np.ones_like(data, dtype=np.float32)
        w, w2, e = self.ha.histogram_from_vector(data, weights, np.linspace(-1,1,100, dtype=np.float32))
        hr = from_numpy((w, e))
        f["hist2"] = hr
        f.close()
    def test_histogram_several(self):
        """Batched histogramming must match the single-vector kernel."""
        np = TestHistogram.NUMPY_LIB
        data = np.array([2,3,4,5,6,7], dtype=np.float32)
        mask = data>=2
        data[self.NUMPY_LIB.invert(mask)] = 0
        weights = np.ones_like(data, dtype=np.float32)
        bins = np.array([0,1,2,3,4,5], dtype=np.float32)
        w, w2, e = self.ha.histogram_from_vector(data, weights, bins)
        # Fill the same (data, bins) pair twice in one batched call; both
        # results must equal the single-vector weights and squared weights.
        histograms = self.ha.histogram_from_vector_several([(data, bins), (data, bins)], weights, mask)
        assert(numpy.all(w == histograms[0][0]))
        assert(numpy.all(w == histograms[1][0]))
        assert(numpy.all(w2 == histograms[0][1]))
        assert(numpy.all(w2 == histograms[1][1]))
class TestDataset(unittest.TestCase):
    """Tests for Dataset loading, mapping, compaction and merging."""
    # Backend selection happens once, at class-creation time.
    NUMPY_LIB, ha = choose_backend(use_cuda=USE_CUDA)
    @staticmethod
    def load_dataset(num_iter=1):
        """Build an HZZ Dataset referencing data/HZZ.root `num_iter` times (not yet loaded)."""
        # Column layout of the HZZ ROOT file: per-object muon and jet
        # branches plus scalar per-event variables.
        datastructures = {
            "Muon": [
                ("Muon_Px", "float32"),
                ("Muon_Py", "float32"),
                ("Muon_Pz", "float32"),
                ("Muon_E", "float32"),
                ("Muon_Charge", "int32"),
                ("Muon_Iso", "float32")
            ],
            "Jet": [
                ("Jet_Px", "float32"),
                ("Jet_Py", "float32"),
                ("Jet_Pz", "float32"),
                ("Jet_E", "float32"),
                ("Jet_btag", "float32"),
                ("Jet_ID", "bool")
            ],
            "EventVariables": [
                ("NPrimaryVertices", "int32"),
                ("triggerIsoMu24", "bool"),
                ("EventWeight", "float32")
            ]
        }
        dataset = Dataset("HZZ", num_iter*["data/HZZ.root"], datastructures, treename="events", datapath="")
        # Nothing is loaded yet: structs and eventvars must be empty.
        assert(dataset.filenames[0] == "data/HZZ.root")
        assert(len(dataset.filenames) == num_iter)
        assert(len(dataset.structs["Jet"]) == 0)
        assert(len(dataset.eventvars) == 0)
        return dataset
    def setUp(self):
        # Fresh single-file dataset for every test method.
        self.dataset = self.load_dataset()
    @staticmethod
    def map_func(dataset, ifile):
        """Per-file kernel: count muons with pT > 20 in each event."""
        mu = dataset.structs["Muon"][ifile]
        # Transverse momentum from the px/py components.
        mu_pt = np.sqrt(mu.Px**2 + mu.Py**2)
        mu_pt_pass = mu_pt > 20
        mask_rows = np.ones(mu.numevents(), dtype=np.bool)
        mask_content = np.ones(mu.numobjects(), dtype=np.bool)
        ret = TestDataset.ha.sum_in_offsets(mu.offsets, mu_pt_pass, mask_rows, mask_content, dtype=np.int8)
        return ret
    def test_dataset_map(self):
        """map() yields one per-event result array per input file."""
        dataset = self.load_dataset()
        dataset.load_root()
        rets = dataset.map(self.map_func)
        assert(len(rets) == 1)
        assert(len(rets[0]) == dataset.structs["Muon"][0].numevents())
        assert(np.sum(rets[0]) > 0)
        return rets
    def test_dataset_compact(self):
        """compact() must reduce the in-memory size of the dataset."""
        dataset = self.dataset
        dataset.load_root()
        memsize1 = dataset.memsize()
        rets = dataset.map(self.map_func)
        #compacting uses JaggedArray functionality and can only be done on the numpy/CPU backend
        dataset.move_to_device(np)
        rets = [TestDataset.NUMPY_LIB.asnumpy(r) for r in rets]
        dataset.compact(rets)
        # Move back to the configured backend before measuring again.
        dataset.move_to_device(TestDataset.NUMPY_LIB)
        memsize2 = dataset.memsize()
        assert(memsize1 > memsize2)
        print("compacted memory size ratio:", memsize2/memsize1)
    @staticmethod
    def precompute_results(filename):
        """Per-file precompute hook: total EventWeight read directly with uproot."""
        fi = uproot.open(filename)
        arr = fi.get("events").array("EventWeight")
        return {"EventWeight": arr.sum()}
    def test_dataset_merge_inplace(self):
        """merge_inplace() must preserve object counts and per-event sums."""
        num_iter = 10
        ds_multi = self.load_dataset(num_iter=num_iter)
        ds_multi.func_filename_precompute = self.precompute_results
        ds_multi.load_root()
        assert(len(ds_multi.structs["Jet"]) == num_iter)
        njet = ds_multi.num_objects_loaded("Jet")
        #compute a per-event jet energy sum taking into account the offsets
        jet_sume = TestDataset.NUMPY_LIB.hstack([TestDataset.ha.sum_in_offsets(
            ds_multi.structs["Jet"][i].offsets,
            ds_multi.structs["Jet"][i]["E"],
            TestDataset.NUMPY_LIB.ones(ds_multi.structs["Jet"][i].numevents(), dtype=TestDataset.NUMPY_LIB.bool),
            TestDataset.NUMPY_LIB.ones(ds_multi.structs["Jet"][i].numobjects(), dtype=TestDataset.NUMPY_LIB.bool)
        ) for i in range(num_iter)])
        numevents = ds_multi.numevents()
        ds_multi.merge_inplace()
        # After merging, all per-file structs collapse into a single one.
        assert(len(ds_multi.structs["Jet"]) == 1)
        assert(ds_multi.num_objects_loaded("Jet") == njet)
        # Recompute the per-event jet energy sum on the merged struct; it
        # must match the concatenation of the per-file sums exactly.
        jet_sume_merged = TestDataset.ha.sum_in_offsets(
            ds_multi.structs["Jet"][0].offsets,
            ds_multi.structs["Jet"][0]["E"],
            TestDataset.NUMPY_LIB.ones(ds_multi.structs["Jet"][0].numevents(), dtype=TestDataset.NUMPY_LIB.bool),
            TestDataset.NUMPY_LIB.ones(ds_multi.structs["Jet"][0].numobjects(), dtype=TestDataset.NUMPY_LIB.bool)
        )
        assert(TestDataset.NUMPY_LIB.all(jet_sume_merged == jet_sume))
        assert(ds_multi.numevents() == numevents)
if __name__ == "__main__":
    # Discover and run all TestCase classes in this module.
    unittest.main()
| [
"uproot.recreate",
"numpy.ones_like",
"hepaccelerate.utils.choose_backend",
"numpy.random.normal",
"numpy.sqrt",
"os.environ.get",
"hepaccelerate.utils.Dataset",
"numpy.array",
"numpy.linspace",
"uproot_methods.classes.TH1.from_numpy",
"numpy.sum",
"uproot.open",
"unittest.main",
"numpy.al... | [((601, 634), 'hepaccelerate.utils.choose_backend', 'choose_backend', ([], {'use_cuda': 'USE_CUDA'}), '(use_cuda=USE_CUDA)\n', (615, 634), False, 'from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend, JaggedStruct\n'), ((2189, 2222), 'hepaccelerate.utils.choose_backend', 'choose_backend', ([], {'use_cuda': 'USE_CUDA'}), '(use_cuda=USE_CUDA)\n', (2203, 2222), False, 'from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend, JaggedStruct\n'), ((6625, 6640), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6638, 6640), False, 'import unittest\n'), ((233, 272), 'os.environ.get', 'os.environ.get', (['"""HEPACCELERATE_CUDA"""', '(0)'], {}), "('HEPACCELERATE_CUDA', 0)\n", (247, 272), False, 'import os\n'), ((718, 764), 'numpy.array', 'np.array', (['[2, 3, 4, 5, 6, 7]'], {'dtype': 'np.float32'}), '([2, 3, 4, 5, 6, 7], dtype=np.float32)\n', (726, 764), True, 'import numpy as np\n'), ((803, 839), 'numpy.ones_like', 'np.ones_like', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (815, 839), True, 'import numpy as np\n'), ((1023, 1041), 'uproot_methods.classes.TH1.from_numpy', 'from_numpy', (['(w, e)'], {}), '((w, e))\n', (1033, 1041), False, 'from uproot_methods.classes.TH1 import from_numpy\n'), ((1054, 1082), 'uproot.recreate', 'uproot.recreate', (['"""test.root"""'], {}), "('test.root')\n", (1069, 1082), False, 'import uproot\n'), ((1131, 1159), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10000)'}), '(size=10000)\n', (1147, 1159), True, 'import numpy as np\n'), ((1175, 1207), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1183, 1207), True, 'import numpy as np\n'), ((1226, 1262), 'numpy.ones_like', 'np.ones_like', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1238, 1262), True, 'import numpy as np\n'), ((1381, 1399), 'uproot_methods.classes.TH1.from_numpy', 'from_numpy', (['(w, e)'], {}), '((w, e))\n', (1391, 
1399), False, 'from uproot_methods.classes.TH1 import from_numpy\n'), ((1534, 1580), 'numpy.array', 'np.array', (['[2, 3, 4, 5, 6, 7]'], {'dtype': 'np.float32'}), '([2, 3, 4, 5, 6, 7], dtype=np.float32)\n', (1542, 1580), True, 'import numpy as np\n'), ((1663, 1699), 'numpy.ones_like', 'np.ones_like', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1675, 1699), True, 'import numpy as np\n'), ((1715, 1761), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {'dtype': 'np.float32'}), '([0, 1, 2, 3, 4, 5], dtype=np.float32)\n', (1723, 1761), True, 'import numpy as np\n'), ((1947, 1979), 'numpy.all', 'numpy.all', (['(w == histograms[0][0])'], {}), '(w == histograms[0][0])\n', (1956, 1979), False, 'import numpy\n'), ((1996, 2028), 'numpy.all', 'numpy.all', (['(w == histograms[1][0])'], {}), '(w == histograms[1][0])\n', (2005, 2028), False, 'import numpy\n'), ((2045, 2078), 'numpy.all', 'numpy.all', (['(w2 == histograms[0][1])'], {}), '(w2 == histograms[0][1])\n', (2054, 2078), False, 'import numpy\n'), ((2095, 2128), 'numpy.all', 'numpy.all', (['(w2 == histograms[1][1])'], {}), '(w2 == histograms[1][1])\n', (2104, 2128), False, 'import numpy\n'), ((3149, 3246), 'hepaccelerate.utils.Dataset', 'Dataset', (['"""HZZ"""', "(num_iter * ['data/HZZ.root'])", 'datastructures'], {'treename': '"""events"""', 'datapath': '""""""'}), "('HZZ', num_iter * ['data/HZZ.root'], datastructures, treename=\n 'events', datapath='')\n", (3156, 3246), False, 'from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend, JaggedStruct\n'), ((3641, 3673), 'numpy.sqrt', 'np.sqrt', (['(mu.Px ** 2 + mu.Py ** 2)'], {}), '(mu.Px ** 2 + mu.Py ** 2)\n', (3648, 3673), True, 'import numpy as np\n'), ((4934, 4955), 'uproot.open', 'uproot.open', (['filename'], {}), '(filename)\n', (4945, 4955), False, 'import uproot\n'), ((904, 950), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {'dtype': 'np.float32'}), '([0, 1, 2, 3, 4, 5], dtype=np.float32)\n', (912, 950), True, 
'import numpy as np\n'), ((985, 1013), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (993, 1013), True, 'import numpy as np\n'), ((1327, 1368), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {'dtype': 'np.float32'}), '(-1, 1, 100, dtype=np.float32)\n', (1338, 1368), True, 'import numpy as np\n'), ((4219, 4234), 'numpy.sum', 'np.sum', (['rets[0]'], {}), '(rets[0])\n', (4225, 4234), True, 'import numpy as np\n'), ((447, 505), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]'], {'dtype': 'np.float32'}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)\n', (455, 505), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File name: load_data.py
Author: locke
Date created: 2020/3/25, 7:00 PM
"""
import time
import numpy as np
class AlignmentData:
    """Entity-alignment dataset loader (DBP15K-style directory layout).

    Expects under ``data_dir``:
      * ``ent_ids_1`` / ``ent_ids_2``  -- one ``<id>\\t<entity>`` per line, per KG
      * ``rel_ids_1`` / ``rel_ids_2``  -- one ``<id>\\t<relation>`` per line
      * ``triples_1`` / ``triples_2``  -- id triples ``<h>\\t<r>\\t<t>``
      * ``ill_ent_ids``                -- aligned entity pairs ``<id1>\\t<id2>``

    The aligned pairs (ILLs) are shuffled and split into train/val/test by
    ``rate`` and ``val``, and a sparse adjacency graph over all entities is
    built from the union of both KGs' triples.
    """
    def __init__(self, data_dir="data/DBP15K/ja_en", rate=0.3, share=False, swap=False, val=0.0, with_r=False):
        # rate:  fraction of alignment pairs used for training.
        # val:   fraction used for validation (sliced right after the train part).
        # share: rewrite KG1 entity ids in triples to their aligned KG2 ids and
        #        consume the training pairs (they become empty).
        # swap:  augment triples by copying them across aligned ids, both ways.
        # with_r: also return per-edge relation ids from the sparse graph.
        t_ = time.time()
        self.rate = rate
        self.val = val
        # *2id_dict maps surface form -> id, id2* maps back; the third item is
        # a pair of id sets, one per KG (files ent_ids_1 / ent_ids_2, etc.).
        self.ins2id_dict, self.id2ins_dict, [self.kg1_ins_ids, self.kg2_ins_ids] = self.load_dict(data_dir + "/ent_ids_", file_num=2)
        self.rel2id_dict, self.id2rel_dict, [self.kg1_rel_ids, self.kg2_rel_ids] = self.load_dict(data_dir + "/rel_ids_", file_num=2)
        self.ins_num = len(self.ins2id_dict)
        self.rel_num = len(self.rel2id_dict)
        self.triple_idx = self.load_triples(data_dir + "/triples_", file_num=2)
        # Alignment pairs are read through load_triples too (2-tuples per line).
        self.ill_idx = self.load_triples(data_dir + "/ill_ent_ids", file_num=1)
        np.random.shuffle(self.ill_idx)
        # Split shuffled pairs into [0, rate), [rate, rate+val), [rate+val, 1).
        self.ill_train_idx, self.ill_val_idx, self.ill_test_idx = np.array(self.ill_idx[:int(len(self.ill_idx) // 1 * rate)], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * rate) : int(len(self.ill_idx) // 1 * (rate+val))], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * (rate+val)):], dtype=np.int32)
        self.ins_G_edges_idx, self.ins_G_values_idx, self.r_ij_idx = self.gen_sparse_graph_from_triples(self.triple_idx, self.ins_num, with_r)
        # share and swap are mutually exclusive (at most one may be True).
        assert (share != swap or (share == False and swap == False))
        if share:
            self.triple_idx = self.share(self.triple_idx, self.ill_train_idx) # 1 -> 2:base
            # Replace the KG1 side of each training pair by its KG2 counterpart
            # in the KG1 id set; the pairs themselves are consumed.
            self.kg1_ins_ids = (self.kg1_ins_ids - set(self.ill_train_idx[:, 0])) | set(self.ill_train_idx[:, 1])
            self.ill_train_idx = []
        if swap:
            self.triple_idx = self.swap(self.triple_idx, self.ill_train_idx)
        # Bootstrapping state, filled in later by the training procedure.
        # NOTE(review): "boot_pair_dix" looks like a typo for "boot_pair_idx",
        # kept as-is because external code may reference this attribute name.
        self.labeled_alignment = set()
        self.boot_triple_idx = []
        self.boot_pair_dix = []
        self.init_time = time.time() - t_
    def load_triples(self, data_dir, file_num=2):
        """Read tab-separated integer tuples from ``data_dir``(+"1"/"2") and
        return them shuffled as a list of tuples."""
        if file_num == 2:
            file_names = [data_dir + str(i) for i in range(1, 3)]
        else:
            file_names = [data_dir]
        triple = []
        for file_name in file_names:
            with open(file_name, "r", encoding="utf-8") as f:
                data = f.read().strip().split("\n")
                data = [tuple(map(int, i.split("\t"))) for i in data]
                triple += data
        np.random.shuffle(triple)
        return triple
    def load_dict(self, data_dir, file_num=2):
        """Read ``<id>\\t<name>`` files and return (name->id, id->name, per-file
        id sets)."""
        if file_num == 2:
            file_names = [data_dir + str(i) for i in range(1, 3)]
        else:
            file_names = [data_dir]
        what2id, id2what, ids = {}, {}, []
        for file_name in file_names:
            with open(file_name, "r", encoding="utf-8") as f:
                data = f.read().strip().split("\n")
                data = [i.split("\t") for i in data]
                what2id = {**what2id, **dict([[i[1], int(i[0])] for i in data])}
                id2what = {**id2what, **dict([[int(i[0]), i[1]] for i in data])}
                ids.append(set([int(i[0]) for i in data]))
        return what2id, id2what, ids
    def gen_sparse_graph_from_triples(self, triples, ins_num, with_r=False):
        """Build a symmetric sparse graph over entity ids.

        Each (h, r, t) with h != t contributes both directions; the relation is
        stored signed (+r forward, -r backward).  With ``with_r`` the per-edge
        |relation| ids are returned and NO self-loops are added; otherwise
        self-loops are appended for every entity and r_ij is None.
        """
        edge_dict = {}
        for (h, r, t) in triples:
            if h != t:
                if (h, t) not in edge_dict:
                    edge_dict[(h, t)] = []
                    edge_dict[(t, h)] = []
                edge_dict[(h, t)].append(r)
                edge_dict[(t, h)].append(-r)
        if with_r:
            # One (edge, value, relation) entry per parallel relation.
            edges = [[h, t] for (h, t) in edge_dict for r in edge_dict[(h, t)]]
            values = [1 for (h, t) in edge_dict for r in edge_dict[(h, t)]]
            r_ij = [abs(r) for (h, t) in edge_dict for r in edge_dict[(h, t)]]
            edges = np.array(edges, dtype=np.int32)
            values = np.array(values, dtype=np.float32)
            r_ij = np.array(r_ij, dtype=np.float32)
            return edges, values, r_ij
        else:
            edges = [[h, t] for (h, t) in edge_dict]
            values = [1 for (h, t) in edge_dict]
            # add self-loop
            edges += [[e, e] for e in range(ins_num)]
            values += [1 for e in range(ins_num)]
            edges = np.array(edges, dtype=np.int32)
            values = np.array(values, dtype=np.float32)
            return edges, values, None
    def share(self, triples, ill):
        """Rewrite every KG1 id appearing in ``ill`` to its aligned KG2 id
        inside ``triples`` and deduplicate the result."""
        from_1_to_2_dict = dict(ill)
        new_triples = []
        for (h, r, t) in triples:
            if h in from_1_to_2_dict:
                h = from_1_to_2_dict[h]
            if t in from_1_to_2_dict:
                t = from_1_to_2_dict[t]
            new_triples.append((h, r, t))
        new_triples = list(set(new_triples))
        return new_triples
    def swap(self, triples, ill):
        """Augment ``triples`` by duplicating each one across the alignment in
        both directions (KG1->KG2 and KG2->KG1), then deduplicate.

        ``ill`` must be a 2-column numpy array (it is sliced with [:, ::-1]).
        """
        from_1_to_2_dict = dict(ill)
        from_2_to_1_dict = dict(ill[:, ::-1])
        new_triples = []
        for (h, r, t) in triples:
            new_triples.append((h, r, t))
            if h in from_1_to_2_dict:
                new_triples.append((from_1_to_2_dict[h], r, t))
            if t in from_1_to_2_dict:
                new_triples.append((h, r, from_1_to_2_dict[t]))
            if h in from_2_to_1_dict:
                new_triples.append((from_2_to_1_dict[h], r, t))
            if t in from_2_to_1_dict:
                new_triples.append((h, r, from_2_to_1_dict[t]))
        new_triples = list(set(new_triples))
        return new_triples
    def __repr__(self):
        """Human-readable summary of counts, splits and load time."""
        return self.__class__.__name__ + " dataset summary:" + \
            "\n\tins_num: " + str(self.ins_num) + \
            "\n\trel_num: " + str(self.rel_num) + \
            "\n\ttriple_idx: " + str(len(self.triple_idx)) + \
            "\n\trate: " + str(self.rate) + "\tval: " + str(self.val) + \
            "\n\till_idx(train/test/val): " + str(len(self.ill_idx)) + " = " + str(len(self.ill_train_idx)) + " + " + str(len(self.ill_test_idx)) + " + " + str(len(self.ill_val_idx)) + \
            "\n\tins_G_edges_idx: " + str(len(self.ins_G_edges_idx)) + \
            "\n\t----------------------------- init_time: " + str(round(self.init_time, 3)) + "s"
if __name__ == '__main__':
    # Smoke test: build the dataset under every legal share/swap combination
    # and print its summary.
    for share_flag, swap_flag in ((False, False), (True, False), (False, True)):
        dataset = AlignmentData(share=share_flag, swap=swap_flag)
        print(dataset)
| [
"numpy.array",
"time.time",
"numpy.random.shuffle"
] | [((321, 332), 'time.time', 'time.time', ([], {}), '()\n', (330, 332), False, 'import time\n'), ((908, 939), 'numpy.random.shuffle', 'np.random.shuffle', (['self.ill_idx'], {}), '(self.ill_idx)\n', (925, 939), True, 'import numpy as np\n'), ((2482, 2507), 'numpy.random.shuffle', 'np.random.shuffle', (['triple'], {}), '(triple)\n', (2499, 2507), True, 'import numpy as np\n'), ((4307, 4338), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.int32'}), '(edges, dtype=np.int32)\n', (4315, 4338), True, 'import numpy as np\n'), ((4356, 4390), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float32'}), '(values, dtype=np.float32)\n', (4364, 4390), True, 'import numpy as np\n'), ((1992, 2003), 'time.time', 'time.time', ([], {}), '()\n', (2001, 2003), False, 'import time\n'), ((3876, 3907), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.int32'}), '(edges, dtype=np.int32)\n', (3884, 3907), True, 'import numpy as np\n'), ((3929, 3963), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float32'}), '(values, dtype=np.float32)\n', (3937, 3963), True, 'import numpy as np\n'), ((3983, 4015), 'numpy.array', 'np.array', (['r_ij'], {'dtype': 'np.float32'}), '(r_ij, dtype=np.float32)\n', (3991, 4015), True, 'import numpy as np\n')] |
import numpy as np
from .Observable import Subject
class ObservableArray(np.ndarray, Subject):
    """A numpy ndarray that notifies its Subject observers whenever it is
    mutated in place (augmented assignment, item/slice assignment).

    Observer hookup works by sharing a ``_observers`` attribute between the
    parent array and any views/copies handed out via ``__getitem__``.
    NOTE(review): ndarray subclasses are normally initialized through
    ``__array_finalize__`` rather than ``__init__``; arrays created by view
    casting or slicing may bypass ``__init__`` entirely, which is why the
    code below keeps checking ``hasattr(self, "_observers")``.
    """
    def __init__(self, *args, **kwargs):
        # Initialize the observer machinery; np.ndarray.__init__ is a no-op
        # (the array memory is allocated in __new__).
        Subject.__init__(self)
        np.ndarray.__init__(self)
    def _notify(self, to_return):
        """Fire Subject._notify after a mutation.

        When ``to_return`` (the result of the mutating operation) is given,
        this array's observer list is first copied onto it so the result stays
        observable; when it is None, observers are notified directly.
        """
        if to_return is not None:
            if (hasattr(self, "_observers")):
                to_return._observers = self._observers
                Subject._notify(self)
        else:
            Subject._notify(self)
    def __getitem__(self, index):
        """Index/slice like a normal ndarray, but make sure the returned value
        is an ObservableArray carrying this array's observer list."""
        to_return = super(ObservableArray, self).__getitem__(index)
        if hasattr(self, "_observers") and type(to_return) is not ObservableArray:
            # Result is a plain ndarray or scalar: wrap it (0-d results become
            # 1-element arrays) and attach the observers.
            if to_return.shape != ():
                tmp = ObservableArray(to_return.shape)
            else:
                tmp = ObservableArray((1,))
            tmp[:] = to_return
            tmp._observers = self._observers
            return tmp
        elif hasattr(self, "_observers") and not hasattr(to_return, "_observers"):
            # Result is already an ObservableArray but lost the observer list
            # (e.g. created through view casting): re-attach it.
            to_return._observers = self._observers
            return to_return
        else:
            return to_return
    def __repr__(self):
        # Render as a plain ndarray to avoid recursing through this subclass.
        to_return = repr(np.asarray(self))
        return to_return
    # ------------------------------------------------------------------
    # Every in-place / assignment operator below defers to ndarray and then
    # notifies observers with the operation's result.
    # NOTE(review): ``super(self.__class__, self)`` would recurse infinitely
    # if this class were ever subclassed; ``super(ObservableArray, self)``
    # would be the safer spelling — left unchanged here.
    # ------------------------------------------------------------------
    def __iadd__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__iadd__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __isub__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__isub__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __imul__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__imul__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __idiv__(self, *args, **kwargs):
        # Python 2 style division hook; kept for compatibility.
        to_return = super(self.__class__, self).__idiv__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __itruediv__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__itruediv__(*args,
                                                             **kwargs)
        self._notify(to_return)
        return to_return
    def __rmatmul__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__rmatmul__(*args,
                                                            **kwargs)
        self._notify(to_return)
        return to_return
    def __matmul__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__matmul__(*args,
                                                           **kwargs)
        self._notify(to_return)
        return to_return
    def __imatmul__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__imatmul__(*args,
                                                            **kwargs)
        self._notify(to_return)
        return to_return
    def __ipow__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__ipow__(*args, **kwargs)
        self._notify(to_return)
        return to_return
    def __imod__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__imod__(*args, **kwargs)
        self._notify(to_return)
        return to_return
    def __ifloordiv__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__ifloordiv__(*args,
                                                              **kwargs)
        self._notify(to_return)
        return to_return
    def __ilshift__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__ilshift__(*args,
                                                            **kwargs)
        self._notify(to_return)
        return to_return
    def __irshift__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__irshift__(*args,
                                                            **kwargs)
        self._notify(to_return)
        return to_return
    def __iand__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__iand__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __ixor__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__ixor__(*args,
                                                         **kwargs)
        self._notify(to_return)
        return to_return
    def __ior__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__ior__(*args,
                                                        **kwargs)
        self._notify(to_return)
        return to_return
    def __setitem__(self, *args, **kwargs):
        to_return = super(self.__class__, self).__setitem__(*args,
                                                            **kwargs)
        self._notify(to_return)
        return to_return
    def __setslice__(self, *args, **kwargs):
        # Python 2 slice-assignment hook; unused under Python 3.
        to_return = super(self.__class__, self).__setslice__(*args,
                                                             **kwargs)
        self._notify(to_return)
        return to_return
| [
"numpy.ndarray.__init__",
"numpy.asarray"
] | [((181, 206), 'numpy.ndarray.__init__', 'np.ndarray.__init__', (['self'], {}), '(self)\n', (200, 206), True, 'import numpy as np\n'), ((1366, 1382), 'numpy.asarray', 'np.asarray', (['self'], {}), '(self)\n', (1376, 1382), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from parser.metric import AttachmentMethod
from parser.parser import BiaffineParser
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_pretrained_bert import BertAdam
from pytorch_pretrained_bert import BertTokenizer
from datetime import datetime, timedelta
from tqdm import tqdm
import numpy as np
class Model(object):
    """Training / evaluation / feature-extraction driver for a BERT-based
    biaffine dependency parser.

    Wraps a parser ``network`` plus its ``vocab``, and provides the training
    loop (with BertAdam and optional distributed data loading), evaluation
    with attachment-score metrics, prediction, and several ways to pull BERT
    embeddings and score matrices out of the network.
    """
    def __init__(self, vocab, network):
        """Store vocab/network, build the loss, and pick CUDA when available."""
        super(Model, self).__init__()
        self.vocab = vocab
        self.network = network
        self.criterion = nn.CrossEntropyLoss()
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # NOTE(review): self.tokenizer is never assigned (line below is
        # commented out), yet the debug except-paths in train()/evaluate()
        # reference self.tokenizer — they would raise AttributeError if ever
        # reached. TODO confirm whether the tokenizer should be restored.
        # self.tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
    def __call__(self, loaders, epochs, patience,
                 lr, betas, epsilon, weight_decay, annealing, file,
                 last_epoch, cloud_address, args, gradient_accumulation_steps=1, max_metric=0.0):
        """Run the full training loop with early stopping.

        loaders: (train, dev, test) data loaders.  Training stops after
        ``epochs`` epochs or ``patience`` epochs without dev improvement.
        The best checkpoint (by dev metric) is saved to ``file`` and reloaded
        for a final test evaluation.  ``annealing`` is currently unused (the
        LR scheduler is commented out).  Only rank 0 prints/saves when running
        distributed.
        """
        self.gradient_accumulation_steps = gradient_accumulation_steps
        total_time = timedelta()
        max_e, max_metric = last_epoch, max_metric
        train_loader, dev_loader, test_loader = loaders
        # BertAdam handles warmup/decay internally; gradient clipping at 5.0.
        self.optimizer = BertAdam(params=self.network.parameters(),
                                  lr=lr, b1=betas[0], b2=betas[1],
                                  e=epsilon, weight_decay=weight_decay,
                                  max_grad_norm=5.0)
        # self.optimizer = optim.Adam(params=self.network.parameters(),
        #                             lr=lr, betas=betas, eps=epsilon)
        # self.scheduler = optim.lr_scheduler.LambdaLR(optimizer=self.optimizer,
        #                                              lr_lambda=annealing)
        if args.local_rank == 0:
            print('***Started training at {}***'.format(datetime.now()))
        for epoch in range(last_epoch + 1, epochs + 1):
            start = datetime.now()
            # train one epoch and update the parameters
            if args.local_rank == 0:
                print(f"Epoch {epoch} / {epochs}:")
            if args.distributed:
                # Re-seed the distributed sampler so shards reshuffle per epoch.
                train_loader.sampler.set_epoch(epoch)
            self.train(train_loader, distirbuted=args.distributed)
            # train_loss, train_metric = self.evaluate(train_loader)
            # if args.local_rank == 0:
            #     print(f"{'train:':<6} Loss: {train_loss:.4f} {train_metric}")
            dev_loss, dev_metric = self.evaluate(dev_loader)
            if args.local_rank == 0:
                print(f"{'dev:':<6} Loss: {dev_loss:.4f} {dev_metric}")
            # test_loss, test_metric = self.evaluate(test_loader)
            # if args.local_rank == 0:
            #     print(f"{'test:':<6} Loss: {test_loss:.4f} {test_metric}")
            t = datetime.now() - start
            if args.local_rank == 0:
                print(f"{t}s elapsed\n")
            total_time += t
            # save the model if it is the best so far
            if args.local_rank == 0:
                if dev_metric > max_metric:
                    max_e, max_metric = epoch, dev_metric
                    # Multi-GPU wrappers keep the real model under .module.
                    if args.distributed or torch.cuda.device_count() > 1:
                        self.network.module.save(file, epoch, cloud_address, self.optimizer, max_metric, args.local_rank)
                    else:
                        self.network.save(file, epoch, cloud_address, self.optimizer, max_e, max_metric)
                elif epoch - max_e >= patience:
                    break
        if args.local_rank == 0:
            print('***Finished training at {}***'.format(datetime.now()))
            # Reload the best checkpoint and report final test performance.
            self.network = BiaffineParser.load(file, cloud_address)
            loss, metric = self.evaluate(test_loader)
            print(f"max score of dev is {max_metric.score:.2%} at epoch {max_e}")
            print(f"the score of test at epoch {max_e} is {metric.score:.2%}")
            print(f"mean time of each epoch is {total_time / epoch}s")
            print(f"{total_time}s elapsed")
    def train(self, loader, distirbuted=False):
        """Train for one epoch over ``loader``.

        Batches are (words, attention_mask, token_start_mask, arcs, rels)
        tensors.  [CLS]/[SEP] positions are masked out before the loss;
        gradients are accumulated every ``gradient_accumulation_steps`` steps.
        NOTE: the parameter name 'distirbuted' is a typo kept for caller
        compatibility; when False the loader is wrapped in tqdm for progress.
        """
        self.network.train()
        step = 0
        if not distirbuted:
            loader = tqdm(loader)
        for batch in loader:
            batch = tuple(t.to(self.device) for t in batch)
            words, attention_mask, token_start_mask, arcs, rels = batch
            try:
                s_arc, s_rel = self.network(words, attention_mask)
                # ignore [CLS]
                token_start_mask[:, 0] = 0
                # ignore [SEP]
                lens = attention_mask.sum(dim=1) - 1
                token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
                gold_arcs, gold_rels = arcs[token_start_mask], rels[token_start_mask]
                s_arc, s_rel = s_arc[token_start_mask], s_rel[token_start_mask]
                loss = self.get_loss(s_arc, s_rel, gold_arcs, gold_rels)
            # NOTE(review): bare except — dumps debug info then re-raises as
            # RuntimeError; it also references the unset self.tokenizer (see
            # __init__) and so would itself fail before printing tokens.
            except:
                for sentence in words:
                    print(self.tokenizer.convert_ids_to_tokens(sentence.detach().to(torch.device("cpu")).numpy()))
                print('***DEBUGGING PARSER START***')
                self.network.eval()
                self.network(words, attention_mask, debug=True)
                print('***DEBUGGING PARSER END***')
                print('words', words.shape)
                print('arcs', arcs.shape)
                print('rels', rels.shape)
                print('attention_mask', attention_mask.shape)
                print('token_start_mask', token_start_mask.shape)
                print('s_arc', s_arc.shape)
                print('s_rel', s_rel.shape)
                print('gold_arcs', gold_arcs.shape)
                print('gold_rels', gold_rels.shape)
                raise RuntimeError('training failed.')
            if self.gradient_accumulation_steps > 1:
                loss = loss / self.gradient_accumulation_steps
            loss.backward()
            if (step + 1) % self.gradient_accumulation_steps == 0:
                nn.utils.clip_grad_norm_(self.network.parameters(), 5.0)
                self.optimizer.step()
                # self.scheduler.step()
                self.optimizer.zero_grad()
            step += 1
    @torch.no_grad()
    def evaluate(self, loader, include_punct=False):
        """Compute mean loss and attachment metric over ``loader``.

        Masks out [CLS]/[SEP]; unless ``include_punct``, punctuation tokens
        (ids from self.vocab.puncts) are excluded from the metric as well.
        Returns (loss, AttachmentMethod metric).
        """
        self.network.eval()
        loss, metric = 0, AttachmentMethod()
        for i, batch in enumerate(loader):
            batch = tuple(t.to(self.device) for t in batch)
            words, attention_mask, token_start_mask, arcs, rels = batch
            try:
                # ignore [CLS]
                token_start_mask[:, 0] = 0
                # ignore [SEP]
                lens = attention_mask.sum(dim=1) - 1
                token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
                # ignore all punctuation if specified
                if not include_punct:
                    puncts = words.new_tensor([punct for punct in self.vocab.puncts])
                    token_start_mask &= words.unsqueeze(-1).ne(puncts).all(-1)
                s_arc, s_rel = self.network(words, attention_mask)
                s_arc, s_rel = s_arc[token_start_mask], s_rel[token_start_mask]
                gold_arcs, gold_rels = arcs[token_start_mask], rels[token_start_mask]
                pred_arcs, pred_rels = self.decode(s_arc, s_rel)
                loss += self.get_loss(s_arc, s_rel, gold_arcs, gold_rels)
                metric(pred_arcs, pred_rels, gold_arcs, gold_rels)
            # NOTE(review): bare except, same caveats as in train().
            except:
                for sentence in words:
                    print(self.tokenizer.convert_ids_to_tokens(sentence.detach().to(torch.device("cpu")).numpy()))
                print('***DEBUGGING PARSER START***')
                self.network.eval()
                self.network(words, attention_mask, debug=True)
                print('***DEBUGGING PARSER END***')
                print('words', words.shape)
                print('arcs', arcs.shape)
                print('rels', rels.shape)
                print('attention_mask', attention_mask.shape)
                print('token_start_mask', token_start_mask.shape)
                print('s_arc', s_arc.shape)
                print('s_rel', s_rel.shape)
                print('gold_arcs', gold_arcs.shape)
                print('gold_rels', gold_rels.shape)
                print('pred_arcs', pred_arcs.shape)
                print('pred_rels', pred_rels.shape)
                raise RuntimeError('evaluation failed.')
        loss /= len(loader)
        return loss, metric
    @torch.no_grad()
    def predict(self, loader):
        """Predict arcs and relation labels for every sentence in ``loader``.

        Returns (all_arcs, all_rels): per-sentence lists, with relation ids
        mapped back to label strings via the vocab.
        """
        self.network.eval()
        all_arcs, all_rels = [], []
        # for words, attention_mask, token_start_mask, arcs, rels in tqdm(loader):
        for words, attention_mask, token_start_mask, arcs, rels in loader:
            # ignore [CLS]
            token_start_mask[:, 0] = 0
            # ignore [SEP]
            lens = attention_mask.sum(dim=1) - 1
            token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
            s_arc, s_rel = self.network(words, attention_mask)
            s_arc, s_rel = s_arc[token_start_mask], s_rel[token_start_mask]
            pred_arcs, pred_rels = self.decode(s_arc, s_rel)
            # lens for splitting
            lens = token_start_mask.sum(dim=1).tolist()
            all_arcs.extend(torch.split(pred_arcs, lens))
            all_rels.extend(torch.split(pred_rels, lens))
        all_arcs = [seq.tolist() for seq in all_arcs]
        all_rels = [self.vocab.id2rel(seq) for seq in all_rels]
        return all_arcs, all_rels
    @torch.no_grad()
    def get_embeddings(self, loader, layer_index=-1, return_all=False, ignore=True, ignore_token_start_mask=False):
        """Extract per-word BERT embeddings.

        layer_index selects the BERT layer; with ``return_all`` every layer is
        stacked along a leading axis.  ``ignore`` drops [CLS]/[SEP];
        ``ignore_token_start_mask`` uses the attention mask instead of word
        starts (i.e. keeps every sub-token).  Returns a list of per-sentence
        numpy arrays.
        """
        self.network.eval()
        all_embeddings = []
        for words, attention_mask, token_start_mask in loader:
            if ignore_token_start_mask:
                token_start_mask = attention_mask.clone()
            if ignore:
                # ignore [CLS]
                token_start_mask[:, 0] = 0
                # ignore [SEP]
                lens = attention_mask.sum(dim=1) - 1
                token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
            embed = self.network.get_embeddings(words, attention_mask, layer_index, return_all=return_all)
            if return_all:
                embed = torch.stack(embed) # [num_layer, batch_size, seq_len, bert_dim]
                embed = embed[:,token_start_mask] # [num_layer, num_word, bert_dim]
            else:
                embed = embed[token_start_mask] # [num_word, bert_dim]
            # lens for splitting
            lens = token_start_mask.sum(dim=1).tolist()
            for sentence_embed in torch.split(embed, lens, dim=-2):
                all_embeddings.append(np.array(sentence_embed.tolist()))
        return all_embeddings
    @torch.no_grad()
    def get_concat_embeddings(self, loader):
        """Extract layer-concatenated BERT embeddings per word.

        NOTE(review): unlike get_embeddings, this does NOT mask out
        [CLS]/[SEP] — TODO confirm that is intentional.
        """
        self.network.eval()
        all_embeddings = []
        for words, attention_mask, token_start_mask in loader:
            embed = self.network.get_concat_embeddings(words, attention_mask)
            embed = embed[token_start_mask] # [num_word, bert_dim]
            # lens for splitting
            lens = token_start_mask.sum(dim=1).tolist()
            for sentence_embed in torch.split(embed, lens, dim=-2):
                all_embeddings.append(np.array(sentence_embed.tolist()))
        return all_embeddings
    @torch.no_grad()
    def get_avg_concat_embeddings(self, loader, ignore=True, layer_index=-1):
        """Layer-concatenated embeddings, averaged over each word's sub-tokens.

        A word's embedding is the mean of all its word-piece embeddings (the
        positions between consecutive token starts).  Returns a list of
        per-sentence numpy arrays.  ``layer_index`` is accepted but unused
        here (concat embeddings span all layers).
        """
        self.network.eval()
        all_embeddings = []
        for words, attention_mask, token_start_mask in loader:
            # [batch_size, seq_len, bert_dim]
            embed = self.network.get_concat_embeddings(words, attention_mask)
            if ignore:
                # ignore [CLS]
                token_start_mask[:, 0] = 0
                # ignore [SEP]
                lens = attention_mask.sum(dim=1) - 1
                token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
                # need to take care of attention as well since we later rely on attention to do averaging
                attention_mask[torch.arange(len(token_start_mask)), lens] = 0
            for sent_embed, sent_att_mask, sent_mask in zip(embed, attention_mask, token_start_mask):
                sent_avg_embeddings = []
                # tmp accumulates word-piece vectors for the current word;
                # tmp_len counts them for the average.
                tmp = None
                tmp_len = 0
                sent_embed = sent_embed.tolist()
                sent_att_mask = sent_att_mask.tolist()
                sent_mask = sent_mask.tolist()
                for word_embed, word_att_mask, word_mask in zip(sent_embed, sent_att_mask, sent_mask):
                    if word_att_mask != 1:
                        # Padding reached: flush the last word and stop.
                        if tmp is not None:
                            sent_avg_embeddings.append(tmp/tmp_len)
                            tmp = None
                        break
                    if word_mask == 1:
                        # New word starts: flush the previous accumulation.
                        if tmp is not None:
                            if tmp_len == 0:
                                tmp_len = 1
                            sent_avg_embeddings.append(tmp/tmp_len)
                        tmp = np.array(word_embed)
                        tmp_len = 1
                    else:
                        # Continuation word-piece of the current word.
                        if tmp is not None:
                            tmp += np.array(word_embed)
                            tmp_len += 1
                # take care of last word when sentence len == max_seq_len in batch
                if tmp is not None:
                    sent_avg_embeddings.append(tmp/tmp_len)
                all_embeddings.append(np.array(sent_avg_embeddings))
        return all_embeddings
    @torch.no_grad()
    def get_avg_embeddings(self, loader, ignore=True, layer_index=-1):
        """Single-layer embeddings averaged over each word's sub-tokens.

        Same word-piece averaging scheme as get_avg_concat_embeddings, but
        pulls a single BERT layer selected by ``layer_index``.
        """
        self.network.eval()
        all_embeddings = []
        for words, attention_mask, token_start_mask in loader:
            # [batch_size, seq_len, bert_dim]
            embed = self.network.get_embeddings(words, attention_mask, layer_index)
            if ignore:
                # ignore [CLS]
                token_start_mask[:, 0] = 0
                # ignore [SEP]
                lens = attention_mask.sum(dim=1) - 1
                token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
                # need to take care of attention as well since we later rely on attention to do averaging
                attention_mask[torch.arange(len(token_start_mask)), lens] = 0
            for sent_embed, sent_att_mask, sent_mask in zip(embed, attention_mask, token_start_mask):
                sent_avg_embeddings = []
                tmp = None
                tmp_len = 0
                sent_embed = sent_embed.tolist()
                sent_att_mask = sent_att_mask.tolist()
                sent_mask = sent_mask.tolist()
                for word_embed, word_att_mask, word_mask in zip(sent_embed, sent_att_mask, sent_mask):
                    if word_att_mask != 1:
                        # Padding reached: flush the last word and stop.
                        if tmp is not None:
                            sent_avg_embeddings.append(tmp/tmp_len)
                            tmp = None
                        break
                    if word_mask == 1:
                        # New word starts: flush the previous accumulation.
                        if tmp is not None:
                            if tmp_len == 0:
                                tmp_len = 1
                            sent_avg_embeddings.append(tmp/tmp_len)
                        tmp = np.array(word_embed)
                        tmp_len = 1
                    else:
                        # Continuation word-piece of the current word.
                        if tmp is not None:
                            tmp += np.array(word_embed)
                            tmp_len += 1
                # take care of last word when sentence len == max_seq_len in batch
                if tmp is not None:
                    sent_avg_embeddings.append(tmp/tmp_len)
                all_embeddings.append(np.array(sent_avg_embeddings))
        return all_embeddings
    @torch.no_grad()
    def get_everything(self, loader):
        """Return per-sentence arc scores, rel scores, and embeddings.

        Score matrices are truncated to each sentence's true length on their
        last axis before being converted to numpy.
        """
        self.network.eval()
        all_arcs, all_rels, all_embeddings = [], [], []
        for words, attention_mask, token_start_mask in loader:
            # ignore [CLS]
            token_start_mask[:, 0] = 0
            # ignore [SEP]
            lens = attention_mask.sum(dim=1) - 1
            token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
            s_arc, s_rel, embed = self.network.get_everything(words, attention_mask)
            s_arc, s_rel, embed = s_arc[token_start_mask], s_rel[token_start_mask], embed[token_start_mask]
            # lens for splitting
            lens = token_start_mask.sum(dim=1).tolist()
            for i, sentence_arc in enumerate(torch.split(s_arc, lens)):
                all_arcs.append(np.array(sentence_arc[:,:lens[i]].tolist()))
            for i, sentence_rel in enumerate(torch.split(s_rel, lens)):
                all_rels.append(np.array(sentence_rel[:,:lens[i]].tolist()))
            for sentence_embed in torch.split(embed, lens, dim=-2):
                all_embeddings.append(np.array(sentence_embed.tolist()))
        return all_arcs, all_rels, all_embeddings
    @torch.no_grad()
    def get_matrices(self, loader):
        """Return per-sentence arc and rel score matrices (no embeddings)."""
        self.network.eval()
        all_arcs, all_rels = [], []
        for words, attention_mask, token_start_mask in loader:
            # ignore [CLS]
            token_start_mask[:, 0] = 0
            # ignore [SEP]
            lens = attention_mask.sum(dim=1) - 1
            token_start_mask[torch.arange(len(token_start_mask)), lens] = 0
            s_arc, s_rel = self.network(words, attention_mask)
            s_arc, s_rel = s_arc[token_start_mask], s_rel[token_start_mask]
            # lens for splitting
            lens = token_start_mask.sum(dim=1).tolist()
            for i, sentence_arc in enumerate(torch.split(s_arc, lens)):
                all_arcs.append(np.array(sentence_arc[:,:lens[i]].tolist()))
            for i, sentence_rel in enumerate(torch.split(s_rel, lens)):
                all_rels.append(np.array(sentence_rel[:,:lens[i]].tolist()))
        return all_arcs, all_rels
    def get_loss(self, s_arc, s_rel, gold_arcs, gold_rels):
        """Sum of arc cross-entropy and (gold-arc conditioned) rel cross-entropy."""
        # Score rels against the gold head for each token.
        s_rel = s_rel[torch.arange(len(s_rel)), gold_arcs]
        arc_loss = self.criterion(s_arc, gold_arcs)
        rel_loss = self.criterion(s_rel, gold_rels)
        loss = arc_loss + rel_loss
        return loss
    def decode(self, s_arc, s_rel):
        """Greedy decode: argmax head per token, then argmax rel given that head."""
        pred_arcs = s_arc.argmax(dim=-1)
        pred_rels = s_rel[torch.arange(len(s_rel)), pred_arcs].argmax(dim=-1)
        return pred_arcs, pred_rels
| [
"torch.split",
"torch.nn.CrossEntropyLoss",
"tqdm.tqdm",
"torch.stack",
"torch.cuda.device_count",
"datetime.datetime.now",
"numpy.array",
"torch.cuda.is_available",
"parser.parser.BiaffineParser.load",
"torch.no_grad",
"datetime.timedelta",
"parser.metric.AttachmentMethod",
"torch.device"
] | [((6489, 6504), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6502, 6504), False, 'import torch\n'), ((8885, 8900), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8898, 8900), False, 'import torch\n'), ((9940, 9955), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9953, 9955), False, 'import torch\n'), ((11285, 11300), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11298, 11300), False, 'import torch\n'), ((11915, 11930), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11928, 11930), False, 'import torch\n'), ((14167, 14182), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14180, 14182), False, 'import torch\n'), ((16428, 16443), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16441, 16443), False, 'import torch\n'), ((17642, 17657), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17655, 17657), False, 'import torch\n'), ((537, 558), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (556, 558), True, 'import torch.nn as nn\n'), ((570, 595), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (593, 595), False, 'import torch\n'), ((1132, 1143), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (1141, 1143), False, 'from datetime import datetime, timedelta\n'), ((623, 643), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (635, 643), False, 'import torch\n'), ((684, 703), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (696, 703), False, 'import torch\n'), ((2002, 2016), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2014, 2016), False, 'from datetime import datetime, timedelta\n'), ((3786, 3826), 'parser.parser.BiaffineParser.load', 'BiaffineParser.load', (['file', 'cloud_address'], {}), '(file, cloud_address)\n', (3805, 3826), False, 'from parser.parser import BiaffineParser\n'), ((4301, 4313), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (4305, 4313), False, 'from tqdm import tqdm\n'), ((6613, 6631), 
'parser.metric.AttachmentMethod', 'AttachmentMethod', ([], {}), '()\n', (6629, 6631), False, 'from parser.metric import AttachmentMethod\n'), ((11129, 11161), 'torch.split', 'torch.split', (['embed', 'lens'], {'dim': '(-2)'}), '(embed, lens, dim=-2)\n', (11140, 11161), False, 'import torch\n'), ((11759, 11791), 'torch.split', 'torch.split', (['embed', 'lens'], {'dim': '(-2)'}), '(embed, lens, dim=-2)\n', (11770, 11791), False, 'import torch\n'), ((17478, 17510), 'torch.split', 'torch.split', (['embed', 'lens'], {'dim': '(-2)'}), '(embed, lens, dim=-2)\n', (17489, 17510), False, 'import torch\n'), ((2900, 2914), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2912, 2914), False, 'from datetime import datetime, timedelta\n'), ((9693, 9721), 'torch.split', 'torch.split', (['pred_arcs', 'lens'], {}), '(pred_arcs, lens)\n', (9704, 9721), False, 'import torch\n'), ((9751, 9779), 'torch.split', 'torch.split', (['pred_rels', 'lens'], {}), '(pred_rels, lens)\n', (9762, 9779), False, 'import torch\n'), ((10741, 10759), 'torch.stack', 'torch.stack', (['embed'], {}), '(embed)\n', (10752, 10759), False, 'import torch\n'), ((17189, 17213), 'torch.split', 'torch.split', (['s_arc', 'lens'], {}), '(s_arc, lens)\n', (17200, 17213), False, 'import torch\n'), ((17339, 17363), 'torch.split', 'torch.split', (['s_rel', 'lens'], {}), '(s_rel, lens)\n', (17350, 17363), False, 'import torch\n'), ((18327, 18351), 'torch.split', 'torch.split', (['s_arc', 'lens'], {}), '(s_arc, lens)\n', (18338, 18351), False, 'import torch\n'), ((18477, 18501), 'torch.split', 'torch.split', (['s_rel', 'lens'], {}), '(s_rel, lens)\n', (18488, 18501), False, 'import torch\n'), ((1900, 1914), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1912, 1914), False, 'from datetime import datetime, timedelta\n'), ((3742, 3756), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3754, 3756), False, 'from datetime import datetime, timedelta\n'), ((14097, 14126), 'numpy.array', 'np.array', 
(['sent_avg_embeddings'], {}), '(sent_avg_embeddings)\n', (14105, 14126), True, 'import numpy as np\n'), ((16348, 16377), 'numpy.array', 'np.array', (['sent_avg_embeddings'], {}), '(sent_avg_embeddings)\n', (16356, 16377), True, 'import numpy as np\n'), ((13654, 13674), 'numpy.array', 'np.array', (['word_embed'], {}), '(word_embed)\n', (13662, 13674), True, 'import numpy as np\n'), ((15905, 15925), 'numpy.array', 'np.array', (['word_embed'], {}), '(word_embed)\n', (15913, 15925), True, 'import numpy as np\n'), ((3273, 3298), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3296, 3298), False, 'import torch\n'), ((13816, 13836), 'numpy.array', 'np.array', (['word_embed'], {}), '(word_embed)\n', (13824, 13836), True, 'import numpy as np\n'), ((16067, 16087), 'numpy.array', 'np.array', (['word_embed'], {}), '(word_embed)\n', (16075, 16087), True, 'import numpy as np\n'), ((5225, 5244), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5237, 5244), False, 'import torch\n'), ((7939, 7958), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7951, 7958), False, 'import torch\n')] |
import os
import cv2
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from sys import argv
class Graph:
    def __init__(self, adjacency_list: str):
        """
        Build a graph from an adjacency list stored as a text file.
        :param adjacency_list: path to the graph description (one line per
            vertex: the vertex id followed by its neighbours, 1-indexed)
        """
        self.adj_txt = adjacency_list
        self.num_vertices = self.count_vertices()
        self.edges = self.create_edge_check_mat()

    def count_vertices(self) -> int:
        """
        Determine the number of vertices: one line per vertex in the file.
        :return: vertex count
        """
        with open(self.adj_txt, 'r') as handle:
            return sum(1 for _ in handle.readlines())

    def create_edge_check_mat(self) -> list:
        """
        Collect the undirected edge list of the graph, in file order.
        :return: list of (u, v) edges, each undirected edge listed once
        """
        edge_list = []
        # Matrix of "not yet recorded" flags, so each undirected edge is
        # appended only the first time either direction is seen.
        unseen = np.ones((self.num_vertices, self.num_vertices), dtype=np.uint8)
        with open(self.adj_txt, 'r') as handle:
            for row in handle.readlines():
                nums = [int(tok) for tok in row.split()]
                src = nums[0] - 1
                for neighbour in nums[1:]:
                    dst = neighbour - 1
                    if unseen[src, dst] and unseen[dst, src]:
                        edge_list.append((src + 1, dst + 1))
                        unseen[src, dst] = 0
                        unseen[dst, src] = 0
        return edge_list

    def animate_euler_tour(self, euler_tour: list) -> None:
        """
        Record the Euler tour as a video (euler_tour.avi).
        :param euler_tour: sequence of vertices visited by the tour
        :return: nothing
        """
        # Lay out and render the base graph once.
        tour_graph = nx.Graph()
        tour_graph.add_edges_from(self.edges)
        layout = nx.spring_layout(tour_graph)
        nx.draw_networkx(tour_graph, pos=layout, with_labels=True)
        plt.savefig(fname="img.png")
        frame = cv2.imread("img.png")
        height, width = frame.shape[:2]
        # Set up the XVID-encoded video writer at 2 fps.
        fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
        writer = cv2.VideoWriter("euler_tour.avi", fourcc, 2.0, (width, height))
        # Hold the opening frame for a few ticks.
        for _ in range(4):
            writer.write(frame)
        # Walk the tour, highlighting each traversed edge in red.
        last = len(euler_tour) - 1
        for idx in range(last):
            nx.draw_networkx_nodes(tour_graph, layout, [euler_tour[idx]], node_color='r')
            plt.savefig(fname="img.png")
            frame = cv2.imread("img.png")
            writer.write(frame)
            nx.draw_networkx_nodes(tour_graph, layout, [euler_tour[idx]])
            nx.draw_networkx_edges(tour_graph, layout, [(euler_tour[idx], euler_tour[idx + 1])], width=2.0, edge_color='r')
            nx.draw_networkx_nodes(tour_graph, layout, [euler_tour[idx + 1]], node_color='r')
            plt.savefig(fname="img.png")
            frame = cv2.imread("img.png")
            # Hold the closing frame longer, like the opening one.
            if idx == last - 1:
                for _ in range(4):
                    writer.write(frame)
            else:
                writer.write(frame)
        writer.release()
        cv2.destroyAllWindows()
        os.system("rm -rf img.png")
"""
Define arguments to the script
adjacency_txt: Location of the text file that contains the graph in the form of adjacency list
"""
script, adjacency_txt = argv
if __name__ == '__main__':
# Instantiate object of the given graph
graph = Graph(adjacency_txt)
# Find euler tour using the c file
os.system('gcc euler_tour.c -std=c99 -o euler_tour && ./euler_tour ' + adjacency_txt)
# Extract the output of the c file
euler_txt = open("A.txt", "r")
lines = [line for line in euler_txt.readlines()]
# Animate the Euler tour if it exists in the given graph
if int(lines[0]):
tour = [int(v) for v in lines[1].split()]
graph.animate_euler_tour(tour)
euler_txt.close()
| [
"networkx.draw_networkx_edges",
"matplotlib.pyplot.savefig",
"numpy.ones",
"networkx.spring_layout",
"networkx.Graph",
"networkx.draw_networkx",
"cv2.VideoWriter",
"networkx.draw_networkx_nodes",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc",
"os.system",
"cv2.imread"
] | [((3861, 3950), 'os.system', 'os.system', (["('gcc euler_tour.c -std=c99 -o euler_tour && ./euler_tour ' + adjacency_txt)"], {}), "('gcc euler_tour.c -std=c99 -o euler_tour && ./euler_tour ' +\n adjacency_txt)\n", (3870, 3950), False, 'import os\n'), ((1059, 1122), 'numpy.ones', 'np.ones', (['(self.num_vertices, self.num_vertices)'], {'dtype': 'np.uint8'}), '((self.num_vertices, self.num_vertices), dtype=np.uint8)\n', (1066, 1122), True, 'import numpy as np\n'), ((1944, 1954), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1952, 1954), True, 'import networkx as nx\n'), ((2014, 2041), 'networkx.spring_layout', 'nx.spring_layout', (['graph_img'], {}), '(graph_img)\n', (2030, 2041), True, 'import networkx as nx\n'), ((2087, 2141), 'networkx.draw_networkx', 'nx.draw_networkx', (['graph_img'], {'pos': 'pos', 'with_labels': '(True)'}), '(graph_img, pos=pos, with_labels=True)\n', (2103, 2141), True, 'import networkx as nx\n'), ((2150, 2178), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': '"""img.png"""'}), "(fname='img.png')\n", (2161, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2214), 'cv2.imread', 'cv2.imread', (['"""img.png"""'], {}), "('img.png')\n", (2203, 2214), False, 'import cv2\n'), ((2330, 2372), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""X"""', '"""V"""', '"""I"""', '"""D"""'], {}), "('X', 'V', 'I', 'D')\n", (2352, 2372), False, 'import cv2\n'), ((2396, 2481), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""euler_tour.avi"""', 'video_format', '(2.0)', '(img_size[1], img_size[0])'], {}), "('euler_tour.avi', video_format, 2.0, (img_size[1], img_size[0])\n )\n", (2411, 2481), False, 'import cv2\n'), ((3486, 3509), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3507, 3509), False, 'import cv2\n'), ((3518, 3545), 'os.system', 'os.system', (['"""rm -rf img.png"""'], {}), "('rm -rf img.png')\n", (3527, 3545), False, 'import os\n'), ((2675, 2746), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', 
(['graph_img', 'pos', '[euler_tour[i]]'], {'node_color': '"""r"""'}), "(graph_img, pos, [euler_tour[i]], node_color='r')\n", (2697, 2746), True, 'import networkx as nx\n'), ((2759, 2787), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': '"""img.png"""'}), "(fname='img.png')\n", (2770, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2806, 2827), 'cv2.imread', 'cv2.imread', (['"""img.png"""'], {}), "('img.png')\n", (2816, 2827), False, 'import cv2\n'), ((2876, 2931), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['graph_img', 'pos', '[euler_tour[i]]'], {}), '(graph_img, pos, [euler_tour[i]])\n', (2898, 2931), True, 'import networkx as nx\n'), ((2944, 3051), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['graph_img', 'pos', '[(euler_tour[i], euler_tour[i + 1])]'], {'width': '(2.0)', 'edge_color': '"""r"""'}), "(graph_img, pos, [(euler_tour[i], euler_tour[i + 1])],\n width=2.0, edge_color='r')\n", (2966, 3051), True, 'import networkx as nx\n'), ((3058, 3133), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['graph_img', 'pos', '[euler_tour[i + 1]]'], {'node_color': '"""r"""'}), "(graph_img, pos, [euler_tour[i + 1]], node_color='r')\n", (3080, 3133), True, 'import networkx as nx\n'), ((3146, 3174), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': '"""img.png"""'}), "(fname='img.png')\n", (3157, 3174), True, 'import matplotlib.pyplot as plt\n'), ((3193, 3214), 'cv2.imread', 'cv2.imread', (['"""img.png"""'], {}), "('img.png')\n", (3203, 3214), False, 'import cv2\n')] |
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import cv2
def equalize(img):
    """
    Histogram-equalize a single-channel uint8 image.

    Maps each grey level i to x_max / size * cumulative_count(i) -- the
    classic histogram-equalization transfer function, scaled to the
    image's own maximum intensity rather than 255.

    :param img: 2-D uint8 greyscale image (values in [0, 255])
    :return: equalized image of the same shape, dtype uint8
    """
    x_max = img.max()
    s = img.size
    # Histogram of all 256 grey levels in one C-level pass (replaces the
    # original 256 separate np.count_nonzero scans over the image).
    h = np.bincount(img.ravel(), minlength=256)
    # Cumulative distribution -> lookup table of new grey levels.
    lut = x_max / s * np.cumsum(h)
    # Apply the LUT; the uint8 cast truncates exactly like the original
    # float-into-uint8 element assignment did.
    return lut[img].astype(np.uint8)
img = io.imread("./dataset/images/imori_256x256_dark.png")
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ans = equalize(gray)
plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.title("gray")
plt.imshow(gray, cmap="gray")
plt.subplot(1, 2, 2)
plt.title("answer")
plt.imshow(ans, cmap="gray")
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.zeros_like",
"numpy.count_nonzero",
"skimage.io.imread",
"matplotlib.pyplot.figure",
"numpy.zeros",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((379, 431), 'skimage.io.imread', 'io.imread', (['"""./dataset/images/imori_256x256_dark.png"""'], {}), "('./dataset/images/imori_256x256_dark.png')\n", (388, 431), False, 'from skimage import io\n'), ((439, 476), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (451, 476), False, 'import cv2\n'), ((499, 526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (509, 526), True, 'import matplotlib.pyplot as plt\n'), ((527, 547), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (538, 547), True, 'import matplotlib.pyplot as plt\n'), ((548, 565), 'matplotlib.pyplot.title', 'plt.title', (['"""gray"""'], {}), "('gray')\n", (557, 565), True, 'import matplotlib.pyplot as plt\n'), ((566, 595), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gray'], {'cmap': '"""gray"""'}), "(gray, cmap='gray')\n", (576, 595), True, 'import matplotlib.pyplot as plt\n'), ((596, 616), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (607, 616), True, 'import matplotlib.pyplot as plt\n'), ((617, 636), 'matplotlib.pyplot.title', 'plt.title', (['"""answer"""'], {}), "('answer')\n", (626, 636), True, 'import matplotlib.pyplot as plt\n'), ((637, 665), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ans'], {'cmap': '"""gray"""'}), "(ans, cmap='gray')\n", (647, 665), True, 'import matplotlib.pyplot as plt\n'), ((666, 676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (674, 676), True, 'import matplotlib.pyplot as plt\n'), ((153, 166), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (161, 166), True, 'import numpy as np\n'), ((245, 263), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (258, 263), True, 'import numpy as np\n'), ((207, 233), 'numpy.count_nonzero', 'np.count_nonzero', (['(img == i)'], {}), '(img == i)\n', (223, 233), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from orix.vector.neo_euler import Rodrigues, Homochoric
from orix.quaternion.rotation import Rotation
""" Rodrigues """
@pytest.mark.parametrize(
    "rotation, expected",
    [(Rotation([1, 0, 0, 0]), [0, 0, 0]),
     (Rotation([0.9239, 0.2209, 0.2209, 0.2209]), [0.2391, 0.2391, 0.2391])],
)
def test_from_rotation(rotation, expected):
    """Rodrigues.from_rotation matches the reference vector."""
    assert np.allclose(Rodrigues.from_rotation(rotation).data, expected, atol=1e-4)
@pytest.mark.parametrize(
    "rodrigues, expected",
    [(Rodrigues([0.2391, 0.2391, 0.2391]), np.pi / 4)],
)
def test_angle(rodrigues, expected):
    """The angle property recovers the rotation angle."""
    assert np.allclose(rodrigues.angle.data, expected, atol=1e-3)
""" Homochoric"""
@pytest.mark.parametrize(
    "rotation", [Rotation([1, 0, 0, 0]), Rotation([0.9239, 0.2209, 0.2209, 0.2209])]
)
def test_Homochoric_from_rotation(rotation):
    """Smoke test: conversion to homochoric must not raise."""
    # The unused binding and the explicit `return None` in the original
    # were dead code -- pytest ignores return values.
    Homochoric.from_rotation(rotation)
@pytest.mark.parametrize(
    "rotation", [Rotation([1, 0, 0, 0]), Rotation([0.9239, 0.2209, 0.2209, 0.2209])]
)
@pytest.mark.xfail(strict=True, reason=AttributeError)
def test_Homochoric_angle(rotation):
    """Homochoric vectors have no angle; the attribute access must fail."""
    homochoric = Homochoric.from_rotation(rotation)
    homochoric.angle
| [
"numpy.allclose",
"pytest.mark.xfail",
"orix.quaternion.rotation.Rotation",
"orix.vector.neo_euler.Rodrigues",
"orix.vector.neo_euler.Rodrigues.from_rotation",
"orix.vector.neo_euler.Homochoric.from_rotation"
] | [((1089, 1142), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'reason': 'AttributeError'}), '(strict=True, reason=AttributeError)\n', (1106, 1142), False, 'import pytest\n'), ((410, 443), 'orix.vector.neo_euler.Rodrigues.from_rotation', 'Rodrigues.from_rotation', (['rotation'], {}), '(rotation)\n', (433, 443), False, 'from orix.vector.neo_euler import Rodrigues, Homochoric\n'), ((455, 505), 'numpy.allclose', 'np.allclose', (['rodrigues.data', 'expected'], {'atol': '(0.0001)'}), '(rodrigues.data, expected, atol=0.0001)\n', (466, 505), True, 'import numpy as np\n'), ((689, 734), 'numpy.allclose', 'np.allclose', (['angle.data', 'expected'], {'atol': '(0.001)'}), '(angle.data, expected, atol=0.001)\n', (700, 734), True, 'import numpy as np\n'), ((922, 956), 'orix.vector.neo_euler.Homochoric.from_rotation', 'Homochoric.from_rotation', (['rotation'], {}), '(rotation)\n', (946, 956), False, 'from orix.vector.neo_euler import Rodrigues, Homochoric\n'), ((1188, 1222), 'orix.vector.neo_euler.Homochoric.from_rotation', 'Homochoric.from_rotation', (['rotation'], {}), '(rotation)\n', (1212, 1222), False, 'from orix.vector.neo_euler import Rodrigues, Homochoric\n'), ((799, 821), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (807, 821), False, 'from orix.quaternion.rotation import Rotation\n'), ((823, 865), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[0.9239, 0.2209, 0.2209, 0.2209]'], {}), '([0.9239, 0.2209, 0.2209, 0.2209])\n', (831, 865), False, 'from orix.quaternion.rotation import Rotation\n'), ((1018, 1040), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (1026, 1040), False, 'from orix.quaternion.rotation import Rotation\n'), ((1042, 1084), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[0.9239, 0.2209, 0.2209, 0.2209]'], {}), '([0.9239, 0.2209, 0.2209, 0.2209])\n', (1050, 1084), False, 'from orix.quaternion.rotation import Rotation\n'), ((225, 
247), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (233, 247), False, 'from orix.quaternion.rotation import Rotation\n'), ((270, 312), 'orix.quaternion.rotation.Rotation', 'Rotation', (['[0.9239, 0.2209, 0.2209, 0.2209]'], {}), '([0.9239, 0.2209, 0.2209, 0.2209])\n', (278, 312), False, 'from orix.quaternion.rotation import Rotation\n'), ((561, 596), 'orix.vector.neo_euler.Rodrigues', 'Rodrigues', (['[0.2391, 0.2391, 0.2391]'], {}), '([0.2391, 0.2391, 0.2391])\n', (570, 596), False, 'from orix.vector.neo_euler import Rodrigues, Homochoric\n')] |
import numpy as np
"""[Recreates the adjacency matrix with which the steady state probabilities get multiplied iteratively
The adjacency matrix A of a set of pages (nodes) defines the linking structure]
Returns:
[numpy Matrix] -- [The matrix with which the steady state probabilities will get multipled]
"""
def recreate_adjacency_matrix(marchov_chain):
# since the size of the matrix is n x n getting the size of the row is enough
num_urls = marchov_chain.shape[0]
# the probabilty of visiting a url, which is the 1 / total number of urls
probability = 1 / num_urls
probability_matrix = np.zeros((num_urls, num_urls))
# probability_matrix will contain all similar values which is the probability of visiting a particular page
probability_matrix[:] = probability
'''
In assigning a PageRank score to each node of the web graph, we use the teleport operation in two ways:
(1) When at a node with no out-links, the surfer invokes the teleport operation.
(2) At any node that has outgoing links, the surfer invokes the teleport operation with a probability of alpha.
Typical value of alpha is 0.1
'''
alpha = 0.1
adjacency_matrix = alpha * marchov_chain + ((1 - alpha) * probability_matrix)
return adjacency_matrix
"""[Computes the page rank of a given marchov chain url]
Returns:
[list] -- [A list of n page rank score where n is the total number of urls]
"""
def compute_page_rank(marchov_chain):
# create the adjacency matrix with which the steady state probability will get multiplied iteratively
adjacency_matrix = recreate_adjacency_matrix(marchov_chain)
num_urls = adjacency_matrix.shape[0]
# initial vector for the steady state probabilities will always be <1, 0, 0.....n>
steady_state_probabilities = np.zeros((1, num_urls))
# this creates a vector of <1, 0, 0...n>
steady_state_probabilities[0][0] = 1
# the steady_state probabilities always needs to be transposed
steady_state_probabilities = np.transpose(steady_state_probabilities)
previous_state_probabilities = steady_state_probabilities
while True:
steady_state_probabilities = adjacency_matrix * steady_state_probabilities
# we stop when the values have converged and no longer change over the iterations
if (previous_state_probabilities == steady_state_probabilities).all():
# if the values converge then the steady_state_probabilities are returned which is the page rank score of the urls
return steady_state_probabilities
# otherwise the current steady state probabilities becomes the previous steady state probabilities as we are about to begin another iterations
previous_state_probabilities = steady_state_probabilities
if __name__ == '__main__':
    '''
    NOTE - This marchov chain is transpose of the modified adjacency matrix of the graph.
    In an adjacency matrix the rows represent an individual url in the graph and columns
    represent the urls that the graph has already visited. This adjacency matrix will be
    transposed and modified to recreate the adjacency matrix. If a url has visited other urls then
    the columns will get replaced by 1 / n, where n is the total number of urls visited by that url.
    The matrix will essentially be a transpose of the adjacency matrix with the columns divided by total non zero entries.
    '''
    # Example: 3-page link graph; each column already sums to 1.
    marchov_chain = np.matrix([[0, 0, 1],
                                [1, 0.5, 0],
                                [0, 0.5, 0]])
    # Power-iterate to the steady state and print the per-page scores.
    page_rank = compute_page_rank(marchov_chain)
    print(page_rank)
print(np.sum(page_rank)) | [
"numpy.sum",
"numpy.zeros",
"numpy.transpose",
"numpy.matrix"
] | [((620, 650), 'numpy.zeros', 'np.zeros', (['(num_urls, num_urls)'], {}), '((num_urls, num_urls))\n', (628, 650), True, 'import numpy as np\n'), ((1842, 1865), 'numpy.zeros', 'np.zeros', (['(1, num_urls)'], {}), '((1, num_urls))\n', (1850, 1865), True, 'import numpy as np\n'), ((2058, 2098), 'numpy.transpose', 'np.transpose', (['steady_state_probabilities'], {}), '(steady_state_probabilities)\n', (2070, 2098), True, 'import numpy as np\n'), ((3530, 3578), 'numpy.matrix', 'np.matrix', (['[[0, 0, 1], [1, 0.5, 0], [0, 0.5, 0]]'], {}), '([[0, 0, 1], [1, 0.5, 0], [0, 0.5, 0]])\n', (3539, 3578), True, 'import numpy as np\n'), ((3691, 3708), 'numpy.sum', 'np.sum', (['page_rank'], {}), '(page_rank)\n', (3697, 3708), True, 'import numpy as np\n')] |
import os
import cv2 as cv
import numpy as np
# STEP 1 : Selecting Data For Modeling
# ____________________________________________________________________________
# Read Cascade Classifier from haar_face.xml
haar_cascade = cv.CascadeClassifier('../haar_face.xml')
# Location of Training dataset
DIR = './train'
features = [] # Features for training (faces of people)
labels = [] # For labels corresponding to features (whose face is it)
people = [] # List of people in dataset
# Each sub-folder of the training directory names one person (one class).
for folder in os.listdir(DIR):
    people.append(folder)
print(f'List of people : {people}')
def create_data():
    """
    Populate the module-level `features` and `labels` lists.

    For every person folder under DIR, reads each image, converts it to
    greyscale, detects faces with the Haar cascade, and appends every
    detected face region to `features`, with the person's index in the
    `people` list appended to `labels`.
    :return: None
    """
    for person in people:
        path = os.path.join(DIR, person)
        # Numeric class label = position of the person in `people`.
        label = people.index(person)
        for img in os.listdir(path):
            path_img = os.path.join(path, img)
            get_img = cv.imread(path_img)
            grey = cv.cvtColor(get_img, cv.COLOR_BGR2GRAY)
            # Detecting faces in image(numpy array) 'grey' in rectangular co-ordinate system
            faces_rect = haar_cascade.detectMultiScale(grey, scaleFactor=1.1, minNeighbors=4)
            for (x, y, w, h) in faces_rect:
                # Crop the face region of interest and record it with its label.
                faces_roi = grey[y:y+h, x:x+w]
                features.append(faces_roi)
                labels.append(label)
create_data()
print("________features and labels creation successful________")
# STEP 2 : Creating, Training and Saving our model
# ____________________________________________________________________________
# dtype='object' because the face crops have differing sizes.
features = np.array(features, dtype='object')
labels = np.array(labels)
# Train an LBPH (Local Binary Patterns Histograms) recognizer on the faces.
face_recognizer = cv.face.LBPHFaceRecognizer_create()
face_recognizer.train(features, labels)
print("________LBPH Face Recognizer training successful________")
face_recognizer.save('trained_face_model.yml')
print('________Saving Trained model in "trained_face_model.yml"________')
| [
"os.listdir",
"os.path.join",
"cv2.face.LBPHFaceRecognizer_create",
"numpy.array",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.imread"
] | [((227, 267), 'cv2.CascadeClassifier', 'cv.CascadeClassifier', (['"""../haar_face.xml"""'], {}), "('../haar_face.xml')\n", (247, 267), True, 'import cv2 as cv\n'), ((499, 514), 'os.listdir', 'os.listdir', (['DIR'], {}), '(DIR)\n', (509, 514), False, 'import os\n'), ((1540, 1574), 'numpy.array', 'np.array', (['features'], {'dtype': '"""object"""'}), "(features, dtype='object')\n", (1548, 1574), True, 'import numpy as np\n'), ((1584, 1600), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1592, 1600), True, 'import numpy as np\n'), ((1620, 1655), 'cv2.face.LBPHFaceRecognizer_create', 'cv.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (1653, 1655), True, 'import cv2 as cv\n'), ((706, 731), 'os.path.join', 'os.path.join', (['DIR', 'person'], {}), '(DIR, person)\n', (718, 731), False, 'import os\n'), ((789, 805), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (799, 805), False, 'import os\n'), ((830, 853), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (842, 853), False, 'import os\n'), ((876, 895), 'cv2.imread', 'cv.imread', (['path_img'], {}), '(path_img)\n', (885, 895), True, 'import cv2 as cv\n'), ((915, 954), 'cv2.cvtColor', 'cv.cvtColor', (['get_img', 'cv.COLOR_BGR2GRAY'], {}), '(get_img, cv.COLOR_BGR2GRAY)\n', (926, 954), True, 'import cv2 as cv\n')] |
"""
Copyright 2017 <NAME>, <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import scipy as sp
import math
import matplotlib.pyplot as plt
from . import solver
from . import project_simplex_box
from . import pgd
import llops as yp
import llops.operators as ops
from llops.solvers import iterative, objectivefunctions
from llops import iFt, Ft
from llops.config import default_backend, default_dtype
eps = 1e-13
def dnf(x):
    """Deconvolution noise factor (DNF) of a 1-D kernel.

    Computed from the kernel's power spectrum as the square root of the
    mean ratio between the peak spectral power and the power at each
    frequency.  An empty kernel is defined to have a DNF of 0.
    """
    if not len(x):
        return 0
    # Power spectrum of the kernel.
    power = np.abs(np.fft.fft(x)) ** 2
    # Mean amplification of noise relative to the best-conditioned bin.
    ratios = np.max(power) / power
    return np.sqrt(np.mean(ratios))
def cond(x):
    """Spectral condition number of a 1-D kernel.

    Ratio of the largest to the smallest magnitude in the kernel's
    Fourier transform; an empty kernel is defined to have condition 0.
    """
    if not len(x):
        return 0
    magnitude = np.abs(np.fft.fft(x))
    return np.max(magnitude) / np.min(magnitude)
def vector(pulse_count, kernel_length=None,
           method='random_phase', n_tests=100, metric='dnf', dtype=None, backend=None):
    """
    Search `n_tests` random blur vectors and return the one with the best
    (lowest) value of the chosen spectral metric.

    :param pulse_count: number of illuminated time points per vector
    :param kernel_length: vector length; defaults to 2 * pulse_count
    :param method: 'random_phase' (binary, endpoints forced on) or 'random'
    :param n_tests: number of candidate vectors to draw
    :param metric: 'dnf' or 'cond' -- score to minimize
    :param dtype: llops dtype of the returned vector (default from config)
    :param backend: llops backend of the returned vector (default from config)
    :return: (best_kernel, best_metric_value)
    """
    # Parse dtype and backend
    dtype = dtype if dtype is not None else yp.config.default_dtype
    backend = backend if backend is not None else yp.config.default_backend
    # Calculate kernel length if not provided
    if kernel_length is None:
        kernel_length = 2 * pulse_count
    # Compute many kernels
    kernel_list = []
    for _ in range(n_tests):
        # Generate blur kernel
        if method == 'random_phase':
            # Ensure first and last time point are illuminated
            # NOTE(review): the random indicies are drawn from the full range,
            # so they can coincide with 0 or kernel_length - 1; the realized
            # pulse count may then be slightly below pulse_count.
            indicies = np.random.choice(kernel_length, size=(pulse_count - 2), replace=False)
            illum = np.zeros(kernel_length)
            illum[indicies] = 1.0
            illum[0], illum[-1] = 1.0, 1.0
        elif method == 'random':
            illum = np.random.uniform(size=kernel_length)
        else:
            raise ValueError('Invalid kernel generation method %s' % method)
        # Append kernel to list
        kernel_list.append(illum)
    ## Choose best kernel
    if metric == 'cond':
        # Determine kernel with best (lowest) condition #
        metric_best = 1e10
        kernel_best = []
        for kernel in kernel_list:
            kappa = cond(kernel)
            if kappa < metric_best:
                kernel_best = kernel
                metric_best = kappa
    elif metric == 'dnf':
        # Determine kernel with best (lowest) dnf
        metric_best = 1e10
        kernel_best = []
        for kernel in kernel_list:
            _dnf = dnf(kernel)
            if _dnf < metric_best:
                kernel_best = kernel
                metric_best = _dnf
    else:
        raise ValueError
    # Normalize kernel to unit sum
    kernel_best /= np.sum(kernel_best)
    # Cast to the requested llops dtype / backend
    kernel_best = yp.cast(kernel_best, dtype, backend)
    return (kernel_best, metric_best)
def kernel(shape, pulse_count, kernel_length=None, method='random_phase',
           n_tests=100, metric='dnf', axis=1, position='center'):
    """Generate a blur kernel of the given shape.

    First searches for a good blur vector (see ``vector``), then embeds
    it into a kernel of ``shape`` (see ``fromVector``).
    """
    best_vector, _metric = vector(pulse_count, kernel_length=kernel_length,
                                  method=method, n_tests=n_tests, metric=metric)
    return fromVector(best_vector, shape=shape, axis=axis, position=position)
def generate(shape, blur_kernel_length, method='random_phase', axis=1,
             blur_illumination_fraction=0.5, position='center', normalize=True):
    """Generate a blur kernel of ``shape`` from a synthesized blur vector.

    :param shape: output kernel shape
    :param blur_kernel_length: number of time points in the blur vector
    :param method: 'constant', 'random_phase'/'coded', or 'random'/'uniform'
    :param axis: image axis along which the blur extends
    :param blur_illumination_fraction: illuminated fraction (constant / coded)
    :param position: placement of the vector in the kernel (see fromVector)
    :param normalize: if True, normalize the kernel to unit sum
    :raises ValueError: if ``method`` is not recognized
    :return: the blur kernel produced by ``fromVector``
    """
    # Generate the 1-D illumination (blur) vector
    if method == 'constant':
        illum = yp.ones(blur_kernel_length) * blur_illumination_fraction
    elif method in ('random_phase', 'coded'):
        # NOTE(review): genRandInitialization is defined elsewhere in this
        # module -- assumed to return (vector, metric).
        illum, _ = genRandInitialization(blur_kernel_length, blur_illumination_fraction)
    elif method in ('random', 'uniform'):
        illum = np.random.uniform(size=blur_kernel_length)
    else:
        # Raise instead of `assert False` (asserts disappear under
        # `python -O`); matches the error style of `vector` above.
        raise ValueError("method " + method + " unrecognized")
    # Embed the vector into a kernel of the requested shape
    kernel = fromVector(illum, shape, axis, position, normalize=normalize)
    # Return kernel
    return kernel
def fromVector(blur_vector, shape, axis=1, position='center',
               normalize=True, reverse=False, interpolation_factor=1.0):
    """Converts a blur vector to a blur kernel.

    :param blur_vector: 1-D illumination (blur) vector
    :param shape: output kernel shape
    :param axis: image axis along which the vector is laid out (0 or 1)
    :param position: 'center', 'center_left', 'center_right', or 'origin'
    :param normalize: if True, scale the kernel so it sums to one
    :param reverse: if True, flip the vector before placing it
    :param interpolation_factor: Fourier-resample the vector by this factor
    :return: blur kernel of the requested shape
    """
    # Get length of kernel
    blur_kernel_length = yp.size(blur_vector)
    # Find correct dimension
    ndims = len(shape)
    # Expand illum to 2D and ensure it's in the correct direction
    blur_vector = yp.expandDims(blur_vector, ndims)
    # Reverse blur vector if requested
    if reverse:
        blur_vector = yp.flip(blur_vector)
    # Ensure blur vector is 1D
    blur_vector = yp.vec(blur_vector)
    # Apply interpolation (zero-pad in Fourier space to resample)
    if interpolation_factor != 1.0:
        interpolated_length = int(np.round(interpolation_factor * len(blur_vector)))
        blur_vector = yp.real(yp.iFt(yp.pad(yp.Ft(blur_vector), interpolated_length, center=True)))
    # Ensure blur kernel has the correct dimensions
    blur_vector = yp.expandDims(blur_vector, ndims)
    # Rotate if necessary
    if axis == 1:
        blur_vector = blur_vector.T
    # Position kernel in image
    if position == 'center':
        kernel = yp.pad(blur_vector, shape, center=True)
    elif position == 'center_left':
        # Center, then roll half the kernel length towards the start
        roll_amount = [0, 0]
        roll_amount[axis] = -blur_kernel_length // 2
        kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
    elif position == 'center_right':
        # Center, then roll half the kernel length towards the end
        roll_amount = [0, 0]
        roll_amount[axis] = blur_kernel_length // 2
        kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
    elif position == 'origin':
        kernel = yp.pad(blur_vector, shape, crop_start=(0, 0))
    else:
        raise ValueError('Invalid position %s' % position)
    # Center kernel after pad (roll by one along every axis). This is a hack.
    roll_values = [1] * yp.ndim(kernel)
    kernel = yp.roll(kernel, roll_values)
    # Normalize kernel to unit sum
    if normalize:
        kernel /= yp.scalar(yp.sum(kernel))
    return kernel
######################################################################################################
################################ UTILITIES FOR READING FROM DATA #####################################
######################################################################################################
def blurVectorsFromDataset(dataset, dtype=None, backend=None, debug=False,
                           use_phase_ramp=False, corrections={}):
    """
    Generate blur vectors, their ROIs, and per-frame metadata from a
    comptic dataset object.

    Args:
        dataset: An io.Dataset object
        dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
        backend: llops backend to use (defaults to the llops global default)
        debug: unused here
        use_phase_ramp: if True, synthesize blur vectors via Fourier phase ramps
        corrections: optional dict; key 'scale' rescales positions and ROIs
            (mutable default is only read here, never mutated)
    Returns:
        blur_vector_list: one blur vector per frame
        blur_vector_roi_list: bounding-box ROI of each frame's blur vector
        frame_segment_map: zero-based linear segment index of each frame
        position_list: positions (in pixels) used by each frame
        illumination_list: illumination states used by each frame
    """
    dtype = dtype if dtype is not None else yp.config.default_dtype
    backend = backend if backend is not None else yp.config.default_backend
    # Calculate effective pixel size if necessary
    if dataset.metadata.system.eff_pixel_size_um is None:
        dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
            (dataset.metadata.objective.mag * dataset.metadata.system.mag)
    # Recover and store position and illumination list
    blur_vector_roi_list = []
    position_list, illumination_list = [], []
    frame_segment_map = []
    for frame_index in range(len(dataset.frame_list)):
        frame_state = dataset.frame_state_list[frame_index]
        # Store which segment this measurement uses
        frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])
        # Extract list of illumination values for each time point
        if 'illumination' in frame_state:
            illumination_list_frame = []
            for time_point in frame_state['illumination']['states']:
                illumination_list_time_point = []
                for illumination in time_point:
                    illumination_list_time_point.append(
                        {'index': illumination['index'], 'value': illumination['value']})
                illumination_list_frame.append(illumination_list_time_point)
        else:
            raise ValueError('Frame %d does not contain illumination information' % frame_index)
        # Extract list of positions for each time point, converting all
        # units (mm / um / pixels) to pixels via the effective pixel size
        if 'position' in frame_state:
            position_list_frame = []
            for time_point in frame_state['position']['states']:
                position_list_time_point = []
                for position in time_point:
                    if 'units' in position['value']:
                        if position['value']['units'] == 'mm':
                            ps_um = dataset.metadata.system.eff_pixel_size_um
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                        elif position['value']['units'] == 'um':
                            position_list_time_point.append(
                                [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                        elif position['value']['units'] == 'pixels':
                            position_list_time_point.append([position['value']['y'], position['value']['x']])
                        else:
                            raise ValueError('Invalid units %s for position in frame %d' %
                                             (position['value']['units'], frame_index))
                    else:
                        # No units in metadata: assume mm
                        ps_um = dataset.metadata.system.eff_pixel_size_um
                        position_list_time_point.append(
                            [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
            position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.
        # Define positions and position indicies used (those with any
        # nonzero illumination in any color channel)
        positions_used, position_indicies_used = [], []
        for index, pos in enumerate(position_list_frame):
            for color in illumination_list_frame[index][0]['value']:
                if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
                    position_indicies_used.append(index)
                    positions_used.append(pos)
        # Generate ROI for this blur vector
        blur_vector_roi = getPositionListBoundingBox(positions_used)
        # Append to list
        blur_vector_roi_list.append(blur_vector_roi)
        # Crop illumination list to values within the support used
        illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])
        # Store corresponding positions
        position_list.append(positions_used)
    # Apply kernel scaling or compression if necessary
    if 'scale' in corrections:
        for index in range(len(position_list)):
            _positions = np.asarray(position_list[index])
            # Scale each axis about its minimum
            for ax in range(yp.shape(_positions)[1]):
                _positions[:, ax] = ((_positions[:, ax] - yp.min(_positions[:, ax])) * corrections['scale'] + yp.min(_positions[:, ax]))
            position_list[index] = _positions.tolist()
            blur_vector_roi_list[index].shape = [corrections['scale'] * sh for sh in blur_vector_roi_list[index].shape]
    # Synthesize blur vectors
    blur_vector_list = []
    for frame_index in range(len(dataset.frame_list)):
        # Generate blur vectors
        if use_phase_ramp:
            kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
            offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32')
            # Create phase ramp and calculate offset
            R = ops.PhaseRamp(kernel_shape, dtype='complex32')
            # Generate blur vector by summing one phase ramp per position
            blur_vector = yp.zeros(R.M)
            for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                blur_vector += (R * (yp.cast(pos, 'complex32') - offset))
            # Take inverse Fourier Transform
            blur_vector = yp.abs(yp.cast(yp.iFt(blur_vector)), 0.0)
        else:
            blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                     dtype=dtype, backend=backend)
        # Normalize illumination vectors to unit sum
        blur_vector /= yp.scalar(yp.sum(blur_vector))
        # Append to list
        blur_vector_list.append(blur_vector)
    # Subtract minimum of frame_segment_map so segments are zero-based
    frame_segment_map = [segment - min(frame_segment_map) for segment in frame_segment_map]
    # Return
    return blur_vector_list, blur_vector_roi_list, frame_segment_map, position_list, illumination_list
def blurKernelRecoveryFromStatic(blurred, static, solver='iterative', reg=None, iteration_count=10, system_otf=None, threshold=0.2):
    """Recover a blur kernel from a blurred image and a sharp (static) reference of the same scene.

    Args:
        blurred: blurred measurement (2D array).
        static: sharp reference image, same shape as `blurred`.
        solver: 'iterative' runs gradient descent on an L2 objective; any other
            value uses direct Wiener-style inversion in the Fourier domain.
        reg: L2 regularization weight (Wiener regularizer for the direct solver;
            treated as 0 when None in the direct branch).
        iteration_count: number of gradient-descent iterations (iterative solver).
        system_otf: optional system OTF used to band-limit the recovered kernel.
        threshold: fraction of the kernel maximum below which values are zeroed.
    Returns:
        The recovered real-valued blur kernel, reshaped to `static.shape`.
    """
    # Normalize both images to zero-mean, mean-relative scale (skipped for
    # near-zero means to avoid dividing by ~0)
    static_mean = np.mean(static)
    if static_mean > 1e-4:
        static = (static.copy() - static_mean) / static_mean
    blurred_mean = np.mean(blurred)
    if blurred_mean > 1e-4:
        blurred = (blurred.copy() - blurred_mean) / blurred_mean
    # if system_otf is not None:
    #     static = iFt(Ft(static) * system_otf)
    if solver == 'iterative':
        # Model the blurred image as a windowed convolution of the static image;
        # the unknown kernel is the convolution "filter" being solved for.
        A = ops.Convolution(blurred.shape, static, mode='windowed')
        y = blurred.reshape(-1).astype(np.complex64)
        # Initialization: choosing a "good" coefficient value will help in convergence
        initialization = np.ones(y.shape, y.dtype)
        # Define cost function
        objective = objectivefunctions.L2(A, y, l2_reg=reg) #, reg=5e-3)
        # Gradient descent implementation
        kernel_recovered = iterative.GradientDescent(objective).solve(initialization=initialization,
                                                                      step_size=1e-3,
                                                                      nesterov_enabled=True,
                                                                      iteration_count=iteration_count,
                                                                      display_type='text',
                                                                      display_iteration_delta=max((iteration_count // 10),1))
    else:
        if reg is None:
            reg = 0
        # Direct (Wiener-style) deconvolution in the Fourier domain
        kernel_recovered = iFt((np.conj(Ft(static)) * Ft(blurred)) / (np.abs(Ft(static)) ** 2 + reg))
    # Take real part
    kernel_recovered = np.real(kernel_recovered).reshape(static.shape)
    # Subtract low-frequency information
    # NOTE(review): scipy.ndimage.filters is a deprecated namespace (use
    # scipy.ndimage.gaussian_filter) — confirm against the pinned scipy version.
    kernel_recovered -= scipy.ndimage.filters.gaussian_filter(np.real(kernel_recovered.reshape(blurred.shape)), 10)
    # Filter by OTF support, threshold
    if system_otf is not None:
        kernel_recovered = np.real(iFt(Ft(kernel_recovered.reshape(blurred.shape)) * system_otf))
    # Zero everything below threshold * max(kernel)
    kernel_recovered *= (kernel_recovered > threshold * np.max(kernel_recovered))
    return(kernel_recovered)
def registerDatasetImages(dataset, roi=None):
    """Register each sequential pair of frames in a dataset and return the shifts.

    Args:
        dataset: object exposing a ``frame_list`` sequence of 2D frames.
        roi: optional ROI object with a ``slice`` attribute; when given,
            registration uses only that region and the cropped frame pairs are
            also collected in ``image_list``.

    Returns:
        (shift_list, image_list): per-pair (y, x) shifts and, when ``roi`` was
        provided, the corresponding cropped frame pairs (empty list otherwise).
    """
    from comptic.registration import registerImage
    shift_list = []
    image_list = []
    for index in range(1, len(dataset.frame_list)):
        if roi is not None:
            shift_list.append(registerImage(dataset.frame_list[index - 1][roi.slice],
                                            dataset.frame_list[index][roi.slice]))
            image_list.append((dataset.frame_list[index - 1][roi.slice], dataset.frame_list[index][roi.slice]))
        else:
            shift_list.append(registerImage(dataset.frame_list[index - 1], dataset.frame_list[index]))
        print(shift_list)
        # Bug fix: the second %d previously received the whole shift pair
        # (shift_list[-1]) instead of its x component, raising a TypeError.
        print("Registered image %d of %d, shift was (%d, %d) pixels" %
              (index, len(dataset.frame_list), shift_list[-1][0], shift_list[-1][1]))
    return(shift_list, image_list)
def cropAndCenterKernel(kernel_recovered, kernel_size):
    """Center a recovered blur kernel and zero everything outside its support.

    Args:
        kernel_recovered: 2D array containing the recovered blur kernel.
        kernel_size: (y, x) half-extent of the kernel support; must support
            integer division by a scalar (e.g. a numpy integer array).
    Returns:
        A 2D array of the same shape with the kernel centered and all values
        outside a small centered window zeroed.
    """
    # Center maximum value in blur kernel
    max_pos = np.unravel_index(np.argmax(kernel_recovered), kernel_recovered.shape)
    # NOTE: np.roll with an array shift and axis=None operates on the flattened
    # array with the shifts summed — preserved from the original implementation.
    kernel_centered = np.roll(kernel_recovered, -np.asarray(max_pos) + np.asarray(kernel_recovered.shape) //2)
    # Crop to 2x blur kernel fov
    kernel_zeroed = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
    kernel_zeroed[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
                  kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]] = \
        kernel_centered[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
                        kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]]
    # Center at middle of blur kernel
    p = np.where(kernel_zeroed > 0)
    # Bug fix: .astype(np.int) — np.int was removed in NumPy 1.24+; builtin int
    # is the documented replacement.
    # NOTE(review): the shift here is -(mean + shape//2) while the first roll
    # used (shape//2 - max_pos); the sign of shape//2 looks inconsistent —
    # confirm intended centering before changing behavior.
    kernel_centered = np.roll(kernel_zeroed, -np.round(np.asarray((np.mean(p[0]), np.mean(p[1]))) + np.asarray(kernel_zeroed.shape) // 2).astype(int))
    kernel_size_small = kernel_size //2
    # Zero everything outside a resonable shift range
    kernel_zeroed_crop = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
    kernel_zeroed_crop[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
                       kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]] = \
        kernel_centered[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
                        kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]]
    return(kernel_zeroed_crop)
def plotBlurKernelList(blur_kernel_list, max_count_to_show=5, measurement_list=None, figsize=None):
    """Display up to max_count_to_show blur kernels side by side, leaving a
    second subplot row when a measurement_list is supplied."""
    n_shown = min(max_count_to_show, len(blur_kernel_list))
    n_rows = 1 + int(measurement_list is not None)
    if figsize is None:
        plt.figure(figsize=(n_shown * 2.5, 4 * n_rows))
    else:
        plt.figure(figsize=figsize)
    for kernel_index in range(n_shown):
        plt.subplot(n_rows, n_shown, kernel_index + 1)
        plt.imshow(blur_kernel_list[kernel_index], interpolation='bilinear')
        plt.title('Blur Kernel ' + str(kernel_index))
def illustrateMultiFrameKernel(blur_kernel_list, filename):
    """Render a multi-frame blur kernel as one color-coded image and save it to disk."""
    height, width = blur_kernel_list[0].shape[0], blur_kernel_list[0].shape[1]
    image_c = np.zeros((height, width, 3))
    color_cycle = ['r', 'g', 'c', 'm', 'w', 'y']
    for frame_index, kernel in enumerate(blur_kernel_list):
        rgb = matplotlib.colors.to_rgb(color_cycle[frame_index])
        for channel in range(3):
            image_c[:, :, channel] += kernel * rgb[channel]
    # Normalize to [0, 1] for display
    image_c /= np.amax(image_c)
    plt.figure()
    plt.imshow(image_c, interpolation='bilinear')
    plt.xticks([], [])
    plt.yticks([], [])
    plt.tight_layout()
    plt.savefig(filename, transparent=True)
def genSamplingComb(object_size, image_size, dtype=np.complex64):
    """ Generates a comb function corresponding with seperation defined by
        image_size, centered at the center of object_size

    Args:
        object_size: (y, x) shape of the full object grid.
        image_size: (y, x) separation between comb teeth.
        dtype: dtype of the returned comb array.
    Returns:
        (sampling_comb, positions): array of shape object_size with ones at the
        sample locations, and the integer (y, x) positions relative to the
        object center.
    """
    sampling = np.floor(((np.asarray(object_size) / 2) / np.asarray(image_size)))
    sampling_comb = np.zeros(object_size, dtype=dtype)
    yy, xx = np.meshgrid(np.arange(-sampling[0], sampling[0] + 1), np.arange(-sampling[1], sampling[1] + 1))
    # Bug fix: np.int was removed in NumPy 1.24+; builtin int is equivalent.
    positions_0 = np.hstack((yy.ravel()[:, np.newaxis], xx.ravel()[:, np.newaxis])).astype(int)
    # Absolute (y, x) grid positions, centered on the object
    positions = np.zeros(positions_0.shape, dtype=positions_0.dtype)
    positions[:, 0] = object_size[0] // 2 + positions_0[:, 0] * image_size[0]
    positions[:, 1] = object_size[1] // 2 + positions_0[:, 1] * image_size[1]
    for position in positions:
        sampling_comb[position[0], position[1]] = 1
    # Return positions relative to the object center
    positions -= np.asarray(object_size) // 2
    return((sampling_comb, positions))
def genConvolutionSupportList(blur_kernel_list, image_size, threshold=0.05):
    """
    This function generates a list of images defining the support of a windowed convolution operation.

    Returns one support mask per kernel in kernel space, plus the same mask
    flipped along every axis (object space).
    """
    object_size = blur_kernel_list[0].shape
    W = ops.Crop(object_size, image_size)
    kernel_support_mask = []
    object_support_mask = []
    print(W.dtype)
    # Support of the crop window in object space
    window_mask = np.abs(W.H * W * np.ones(W.shape[1], dtype=np.complex64)).reshape(object_size)
    window_vec = window_mask.reshape(-1).astype(np.complex64)
    for blur_kernel in blur_kernel_list:
        C = ops.Convolution((blur_kernel > threshold).astype(np.complex64), mode='windowed',
                            pad_value=0, pad_size=int(object_size[0] / 2))
        support = (C * window_vec).reshape(object_size) > threshold
        kernel_support_mask.append(support)
        # Object-space support is the kernel-space support flipped on every axis
        flipped = support
        for axis in range(support.ndim):
            flipped = np.flip(flipped, axis)
        object_support_mask.append(flipped)
    return (kernel_support_mask, object_support_mask)
def blurKernelFromPositions(object_size, position_list, illum_list, flip_kernels=False, use_phase_ramp=False,
                            pos_perturbation=None, dtype=default_dtype, backend=default_backend):
    """
    This function generates a single blur kernel from a list of positions and illuminations. not multiframe.

    Args:
        object_size: (y, x) shape of the kernel array to generate.
        position_list: sequence of (y, x) positions, one per illumination value.
        illum_list: illumination weights; only values > 0 are deposited.
        flip_kernels: if True, flip the finished kernel left-right.
        use_phase_ramp: if True, positions are applied as Fourier-domain phase
            ramps (sub-pixel placement); otherwise rounded to integer pixels.
        pos_perturbation: optional (n, 2) array of per-position offsets added
            before placement.
        dtype, backend: passed to ops.PhaseRamp when use_phase_ramp is True.
    Returns:
        A 2D blur kernel of shape object_size, normalized to unit sum when its
        sum is positive.
    """
    # Initialize blur kernels
    blur_kernel = np.zeros(object_size, dtype=np.complex64)
    for position_index, position in enumerate(position_list):
        y = position[0]
        x = position[1]
        if pos_perturbation is not None:
            y = y + pos_perturbation[position_index, 0]
            x = x + pos_perturbation[position_index, 1]
        if not use_phase_ramp:
            # Integer placement: round to the nearest pixel
            x = int(round(x))
            y = int(round(y))
        # Assign illumination values
        if illum_list[position_index] > 0:
            if not use_phase_ramp:
                blur_kernel[y, x] += illum_list[position_index]
            else:
                # Sub-pixel placement: accumulate a phase ramp for this
                # center-relative offset in the Fourier domain
                R = ops.PhaseRamp(blur_kernel.shape, dtype=dtype, backend=backend)
                x_ = yp.astype(np.asarray((y - object_size[0] // 2, x - object_size[1] // 2)), R.dtype)
                ramp = yp.reshape(R * x_, blur_kernel.shape)
                blur_kernel += (ramp * illum_list[position_index])
    if use_phase_ramp:
        # Ramps were accumulated in the Fourier domain; transform back
        blur_kernel = iFt(blur_kernel)
        # Suppress numerical noise from the inverse transform
        # NOTE(review): blur_kernel is complex here; ordered comparison against
        # a float may warn or fail on recent numpy — confirm.
        blur_kernel[blur_kernel < 1e-8] = 0.0
    if flip_kernels:
        blur_kernel = np.fliplr(blur_kernel)
    if np.sum(blur_kernel) > 0:
        # Normalize to unit sum
        blur_kernel /= np.sum(blur_kernel)
    return blur_kernel
def positionListToBlurKernelMap(kernel_size, position_list, return_fourier=True):
    """Convert a list of blur-kernel positions to a dense (non-sparse) kernel map.

    Args:
        kernel_size: (y, x) size of each kernel plane.
        position_list: integer array of (y, x) locations, one row per position.
        return_fourier: if True, return the kernel map in the Fourier domain.
    Returns:
        Array of shape (n_positions, kernel_size[0], kernel_size[1]).
    """
    # TODO redundant
    print("can this be replaced with blurKernelFromPositions?")
    n_positions = np.size(position_list, 0)
    kernel_map = np.zeros((n_positions, kernel_size[0], kernel_size[1]))
    for position_index in range(n_positions):
        kernel_map[position_index, position_list[position_index, 0], position_list[position_index, 1]] = 1
    if return_fourier:
        kernel_map = Ft(kernel_map.astype(np.complex64))
    return(kernel_map)
def pointListToBlurKernel(kernel_size, position_list, illumination_vector):
    """Deposit illumination values at the given (y, x) points to form a blur kernel."""
    # TODO redundant
    print("can this be replaced with blurKernelFromPositions?")
    num_points = np.size(position_list, 0)
    assert num_points == len(illumination_vector)
    kernel = np.zeros((kernel_size[0], kernel_size[1]))
    for point_index in range(num_points):
        point = position_list[point_index]
        kernel[point[0], point[1]] = illumination_vector[point_index]
    return(kernel)
def colorBlurKernelsToMonochrome(blur_kernel_list_color):
    """
    This function converts a list of color blur kernels to monochrome, assuming no optical effects.

    Args:
        blur_kernel_list_color: A list of dicts (one per frame), where each key
            is an illumination color channel and each value is that channel's
            blur kernel array.
    Returns:
        A list of blur kernels, each the sum over the color channels of the
        corresponding entry in blur_kernel_list_color.
    """
    blur_kernel_list = []
    # (Idiom fix: the previous loop used enumerate() but never used the index.)
    for blur_kernel in blur_kernel_list_color:
        first_channel = list(blur_kernel.keys())[0]
        # Accumulator matching the first channel's shape and dtype
        new_kernel = np.zeros(blur_kernel[first_channel].shape, dtype=blur_kernel[first_channel].dtype)
        for channel in blur_kernel:
            new_kernel += blur_kernel[channel]
        blur_kernel_list.append(new_kernel)
    return(blur_kernel_list)
def getPositionListBoundingBox(kernel_position_list, use_mean=False):
    """
    This function returns the bounding box of a single blur kernel or list of blur kernels, defined as a list of positions

    Args:
        kernel_position_list: a list of (y, x) points, or a list of such lists
            (one per kernel); numpy arrays are accepted at either level.
        use_mean: if True, bound the per-kernel *mean* positions instead of the
            extreme positions.
    Returns:
        A yp.Roi spanning the rounded [y_min, y_max] x [x_min, x_max] extent.
    """
    # [y_min, y_max, x_min, x_max], initialized to sentinels
    bounding_box = [1e10, -1e10, 1e10, -1e10]
    assert type(kernel_position_list) in [list, np.ndarray]
    # Make a single kernel_position_list a list with one element
    if type(kernel_position_list[0][0]) not in [list, np.ndarray, tuple]:
        kernel_position_list = [kernel_position_list]
    for position in kernel_position_list:
        if type(position[0][0]) in [np.ndarray, list, tuple]:
            # Positions are nested one level deeper; only the first entry of
            # each group is considered.
            # TODO: This will break if we blur by more than one pixel during each pixel motion
            if not use_mean:
                max_y, max_x = np.max(np.asarray(position), axis=0)[0]
                min_y, min_x = np.min(np.asarray(position), axis=0)[0]
            else:
                mean_y, mean_x = np.mean(np.asarray(position), axis=0)[0]
        else:
            if not use_mean:
                max_y, max_x = np.max(np.asarray(position), axis=0)
                min_y, min_x = np.min(np.asarray(position), axis=0)
            else:
                mean_y, mean_x = np.mean(np.asarray(position), axis=0)
        # Expand the running bounding box with this kernel's extremes (or mean)
        if not use_mean:
            bounding_box = [min(min_y, bounding_box[0]),
                            max(max_y, bounding_box[1]),
                            min(min_x, bounding_box[2]),
                            max(max_x, bounding_box[3])]
        else:
            bounding_box = [min(mean_y, bounding_box[0]),
                            max(mean_y, bounding_box[1]),
                            min(mean_x, bounding_box[2]),
                            max(mean_x, bounding_box[3])]
    # Create ROI object
    kernel_support_roi = yp.Roi(start=(int(round(bounding_box[0])), int(round(bounding_box[2]))),
                                end=(int(round(bounding_box[1])), int(round(bounding_box[3]))))
    return(kernel_support_roi)
######################################################################################################
##################################### AUTOCALIBRATION ################################################
######################################################################################################
class BsplineND():
    # from http://pythology.blogspot.com/2017/07/nd-b-spline-basis-functions-with-scipy.html
    """N-dimensional tensor-product B-spline basis built on scipy's FITPACK routines."""
    def __init__(self, knots, degree=3, periodic=False):
        """
        :param knots: a list of the spline knots with ndim = len(knots)
        :param degree: spline degree (k) passed to scipy.interpolate.splrep
        :param periodic: whether to fit periodic splines
        TODO (sarah) incorporate 2d aspect?
        """
        self.ndim = len(knots)
        self.splines = []
        self.knots = knots
        self.degree = degree
        # Fit a dummy (all-zero) spline per dimension just to obtain the
        # FITPACK knot vector and coefficient layout.
        for idim, knots1d in enumerate(knots):
            nknots1d = len(knots1d)
            y_dummy = np.zeros(nknots1d)
            knots1d, coeffs, degree = sp.interpolate.splrep(knots1d, y_dummy, k=degree,
                                                            per=periodic)
            self.splines.append((knots1d, coeffs, degree))
        self.ncoeffs = [len(coeffs) for knots, coeffs, degree in self.splines]
    def evaluate_independent(self, position):
        """
        :param position: a numpy array with size [ndim, npoints]
        :returns: a numpy array with size [nspl1, nspl2, ..., nsplN, npts]
            with the spline basis evaluated at the input points
        """
        ndim, npts = position.shape
        values_shape = self.ncoeffs + [npts]
        values = np.empty(values_shape)
        ranges = [range(icoeffs) for icoeffs in self.ncoeffs]
        for icoeffs in itertools.product(*ranges):
            values_dim = np.empty((ndim, npts))
            for idim, icoeff in enumerate(icoeffs):
                # Evaluate the icoeff-th basis function by using a unit
                # coefficient vector with the stored knot vector.
                coeffs = [1.0 if ispl == icoeff else 0.0 for ispl in
                          range(self.ncoeffs[idim])]
                values_dim[idim] = sp.interpolate.splev(
                    position[idim],
                    (self.splines[idim][0], coeffs, self.degree))
            # Bug fix: np.product was removed in NumPy 2.0; np.prod is the
            # documented equivalent.
            values[icoeffs] = np.prod(values_dim, axis=0)
        return values
    def evaluate(self, position):
        """Evaluate the weighted spline at the given positions (requires set_weights)."""
        assert self.weights is not None, "Must specify coefficients with set_coeffs()"
        values = self.evaluate_independent(position)
        return self.weights.dot(values)
    def set_weights(self, weights):
        """Set the basis-function weights used by evaluate()."""
        assert len(weights) == self.ncoeffs[0], "must input correct number of weights"
        self.weights = weights
def get_basis_splines(extent, num_basis_fn):
    """Return an (extent, num_basis_fn) matrix of B-spline basis functions
    sampled on a uniform grid spanning [0, extent - 1]."""
    knot_positions = np.linspace(0, extent - 1, num_basis_fn)
    spline_basis = BsplineND([knot_positions])
    sample_points = np.linspace(knot_positions[0], knot_positions[-1], extent)
    basis_matrix = spline_basis.evaluate_independent(sample_points[None, :])
    return basis_matrix[:num_basis_fn].T
def constructAlternatingMin(illuminations, shifts, image_size, n_frames, y):
    """Deprecated entry point for alternating minimization; use getAutocalibrationFns.

    NOTE(review): the assert below makes this function unusable on purpose; the
    nested helpers that follow are retained for reference only and are never
    executed.
    """
    assert False, "DEPRECIATED, try getAutocalibrationFns"
    def positions_to_splines(spl_basis, pos):
        # Least-squares fit of spline weights to positions (normal equations)
        # TODO (sarah) can be implemented as matrix inversion
        return np.linalg.pinv(spl_basis.T.dot(spl_basis)).dot(spl_basis.T.dot(pos))
    # def gradw(w):
    #     return spl_basis.T.dot( pos - spl_basis.dot(w))
    # for i in range(100):
    #     w = w + 0.1 * gradw(w)
    # return w
    def get_monotone_projection(spline_basis):
        # Monotonicity expressed as the linear inequality (B[i] - B[i+1]) x <= 0
        A = spline_basis[:-1] - spline_basis[1:]
        return lambda x: project_inequality(A, x)
    def project_inequality(A, x):
        # projection onto Ax <= 0
        # TODO assumes A is real
        # TODO check that this is actually working: seems that resulting x is in full A nullspace...
        # assert len(x.shape==2)
        A_active = A[np.where(A.dot(x) > 0)[0]]
        if A_active.shape[0] == 0:
            return x
        if len(A_active.shape) < 2:
            A_active = np.expand_dims(A_active,1)
        AAinv = np.linalg.pinv(A_active.dot(A_active.T))
        P = A_active.T.dot(AAinv).dot(A_active)
        Px = P.dot(x)
        return x - Px
def getAutocalibrationFns(y, object_size, illums, basis_y, basis_x, weights_initial, object_initial,
                          dtype=None, backend=None, verbose=False):
    """Build the two alternating-minimization objectives used for autocalibration.

    Constructs L2 objectives over (a) the object, with the blur kernel fixed,
    and (b) the blur-kernel basis weights, with the object fixed — plus the
    update functions that re-bind each objective's fixed argument from the
    other's latest estimate.

    Args:
        y: measured data (vectorized image).
        object_size: (y, x) size of the object grid.
        illums: illumination values passed to ops.BlurKernelBasis.
        basis_y, basis_x: spline basis matrices for the kernel positions.
        weights_initial: initial basis weights seeding D_{shift}.
        object_initial: initial object estimate seeding D_{object}.
        dtype, backend: forwarded to the ops operators.
        verbose: print progress while constructing the operators.
    Returns:
        (objectives, update_fns): ([objective_object, objective_weights],
        [set_weights_fn, set_object_fn]).
    """
    if verbose: print("generating BlurKernelBasis operator")
    Hsum = ops.BlurKernelBasis(object_size, (basis_y, basis_x), illums,
                               dtype=dtype, backend=backend, verbose=verbose)
    F = ops.FourierTransform(object_size, dtype=dtype, backend=backend, normalize=False)
    if verbose: print("defining diagonalized components")
    # Blur kernel (from initial weights) as a diagonal operator in Fourier space
    D_weights = ops.Diagonalize((Hsum * weights_initial).reshape(object_size), \
                                label='D_{shift}', dtype=dtype, backend=backend)
    # TODO (sarah) use windowing here
    D_object = ops.Diagonalize(F * object_initial, \
                               label='D_{object}', dtype=dtype, backend=backend)
    # Forward models
    if verbose: print("defining forward models")
    A_object = F.H * D_weights * F
    A_weights = F.H * D_object * Hsum
    # Objectives
    if verbose: print("defining objectives")
    L2 = ops.L2Norm(object_size, dtype=dtype, backend=backend)
    objective_object = L2 * (A_object - y)
    objective_weights = L2 * (A_weights - y)
    np_dtype = yp.getNativeDatatype(F.dtype, F.backend)
    # Re-bind each objective's fixed term from the other variable's latest value
    objective_object_set_weights = lambda weights: objective_object.setArgument('D_{shift}', \
                                                                                Hsum * np.asarray(weights).astype(np_dtype))
    objective_weights_set_object = lambda object_update: objective_weights.setArgument('D_{object}', \
                                                                                       F * np.asarray(object_update).astype(np_dtype))
    objectives = [objective_object, objective_weights]
    update_fns = [objective_object_set_weights, objective_weights_set_object]
    return objectives, update_fns
def tand(x):
    """Tangent of an angle given in degrees."""
    # Bug fix: np.float was removed in NumPy 1.24+; the builtin float is equivalent.
    return float(np.tan(x * np.pi / 180))
def sind(x):
    """Sine of an angle given in degrees."""
    # Bug fix: np.float was removed in NumPy 1.24+; the builtin float is equivalent.
    return float(np.sin(x * np.pi / 180))
def cosd(x):
    """Cosine of an angle given in degrees."""
    # Bug fix: np.float was removed in NumPy 1.24+; the builtin float is equivalent.
    return float(np.cos(x * np.pi / 180))
def dnf2snr(dnf, exposureUnits, exposureCountsPerUnit=6553, darkCurrentE=0.9, patternNoiseE=3.9, readoutNoiseE=2.5, cameraBits=16, fullWellCapacity=30000):
    """Convert a deconvolution noise factor (DNF) to a signal-to-noise ratio.

    Uses the SNR model from
    https://www.photometrics.com/resources/learningzone/signaltonoiseratio.php
    and the DNF from Agrawal and Raskar, CVPR 2009
    (http://ieeexplore.ieee.org/document/5206546/). Default values are for the
    PCO.edge 5.5 sCMOS camera.

    Args:
        dnf: Deconvolution noise factor as specified in Agrawal et. al.
        exposureUnits: exposure time, time units (normally ms)
        exposureCountsPerUnit: Average number of raw image counts per unit of exposure time
        darkCurrentE: Dark current from datasheet, units electrons
        patternNoiseE: Pattern noise from datasheet, units electrons
        readoutNoiseE: Readout noise from datasheet, units electrons
        cameraBits: Number of bits in camera
        fullWellCapacity: Sensor full-well capacity, units electrons
    Returns:
        Scalar signal-to-noise ratio.
    """
    # Conversion factor from raw counts to electrons
    countsToE = fullWellCapacity / (2 ** cameraBits - 1)
    signal_e = countsToE * exposureUnits * exposureCountsPerUnit
    # Shot + readout noise accumulate with exposure; dark and pattern noise are fixed
    noise_variance_e = (countsToE * exposureCountsPerUnit + readoutNoiseE) * exposureUnits \
        + (darkCurrentE + patternNoiseE)
    return signal_e / (dnf * math.sqrt(noise_variance_e))
def genRandInitialization(n, beta, bounds=[0.0, 1.0], remainder=False):
    """Generate a random blur vector with ~beta*n pulses and its condition number.

    Args:
        n: kernel length.
        beta: throughput coefficient in [0, 1].
        bounds: [min, max] pulse values; pulses are set to bounds[1].
        remainder: if True, assign the fractional remainder of beta*n to one
            zero entry so the total throughput matches beta*n more closely.
    Returns:
        (blurVec, condNum): the kernel and the condition number of its spectrum
        (may be inf when the spectrum has an exact zero).
    """
    blurVec = np.zeros(n)
    # Choose floor(beta * n) pulse locations uniformly at random (uses the
    # global numpy random state, as before).
    n_pulses = int(np.floor(beta * n))
    pulse_indices = np.random.choice(n, size=n_pulses, replace=False)
    # Set these values to max
    blurVec[pulse_indices] = bounds[1]
    # Assign the remainder to a value which is zero
    if remainder:
        zeroIdx = np.argsort(blurVec)
        blurVec[zeroIdx[0]] = (beta * n) % 1
    # Compute Condition Number of the kernel's spectrum.
    # Bug fix: this was previously computed from the *index array* returned by
    # np.random.choice rather than from the kernel itself.
    spectrum = np.abs(fftpack.fft(blurVec))
    condNum = np.max(spectrum) / np.min(spectrum)
    return blurVec, condNum
def condLowerBound(N, beta, bounds=[0, 1]):
    """Function which generates a lower bound on condition number using the PSD description of the optimal solution.
    Args:
        N: Number of positions on blur kernel
        beta: throughoput coefficient in range [0,1]
        bounds: bounds of resulting kernel, usually set to [0,1] (not used in
            the computation; kept for interface compatibility)
    Returns:
        Scalar condition number lower bound
    """
    # Theoretical maximum of sum(x^2): floor(N*beta) unit pulses plus one
    # fractional entry
    max_power = np.floor(N * beta) + np.mod(N * beta, 1) ** 2
    # Parseval: total spectral power is N times the real-space power
    spectral_power = max_power * N
    # The DC bin equals the kernel sum
    dc_component = N * beta
    # Remaining power spread evenly over the N-1 non-DC bins
    min_spectral_power = (spectral_power - dc_component ** 2) / (N - 1)
    # Condition number = largest (DC) over smallest spectral magnitude
    return dc_component / np.sqrt(min_spectral_power)
def dnfUpperBound(N, beta, bounds=[0, 1]):
    """Function which generates a upper bound on deconvolution noise factor (DNF) using the PSD description of the optimal solution.
    DNF is described in the Agrawal and Raskar 2009 CVPR paper found here: http://ieeexplore.ieee.org/document/5206546/
    Args:
        N: Number of positions on blur kernel
        beta: throughoput coefficient in range [0,1]
        bounds: bounds of resulting kernel, usually set to [0,1] (not used in
            the computation; kept for interface compatibility)
    Returns:
        Scalar dnf lower bound
    """
    # Degenerate single-position kernel: DNF is 1 by definition
    if N <= 1:
        return 1
    # Theoretical maximum of sum(x^2): floor(N*beta) unit pulses plus one
    # fractional entry
    max_power = np.floor(N * beta) + np.mod(N * beta, 1) ** 2
    # Parseval: total spectral power is N times the real-space power
    spectral_power = max_power * N
    # The DC bin equals the kernel sum
    dc_component = N * beta
    # Remaining power spread evenly over the N-1 non-DC bins
    min_spectral_power = (spectral_power - dc_component ** 2) / (N - 1)
    return (N - 1) / np.sqrt(min_spectral_power) + 1 / dc_component
def genKernelMapCol(colIdx, innerKernelMap, outerKernelMap, kernelSupport=None, supportThreshold=-1):
    """Function which generates a single column in a kernel map from an inner and outer base kernelMap.
    Args:
        colIdx: flat column index; inner index is the minor stride, outer the major.
        innerKernelMap: kernelMap to pattern on inner dimension (minor stride). Should be of same (x,y) size as outerKernelMap (2nd and 3rd dim)
        outerKernelMap: kernelMap to pattern on outer dimension (major stride). Should be of same (x,y) size as innerKernelMap (2nd and 3rd dim)
        kernelSupport: Binary array for 2D support in both inner and outer kernelMaps
        supportThreshold: A threshold for the final kernelMap magnitude. Only used if kernelSupport = None (default)
    Returns:
        A 1D column from the kernelMap formed by innerKernelMap and outerKernelMap,
        restricted to the positions where the support is nonzero.
    """
    innerSize = np.size(innerKernelMap, 2)
    outerSize = np.size(outerKernelMap, 2)
    assert colIdx < innerSize * outerSize, "colIdx should be less than the product of the third dimensions in innerKernelMap and outerKernelMap"
    innerIdx = int(colIdx % innerSize)
    outerIdx = int(np.floor(colIdx / innerSize))
    kernelMapCol = (innerKernelMap[:, :, innerIdx] * outerKernelMap[:, :, outerIdx]).reshape(-1)
    # Bug fix: the original tested `np.any(kernelSupport) == None`, which is
    # False even when kernelSupport is None (so the default path crashed), and
    # the full-support branch referenced an undefined name `ones`.
    if kernelSupport is None:
        if supportThreshold > 0:
            support = np.abs(kernelMapCol) > supportThreshold
        else:
            support = np.ones(innerKernelMap[:, :, 0].reshape(-1).shape)
    else:
        support = kernelSupport
    kernelMapCol = kernelMapCol[support.reshape(-1) > 0]
    return kernelMapCol
def genKernelMapSupport(innerKernelMap, outerKernelMap, supportThreshold=-1):
    """Function which generates a 2D support plot given both inner and outer kernelMaps.
    Args:
        innerKernelMap: kernelMap to pattern on inner dimension (minor stride). Should be of same (x,y) size as outerKernelMap (2nd and 3rd dim)
        outerKernelMap: kernelMap to pattern on outer dimension (major stride). Should be of same (x,y) size as innerKernelMap (2nd and 3rd dim)
        supportThreshold: A threshold for the final kernelMap magnitude.
    Returns:
        A boolean 2D array (first two dims of innerKernelMap) that is True where
        the mean-normalized combined magnitude exceeds supportThreshold. With the
        default threshold of -1, every position is True.
    """
    # Per-pixel magnitude summed over the inner kernels, normalized by its mean
    inner_power = np.sum(np.abs(innerKernelMap), 2)
    inner_normalized = inner_power / np.mean(inner_power)
    # Total outer magnitude, normalized by its own mean (a scalar ratio)
    outer_power = np.sum(np.abs(outerKernelMap))
    outer_normalized = outer_power / np.mean(outer_power)
    return (inner_normalized * outer_normalized) > supportThreshold
def genKernelMap(innerKernelMap, outerKernelMap, kernelSupport=None, supportThreshold=-1):
    """Function which generates an kernel map from an inner and outer base kernelMap.
    Args:
        innerKernelMap: kernelMap to pattern on inner dimension (minor stride). Should be of same (x,y) size as outerKernelMap (2nd and 3rd dim)
        outerKernelMap: kernelMap to pattern on outer dimension (major stride), or None for a single all-ones outer plane. Should be of same (x,y) size as innerKernelMap (2nd and 3rd dim)
        kernelSupport: Binary array for 2D support in both inner and outer kernelMaps
        supportThreshold: A threshold for the final kernelMap magnitude. Only used if kernelSupport = None (default)
    Returns:
        A 2D complete kernelMap with second dimension equal to the product of the last dimensions in innerKernelMap and outerKernelMap. Size in the first dimension will be the product of the first two dimensions in both innerKernelMap and outerKernelMap by default, or can be less if kernelSupport or supportThreshold are passed.
    """
    # Default the outer map to a single all-ones plane
    # (idiom fix: was `type(outerKernelMap) is type(None)`)
    if outerKernelMap is None:
        outerKernelMap = np.ones((np.size(innerKernelMap, 0), np.size(innerKernelMap, 1), 1))
    # Promote 2D inputs to a single-plane 3D shape
    if np.ndim(innerKernelMap) == 2:
        innerKernelMap = np.expand_dims(innerKernelMap, 2)
    if np.ndim(outerKernelMap) == 2:
        outerKernelMap = np.expand_dims(outerKernelMap, 2)
    outerSize = np.size(outerKernelMap, 2)
    innerSize = np.size(innerKernelMap, 2)
    nColumns = innerSize * outerSize
    # Generate support based on power spectra of each kernel.
    # Bug fix: `kernelSupport == None` performs an elementwise comparison for
    # arrays, which raises on truth-testing; use an identity check instead.
    if kernelSupport is None:
        if supportThreshold > 0:
            support = genKernelMapSupport(innerKernelMap, outerKernelMap, supportThreshold=supportThreshold)
        else:
            support = np.ones(innerKernelMap[:, :, 0].reshape(-1).shape)
    else:
        support = kernelSupport
    kernelMap = np.zeros((np.sum(support > 0), nColumns), dtype=np.complex64)
    for colIdx in np.arange(0, innerSize * outerSize):
        kernelMap[:, colIdx] = genKernelMapCol(colIdx, innerKernelMap, outerKernelMap, kernelSupport=support)
    return(kernelMap)
######################################################################################################
##################################### PATHWAY GENERATION #############################################
######################################################################################################
def genMotionPathIncrimentPlot(blur_kernel_map):
    """Function which generates a 2D kernel map for illustration where every sequential position is it's index in blur_kernel_map
    Args:
        blur_kernel_map: A blur kernel map, usually a bunch of delta functions. Should be a 3D ndarray
    Returns:
        A 2D ndarray where each value is it's index in the input blur_kernel_map
    """
    height = np.size(blur_kernel_map, 0)
    width = np.size(blur_kernel_map, 1)
    kernel_map_full = np.zeros((height, width))
    # Weight each plane by its index and accumulate
    for frame_index in np.arange(np.size(blur_kernel_map, 2)):
        kernel_map_full += blur_kernel_map[:, :, frame_index] * frame_index
    return kernel_map_full
def genRasterMotionPathwayOld(object_size, image_size, full_object_multi_pass=0, measurement_redundancy=1):
    """Function which generates a list of points which make up a complete raster scan of a given FOV, given a small capture FOV
    Args:
        image_size: Capture frame size in (y,x)
        object_size: Sample size in (y,x), should be larger than image_size
        full_object_multi_pass: (0) Flag to force kernel generation to scan full object multiple times instead of dividing it into segments. If between 0 and 1, scans the object in halves.
        measurement_redundancy: redundancy in x, increases number of measurements by this factor
    Returns:
        A list of (n, 2) position arrays, one per measurement segment.
    """
    # Determine major axis
    major_axis = np.argmax(np.asarray(object_size))
    if object_size[0] == object_size[1]:
        major_axis = 1
    if major_axis == 0:
        object_size = np.flip(object_size, 0)
        image_size = np.flip(image_size, 0)
    # Bug fix: np.int was removed in NumPy 1.24+; the builtin int is equivalent.
    measurement_count = np.ceil(np.asarray(object_size) / np.asarray(image_size)
                                ).astype(int)  # two components in x and y
    assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
    print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
    measurement_count[1] = int(measurement_redundancy * measurement_count[1])
    raster_segments = np.zeros((measurement_count[0] * 2, 2), dtype=int)
    minor_stride_side = 'r'
    raster_point_list = []
    for row in np.arange(measurement_count[0]):
        # Place the vertical upright
        if minor_stride_side == 'r':
            raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(image_size[1] * 0.5).astype(int)]
            raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                                 np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
            minor_stride_side = 'l'
        else:
            raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
            raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)
                                                         ).astype(int), np.ceil(image_size[1] * 0.5).astype(int)]
            minor_stride_side = 'r'
        # Determine movement direction of this row in X
        if raster_segments[(2 * row), 1] < raster_segments[(2 * row) + 1, 1]:
            move_direction_x = 1
        else:
            move_direction_x = -1
        # always move down one
        move_direction_y = 1
        # Determine points to use for horizontal scan
        if row == 0:
            if measurement_count[0] == 1:
                x_position_list = np.arange(0, object_size[1], move_direction_x)
            else:
                x_position_list = np.arange(0, raster_segments[(2 * row) + 1, 1], move_direction_x)
        elif row == measurement_count[0] - 1:
            x_position_list = np.arange(raster_segments[(2 * row), 1], object_size[1], move_direction_x)
        else:
            x_position_list = np.arange(raster_segments[(2 * row), 1],
                                        raster_segments[(2 * row) + 1, 1], move_direction_x)
        for position_x in x_position_list:
            raster_point_list.append([raster_segments[(2 * row), 0], position_x])
        # Vertical scan
        if np.ceil(image_size[0] * (row + 1.5)) < object_size[0]:
            for position_y in np.arange(np.ceil(image_size[0] * (row + 0.5)), np.ceil(image_size[0] * (row + 1.5))).astype(int):
                raster_point_list.append([position_y.astype(int), raster_segments[(2 * row) + 1, 1]])
    raster_point_list = np.asarray(raster_point_list)
    # Determine number of points per image
    points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
    measurement_indicies = np.arange(raster_point_list.shape[0])
    measurement_indicies = np.floor(measurement_indicies / points_per_image)
    # If full_object_multi_pass flag is specified, we want to scan the object backwards
    # and forwards multiple times instead of dividing it up into segments.
    raster_point_list_segmented = []
    for measurement_index in range(np.prod(measurement_count)):
        if not full_object_multi_pass:
            raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
        elif full_object_multi_pass < 1:
            # Scan the object in alternating halves, reversing direction
            midpoint = int(np.ceil(raster_point_list.shape[0] / 2))
            if measurement_index % 2:
                if measurement_index % 3:
                    raster_point_list_segmented.append(raster_point_list[midpoint:, :])
                else:
                    raster_point_list_segmented.append(np.flip(raster_point_list[0:midpoint, :], axis=0))
            else:
                if measurement_index % 4:
                    raster_point_list_segmented.append(np.flip(raster_point_list[midpoint:, :], axis=0))
                else:
                    raster_point_list_segmented.append(raster_point_list[0:midpoint, :])
        else:
            # Scan the full object on every pass, alternating direction
            if measurement_index % 2:
                raster_point_list_segmented.append(raster_point_list)
            else:
                raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
    # Transpose points if user desires
    if major_axis == 0:
        return(np.flip(raster_point_list_segmented, axis=2))
    else:
        return(raster_point_list_segmented)
def gen90Corner(image_size, orientation='ru'):
    """Generate the point path for a 90-degree corner within a frame of
    image_size, in one of four orientations ('ru', 'rl', 'lu', 'll')."""
    half_y = np.ceil(image_size[0] * 0.5).astype(int)
    half_x = np.ceil(image_size[1] * 0.5).astype(int)
    position_list = []
    # Horizontal leg, then vertical leg: the canonical 'ru' corner
    for x_position in np.arange(0, half_x):
        position_list.append([half_y, x_position])
    for y_position in np.arange(half_y, image_size[0]):
        position_list.append([y_position, half_x])
    # Other orientations are reflections/reversals of the canonical corner
    if orientation == 'rl':
        return flip_pts(position_list, image_size, ['ud', 'reverse'])
    if orientation == 'lu':
        return flip_pts(position_list, image_size, ['lr'])
    if orientation == 'll':
        return flip_pts(position_list, image_size, ['lr', 'ud', 'reverse'])
    return position_list
def genRasterMotionPathway(object_size, image_size, corner_gen_fn=gen90Corner, full_object_multi_pass=0, measurement_redundancy=1):
    """Function which generates a list of points which make up a complete raster scan of a given FOV, given a small capture FOV
    Args:
        image_size: Capture frame size in (y,x)
        object_size: Sample size in (y,x), should be larger than image_size
        corner_gen_fn: function that generates corner shapes with given image size, offset, and orientation
        full_object_multi_pass: (0) Flag to force kernel generation to scan full object multiple times instead of dividing it into segments. If between 0 and 1, scans the object in halves.
        measurement_redundancy: redundancy in x, increases number of measurements by this factor
    Returns:
        A list of (n, 2) position arrays, one per measurement segment
    """
    # Determine major axis; equal dimensions are treated as x-major
    major_axis = np.argmax(np.asarray(object_size))
    if object_size[0] == object_size[1]:
        major_axis = 1
    if major_axis == 0:
        object_size = np.flip(object_size, 0)
        image_size = np.flip(image_size, 0)
    # Number of capture frames needed in y and x.
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    measurement_count = np.ceil(np.asarray(object_size) / np.asarray(image_size)).astype(int)
    assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
    print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
    measurement_count[1] = int(measurement_redundancy * measurement_count[1])
    raster_point_list = []
    # Build the serpentine path row by row: straight runs joined by corner shapes.
    for row in np.arange(measurement_count[0]):
        if row == 0:
            if measurement_count[0] == 1:  # if final row
                # straight line only
                x_position_list = np.arange(0, object_size[1])
                y_position = np.ceil(image_size[0] * 0.5).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
            else:
                # straight line
                x_position_list = np.arange(0, object_size[1] - image_size[1])
                y_position = np.ceil(image_size[0] * 0.5).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
                # plus corner
                for position_x, position_y in corner_gen_fn(image_size, orientation='ru'):
                    raster_point_list.append([position_y, position_x + object_size[1] - image_size[1]])
        elif row % 2:  # odd row: scan right-to-left
            for position_x, position_y in corner_gen_fn(image_size, orientation='rl'):
                raster_point_list.append([position_y + row * image_size[0],
                                          position_x + object_size[1] - image_size[1]])
            if measurement_count[0] == row + 1:  # final row: straight line
                x_position_list = np.arange(0, object_size[1] - image_size[1], -1)
                y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
            else:
                # straight portion
                x_position_list = np.arange(image_size[1], object_size[1] - image_size[1], -1)
                y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
                # corner
                for position_x, position_y in corner_gen_fn(image_size, orientation='lu'):
                    raster_point_list.append([position_y + row * image_size[0], position_x])
        else:  # even row: scan left-to-right
            for position_x, position_y in corner_gen_fn(image_size, orientation='ll'):
                raster_point_list.append([position_y + row * image_size[0], position_x])
            if measurement_count[0] == row + 1:  # final row: straight line
                x_position_list = np.arange(image_size[1], object_size[1])
                y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
            else:
                # straight portion
                x_position_list = np.arange(image_size[1], object_size[1] - image_size[1])
                y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
                for position_x in x_position_list:
                    raster_point_list.append([y_position, position_x])
                # corner
                for position_x, position_y in corner_gen_fn(image_size, orientation='ru'):
                    raster_point_list.append([position_y + row * image_size[0],
                                              position_x + object_size[1] - image_size[1]])
    raster_point_list = np.asarray(raster_point_list)
    # Determine number of points per image
    points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
    measurement_indicies = np.arange(raster_point_list.shape[0])
    measurement_indicies = np.floor(measurement_indicies / points_per_image)
    # If full_object_multi_pass flag is specified, we want to scan the object backwards
    # and forwards multiple times instead of dividing it up into segments.
    raster_point_list_segmented = []
    for measurement_index in range(np.prod(measurement_count)):
        if not full_object_multi_pass:
            # Divide the path into contiguous per-measurement segments.
            raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
        elif full_object_multi_pass < 1:
            # Scan the object in alternating halves.
            midpoint = int(np.ceil(raster_point_list.shape[0] / 2))
            if measurement_index % 2:
                if measurement_index % 3:
                    raster_point_list_segmented.append(raster_point_list[midpoint:, :])
                else:
                    raster_point_list_segmented.append(np.flip(raster_point_list[0:midpoint, :], axis=0))
            else:
                if measurement_index % 4:
                    raster_point_list_segmented.append(np.flip(raster_point_list[midpoint:, :], axis=0))
                else:
                    raster_point_list_segmented.append(raster_point_list[0:midpoint, :])
        else:
            # Full passes over the whole object, alternating direction.
            if measurement_index % 2:
                raster_point_list_segmented.append(raster_point_list)
            else:
                raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
    # Transpose points if user desires
    if major_axis == 0:
        # NOTE(review): np.flip over a list of segments assumes all segments have the
        # same length; ragged segment lists will fail here -- confirm segment shapes.
        return(np.flip(raster_point_list_segmented, axis=2))
    else:
        return(raster_point_list_segmented)
def flip_pts(points, image_size, orientations):
    """Apply a sequence of mirror / order-reversal operations to a point list.

    Args:
        points: iterable of (a, b) point pairs
        image_size: frame size (y, x) used as the mirror extent
        orientations: sequence drawn from 'ud', 'lr', 'reverse', applied in order
    Returns:
        the transformed points (a list of tuples, or an ndarray if the last
        operation was 'reverse')
    """
    result = points
    for op in orientations:
        if op == 'reverse':
            # Reverse the traversal order of the whole list.
            result = np.flipud(result)
        elif op == 'ud':
            result = [(pt[0], image_size[0] - pt[1]) for pt in result]
        elif op == 'lr':
            result = [(image_size[1] - pt[0], pt[1]) for pt in result]
        else:
            assert 0, 'unrecognized orientation'
    return result
# messy separate version for now, eventually merge custom corner logic with everything else to subsume this case
def genLinearRasterMotionPathway(object_size, image_size, full_object_multi_pass=0, measurement_redundancy=1):
    """Function which generates a list of points which make up a complete raster scan of a given FOV, given a small capture FOV
    Args:
        image_size: Capture frame size in (y,x), ndarray
        object_size: Sample size in (y,x), ndarray, should be larger than image_size
        full_object_multi_pass: (0) Flag to force kernel generation to scan full object multiple times instead of dividing it into segments. If between 0 and 1, scans the object in halves.
        measurement_redundancy: redundancy in x, increases number of measurements by this factor
    Returns:
        A list of (n, 2) position arrays, one per measurement segment
    """
    # Determine major axis; equal dimensions are treated as x-major
    major_axis = np.argmax(np.asarray(object_size))
    if object_size[0] == object_size[1]:
        major_axis = 1
    if major_axis == 0:
        object_size = np.flip(object_size, 0)
        image_size = np.flip(image_size, 0)
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    measurement_count = np.ceil(object_size / image_size).astype(int)  # two components in x and y
    assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
    print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
    measurement_count[1] = int(measurement_redundancy * measurement_count[1])
    raster_segments = np.zeros((measurement_count[0] * 2, 2), dtype=int)
    raster_point_list = []
    for row in np.arange(measurement_count[0]):
        # Place the vertical upright (endpoints of this row's horizontal scan line)
        raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                         np.ceil(image_size[1] * 0.5).astype(int)]
        raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
        # Determine points to use for horizontal scan
        x_position_list = np.arange(0, object_size[1])
        for position_x in x_position_list:
            raster_point_list.append([raster_segments[(2 * row), 0], position_x])
    raster_point_list = np.asarray(raster_point_list)
    # Determine number of points per image
    points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
    measurement_indicies = np.arange(raster_point_list.shape[0])
    measurement_indicies = np.floor(measurement_indicies / points_per_image)
    # If full_object_multi_pass flag is specified, we want to scan the object backwards
    # and forwards multiple times instead of dividing it up into segments.
    raster_point_list_segmented = []
    for measurement_index in range(np.prod(measurement_count)):
        if not full_object_multi_pass:
            # Divide the path into contiguous per-measurement segments.
            raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
        elif full_object_multi_pass < 1:
            # Scan the object in alternating halves.
            midpoint = int(np.ceil(raster_point_list.shape[0] / 2))
            if measurement_index % 2:
                if measurement_index % 3:
                    raster_point_list_segmented.append(raster_point_list[midpoint:, :])
                else:
                    raster_point_list_segmented.append(np.flip(raster_point_list[0:midpoint, :], axis=0))
            else:
                if measurement_index % 4:
                    raster_point_list_segmented.append(np.flip(raster_point_list[midpoint:, :], axis=0))
                else:
                    raster_point_list_segmented.append(raster_point_list[0:midpoint, :])
        else:
            # Full passes over the whole object, alternating direction.
            if measurement_index % 2:
                raster_point_list_segmented.append(raster_point_list)
            else:
                raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
    # Transpose points if user desires
    if major_axis == 0:
        # NOTE(review): assumes equal-length segments; ragged lists fail on modern NumPy.
        return(np.flip(raster_point_list_segmented, axis=2))
    else:
        return(raster_point_list_segmented)
def genCustomRasterPathway(image_size, object_size, corner_fn, measurement_redundancy=1):
    """Build a hard-coded raster pathway over a 3x3 grid of capture frames.

    Rough prototype (to be merged with the generic corner logic later): straight
    segments joined by corner shapes produced by
    corner_fn(image_size, offset=..., orientations=...).

    Args:
        image_size: capture frame size (y, x), ndarray
        object_size: sample size (y, x); must be exactly 3x image_size
        corner_fn: corner generator, e.g. generate_open_corner / generate_diamond_corner
        measurement_redundancy: when 2, each segment is split into two halves
    Returns:
        list of (n, 2) position arrays, one per segment
    """
    assert (object_size / image_size == [3, 3]).all(), 'only design for 3x3 grid'
    half_y = int(image_size[0] / 2)
    half_x = int(image_size[1] / 2)
    segments = [
        genTwoPointLineBlurposition_list((0, half_y), (image_size[1], half_y)),
        genTwoPointLineBlurposition_list((image_size[1], half_y), (2 * image_size[1], half_y)),
        corner_fn(image_size, offset=(2 * image_size[1], 0)),
        corner_fn(image_size, offset=(2 * image_size[1], image_size[0]), orientations=['ud', 'reverse']),
        genTwoPointLineBlurposition_list((2 * image_size[0], image_size[1] + half_x),
                                         (image_size[0], image_size[1] + half_x)),
        corner_fn(image_size, offset=(0, image_size[0]), orientations=['lr']),
        corner_fn(image_size, offset=(0, 2 * image_size[0]), orientations=['lr', 'ud', 'reverse']),
        genTwoPointLineBlurposition_list((image_size[1], 2 * image_size[0] + half_y),
                                         (2 * image_size[1], 2 * image_size[0] + half_y)),
        genTwoPointLineBlurposition_list((2 * image_size[1], 2 * image_size[0] + half_y),
                                         (3 * image_size[1], 2 * image_size[0] + half_y)),
    ]
    point_list_segmented = []
    for segment in segments:
        if measurement_redundancy == 2:
            # Split the segment into two halves; the final point is dropped.
            middle = int(np.floor((len(segment) - 1) / 2))
            point_list_segmented.append(np.asarray(segment[:middle]))
            point_list_segmented.append(np.asarray(segment[middle:-1]))
        else:
            # The final point of each segment is dropped.
            point_list_segmented.append(np.asarray(segment[:-1]))
    return point_list_segmented
def generate_open_corner(image_size, offset=(0, 0), orientations=[]):
    """Build an 'open' corner shape as concatenated straight-line segments.

    Three waypoints are mirrored via flip_pts, shifted by offset, then joined
    pairwise with genTwoPointLineBlurposition_list.

    Args:
        image_size: capture frame size (y, x)
        offset: (a, b) translation added to every waypoint
        orientations: flip operations passed through to flip_pts (read-only)
    Returns:
        (n, 2) ndarray of concatenated line positions
    """
    midside = (int(image_size[0] / 2), int(image_size[1] / 2))
    waypoints = flip_pts(
        [(0, midside[0]),
         (int(image_size[1] / 3), int(image_size[0] / 6)),
         (image_size[1], image_size[0])],
        image_size, orientations)
    shifted = [tuple(c + o for c, o in zip(pt, offset)) for pt in waypoints]
    segments = [genTwoPointLineBlurposition_list(a, b) for a, b in zip(shifted[:-1], shifted[1:])]
    return np.concatenate(segments)
def generate_diamond_corner(image_size, offset=(0, 0), orientations=[]):
    """Build a 'diamond' corner shape as concatenated straight-line segments.

    Four waypoints are mirrored via flip_pts, shifted by offset, then joined
    pairwise with genTwoPointLineBlurposition_list.

    Args:
        image_size: capture frame size (y, x)
        offset: (a, b) translation added to every waypoint
        orientations: flip operations passed through to flip_pts (read-only)
    Returns:
        (n, 2) ndarray of concatenated line positions
    """
    midside = (int(image_size[0] / 2), int(image_size[1] / 2))
    waypoints = flip_pts(
        [(0, midside[0]),
         (midside[1], int(image_size[0] / 5)),
         (int(4 * image_size[1] / 5), midside[0]),
         (midside[1], image_size[0])],
        image_size, orientations)
    shifted = [tuple(c + o for c, o in zip(pt, offset)) for pt in waypoints]
    segments = [genTwoPointLineBlurposition_list(a, b) for a, b in zip(shifted[:-1], shifted[1:])]
    return np.concatenate(segments)
def genRasterMotionPathway_fallback(object_size, image_size, full_object_multi_pass=False):
    """Function which generates a list of points which make up a complete raster scan of a given FOV, given a small capture FOV
    Args:
        image_size: Capture frame size in (y,x), ndarray
        object_size: Sample size in (y,x), ndarray, should be larger than image_size
        full_object_multi_pass: (False) Flag to force kernel generation to scan full object multiple times instead of dividing it into segments
    Returns:
        A list of (n, 2) position arrays, one per measurement segment
    """
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    measurement_count = np.ceil(object_size / image_size).astype(int)  # two components in x and y
    assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
    print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
    raster_segments = np.zeros((measurement_count[0] * 2, 2), dtype=int)
    minor_stride_side = 'r'
    raster_point_list = []
    for row in np.arange(measurement_count[0]):
        # Place the vertical upright; alternate the row direction (serpentine scan)
        if minor_stride_side == 'r':
            raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(image_size[1] * 0.5).astype(int)]
            raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                                 np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
            minor_stride_side = 'l'
        else:
            raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
            raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)
                                                         ).astype(int), np.ceil(image_size[1] * 0.5).astype(int)]
            minor_stride_side = 'r'
        # Determine movement direction of this row in X
        if raster_segments[(2 * row), 1] < raster_segments[(2 * row) + 1, 1]:
            move_direction_x = 1
        else:
            move_direction_x = -1
        # always move down one
        move_direction_y = 1
        # Determine points to use for horizontal scan
        if row == 0:
            if measurement_count[0] == 1:
                x_position_list = np.arange(0, object_size[1], move_direction_x)
            else:
                x_position_list = np.arange(0, raster_segments[(2 * row) + 1, 1], move_direction_x)
        elif row == measurement_count[0] - 1:
            x_position_list = np.arange(raster_segments[(2 * row), 1], object_size[1], move_direction_x)
        else:
            x_position_list = np.arange(raster_segments[(2 * row), 1],
                                        raster_segments[(2 * row) + 1, 1], move_direction_x)
        for position_x in x_position_list:
            raster_point_list.append([raster_segments[(2 * row), 0], position_x])
        # Vertical scan connecting this row to the next (skipped on the final row)
        if np.ceil(image_size[0] * (row + 1.5)) < object_size[0]:
            for position_y in np.arange(np.ceil(image_size[0] * (row + 0.5)), np.ceil(image_size[0] * (row + 1.5))).astype(int):
                raster_point_list.append([position_y.astype(int), raster_segments[(2 * row) + 1, 1]])
    raster_point_list = np.asarray(raster_point_list)
    # Determine number of points per image
    points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
    measurement_indicies = np.arange(raster_point_list.shape[0])
    measurement_indicies = np.floor(measurement_indicies / points_per_image)
    # If full_object_multi_pass flag is specified, we want to scan the object backwards
    # and forwards multiple times instead of dividing it up into segments.
    raster_point_list_segmented = []
    for measurement_index in range(np.prod(measurement_count)):
        if not full_object_multi_pass:
            raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
        else:
            if measurement_index % 2:
                raster_point_list_segmented.append(raster_point_list)
            else:
                raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
    return(raster_point_list_segmented)
def genTwoPointLineBlurposition_list(startPos, endPos):
    """Return the integer pixel positions along a straight line between two points.

    Args:
        startPos: start position, tuple of integers
        endPos: end position, tuple of integers
    Returns:
        (n, 2) ndarray with one position per row (line rasterized by skimage)
    """
    from skimage.draw import line
    # skimage.draw.line takes (r0, c0, r1, c1); the stored points carry the
    # second coordinate first, hence the swapped indexing below.
    rows, cols = line(startPos[1], startPos[0], endPos[1], endPos[0])
    # TODO: Make this work with lists instead of arrays
    return np.column_stack((rows, cols))
# Generate linear blur kernel map as a list of positions
def genLinearBlurKernelMapPositionList(kernel_size, n_positions, point_seperation=0, centered=True, centerOffset=(0, 0)):
    """Generate a horizontal, evenly spaced line of blur-kernel positions.

    Args:
        kernel_size: Tuple with size in x and y, should be integer
        n_positions: Length of kernel, should be integer
        point_seperation: Seperation between points, should be integer
        centered: when False, positions are shifted so the kernel center is the origin
        centerOffset: (y, x) offset subtracted from the generated positions
    Returns:
        (n_positions, 2) int16 ndarray of [row, col] positions
    """
    step = point_seperation + 1
    first_col = round(kernel_size[1] / 2 - step * (n_positions / 2))
    mid_row = round(kernel_size[0] / 2)
    coords = np.zeros((n_positions, 2))
    coords[:, 1] = first_col + np.arange(0, step * n_positions, step) - centerOffset[1]
    coords[:, 0] = mid_row - centerOffset[0]
    # Cast before the optional recentering below (preserves truncation behavior).
    coords = coords.astype(np.int16)
    if not centered:
        coords[:, 0] = coords[:, 0] - np.ceil(kernel_size[0] * 0.5).astype(int)
        coords[:, 1] = coords[:, 1] - \
            np.ceil(kernel_size[1] * 0.5).astype(int) + step * (n_positions / 2)
    return coords
def genCircularBlurKernelMapposition_list(kernel_size, radius, sweepAngle, startAngle=0, center=(0, 0)):
    """Function which generates an example blur kernel map which make an arc pathway. Uses sweep angle as an input.
    Args:
        kernel_size: Tuple with size in x and y, should be integer
        radius: Desired Radius of arc, can be double or integer
        sweepAngle: Desired angle of arc in degrees
        startAngle: Angle from which to start arc, degrees
        center: Center of Arc (x,y), integer
    Returns:
        A list of x,y positions to generate a blur kernel map
    """
    # Generate circle
    # Pixel coordinate axes centered on the kernel, shifted by the requested arc center.
    x = np.arange(-(kernel_size[1] - np.mod(kernel_size[1], 2)) / 2, (kernel_size[1] -
                                                                      np.mod(kernel_size[1], 2)) / 2 - (np.mod(kernel_size[1], 2) == 1)) - center[1]
    y = np.arange(-(kernel_size[0] - np.mod(kernel_size[0], 2)) / 2, (kernel_size[0] -
                                                                      np.mod(kernel_size[0], 2)) / 2 - (np.mod(kernel_size[0], 2) == 1)) - center[0]
    [xx, yy] = np.meshgrid(x, y)
    # Annulus mask: pixels whose squared distance is within 40 of radius**2
    # (the ring half-width 40 is hard-coded).
    fullCircle = np.abs((xx ** 2 + yy ** 2) - radius ** 2) < 40
    # Angular half-plane masks clipping the annulus to the requested sweep.
    # sind/cosd are presumably degree-argument trig helpers defined elsewhere in this module.
    # NOTE(review): 'startAngle + 180 + startAngle' in M2 looks like it was meant to be
    # 'startAngle + sweepAngle + 180' (compare the cosd term on the same line and M3) -- confirm.
    M2 = ((-sind(startAngle + 180 + startAngle)) * xx) <= ((cosd(startAngle + sweepAngle + 180)) * yy)
    M3 = (-sind(startAngle + 180) * xx > cosd(startAngle + 180) * yy)
    fullCircle = fullCircle * M2 * M3
    # Each surviving mask pixel becomes one output position.
    n_positions = np.sum(fullCircle)
    print("generated %d positions" % n_positions)
    position_list = np.zeros((n_positions, 2))
    # [zp] I'm sure there is a faster way to do this...
    # Walk the mask in C order and record each active pixel; each output row
    # stores [multi_index[1], multi_index[0]], i.e. (column, row) array indices.
    it = np.nditer(fullCircle, flags=['multi_index'])
    sIdx = 0  # unused; kept as-is
    posIdx = 0
    while not it.finished:
        if it[0] > 0:
            position_list[posIdx, 1] = it.multi_index[0]
            position_list[posIdx, 0] = it.multi_index[1]
            posIdx = posIdx + 1
        it.iternext()
    position_list = position_list.astype(np.int16)
    return(position_list)
# Generate circular blur kernel position list by n_positions
def genCircularBlurKernelMapposition_listN(kernel_size, radius, n_positions, startAngle=0):
    """Function which generates an example blur kernel map which make an arc pathway. Uses number of positions as an input.
    Args:
        kernel_size: Tuple with size in x and y, should be integer
        radius: Desired Radius of arc, can be double or integer
        n_positions: Length of kernel, should be integer
        startAngle: Angle from which to start arc, degrees.
            NOTE(review): currently accepted but unused -- confirm intent.
    Returns:
        (n_positions, 2) int16 ndarray of positions, stored as (column, row) indices
    """
    # Generate circle
    # FIX: the bare name 'mod' is undefined here (NameError at call time);
    # the sweep-angle variant of this function uses np.mod -- made consistent.
    x = np.arange(-(kernel_size[1] - np.mod(kernel_size[1], 2)) / 2, (kernel_size[1] -
                                                                      np.mod(kernel_size[1], 2)) / 2 - (np.mod(kernel_size[1], 2) == 1))
    y = np.arange(-(kernel_size[0] - np.mod(kernel_size[0], 2)) / 2, (kernel_size[0] -
                                                                      np.mod(kernel_size[0], 2)) / 2 - (np.mod(kernel_size[0], 2) == 1))
    [xx, yy] = np.meshgrid(x, y)
    # Annulus mask: squared distance within 40 of radius**2 (hard-coded ring half-width).
    fullCircle = np.abs((xx**2 + yy**2) - radius**2) < 40
    position_list = np.zeros((n_positions, 2))
    # [zp] I'm sure there is a faster way to do this...
    # Walk the mask in C order; stop after the first n_positions active pixels.
    it = np.nditer(fullCircle, flags=['multi_index'])
    posIdx = 0
    while not it.finished and posIdx < n_positions:
        if it[0] > 0:
            position_list[posIdx, 1] = it.multi_index[0]
            position_list[posIdx, 0] = it.multi_index[1]
            posIdx = posIdx + 1
        it.iternext()
    return position_list.astype(np.int16)
# Generate diagonal blur kernel position list
def genDiagonalBlurKernelMapposition_list(kernel_size, n_positions, point_seperation, slope=-1):
    """Generate positions along a diagonal pathway (default slope of -1).

    Args:
        kernel_size: Tuple with size in x and y, should be integer
        n_positions: Length of kernel, should be integer
        point_seperation: Seperation between points, should be integer
            (only shifts the starting column; spacing between points is 1)
        slope: line slope; rows are computed as slope * column
    Returns:
        (n_positions, 2) int16 ndarray of [row, col] positions
    """
    step = point_seperation + 1
    first_col = int(np.round(kernel_size[1] / 2 + slope * step * (n_positions / 2)))
    coords = np.zeros((n_positions, 2))
    coords[:, 1] = first_col - slope * np.arange(0, n_positions)
    coords[:, 0] = slope * coords[:, 1]
    return coords.astype(np.int16)
######################################################################################################
##################################### ILLUMINATION GENERATION ########################################
######################################################################################################
def genIllum_pseudoRandom_len(kernel_length, beta=0.5, n_tests=10, led_count=1):
    """
    Draw n_tests pseudo-random binary illumination kernels and keep the one with
    the best (smallest) spectral condition number.

    Args:
        kernel_length: number of time points in the kernel
        beta: throughput coefficient in [0, 1]; fraction of elements switched on
        n_tests: number of random kernels to draw before keeping the best
        led_count: number of LEDs; the flat kernel has kernel_length * led_count entries
    Returns:
        (kappa_best, kernel_best) where kernel_best is reshaped to
        (kernel_length, led_count)
    """
    kernel_list = []
    for test in range(n_tests):
        n_elements_max = math.floor(beta * kernel_length * led_count)
        kernel = np.zeros(kernel_length * led_count)
        indicies = np.arange(kernel_length * led_count)
        for index in range(n_elements_max):
            # FIX: np.random.randint's 'high' is exclusive. The original used
            # 'np.size(indicies) - 1', which could never select the last remaining
            # index and raised ValueError once only one index was left.
            rand_index = np.random.randint(0, high=np.size(indicies), size=1)
            kernel[indicies[rand_index]] = 1.
            indicies = np.delete(indicies, rand_index)
        # Distribute the fractional remainder of the throughput budget.
        # NOTE(review): this indexes kernel by rand_index directly (not
        # indicies[rand_index] as in the loop above), so it may overwrite an
        # already-set element -- confirm intent.
        rand_index = np.random.randint(0, high=np.size(indicies), size=1)
        kernel[rand_index] = beta * kernel_length * led_count - np.sum(kernel)
        assert beta * kernel_length * led_count - np.sum(kernel) <= 1
        kernel_list.append(kernel)
    # Determine kernel with best condition number
    kappa_best = 1e10
    kernel_best = []
    for kernel in kernel_list:
        spectra = np.abs(np.fft.fft(kernel))
        # eps (module-level constant defined elsewhere) guards against division by zero
        kappa = np.max(spectra) / max(np.min(spectra), eps)
        if kappa < kappa_best:
            kernel_best = kernel
            kappa_best = kappa
    kernel_best = kernel_best.reshape((kernel_length, led_count))
    return (kappa_best, kernel_best)
# crude random search method using point lists
def genIllum_randomSearch(point_list, object_size_0, maxiter=100, throughputCoeff=0.5):
    """Crude random search over illumination vectors.

    Repeatedly draws random feasible illuminations and keeps the one whose
    rasterized blur kernel has the largest minimum Fourier magnitude.

    Args:
        point_list: list of [row, col] positions of the blur pathway
        object_size_0: shape of the object grid the kernel is rasterized onto
        maxiter: number of random candidates to evaluate
        throughputCoeff: throughput coefficient passed to genRandInitialization
    Returns:
        (best_illum, best_sv): best illumination vector and its minimum
        Fourier magnitude
    """
    n_points = len(point_list)
    best_sv = 0
    candidate, _ = genRandInitialization(n_points, throughputCoeff, bounds=[0, 1])
    best_illum = candidate
    for _ in range(maxiter):
        # Rasterize the candidate illumination onto the object grid.
        kernel = np.zeros(object_size_0)
        for idx, pos in enumerate(point_list):
            kernel[pos[0], pos[1]] = candidate[idx]
        min_sv = np.amin(np.abs(Ft(kernel)))
        if min_sv > best_sv:
            best_sv = min_sv
            best_illum = candidate
        candidate, _ = genRandInitialization(n_points, throughputCoeff, bounds=[0, 1])
    return best_illum, best_sv
def genIllum_pseudoRandom(blurMapCol, p, maxiter=1000, throughputCoeff=0.5, resultType='final', verbose=False):
    """Random-search illumination design: draw random feasible kernels and track
    the best condition number.

    Note: this function now takes in the blurMapCol function rather than the map itself.

    Args:
        blurMapCol: function returning columns of the blur map (passed to solver.kernel_objectives)
        p: number of illumination values per kernel
        maxiter: number of random draws
        throughputCoeff: scalar in [0, 1], usually 0.5
        resultType: accepted for signature compatibility with the other genIllum_*
            functions; the full history is always stored here
        verbose: print the value of each iteration
    Returns:
        dict with 'history' ('f', 'x'), best value 'fopt', best kernel 'xopt', and 'it'
    """
    obj = solver.kernel_objectives(blurMapCol, 1)
    result = {}
    result['history'] = {}
    result['history']['f'] = [-1] * (maxiter + 1)
    result['history']['x'] = [np.empty(p)] * (maxiter + 1)
    # initialize first value, for compatibility with other genIllums
    blurVec, k = genRandInitialization(p, throughputCoeff, bounds=[0, 1])
    result['history']['f'][0] = obj.conditionNumber(blurVec)
    result['history']['x'][0] = blurVec
    # Set up verbose printing
    if verbose == True:
        print(" Iter | Value ")
    # Iteration Loop
    for itr in np.arange(1, maxiter + 1):
        blurVec, _ = genRandInitialization(p, throughputCoeff, bounds=[0, 1])
        k = obj.conditionNumber(blurVec)
        if verbose == True:
            # FIX: the format string has two fields; the original passed only k,
            # which raised TypeError whenever verbose=True (cf. genIllum_GS).
            print(" %02d | %0.02f " % (itr, k))
        # Record every draw
        result['history']['f'][itr] = k
        result['history']['x'][itr] = blurVec
    # Store best result
    from operator import itemgetter
    index, kmin = min(enumerate(result['history']['f']), key=itemgetter(1))
    result['fopt'] = kmin
    result['xopt'] = result['history']['x'][index]
    result['it'] = maxiter  # change if we change stopping criterion
    return result
def genIllum_GS(realSpaceSupport, fourierSpaceSupport, throughputCoeff=0.5,
                resultType='final', verbose=False, usePureRandInit=False, maxiter=50):
    """Function which generates blur kernel using a modified Gerchberg-Saxton Algorithm
    Args:
        realSpaceSupport: boolean mask of real-space pixels that may be illuminated
        fourierSpaceSupport: A mask which defines the optical support in the frequency domain
        throughputCoeff: A scalar coefficient in [0,1], usually set to 0.5
        resultType: How to return the result dictionary. Options are "final" which returns only the final result, and "full", which returns kernels from all iterations.
        verbose: print the objective value at each iteration
        usePureRandInit: Whether to use a pure random initialization or the output of the genRandInitialization function as an initialization
        maxiter: the maximum number of iterations
    Returns:
        A dictionary with 'fopt' (final max/min Fourier-magnitude ratio over the support),
        'xopt' (illumination values on the real-space support), 'init', 'it', and --
        when resultType == 'full' -- per-iteration 'history'.
    """
    # Centered, orthonormal 2D Fourier transform pair
    def Ft(x): return np.fft.fftshift(np.fft.fft2(np.fft.fftshift(x, axes=(0, 1)), axes=(0, 1), norm='ortho'), axes=(0, 1))

    def iFt(x): return np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(
        x, axes=(0, 1)), axes=(0, 1), norm='ortho'), axes=(0, 1))
    image_size = fourierSpaceSupport.shape
    p = np.sum(realSpaceSupport)
    # DC Term in Fourier Space
    dc = np.zeros(image_size, dtype=np.double)
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    dc[np.ceil(image_size[0] / 2).astype(int), np.ceil(image_size[1] / 2).astype(int)] = (throughputCoeff * p) ** 2
    maxPSD = p * (np.floor(throughputCoeff * p) + np.remainder(throughputCoeff * p, 1.0)
                  ** 2)  # max power spectrum in fourier domain
    minPS = (maxPSD - np.sum(dc)) / np.sum(fourierSpaceSupport)
    powerSpectrum = minPS * fourierSpaceSupport + dc
    # Check Power spectrum integrity
    assert np.any(np.abs(np.sum(powerSpectrum) - maxPSD) <= 1e-3), "Power spectrum does not match PSD criterion."
    # initialize result dictionary
    result = {}
    if resultType == 'full':
        result['history'] = {}
        result['history']['f'] = [-1] * (maxiter + 1)
        result['history']['x'] = [np.empty(p)] * (maxiter + 1)
    # Initialize with random initialization
    blurKernel = np.zeros(image_size, dtype=np.complex64)
    if usePureRandInit:
        blurKernel[realSpaceSupport] = np.random.rand(p)
    else:
        blurKernel[realSpaceSupport], k = genRandInitialization(p, throughputCoeff)
    # Project initialization into valid simplex
    blurKernel[realSpaceSupport] = project_simplex_box.project(
        np.real(blurKernel[realSpaceSupport]), throughputCoeff * p, alg='is')
    # Add initialization to results
    result['init'] = {}
    result['init']['f'] = np.amax(np.abs(Ft(blurKernel))[fourierSpaceSupport]) / \
        np.amin(np.abs(Ft(blurKernel))[fourierSpaceSupport])
    result['init']['x'] = np.abs(blurKernel[realSpaceSupport])
    # Store initialization in history variable
    if (resultType == "full"):
        result['history']['f'][0] = np.amax(np.abs(Ft(blurKernel))[fourierSpaceSupport]) / \
            np.amin(np.abs(Ft(blurKernel))[fourierSpaceSupport])
        result['history']['x'][0] = np.abs(blurKernel[realSpaceSupport])
    # Set up verbose printing
    if verbose == True:
        print(" Iter | Value ")
    # Iteration Loop
    for itr in np.arange(1, maxiter + 1):
        # Enforce power spectrum in frequency domain
        blurKernel_ft = np.sqrt(powerSpectrum).astype(np.complex64) * \
            np.exp(1j * fourierSpaceSupport * np.angle(Ft(blurKernel)))
        # Enforce support in real domain
        blurKernel = iFt(blurKernel_ft)
        blurKernel = blurKernel * realSpaceSupport
        # Perform projection onto simplex box
        blurKernel[realSpaceSupport] = project_simplex_box.project(
            np.real(blurKernel[realSpaceSupport]), throughputCoeff * p, alg='is')
        if verbose == True:
            print(" %02d | %0.02f " % (itr, np.amax(np.abs(Ft(blurKernel))[
                fourierSpaceSupport]) / np.amin(abs(Ft(blurKernel))[fourierSpaceSupport])))
        # Return full or partial result depending on user input
        if (resultType == 'full'):
            result['history']['f'][itr] = np.amax(
                np.abs(Ft(blurKernel))[fourierSpaceSupport]) / np.amin(np.abs(Ft(blurKernel))[fourierSpaceSupport])
            result['history']['x'][itr] = np.abs(blurKernel[realSpaceSupport])
    # Store result
    result['fopt'] = np.amax(np.abs(Ft(blurKernel))[fourierSpaceSupport]) / \
        np.amin(np.abs(Ft(blurKernel))[fourierSpaceSupport])
    result['xopt'] = np.abs(blurKernel[realSpaceSupport])
    result['it'] = maxiter  # change if we change stopping criterion
    return result
def genIllum(blurMapCol, nColumns, throughputCoeff=0.5, DNF=False, verbose=False,
             usePureRandInit=False, maxiter=500, resultType='final', init=None):
    """Design an illumination kernel with projected-gradient optimization.

    Args:
        blurMapCol: A function which returns columns of the blur map
        nColumns: number of columns in the blur map
        throughputCoeff: A scalar coefficient in [0,1], usually set to 0.5
        DNF: flag to indicate use of the DNF objective function (otherwise, condition number is default)
        verbose: flag for printing each iteration of the gradient algorithm
        usePureRandInit: Whether to use a pure random initialization or the output of the genRandInitialization function as an initialization
        maxiter: the maximum number of iterations
        resultType: "final" returns only the final result; "full" records all iterates
            (and always runs the full maxiter instead of stopping at convergence)
        init: optional explicit initialization vector
    Returns:
        dict with xopt, fopt, fopt2, it and -- for resultType == 'full' -- history.
        Recorded f values are smoothed surrogates; recompute from the iterates for
        true objective values.
    """
    result = {}
    n = nColumns
    beta = throughputCoeff
    objective = solver.kernel_objectives(blurMapCol, 1)

    # Projection onto the feasible simplex box
    def projis(v):
        return project_simplex_box.project(v, beta * n, alg='is')

    # Select smooth surrogate, true objective(s) and gradient
    if DNF:
        f = objective.svSquaredReciprocalSumSmooth
        f_true = objective.svSquaredReciprocalSum
        f_true2 = objective.svSquaredReciprocalSum
        grad = objective.gradSvSquaredReciprocalSum
    else:
        f = objective.minSvSquaredSmooth
        f_true = objective.conditionNumber
        f_true2 = objective.minSvSquared
        grad = objective.gradMinSvSquared

    # Choose the starting point
    if init is not None:
        x0 = init
    elif usePureRandInit:
        x0 = np.random.rand(n)
    else:
        x0, kappa = genRandInitialization(n, beta)
    x0 = projis(x0)

    # Optimize; restart from a fresh random point if the projection fails to converge
    while True:
        try:
            if resultType == 'full':
                x, fval = pgd.projectedIterativeMax_developement(
                    x0, objective, f, grad, projis, pgd.backtrackingstep, pgd.smoothing_pow, maxiter, verbose=verbose)
            else:
                xstar, it = pgd.projectedIterativeMax(
                    x0, objective, f, grad, projis, pgd.backtrackingstep, pgd.smoothing_pow, maxiter, verbose=verbose)
            break
        except ArithmeticError:
            print('no projection convergence, restarting')
            x0 = projis(np.random.rand(n))

    # Package the result
    if resultType == 'full':
        result['history'] = {}
        result['history']['f_smooth'] = fval
        result['history']['f'] = [f_true(x[:, itr]) for itr in np.arange(maxiter)]
        result['history']['x'] = x
        result['it'] = maxiter
        result['xopt'] = x[:, maxiter - 1]
        result['fopt'] = f_true(x[:, maxiter - 1])
        result['fopt2'] = f_true2(x[:, maxiter - 1])
    else:
        result['xopt'] = xstar
        result['fopt'] = f_true(xstar)
        result['fopt2'] = f_true2(xstar)
        result['it'] = it
    return result
# Development version, may be unstable
def genIllumDev(blurMapCol, nColumns, throughputCoeff, sumobj=False, verbose=False,
                usePureRandInit=False, maxiter=500, resultType='final'):
    """Development variant of genIllum driven by the optimize_pgd solver.

    Maximizes either the reciprocal-sum objective (sumobj=True) or the
    minimum-singular-value objective, projecting iterates onto the scaled
    simplex/box set. Restarts from a random point if the inner projection
    fails to converge.

    Returns:
        dict with 'xopt' (final iterate), 'fopt' (its condition number),
        and 'it' (iterations used).
    """
    n = nColumns
    beta = throughputCoeff
    obj = solver.kernel_objectives(blurMapCol, 1)

    # Projection onto the scaled simplex/box constraint set.
    def _project(v):
        return project_simplex_box.project(v, beta * n, alg='is')

    # Smoothed objective and its gradient.
    if sumobj:
        f = obj.svSquaredReciprocalSumSmooth
        grad = obj.gradSvSquaredReciprocalSum
    else:
        f = obj.minSvSquaredSmooth
        grad = obj.gradMinSvSquared

    # Initialization, projected onto the cardinal simplex.
    if usePureRandInit:
        x0 = np.random.rand(n)
    else:
        x0, kappa = genRandInitialization(n, beta)
    x0 = _project(x0)

    # Solve; on projection failure, retry from a fresh random start.
    while True:
        try:
            xstar, it = optimize_pgd.projectedIterativeMax(
                x0, obj, f, grad, _project, optimize_pgd.backtrackingstep, optimize_pgd.smoothing_pow, maxiter, verbose=verbose)
            break
        except ArithmeticError:
            print('no projection convergence, restarting')
            x0 = _project(np.random.rand(n))

    result = {}
    result['xopt'] = xstar
    result['fopt'] = obj.conditionNumber(xstar)
    result['it'] = it
    return result
def plotIllumMap(illumVector, ledPointListNa, markerSz=50, plot_colormap="Greys"):
    """Animate an illumination sequence over the LED positions in NA space.

    Renders a plotly scatter animation with one frame per scan position:
    each LED is drawn at its (NA x, NA y) coordinate and colored by its
    illumination value for that position. Play/pause buttons and a position
    slider are included.

    Args:
        illumVector: array of shape (n_positions, n_leds); row t holds the
            per-LED illumination values for position t.
        ledPointListNa: array of shape (n_leds, 2) with LED NA coordinates.
        markerSz: marker area used for every LED dot.
        plot_colormap: plotly named colorscale (e.g. 'Greys', 'Viridis').

    Fixes vs. previous revision:
        * default colorscale was "Grays", which is not a valid plotly
          colorscale name — "Greys" is (see plotly's named colorscales,
          also listed in the comment this function originally carried);
        * `arange`/`size` were called unqualified; now `np.arange`/`np.size`,
          consistent with the rest of this module.
    """
    from plotly.offline import init_notebook_mode, iplot
    init_notebook_mode(connected=True)
    n_positions = illumVector.shape[0]

    def _led_trace(led_vals):
        """Scatter trace of all LEDs colored by their illumination values."""
        return {
            'x': list(ledPointListNa[:, 0]),
            'y': list(ledPointListNa[:, 1]),
            'mode': 'markers',
            'text': list(np.arange(0, np.size(ledPointListNa[:, 1]))),
            'marker': {
                'sizemode': 'area',
                'sizeref': 200000,
                'size': markerSz,
                'color': led_vals,
                'colorscale': plot_colormap
            },
            'name': " "
        }

    # Static layout: symmetric NA axes sized to 1.5x the farthest LED,
    # fixed canvas size, and play/pause animation controls.
    maxR = 1.5 * max(abs(ledPointListNa).reshape(-1))
    layout = {
        'paper_bgcolor': 'rgba(0,1,0,1)',
        'plot_bgcolor': 'rgba(0,1,0,1)',
        'xaxis': {'range': [-maxR, maxR], 'title': 'NA (x)'},
        'yaxis': {'range': [-maxR, maxR], 'title': 'NA (y)'},
        'hovermode': 'closest',
        'height': 560,
        'width': 500,
        'updatemenus': [
            {
                'buttons': [
                    {
                        'args': [None, {'frame': {'duration': 200, 'redraw': False},
                                        'fromcurrent': True, 'transition': {'duration': 100, 'easing': 'quadratic-in-out'}}],
                        'label': 'Play',
                        'method': 'animate'
                    },
                    {
                        'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate',
                                          'transition': {'duration': 0}}],
                        'label': 'Pause',
                        'method': 'animate'
                    }
                ],
                'direction': 'left',
                'pad': {'r': 10, 't': 87},
                'showactive': False,
                'type': 'buttons',
                'x': 0.1,
                'xanchor': 'right',
                'y': 0,
                'yanchor': 'top'
            }
        ]
    }
    sliders_dict = {
        'active': 0,
        'yanchor': 'top',
        'xanchor': 'left',
        'currentvalue': {
            'font': {'size': 20},
            'prefix': 'Position:',
            'visible': True,
            'xanchor': 'right'
        },
        'transition': {'duration': 300, 'easing': 'cubic-in-out'},
        'pad': {'b': 10, 't': 50},
        'len': 0.9,
        'x': 0.1,
        'y': 0,
        'steps': []
    }

    # Initial trace (position 0), then one animation frame + slider step
    # per position.
    figure = {'data': [_led_trace(illumVector[0, :])], 'layout': layout, 'frames': []}
    for position in np.arange(0, n_positions):
        figure['frames'].append({'data': [_led_trace(illumVector[position, :])],
                                 'name': str(position)})
        sliders_dict['steps'].append({
            'args': [
                [position],
                {'frame': {'duration': 100, 'redraw': False},
                 'mode': 'immediate',
                 'transition': {'duration': 100}}
            ],
            'label': position,
            'method': 'animate'
        })
    figure['layout']['sliders'] = [sliders_dict]
    iplot(figure)
class BlurKernelBasis(ops.Operator):
    """Operator mapping basis-function weights to a summed phase-ramp kernel.

    The input vector holds weights for two bases (y then x). Each weight
    vector is multiplied by its basis to produce per-time-step shifts
    (rys, rxs); each shift yields a complex phase ramp weighted by the
    corresponding illumination, and the ramps are summed over time steps.
    """
    # inputs are weights for singular_vectors functions
    # operator first translates weights to a position,
    # then returns the resulting PhaseRamp
    # TODO in progress
    def __init__(self, N, basis, illums, verbose=False, dtype=None, backend=None, label='R'):
        """Build the operator.

        Args:
            N: output (kernel) shape, (rows, cols).
            basis: pair (basis_y, basis_x); each has one column per basis
                function and one row per time step (illumination index).
            illums: sequence of illumination patterns, one per time step.
            verbose: print progress in the gradient computation.
            dtype, backend: llops datatype/backend; defaults come from
                yp.config when None.
            label: operator label used in representations.
        """
        # Configure backend and datatype
        backend = backend if backend is not None else yp.config.default_backend
        dtype = dtype if dtype is not None else yp.config.default_dtype
        # Normalized frequency grids over [-1/2, 1/2) for each axis.
        x = np.arange(-N[1] / 2, N[1] / 2, 1.0) / N[1]
        y = np.arange(-N[0] / 2, N[0] / 2, 1.0) / N[0]
        xx, yy = np.meshgrid(x, y)
        # Angular-frequency ramps used to build the phase ramps.
        ry = -2 * np.pi * yy
        rx = -2 * np.pi * xx
        self.verbose = verbose
        # Convert to the correct dtype and backend: TODO why through numpy?
        dtype_np = yp.getNativeDatatype(dtype, 'numpy')
        self.rx = yp.changeBackend(rx.astype(dtype_np), backend)
        self.ry = yp.changeBackend(ry.astype(dtype_np), backend)
        basis_y, basis_x = basis
        self.basis_y = yp.changeBackend(basis_y.astype(dtype_np), backend)
        self.basis_x = yp.changeBackend(basis_x.astype(dtype_np), backend)
        # Number of basis functions (weights) along each axis.
        self.ndim_y = yp.shape(self.basis_y)[1]
        self.ndim_x = yp.shape(self.basis_x)[1]
        self.illums = illums  # TODO backend and dtype?
        # TODO remove this
        np.seterr(all='warn')
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; super(BlurKernelBasis, self) would be safer.
        super(self.__class__, self).__init__((N, (self.ndim_y + self.ndim_x, 1)), dtype, backend, smooth=True, label=label,
                                             forward=self._forward, gradient=self._gradient,
                                             convex=False, repr_latex=self._latex)
    def _latex(self, latex_input=None):
        """Return a LaTeX representation of the operator (optionally applied to latex_input)."""
        if latex_input is not None:
            return 'e^{-i2\\pi (\\vec{k} \\cdot S \\cdot' + latex_input + ')}'
        else:
            return 'e^{-i2\\pi (\\vec{k} \\cdot S \\cdot [\\cdot ])}'
    def _forward(self, x, y):
        """Forward model: write the illumination-weighted sum of phase ramps into y (in place)."""
        # TODO does this indexing work when not numpy
        # Split the weight vector into y- and x-axis weights.
        weight_y = x[:self.ndim_y]
        weight_x = x[self.ndim_y:]
        # Per-time-step shifts from the bases.
        rys = yp.matmul(self.basis_y, weight_y)
        rxs = yp.matmul(self.basis_x, weight_x)
        print('forward with position:', np.amax(rys), np.amax(rxs))
        # TODO: need to set this as zero otherwise weird results
        y[:] = yp.zeros(self.M, dtype=self.dtype, backend=self.backend)
        # Accumulate one illumination-weighted phase ramp per time step.
        for t in range(len(self.illums)):
            y[:] += self._single_forward(self.illums[t], [rys[t], rxs[t]])
            # self.illums[t] * exp(self.ry * scalar(rys[t]) + self.rx * scalar(rxs[t]))
    def _single_forward(self, illum, r):
        """Return illum * exp(i * (ry*r[0] + rx*r[1])) built via Euler's formula."""
        # using euler's formula instead of
        # exp(self.ry * scalar(r[0]) + self.rx * scalar(r[1]))
        inner = self.ry * yp.scalar(r[0]) + self.rx * yp.scalar(r[1])
        # if self.verbose: print(np.amax(np.abs(inner)), r)
        # NOTE(review): if RuntimeWarning is actually raised here (it is only
        # an exception under np.seterr(...='raise'), not the 'warn' set in
        # __init__), `result` is unbound and the return raises NameError.
        try:
            result = illum * (yp.cos(inner) + 1j * yp.sin(inner))
        except RuntimeWarning as e:
            print(e, illum, r)
        return result
    def _gradient(self, x=None, inside_operator=None):
        """Build the gradient operator at weights x by summing basis-weighted forward terms over t."""
        from .stack import Vstack
        weight_y = x[:self.ndim_y]
        weight_x = x[self.ndim_y:]
        rys = yp.matmul(self.basis_y, weight_y)
        rxs = yp.matmul(self.basis_x, weight_x)
        print(np.amax(np.abs(rys)), np.amax(np.abs(rxs)), np.amax(self.illums))
        # sum across t
        if self.verbose: print('computing over t')
        # One accumulator slice per basis function, over the full kernel.
        sum_exp_y = yp.zeros([self.ndim_y, self.M[0], self.M[1]])
        sum_exp_x = yp.zeros([self.ndim_x, self.M[0], self.M[1]])
        for t in range(len(self.illums)):
            if self.verbose: print(t, end=' ')
            forward_t = self._single_forward(self.illums[t], [rys[t], rxs[t]])
            for i in range(max(self.ndim_y, self.ndim_x)):
                if i < self.ndim_y: sum_exp_y[i,:,:] += self.basis_y[t, i] * forward_t
                if i < self.ndim_x: sum_exp_x[i,:,:] += self.basis_x[t, i] * forward_t
        S = ops.Sum(self.M, self.dtype, self.backend)
        if self.verbose: print('\nconstructing columns')
        # One (Sum o Diagonalize) column per basis function, y-axis then x-axis.
        column_list_y = []; column_list_x = []
        for i in range(max(self.ndim_y, self.ndim_x)):
            if self.verbose: print(i, end=' ')
            if i < self.ndim_y: column_list_y.append(S * ops.Diagonalize(conj(1j * self.ry) * yp.conj(sum_exp_y[i])))
            if i < self.ndim_x: column_list_x.append(S * ops.Diagonalize(conj(1j * self.rx) * yp.conj(sum_exp_x[i])))
        print('\n')
        G = ops.Vstack(column_list_y + column_list_x)
        return ops._GradientOperator(G)
    def _gradient_lowmem(self, x=None, inside_operator=None):
        """Low-memory variant of _gradient: recompute forward terms per column instead of storing all accumulators.

        NOTE(review): the column loop bound uses max(self.ndim_y, self.ndim_y)
        (y twice) where _gradient uses max(ndim_y, ndim_x) — looks like a typo;
        confirm before relying on this path when ndim_x > ndim_y.
        """
        from .stack import Vstack
        weight_y = x[:self.ndim_y]
        weight_x = x[self.ndim_y:]
        rys = yp.matmul(self.basis_y, weight_y)
        rxs = yp.matmul(self.basis_x, weight_x)
        print(np.amax(np.abs(rys)), np.amax(np.abs(rxs)))
        S = ops.Sum(self.M, self.dtype, self.backend)
        column_list_y = []; column_list_x = []
        for i in range(max(self.ndim_y, self.ndim_y)):
            if self.verbose:
                print('computing for column', i)
            # Seed accumulators with the t=0 term, then add t=1..T-1.
            forward_0 = self._single_forward(self.illums[0], [rys[0], rxs[0]])
            if i < self.ndim_y:
                sum_exp_y = self.basis_y[0, i] * forward_0
            if i < self.ndim_x:
                sum_exp_x = self.basis_x[0, i] * forward_0
            for t in range(1, len(self.illums)):
                forward_t = self._single_forward(self.illums[t], [rys[t], rxs[t]])
                if i < self.ndim_y:
                    sum_exp_y += self.basis_y[t, i] * forward_t
                if i < self.ndim_x: sum_exp_x += self.basis_x[t, i] * forward_t
            if i < self.ndim_y: column_list_y.append(S * ops.Diagonalize(conj(1j * self.ry) * yp.conj(sum_exp_y)))
            if i < self.ndim_x: column_list_x.append(S * ops.Diagonalize(conj(1j * self.rx) * yp.conj(sum_exp_x)))
        G = ops.Vstack(column_list_y + column_list_x)
        return ops._GradientOperator(G)
| [
"llops.operators.Vstack",
"numpy.argsort",
"numpy.sin",
"operator.itemgetter",
"numpy.arange",
"llops.operators.Convolution",
"numpy.delete",
"numpy.real",
"llops.operators._GradientOperator",
"numpy.concatenate",
"skimage.draw.line",
"numpy.round",
"llops.conj",
"matplotlib.pyplot.savefig... | [((4157, 4176), 'numpy.sum', 'np.sum', (['kernel_best'], {}), '(kernel_best)\n', (4163, 4176), True, 'import numpy as np\n'), ((4207, 4243), 'llops.cast', 'yp.cast', (['kernel_best', 'dtype', 'backend'], {}), '(kernel_best, dtype, backend)\n', (4214, 4243), True, 'import llops as yp\n'), ((5829, 5849), 'llops.size', 'yp.size', (['blur_vector'], {}), '(blur_vector)\n', (5836, 5849), True, 'import llops as yp\n'), ((5988, 6021), 'llops.expandDims', 'yp.expandDims', (['blur_vector', 'ndims'], {}), '(blur_vector, ndims)\n', (6001, 6021), True, 'import llops as yp\n'), ((6171, 6190), 'llops.vec', 'yp.vec', (['blur_vector'], {}), '(blur_vector)\n', (6177, 6190), True, 'import llops as yp\n'), ((6510, 6543), 'llops.expandDims', 'yp.expandDims', (['blur_vector', 'ndims'], {}), '(blur_vector, ndims)\n', (6523, 6543), True, 'import llops as yp\n'), ((7401, 7429), 'llops.roll', 'yp.roll', (['kernel', 'roll_values'], {}), '(kernel, roll_values)\n', (7408, 7429), True, 'import llops as yp\n'), ((15007, 15022), 'numpy.mean', 'np.mean', (['static'], {}), '(static)\n', (15014, 15022), True, 'import numpy as np\n'), ((15131, 15147), 'numpy.mean', 'np.mean', (['blurred'], {}), '(blurred)\n', (15138, 15147), True, 'import numpy as np\n'), ((18215, 18275), 'numpy.zeros', 'np.zeros', (['kernel_centered.shape'], {'dtype': 'kernel_centered.dtype'}), '(kernel_centered.shape, dtype=kernel_centered.dtype)\n', (18223, 18275), True, 'import numpy as np\n'), ((18813, 18840), 'numpy.where', 'np.where', (['(kernel_zeroed > 0)'], {}), '(kernel_zeroed > 0)\n', (18821, 18840), True, 'import numpy as np\n'), ((19116, 19176), 'numpy.zeros', 'np.zeros', (['kernel_centered.shape'], {'dtype': 'kernel_centered.dtype'}), '(kernel_centered.shape, dtype=kernel_centered.dtype)\n', (19124, 19176), True, 'import numpy as np\n'), ((20561, 20634), 'numpy.zeros', 'np.zeros', (['(blur_kernel_list[0].shape[0], blur_kernel_list[0].shape[1], 3)'], {}), '((blur_kernel_list[0].shape[0], 
blur_kernel_list[0].shape[1], 3))\n', (20569, 20634), True, 'import numpy as np\n'), ((20962, 20978), 'numpy.amax', 'np.amax', (['image_c'], {}), '(image_c)\n', (20969, 20978), True, 'import numpy as np\n'), ((20984, 20996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20994, 20996), True, 'import matplotlib.pyplot as plt\n'), ((21001, 21046), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_c'], {'interpolation': '"""bilinear"""'}), "(image_c, interpolation='bilinear')\n", (21011, 21046), True, 'import matplotlib.pyplot as plt\n'), ((21051, 21069), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '[]'], {}), '([], [])\n', (21061, 21069), True, 'import matplotlib.pyplot as plt\n'), ((21074, 21092), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]', '[]'], {}), '([], [])\n', (21084, 21092), True, 'import matplotlib.pyplot as plt\n'), ((21097, 21115), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21113, 21115), True, 'import matplotlib.pyplot as plt\n'), ((21120, 21159), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'transparent': '(True)'}), '(filename, transparent=True)\n', (21131, 21159), True, 'import matplotlib.pyplot as plt\n'), ((21467, 21501), 'numpy.zeros', 'np.zeros', (['object_size'], {'dtype': 'dtype'}), '(object_size, dtype=dtype)\n', (21475, 21501), True, 'import numpy as np\n'), ((21726, 21778), 'numpy.zeros', 'np.zeros', (['positions_0.shape'], {'dtype': 'positions_0.dtype'}), '(positions_0.shape, dtype=positions_0.dtype)\n', (21734, 21778), True, 'import numpy as np\n'), ((22354, 22387), 'llops.operators.Crop', 'ops.Crop', (['object_size', 'image_size'], {}), '(object_size, image_size)\n', (22362, 22387), True, 'import llops.operators as ops\n'), ((23523, 23564), 'numpy.zeros', 'np.zeros', (['object_size'], {'dtype': 'np.complex64'}), '(object_size, dtype=np.complex64)\n', (23531, 23564), True, 'import numpy as np\n'), ((25412, 25437), 'numpy.size', 'np.size', (['position_list', '(0)'], {}), 
'(position_list, 0)\n', (25419, 25437), True, 'import numpy as np\n'), ((25460, 25515), 'numpy.zeros', 'np.zeros', (['(n_positions, kernel_size[0], kernel_size[1])'], {}), '((n_positions, kernel_size[0], kernel_size[1]))\n', (25468, 25515), True, 'import numpy as np\n'), ((25532, 25557), 'numpy.arange', 'np.arange', (['(0)', 'n_positions'], {}), '(0, n_positions)\n', (25541, 25557), True, 'import numpy as np\n'), ((26010, 26035), 'numpy.size', 'np.size', (['position_list', '(0)'], {}), '(position_list, 0)\n', (26017, 26035), True, 'import numpy as np\n'), ((26054, 26096), 'numpy.zeros', 'np.zeros', (['(kernel_size[0], kernel_size[1])'], {}), '((kernel_size[0], kernel_size[1]))\n', (26062, 26096), True, 'import numpy as np\n'), ((31908, 31948), 'numpy.linspace', 'np.linspace', (['(0)', '(extent - 1)', 'num_basis_fn'], {}), '(0, extent - 1, num_basis_fn)\n', (31919, 31948), True, 'import numpy as np\n'), ((31995, 32037), 'numpy.linspace', 'np.linspace', (['knotsx[0]', 'knotsx[-1]', 'extent'], {}), '(knotsx[0], knotsx[-1], extent)\n', (32006, 32037), True, 'import numpy as np\n'), ((33515, 33626), 'llops.operators.BlurKernelBasis', 'ops.BlurKernelBasis', (['object_size', '(basis_y, basis_x)', 'illums'], {'dtype': 'dtype', 'backend': 'backend', 'verbose': 'verbose'}), '(object_size, (basis_y, basis_x), illums, dtype=dtype,\n backend=backend, verbose=verbose)\n', (33534, 33626), True, 'import llops.operators as ops\n'), ((33659, 33744), 'llops.operators.FourierTransform', 'ops.FourierTransform', (['object_size'], {'dtype': 'dtype', 'backend': 'backend', 'normalize': '(False)'}), '(object_size, dtype=dtype, backend=backend, normalize=False\n )\n', (33679, 33744), True, 'import llops.operators as ops\n'), ((34014, 34103), 'llops.operators.Diagonalize', 'ops.Diagonalize', (['(F * object_initial)'], {'label': '"""D_{object}"""', 'dtype': 'dtype', 'backend': 'backend'}), "(F * object_initial, label='D_{object}', dtype=dtype,\n backend=backend)\n", (34029, 34103), True, 
'import llops.operators as ops\n'), ((34349, 34402), 'llops.operators.L2Norm', 'ops.L2Norm', (['object_size'], {'dtype': 'dtype', 'backend': 'backend'}), '(object_size, dtype=dtype, backend=backend)\n', (34359, 34402), True, 'import llops.operators as ops\n'), ((34507, 34547), 'llops.getNativeDatatype', 'yp.getNativeDatatype', (['F.dtype', 'F.backend'], {}), '(F.dtype, F.backend)\n', (34527, 34547), True, 'import llops as yp\n'), ((36881, 36892), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (36889, 36892), True, 'import numpy as np\n'), ((37147, 37196), 'numpy.random.choice', 'np.random.choice', (['n'], {'size': 'n_pulses', 'replace': '(False)'}), '(n, size=n_pulses, replace=False)\n', (37163, 37196), True, 'import numpy as np\n'), ((40576, 40602), 'numpy.size', 'np.size', (['innerKernelMap', '(2)'], {}), '(innerKernelMap, 2)\n', (40583, 40602), True, 'import numpy as np\n'), ((40619, 40645), 'numpy.size', 'np.size', (['outerKernelMap', '(2)'], {}), '(outerKernelMap, 2)\n', (40626, 40645), True, 'import numpy as np\n'), ((43973, 43999), 'numpy.size', 'np.size', (['outerKernelMap', '(2)'], {}), '(outerKernelMap, 2)\n', (43980, 43999), True, 'import numpy as np\n'), ((44016, 44042), 'numpy.size', 'np.size', (['innerKernelMap', '(2)'], {}), '(innerKernelMap, 2)\n', (44023, 44042), True, 'import numpy as np\n'), ((44541, 44576), 'numpy.arange', 'np.arange', (['(0)', '(innerSize * outerSize)'], {}), '(0, innerSize * outerSize)\n', (44550, 44576), True, 'import numpy as np\n'), ((46864, 46893), 'numpy.any', 'np.any', (['(measurement_count > 1)'], {}), '(measurement_count > 1)\n', (46870, 46893), True, 'import numpy as np\n'), ((47139, 47192), 'numpy.zeros', 'np.zeros', (['(measurement_count[0] * 2, 2)'], {'dtype': 'np.int'}), '((measurement_count[0] * 2, 2), dtype=np.int)\n', (47147, 47192), True, 'import numpy as np\n'), ((47372, 47403), 'numpy.arange', 'np.arange', (['measurement_count[0]'], {}), '(measurement_count[0])\n', (47381, 47403), True, 'import numpy as 
np\n'), ((49750, 49779), 'numpy.asarray', 'np.asarray', (['raster_point_list'], {}), '(raster_point_list)\n', (49760, 49779), True, 'import numpy as np\n'), ((49940, 49977), 'numpy.arange', 'np.arange', (['raster_point_list.shape[0]'], {}), '(raster_point_list.shape[0])\n', (49949, 49977), True, 'import numpy as np\n'), ((50005, 50054), 'numpy.floor', 'np.floor', (['(measurement_indicies / points_per_image)'], {}), '(measurement_indicies / points_per_image)\n', (50013, 50054), True, 'import numpy as np\n'), ((53625, 53654), 'numpy.any', 'np.any', (['(measurement_count > 1)'], {}), '(measurement_count > 1)\n', (53631, 53654), True, 'import numpy as np\n'), ((53900, 53953), 'numpy.zeros', 'np.zeros', (['(measurement_count[0] * 2, 2)'], {'dtype': 'np.int'}), '((measurement_count[0] * 2, 2), dtype=np.int)\n', (53908, 53953), True, 'import numpy as np\n'), ((54133, 54164), 'numpy.arange', 'np.arange', (['measurement_count[0]'], {}), '(measurement_count[0])\n', (54142, 54164), True, 'import numpy as np\n'), ((57541, 57570), 'numpy.asarray', 'np.asarray', (['raster_point_list'], {}), '(raster_point_list)\n', (57551, 57570), True, 'import numpy as np\n'), ((57731, 57768), 'numpy.arange', 'np.arange', (['raster_point_list.shape[0]'], {}), '(raster_point_list.shape[0])\n', (57740, 57768), True, 'import numpy as np\n'), ((57796, 57845), 'numpy.floor', 'np.floor', (['(measurement_indicies / points_per_image)'], {}), '(measurement_indicies / points_per_image)\n', (57804, 57845), True, 'import numpy as np\n'), ((61205, 61234), 'numpy.any', 'np.any', (['(measurement_count > 1)'], {}), '(measurement_count > 1)\n', (61211, 61234), True, 'import numpy as np\n'), ((61480, 61533), 'numpy.zeros', 'np.zeros', (['(measurement_count[0] * 2, 2)'], {'dtype': 'np.int'}), '((measurement_count[0] * 2, 2), dtype=np.int)\n', (61488, 61533), True, 'import numpy as np\n'), ((61684, 61715), 'numpy.arange', 'np.arange', (['measurement_count[0]'], {}), '(measurement_count[0])\n', (61693, 61715), 
True, 'import numpy as np\n'), ((62392, 62421), 'numpy.asarray', 'np.asarray', (['raster_point_list'], {}), '(raster_point_list)\n', (62402, 62421), True, 'import numpy as np\n'), ((62582, 62619), 'numpy.arange', 'np.arange', (['raster_point_list.shape[0]'], {}), '(raster_point_list.shape[0])\n', (62591, 62619), True, 'import numpy as np\n'), ((62647, 62696), 'numpy.floor', 'np.floor', (['(measurement_indicies / points_per_image)'], {}), '(measurement_indicies / points_per_image)\n', (62655, 62696), True, 'import numpy as np\n'), ((66990, 67015), 'numpy.concatenate', 'np.concatenate', (['line_list'], {}), '(line_list)\n', (67004, 67015), True, 'import numpy as np\n'), ((67700, 67725), 'numpy.concatenate', 'np.concatenate', (['line_list'], {}), '(line_list)\n', (67714, 67725), True, 'import numpy as np\n'), ((68443, 68472), 'numpy.any', 'np.any', (['(measurement_count > 1)'], {}), '(measurement_count > 1)\n', (68449, 68472), True, 'import numpy as np\n'), ((68639, 68692), 'numpy.zeros', 'np.zeros', (['(measurement_count[0] * 2, 2)'], {'dtype': 'np.int'}), '((measurement_count[0] * 2, 2), dtype=np.int)\n', (68647, 68692), True, 'import numpy as np\n'), ((68872, 68903), 'numpy.arange', 'np.arange', (['measurement_count[0]'], {}), '(measurement_count[0])\n', (68881, 68903), True, 'import numpy as np\n'), ((71250, 71279), 'numpy.asarray', 'np.asarray', (['raster_point_list'], {}), '(raster_point_list)\n', (71260, 71279), True, 'import numpy as np\n'), ((71440, 71477), 'numpy.arange', 'np.arange', (['raster_point_list.shape[0]'], {}), '(raster_point_list.shape[0])\n', (71449, 71477), True, 'import numpy as np\n'), ((71505, 71554), 'numpy.floor', 'np.floor', (['(measurement_indicies / points_per_image)'], {}), '(measurement_indicies / points_per_image)\n', (71513, 71554), True, 'import numpy as np\n'), ((72724, 72776), 'skimage.draw.line', 'line', (['startPos[1]', 'startPos[0]', 'endPos[1]', 'endPos[0]'], {}), '(startPos[1], startPos[0], endPos[1], endPos[0])\n', (72728, 
72776), False, 'from skimage.draw import line\n'), ((73598, 73624), 'numpy.zeros', 'np.zeros', (['(n_positions, 2)'], {}), '((n_positions, 2))\n', (73606, 73624), True, 'import numpy as np\n'), ((75316, 75333), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (75327, 75333), True, 'import numpy as np\n'), ((75629, 75647), 'numpy.sum', 'np.sum', (['fullCircle'], {}), '(fullCircle)\n', (75635, 75647), True, 'import numpy as np\n'), ((75719, 75745), 'numpy.zeros', 'np.zeros', (['(n_positions, 2)'], {}), '((n_positions, 2))\n', (75727, 75745), True, 'import numpy as np\n'), ((75812, 75856), 'numpy.nditer', 'np.nditer', (['fullCircle'], {'flags': "['multi_index']"}), "(fullCircle, flags=['multi_index'])\n", (75821, 75856), True, 'import numpy as np\n'), ((77264, 77281), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (77275, 77281), True, 'import numpy as np\n'), ((77361, 77387), 'numpy.zeros', 'np.zeros', (['(n_positions, 2)'], {}), '((n_positions, 2))\n', (77369, 77387), True, 'import numpy as np\n'), ((77454, 77498), 'numpy.nditer', 'np.nditer', (['fullCircle'], {'flags': "['multi_index']"}), "(fullCircle, flags=['multi_index'])\n", (77463, 77498), True, 'import numpy as np\n'), ((78507, 78533), 'numpy.zeros', 'np.zeros', (['(n_positions, 2)'], {}), '((n_positions, 2))\n', (78515, 78533), True, 'import numpy as np\n'), ((81926, 81951), 'numpy.arange', 'np.arange', (['(1)', '(maxiter + 1)'], {}), '(1, maxiter + 1)\n', (81935, 81951), True, 'import numpy as np\n'), ((84095, 84119), 'numpy.sum', 'np.sum', (['realSpaceSupport'], {}), '(realSpaceSupport)\n', (84101, 84119), True, 'import numpy as np\n'), ((84161, 84198), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'np.double'}), '(image_size, dtype=np.double)\n', (84169, 84198), True, 'import numpy as np\n'), ((85036, 85076), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'np.complex64'}), '(image_size, dtype=np.complex64)\n', (85044, 85076), True, 'import numpy as np\n'), 
((85725, 85761), 'numpy.abs', 'np.abs', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (85731, 85761), True, 'import numpy as np\n'), ((86199, 86224), 'numpy.arange', 'np.arange', (['(1)', '(maxiter + 1)'], {}), '(1, maxiter + 1)\n', (86208, 86224), True, 'import numpy as np\n'), ((87483, 87519), 'numpy.abs', 'np.abs', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (87489, 87519), True, 'import numpy as np\n'), ((93148, 93182), 'plotly.offline.init_notebook_mode', 'init_notebook_mode', ([], {'connected': '(True)'}), '(connected=True)\n', (93166, 93182), False, 'from plotly.offline import init_notebook_mode, iplot\n'), ((96934, 96947), 'plotly.offline.iplot', 'iplot', (['figure'], {}), '(figure)\n', (96939, 96947), False, 'from plotly.offline import init_notebook_mode, iplot\n'), ((1930, 1943), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (1940, 1943), True, 'import numpy as np\n'), ((2158, 2171), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (2168, 2171), True, 'import numpy as np\n'), ((2190, 2203), 'numpy.abs', 'np.abs', (['x_fft'], {}), '(x_fft)\n', (2196, 2203), True, 'import numpy as np\n'), ((6100, 6120), 'llops.flip', 'yp.flip', (['blur_vector'], {}), '(blur_vector)\n', (6107, 6120), True, 'import llops as yp\n'), ((6703, 6742), 'llops.pad', 'yp.pad', (['blur_vector', 'shape'], {'center': '(True)'}), '(blur_vector, shape, center=True)\n', (6709, 6742), True, 'import llops as yp\n'), ((7372, 7387), 'llops.ndim', 'yp.ndim', (['kernel'], {}), '(kernel)\n', (7379, 7387), True, 'import llops as yp\n'), ((15366, 15421), 'llops.operators.Convolution', 'ops.Convolution', (['blurred.shape', 'static'], {'mode': '"""windowed"""'}), "(blurred.shape, static, mode='windowed')\n", (15381, 15421), True, 'import llops.operators as ops\n'), ((15588, 15613), 'numpy.ones', 'np.ones', (['y.shape', 'y.dtype'], {}), '(y.shape, y.dtype)\n', (15595, 15613), True, 'import numpy as np\n'), ((15666, 15705), 
'llops.solvers.objectivefunctions.L2', 'objectivefunctions.L2', (['A', 'y'], {'l2_reg': 'reg'}), '(A, y, l2_reg=reg)\n', (15687, 15705), False, 'from llops.solvers import iterative, objectivefunctions\n'), ((17997, 18024), 'numpy.argmax', 'np.argmax', (['kernel_recovered'], {}), '(kernel_recovered)\n', (18006, 18024), True, 'import numpy as np\n'), ((20142, 20169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (20152, 20169), True, 'import matplotlib.pyplot as plt\n'), ((20295, 20352), 'matplotlib.pyplot.imshow', 'plt.imshow', (['blur_kernel_list[i]'], {'interpolation': '"""bilinear"""'}), "(blur_kernel_list[i], interpolation='bilinear')\n", (20305, 20352), True, 'import matplotlib.pyplot as plt\n'), ((21527, 21567), 'numpy.arange', 'np.arange', (['(-sampling[0])', '(sampling[0] + 1)'], {}), '(-sampling[0], sampling[0] + 1)\n', (21536, 21567), True, 'import numpy as np\n'), ((21569, 21609), 'numpy.arange', 'np.arange', (['(-sampling[1])', '(sampling[1] + 1)'], {}), '(-sampling[1], sampling[1] + 1)\n', (21578, 21609), True, 'import numpy as np\n'), ((22036, 22059), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (22046, 22059), True, 'import numpy as np\n'), ((24481, 24497), 'llops.iFt', 'iFt', (['blur_kernel'], {}), '(blur_kernel)\n', (24484, 24497), False, 'from llops import iFt, Ft\n'), ((24588, 24610), 'numpy.fliplr', 'np.fliplr', (['blur_kernel'], {}), '(blur_kernel)\n', (24597, 24610), True, 'import numpy as np\n'), ((24618, 24637), 'numpy.sum', 'np.sum', (['blur_kernel'], {}), '(blur_kernel)\n', (24624, 24637), True, 'import numpy as np\n'), ((24666, 24685), 'numpy.sum', 'np.sum', (['blur_kernel'], {}), '(blur_kernel)\n', (24672, 24685), True, 'import numpy as np\n'), ((26923, 27010), 'numpy.zeros', 'np.zeros', (['blur_kernel[first_channel].shape'], {'dtype': 'blur_kernel[first_channel].dtype'}), '(blur_kernel[first_channel].shape, dtype=blur_kernel[first_channel]\n .dtype)\n', (26931, 
27010), True, 'import numpy as np\n'), ((30870, 30892), 'numpy.empty', 'np.empty', (['values_shape'], {}), '(values_shape)\n', (30878, 30892), True, 'import numpy as np\n'), ((33114, 33141), 'numpy.expand_dims', 'np.expand_dims', (['A_active', '(1)'], {}), '(A_active, 1)\n', (33128, 33141), True, 'import numpy as np\n'), ((35170, 35193), 'numpy.tan', 'np.tan', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (35176, 35193), True, 'import numpy as np\n'), ((35230, 35253), 'numpy.sin', 'np.sin', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (35236, 35253), True, 'import numpy as np\n'), ((35290, 35313), 'numpy.cos', 'np.cos', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (35296, 35313), True, 'import numpy as np\n'), ((37116, 37134), 'numpy.floor', 'np.floor', (['(beta * n)'], {}), '(beta * n)\n', (37124, 37134), True, 'import numpy as np\n'), ((37351, 37370), 'numpy.argsort', 'np.argsort', (['blurVec'], {}), '(blurVec)\n', (37361, 37370), True, 'import numpy as np\n'), ((38010, 38028), 'numpy.floor', 'np.floor', (['(N * beta)'], {}), '(N * beta)\n', (38018, 38028), True, 'import numpy as np\n'), ((38397, 38411), 'numpy.sqrt', 'np.sqrt', (['minPs'], {}), '(minPs)\n', (38404, 38411), True, 'import numpy as np\n'), ((39014, 39032), 'numpy.floor', 'np.floor', (['(N * beta)'], {}), '(N * beta)\n', (39022, 39032), True, 'import numpy as np\n'), ((40851, 40879), 'numpy.floor', 'np.floor', (['(colIdx / innerSize)'], {}), '(colIdx / innerSize)\n', (40859, 40879), True, 'import numpy as np\n'), ((40988, 41009), 'numpy.any', 'np.any', (['kernelSupport'], {}), '(kernelSupport)\n', (40994, 41009), True, 'import numpy as np\n'), ((43770, 43793), 'numpy.ndim', 'np.ndim', (['innerKernelMap'], {}), '(innerKernelMap)\n', (43777, 43793), True, 'import numpy as np\n'), ((43825, 43858), 'numpy.expand_dims', 'np.expand_dims', (['innerKernelMap', '(2)'], {}), '(innerKernelMap, 2)\n', (43839, 43858), True, 'import numpy as np\n'), ((43867, 43890), 'numpy.ndim', 'np.ndim', 
(['outerKernelMap'], {}), '(outerKernelMap)\n', (43874, 43890), True, 'import numpy as np\n'), ((43922, 43955), 'numpy.expand_dims', 'np.expand_dims', (['outerKernelMap', '(2)'], {}), '(outerKernelMap, 2)\n', (43936, 43955), True, 'import numpy as np\n'), ((45537, 45564), 'numpy.size', 'np.size', (['blur_kernel_map', '(2)'], {}), '(blur_kernel_map, 2)\n', (45544, 45564), True, 'import numpy as np\n'), ((46488, 46511), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (46498, 46511), True, 'import numpy as np\n'), ((46624, 46647), 'numpy.flip', 'np.flip', (['object_size', '(0)'], {}), '(object_size, 0)\n', (46631, 46647), True, 'import numpy as np\n'), ((46669, 46691), 'numpy.flip', 'np.flip', (['image_size', '(0)'], {}), '(image_size, 0)\n', (46676, 46691), True, 'import numpy as np\n'), ((50291, 50317), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (50298, 50317), True, 'import numpy as np\n'), ((51458, 51502), 'numpy.flip', 'np.flip', (['raster_point_list_segmented'], {'axis': '(2)'}), '(raster_point_list_segmented, axis=2)\n', (51465, 51502), True, 'import numpy as np\n'), ((53249, 53272), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (53259, 53272), True, 'import numpy as np\n'), ((53385, 53408), 'numpy.flip', 'np.flip', (['object_size', '(0)'], {}), '(object_size, 0)\n', (53392, 53408), True, 'import numpy as np\n'), ((53430, 53452), 'numpy.flip', 'np.flip', (['image_size', '(0)'], {}), '(image_size, 0)\n', (53437, 53452), True, 'import numpy as np\n'), ((58082, 58108), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (58089, 58108), True, 'import numpy as np\n'), ((59249, 59293), 'numpy.flip', 'np.flip', (['raster_point_list_segmented'], {'axis': '(2)'}), '(raster_point_list_segmented, axis=2)\n', (59256, 59293), True, 'import numpy as np\n'), ((60887, 60910), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (60897, 60910), 
True, 'import numpy as np\n'), ((61022, 61045), 'numpy.flip', 'np.flip', (['object_size', '(0)'], {}), '(object_size, 0)\n', (61029, 61045), True, 'import numpy as np\n'), ((61067, 61089), 'numpy.flip', 'np.flip', (['image_size', '(0)'], {}), '(image_size, 0)\n', (61074, 61089), True, 'import numpy as np\n'), ((62209, 62240), 'numpy.arange', 'np.arange', (['(0)', 'object_size[1]', '(1)'], {}), '(0, object_size[1], 1)\n', (62218, 62240), True, 'import numpy as np\n'), ((62933, 62959), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (62940, 62959), True, 'import numpy as np\n'), ((64100, 64144), 'numpy.flip', 'np.flip', (['raster_point_list_segmented'], {'axis': '(2)'}), '(raster_point_list_segmented, axis=2)\n', (64107, 64144), True, 'import numpy as np\n'), ((71791, 71817), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (71798, 71817), True, 'import numpy as np\n'), ((72869, 72889), 'numpy.asarray', 'np.asarray', (['[rr, cc]'], {}), '([rr, cc])\n', (72879, 72889), True, 'import numpy as np\n'), ((75351, 75390), 'numpy.abs', 'np.abs', (['(xx ** 2 + yy ** 2 - radius ** 2)'], {}), '(xx ** 2 + yy ** 2 - radius ** 2)\n', (75357, 75390), True, 'import numpy as np\n'), ((77299, 77338), 'numpy.abs', 'np.abs', (['(xx ** 2 + yy ** 2 - radius ** 2)'], {}), '(xx ** 2 + yy ** 2 - radius ** 2)\n', (77305, 77338), True, 'import numpy as np\n'), ((78403, 78488), 'numpy.round', 'np.round', (['(kernel_size[1] / 2 + slope * (point_seperation + 1) * (n_positions / 2))'], {}), '(kernel_size[1] / 2 + slope * (point_seperation + 1) * (n_positions /\n 2))\n', (78411, 78488), True, 'import numpy as np\n'), ((79315, 79359), 'math.floor', 'math.floor', (['(beta * kernel_length * led_count)'], {}), '(beta * kernel_length * led_count)\n', (79325, 79359), False, 'import math\n'), ((79377, 79412), 'numpy.zeros', 'np.zeros', (['(kernel_length * led_count)'], {}), '(kernel_length * led_count)\n', (79385, 79412), True, 'import numpy as 
np\n'), ((79432, 79468), 'numpy.arange', 'np.arange', (['(kernel_length * led_count)'], {}), '(kernel_length * led_count)\n', (79441, 79468), True, 'import numpy as np\n'), ((80696, 80719), 'numpy.zeros', 'np.zeros', (['object_size_0'], {}), '(object_size_0)\n', (80704, 80719), True, 'import numpy as np\n'), ((80879, 80894), 'llops.Ft', 'Ft', (['blur_kernel'], {}), '(blur_kernel)\n', (80881, 80894), False, 'from llops import iFt, Ft\n'), ((84511, 84538), 'numpy.sum', 'np.sum', (['fourierSpaceSupport'], {}), '(fourierSpaceSupport)\n', (84517, 84538), True, 'import numpy as np\n'), ((85140, 85157), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (85154, 85157), True, 'import numpy as np\n'), ((85424, 85461), 'numpy.real', 'np.real', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (85431, 85461), True, 'import numpy as np\n'), ((86035, 86071), 'numpy.abs', 'np.abs', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (86041, 86071), True, 'import numpy as np\n'), ((86486, 86504), 'llops.iFt', 'iFt', (['blurKernel_ft'], {}), '(blurKernel_ft)\n', (86489, 86504), False, 'from llops import iFt, Ft\n'), ((90770, 90788), 'numpy.arange', 'np.arange', (['maxiter'], {}), '(maxiter)\n', (90779, 90788), True, 'import numpy as np\n'), ((91932, 91949), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (91946, 91949), True, 'import numpy as np\n'), ((97582, 97599), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (97593, 97599), True, 'import numpy as np\n'), ((97786, 97822), 'llops.getNativeDatatype', 'yp.getNativeDatatype', (['dtype', '"""numpy"""'], {}), "(dtype, 'numpy')\n", (97806, 97822), True, 'import llops as yp\n'), ((98323, 98344), 'numpy.seterr', 'np.seterr', ([], {'all': '"""warn"""'}), "(all='warn')\n", (98332, 98344), True, 'import numpy as np\n'), ((99056, 99089), 'llops.matmul', 'yp.matmul', (['self.basis_y', 'weight_y'], {}), '(self.basis_y, weight_y)\n', (99065, 99089), True, 
'import llops as yp\n'), ((99104, 99137), 'llops.matmul', 'yp.matmul', (['self.basis_x', 'weight_x'], {}), '(self.basis_x, weight_x)\n', (99113, 99137), True, 'import llops as yp\n'), ((99286, 99342), 'llops.zeros', 'yp.zeros', (['self.M'], {'dtype': 'self.dtype', 'backend': 'self.backend'}), '(self.M, dtype=self.dtype, backend=self.backend)\n', (99294, 99342), True, 'import llops as yp\n'), ((100168, 100201), 'llops.matmul', 'yp.matmul', (['self.basis_y', 'weight_y'], {}), '(self.basis_y, weight_y)\n', (100177, 100201), True, 'import llops as yp\n'), ((100216, 100249), 'llops.matmul', 'yp.matmul', (['self.basis_x', 'weight_x'], {}), '(self.basis_x, weight_x)\n', (100225, 100249), True, 'import llops as yp\n'), ((100425, 100470), 'llops.zeros', 'yp.zeros', (['[self.ndim_y, self.M[0], self.M[1]]'], {}), '([self.ndim_y, self.M[0], self.M[1]])\n', (100433, 100470), True, 'import llops as yp\n'), ((100491, 100536), 'llops.zeros', 'yp.zeros', (['[self.ndim_x, self.M[0], self.M[1]]'], {}), '([self.ndim_x, self.M[0], self.M[1]])\n', (100499, 100536), True, 'import llops as yp\n'), ((100950, 100991), 'llops.operators.Sum', 'ops.Sum', (['self.M', 'self.dtype', 'self.backend'], {}), '(self.M, self.dtype, self.backend)\n', (100957, 100991), True, 'import llops.operators as ops\n'), ((101466, 101507), 'llops.operators.Vstack', 'ops.Vstack', (['(column_list_y + column_list_x)'], {}), '(column_list_y + column_list_x)\n', (101476, 101507), True, 'import llops.operators as ops\n'), ((101523, 101547), 'llops.operators._GradientOperator', 'ops._GradientOperator', (['G'], {}), '(G)\n', (101544, 101547), True, 'import llops.operators as ops\n'), ((101729, 101762), 'llops.matmul', 'yp.matmul', (['self.basis_y', 'weight_y'], {}), '(self.basis_y, weight_y)\n', (101738, 101762), True, 'import llops as yp\n'), ((101777, 101810), 'llops.matmul', 'yp.matmul', (['self.basis_x', 'weight_x'], {}), '(self.basis_x, weight_x)\n', (101786, 101810), True, 'import llops as yp\n'), ((101882, 101923), 
'llops.operators.Sum', 'ops.Sum', (['self.M', 'self.dtype', 'self.backend'], {}), '(self.M, self.dtype, self.backend)\n', (101889, 101923), True, 'import llops.operators as ops\n'), ((102920, 102961), 'llops.operators.Vstack', 'ops.Vstack', (['(column_list_y + column_list_x)'], {}), '(column_list_y + column_list_x)\n', (102930, 102961), True, 'import llops.operators as ops\n'), ((102977, 103001), 'llops.operators._GradientOperator', 'ops._GradientOperator', (['G'], {}), '(G)\n', (102998, 103001), True, 'import llops.operators as ops\n'), ((1962, 1975), 'numpy.abs', 'np.abs', (['x_fft'], {}), '(x_fft)\n', (1968, 1975), True, 'import numpy as np\n'), ((2219, 2234), 'numpy.max', 'np.max', (['sigma_x'], {}), '(sigma_x)\n', (2225, 2234), True, 'import numpy as np\n'), ((2237, 2252), 'numpy.min', 'np.min', (['sigma_x'], {}), '(sigma_x)\n', (2243, 2252), True, 'import numpy as np\n'), ((3015, 3083), 'numpy.random.choice', 'np.random.choice', (['kernel_length'], {'size': '(pulse_count - 2)', 'replace': '(False)'}), '(kernel_length, size=pulse_count - 2, replace=False)\n', (3031, 3083), True, 'import numpy as np\n'), ((3106, 3129), 'numpy.zeros', 'np.zeros', (['kernel_length'], {}), '(kernel_length)\n', (3114, 3129), True, 'import numpy as np\n'), ((5068, 5095), 'llops.ones', 'yp.ones', (['blur_kernel_length'], {}), '(blur_kernel_length)\n', (5075, 5095), True, 'import llops as yp\n'), ((7500, 7514), 'llops.sum', 'yp.sum', (['kernel'], {}), '(kernel)\n', (7506, 7514), True, 'import llops as yp\n'), ((12950, 12982), 'numpy.asarray', 'np.asarray', (['position_list[index]'], {}), '(position_list[index])\n', (12960, 12982), True, 'import numpy as np\n'), ((13838, 13884), 'llops.operators.PhaseRamp', 'ops.PhaseRamp', (['kernel_shape'], {'dtype': '"""complex32"""'}), "(kernel_shape, dtype='complex32')\n", (13851, 13884), True, 'import llops.operators as ops\n'), ((13947, 13960), 'llops.zeros', 'yp.zeros', (['R.M'], {}), '(R.M)\n', (13955, 13960), True, 'import llops as yp\n'), 
((14285, 14400), 'llops.asarray', 'yp.asarray', (["[illum[0]['value']['w'] for illum in illumination_list[frame_index]]"], {'dtype': 'dtype', 'backend': 'backend'}), "([illum[0]['value']['w'] for illum in illumination_list[\n frame_index]], dtype=dtype, backend=backend)\n", (14295, 14400), True, 'import llops as yp\n'), ((14508, 14527), 'llops.sum', 'yp.sum', (['blur_vector'], {}), '(blur_vector)\n', (14514, 14527), True, 'import llops as yp\n'), ((16544, 16569), 'numpy.real', 'np.real', (['kernel_recovered'], {}), '(kernel_recovered)\n', (16551, 16569), True, 'import numpy as np\n'), ((21422, 21444), 'numpy.asarray', 'np.asarray', (['image_size'], {}), '(image_size)\n', (21432, 21444), True, 'import numpy as np\n'), ((23046, 23083), 'numpy.flip', 'np.flip', (['object_support_mask[-1]', 'dim'], {}), '(object_support_mask[-1], dim)\n', (23053, 23083), True, 'import numpy as np\n'), ((30183, 30201), 'numpy.zeros', 'np.zeros', (['nknots1d'], {}), '(nknots1d)\n', (30191, 30201), True, 'import numpy as np\n'), ((30240, 30303), 'scipy.interpolate.splrep', 'sp.interpolate.splrep', (['knots1d', 'y_dummy'], {'k': 'degree', 'per': 'periodic'}), '(knots1d, y_dummy, k=degree, per=periodic)\n', (30261, 30303), True, 'import scipy as sp\n'), ((31031, 31053), 'numpy.empty', 'np.empty', (['(ndim, npts)'], {}), '((ndim, npts))\n', (31039, 31053), True, 'import numpy as np\n'), ((31426, 31456), 'numpy.product', 'np.product', (['values_dim'], {'axis': '(0)'}), '(values_dim, axis=0)\n', (31436, 31456), True, 'import numpy as np\n'), ((36680, 36795), 'math.sqrt', 'math.sqrt', (['((countsToE * exposureCountsPerUnit + readoutNoiseE) * exposureUnits + (\n darkCurrentE + patternNoiseE))'], {}), '((countsToE * exposureCountsPerUnit + readoutNoiseE) *\n exposureUnits + (darkCurrentE + patternNoiseE))\n', (36689, 36795), False, 'import math\n'), ((38031, 38050), 'numpy.mod', 'np.mod', (['(N * beta)', '(1)'], {}), '(N * beta, 1)\n', (38037, 38050), True, 'import numpy as np\n'), ((39035, 
39054), 'numpy.mod', 'np.mod', (['(N * beta)', '(1)'], {}), '(N * beta, 1)\n', (39041, 39054), True, 'import numpy as np\n'), ((44471, 44490), 'numpy.sum', 'np.sum', (['(support > 0)'], {}), '(support > 0)\n', (44477, 44490), True, 'import numpy as np\n'), ((45453, 45480), 'numpy.size', 'np.size', (['blur_kernel_map', '(0)'], {}), '(blur_kernel_map, 0)\n', (45460, 45480), True, 'import numpy as np\n'), ((45482, 45509), 'numpy.size', 'np.size', (['blur_kernel_map', '(1)'], {}), '(blur_kernel_map, 1)\n', (45489, 45509), True, 'import numpy as np\n'), ((49439, 49475), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 1.5))'], {}), '(image_size[0] * (row + 1.5))\n', (49446, 49475), True, 'import numpy as np\n'), ((49885, 49911), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (49892, 49911), True, 'import numpy as np\n'), ((57676, 57702), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (57683, 57702), True, 'import numpy as np\n'), ((59522, 59543), 'numpy.flipud', 'np.flipud', (['new_points'], {}), '(new_points)\n', (59531, 59543), True, 'import numpy as np\n'), ((61115, 61148), 'numpy.ceil', 'np.ceil', (['(object_size / image_size)'], {}), '(object_size / image_size)\n', (61122, 61148), True, 'import numpy as np\n'), ((62527, 62553), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (62534, 62553), True, 'import numpy as np\n'), ((68353, 68386), 'numpy.ceil', 'np.ceil', (['(object_size / image_size)'], {}), '(object_size / image_size)\n', (68360, 68386), True, 'import numpy as np\n'), ((70939, 70975), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 1.5))'], {}), '(image_size[0] * (row + 1.5))\n', (70946, 70975), True, 'import numpy as np\n'), ((71385, 71411), 'numpy.prod', 'np.prod', (['measurement_count'], {}), '(measurement_count)\n', (71392, 71411), True, 'import numpy as np\n'), ((73662, 73734), 'numpy.arange', 'np.arange', (['(0)', '((point_seperation + 1) * 
n_positions)', '(point_seperation + 1)'], {}), '(0, (point_seperation + 1) * n_positions, point_seperation + 1)\n', (73671, 73734), True, 'import numpy as np\n'), ((78579, 78604), 'numpy.arange', 'np.arange', (['(0)', 'n_positions'], {}), '(0, n_positions)\n', (78588, 78604), True, 'import numpy as np\n'), ((79664, 79695), 'numpy.delete', 'np.delete', (['indicies', 'rand_index'], {}), '(indicies, rand_index)\n', (79673, 79695), True, 'import numpy as np\n'), ((79836, 79850), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (79842, 79850), True, 'import numpy as np\n'), ((80102, 80120), 'numpy.fft.fft', 'np.fft.fft', (['kernel'], {}), '(kernel)\n', (80112, 80120), True, 'import numpy as np\n'), ((80138, 80153), 'numpy.max', 'np.max', (['spectra'], {}), '(spectra)\n', (80144, 80153), True, 'import numpy as np\n'), ((80919, 80938), 'numpy.abs', 'np.abs', (['blurKernelF'], {}), '(blurKernelF)\n', (80925, 80938), True, 'import numpy as np\n'), ((81525, 81536), 'numpy.empty', 'np.empty', (['p'], {}), '(p)\n', (81533, 81536), True, 'import numpy as np\n'), ((82419, 82432), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (82429, 82432), False, 'from operator import itemgetter\n'), ((84340, 84369), 'numpy.floor', 'np.floor', (['(throughputCoeff * p)'], {}), '(throughputCoeff * p)\n', (84348, 84369), True, 'import numpy as np\n'), ((84497, 84507), 'numpy.sum', 'np.sum', (['dc'], {}), '(dc)\n', (84503, 84507), True, 'import numpy as np\n'), ((86683, 86720), 'numpy.real', 'np.real', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (86690, 86720), True, 'import numpy as np\n'), ((87266, 87302), 'numpy.abs', 'np.abs', (['blurKernel[realSpaceSupport]'], {}), '(blurKernel[realSpaceSupport])\n', (87272, 87302), True, 'import numpy as np\n'), ((89818, 89835), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (89832, 89835), True, 'import numpy as np\n'), ((97467, 97502), 'numpy.arange', 'np.arange', (['(-N[1] / 2)', '(N[1] / 
2)', '(1.0)'], {}), '(-N[1] / 2, N[1] / 2, 1.0)\n', (97476, 97502), True, 'import numpy as np\n'), ((97522, 97557), 'numpy.arange', 'np.arange', (['(-N[0] / 2)', '(N[0] / 2)', '(1.0)'], {}), '(-N[0] / 2, N[0] / 2, 1.0)\n', (97531, 97557), True, 'import numpy as np\n'), ((98158, 98180), 'llops.shape', 'yp.shape', (['self.basis_y'], {}), '(self.basis_y)\n', (98166, 98180), True, 'import llops as yp\n'), ((98206, 98228), 'llops.shape', 'yp.shape', (['self.basis_x'], {}), '(self.basis_x)\n', (98214, 98228), True, 'import llops as yp\n'), ((99178, 99190), 'numpy.amax', 'np.amax', (['rys'], {}), '(rys)\n', (99185, 99190), True, 'import numpy as np\n'), ((99192, 99204), 'numpy.amax', 'np.amax', (['rxs'], {}), '(rxs)\n', (99199, 99204), True, 'import numpy as np\n'), ((100308, 100328), 'numpy.amax', 'np.amax', (['self.illums'], {}), '(self.illums)\n', (100315, 100328), True, 'import numpy as np\n'), ((3260, 3297), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'kernel_length'}), '(size=kernel_length)\n', (3277, 3297), True, 'import numpy as np\n'), ((5339, 5381), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'blur_kernel_length'}), '(size=blur_kernel_length)\n', (5356, 5381), True, 'import numpy as np\n'), ((6886, 6925), 'llops.pad', 'yp.pad', (['blur_vector', 'shape'], {'center': '(True)'}), '(blur_vector, shape, center=True)\n', (6892, 6925), True, 'import llops as yp\n'), ((15790, 15826), 'llops.solvers.iterative.GradientDescent', 'iterative.GradientDescent', (['objective'], {}), '(objective)\n', (15815, 15826), False, 'from llops.solvers import iterative, objectivefunctions\n'), ((16979, 17003), 'numpy.max', 'np.max', (['kernel_recovered'], {}), '(kernel_recovered)\n', (16985, 17003), True, 'import numpy as np\n'), ((17283, 17381), 'comptic.registration.registerImage', 'registerImage', (['dataset.frame_list[index - 1][roi.slice]', 'dataset.frame_list[index][roi.slice]'], {}), '(dataset.frame_list[index - 1][roi.slice], dataset.frame_list[\n 
index][roi.slice])\n', (17296, 17381), False, 'from comptic.registration import registerImage\n'), ((17579, 17650), 'comptic.registration.registerImage', 'registerImage', (['dataset.frame_list[index - 1]', 'dataset.frame_list[index]'], {}), '(dataset.frame_list[index - 1], dataset.frame_list[index])\n', (17592, 17650), False, 'from comptic.registration import registerImage\n'), ((18099, 18118), 'numpy.asarray', 'np.asarray', (['max_pos'], {}), '(max_pos)\n', (18109, 18118), True, 'import numpy as np\n'), ((18121, 18155), 'numpy.asarray', 'np.asarray', (['kernel_recovered.shape'], {}), '(kernel_recovered.shape)\n', (18131, 18155), True, 'import numpy as np\n'), ((21391, 21414), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (21401, 21414), True, 'import numpy as np\n'), ((24140, 24202), 'llops.operators.PhaseRamp', 'ops.PhaseRamp', (['blur_kernel.shape'], {'dtype': 'dtype', 'backend': 'backend'}), '(blur_kernel.shape, dtype=dtype, backend=backend)\n', (24153, 24202), True, 'import llops.operators as ops\n'), ((24330, 24367), 'llops.reshape', 'yp.reshape', (['(R * x_)', 'blur_kernel.shape'], {}), '(R * x_, blur_kernel.shape)\n', (24340, 24367), True, 'import llops as yp\n'), ((31263, 31350), 'scipy.interpolate.splev', 'sp.interpolate.splev', (['position[idim]', '(self.splines[idim][0], coeffs, self.degree)'], {}), '(position[idim], (self.splines[idim][0], coeffs, self.\n degree))\n', (31283, 31350), True, 'import scipy as sp\n'), ((39421, 39435), 'numpy.sqrt', 'np.sqrt', (['minPs'], {}), '(minPs)\n', (39428, 39435), True, 'import numpy as np\n'), ((41075, 41095), 'numpy.abs', 'np.abs', (['kernelMapCol'], {}), '(kernelMapCol)\n', (41081, 41095), True, 'import numpy as np\n'), ((43702, 43728), 'numpy.size', 'np.size', (['innerKernelMap', '(0)'], {}), '(innerKernelMap, 0)\n', (43709, 43728), True, 'import numpy as np\n'), ((43730, 43756), 'numpy.size', 'np.size', (['innerKernelMap', '(1)'], {}), '(innerKernelMap, 1)\n', (43737, 43756), True, 
'import numpy as np\n'), ((48782, 48828), 'numpy.arange', 'np.arange', (['(0)', 'object_size[1]', 'move_direction_x'], {}), '(0, object_size[1], move_direction_x)\n', (48791, 48828), True, 'import numpy as np\n'), ((48881, 48944), 'numpy.arange', 'np.arange', (['(0)', 'raster_segments[2 * row + 1, 1]', 'move_direction_x'], {}), '(0, raster_segments[2 * row + 1, 1], move_direction_x)\n', (48890, 48944), True, 'import numpy as np\n'), ((49024, 49096), 'numpy.arange', 'np.arange', (['raster_segments[2 * row, 1]', 'object_size[1]', 'move_direction_x'], {}), '(raster_segments[2 * row, 1], object_size[1], move_direction_x)\n', (49033, 49096), True, 'import numpy as np\n'), ((49143, 49236), 'numpy.arange', 'np.arange', (['raster_segments[2 * row, 1]', 'raster_segments[2 * row + 1, 1]', 'move_direction_x'], {}), '(raster_segments[2 * row, 1], raster_segments[2 * row + 1, 1],\n move_direction_x)\n', (49152, 49236), True, 'import numpy as np\n'), ((51665, 51693), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (51672, 51693), True, 'import numpy as np\n'), ((51825, 51853), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5)'], {}), '(image_size[0] * 0.5)\n', (51832, 51853), True, 'import numpy as np\n'), ((54317, 54345), 'numpy.arange', 'np.arange', (['(0)', 'object_size[1]'], {}), '(0, object_size[1])\n', (54326, 54345), True, 'import numpy as np\n'), ((54622, 54666), 'numpy.arange', 'np.arange', (['(0)', '(object_size[1] - image_size[1])'], {}), '(0, object_size[1] - image_size[1])\n', (54631, 54666), True, 'import numpy as np\n'), ((66019, 66044), 'numpy.asarray', 'np.asarray', (['line[:middle]'], {}), '(line[:middle])\n', (66029, 66044), True, 'import numpy as np\n'), ((66086, 66113), 'numpy.asarray', 'np.asarray', (['line[middle:-1]'], {}), '(line[middle:-1])\n', (66096, 66113), True, 'import numpy as np\n'), ((66169, 66190), 'numpy.asarray', 'np.asarray', (['line[:-1]'], {}), '(line[:-1])\n', (66179, 66190), True, 'import numpy as 
np\n'), ((70282, 70328), 'numpy.arange', 'np.arange', (['(0)', 'object_size[1]', 'move_direction_x'], {}), '(0, object_size[1], move_direction_x)\n', (70291, 70328), True, 'import numpy as np\n'), ((70381, 70444), 'numpy.arange', 'np.arange', (['(0)', 'raster_segments[2 * row + 1, 1]', 'move_direction_x'], {}), '(0, raster_segments[2 * row + 1, 1], move_direction_x)\n', (70390, 70444), True, 'import numpy as np\n'), ((70524, 70596), 'numpy.arange', 'np.arange', (['raster_segments[2 * row, 1]', 'object_size[1]', 'move_direction_x'], {}), '(raster_segments[2 * row, 1], object_size[1], move_direction_x)\n', (70533, 70596), True, 'import numpy as np\n'), ((70643, 70736), 'numpy.arange', 'np.arange', (['raster_segments[2 * row, 1]', 'raster_segments[2 * row + 1, 1]', 'move_direction_x'], {}), '(raster_segments[2 * row, 1], raster_segments[2 * row + 1, 1],\n move_direction_x)\n', (70652, 70736), True, 'import numpy as np\n'), ((79744, 79761), 'numpy.size', 'np.size', (['indicies'], {}), '(indicies)\n', (79751, 79761), True, 'import numpy as np\n'), ((79901, 79915), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (79907, 79915), True, 'import numpy as np\n'), ((80160, 80175), 'numpy.min', 'np.min', (['spectra'], {}), '(spectra)\n', (80166, 80175), True, 'import numpy as np\n'), ((83832, 83863), 'numpy.fft.fftshift', 'np.fft.fftshift', (['x'], {'axes': '(0, 1)'}), '(x, axes=(0, 1))\n', (83847, 83863), True, 'import numpy as np\n'), ((83960, 83991), 'numpy.fft.fftshift', 'np.fft.fftshift', (['x'], {'axes': '(0, 1)'}), '(x, axes=(0, 1))\n', (83975, 83991), True, 'import numpy as np\n'), ((84372, 84410), 'numpy.remainder', 'np.remainder', (['(throughputCoeff * p)', '(1.0)'], {}), '(throughputCoeff * p, 1.0)\n', (84384, 84410), True, 'import numpy as np\n'), ((84945, 84956), 'numpy.empty', 'np.empty', (['p'], {}), '(p)\n', (84953, 84956), True, 'import numpy as np\n'), ((90544, 90561), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (90558, 90561), True, 
'import numpy as np\n'), ((92794, 92811), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (92808, 92811), True, 'import numpy as np\n'), ((99722, 99737), 'llops.scalar', 'yp.scalar', (['r[0]'], {}), '(r[0])\n', (99731, 99737), True, 'import llops as yp\n'), ((99750, 99765), 'llops.scalar', 'yp.scalar', (['r[1]'], {}), '(r[1])\n', (99759, 99765), True, 'import llops as yp\n'), ((100272, 100283), 'numpy.abs', 'np.abs', (['rys'], {}), '(rys)\n', (100278, 100283), True, 'import numpy as np\n'), ((100294, 100305), 'numpy.abs', 'np.abs', (['rxs'], {}), '(rxs)\n', (100300, 100305), True, 'import numpy as np\n'), ((101833, 101844), 'numpy.abs', 'np.abs', (['rys'], {}), '(rys)\n', (101839, 101844), True, 'import numpy as np\n'), ((101855, 101866), 'numpy.abs', 'np.abs', (['rxs'], {}), '(rxs)\n', (101861, 101866), True, 'import numpy as np\n'), ((6383, 6401), 'llops.Ft', 'yp.Ft', (['blur_vector'], {}), '(blur_vector)\n', (6388, 6401), True, 'import llops as yp\n'), ((7083, 7122), 'llops.pad', 'yp.pad', (['blur_vector', 'shape'], {'center': '(True)'}), '(blur_vector, shape, center=True)\n', (7089, 7122), True, 'import llops as yp\n'), ((7185, 7230), 'llops.pad', 'yp.pad', (['blur_vector', 'shape'], {'crop_start': '(0, 0)'}), '(blur_vector, shape, crop_start=(0, 0))\n', (7191, 7230), True, 'import llops as yp\n'), ((13011, 13031), 'llops.shape', 'yp.shape', (['_positions'], {}), '(_positions)\n', (13019, 13031), True, 'import llops as yp\n'), ((13147, 13172), 'llops.min', 'yp.min', (['_positions[:, ax]'], {}), '(_positions[:, ax])\n', (13153, 13172), True, 'import llops as yp\n'), ((14217, 14236), 'llops.iFt', 'yp.iFt', (['blur_vector'], {}), '(blur_vector)\n', (14223, 14236), True, 'import llops as yp\n'), ((16451, 16462), 'llops.Ft', 'Ft', (['blurred'], {}), '(blurred)\n', (16453, 16462), False, 'from llops import iFt, Ft\n'), ((22500, 22539), 'numpy.ones', 'np.ones', (['W.shape[1]'], {'dtype': 'np.complex64'}), '(W.shape[1], dtype=np.complex64)\n', (22507, 
22539), True, 'import numpy as np\n'), ((24234, 24296), 'numpy.asarray', 'np.asarray', (['(y - object_size[0] // 2, x - object_size[1] // 2)'], {}), '((y - object_size[0] // 2, x - object_size[1] // 2))\n', (24244, 24296), True, 'import numpy as np\n'), ((28406, 28426), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28416, 28426), True, 'import numpy as np\n'), ((28474, 28494), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28484, 28494), True, 'import numpy as np\n'), ((28563, 28583), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28573, 28583), True, 'import numpy as np\n'), ((42413, 42435), 'numpy.abs', 'np.abs', (['outerKernelMap'], {}), '(outerKernelMap)\n', (42419, 42435), True, 'import numpy as np\n'), ((42454, 42476), 'numpy.abs', 'np.abs', (['outerKernelMap'], {}), '(outerKernelMap)\n', (42460, 42476), True, 'import numpy as np\n'), ((46725, 46748), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (46735, 46748), True, 'import numpy as np\n'), ((46751, 46773), 'numpy.asarray', 'np.asarray', (['image_size'], {}), '(image_size)\n', (46761, 46773), True, 'import numpy as np\n'), ((50539, 50578), 'numpy.ceil', 'np.ceil', (['(raster_point_list.shape[0] / 2)'], {}), '(raster_point_list.shape[0] / 2)\n', (50546, 50578), True, 'import numpy as np\n'), ((53486, 53509), 'numpy.asarray', 'np.asarray', (['object_size'], {}), '(object_size)\n', (53496, 53509), True, 'import numpy as np\n'), ((53512, 53534), 'numpy.asarray', 'np.asarray', (['image_size'], {}), '(image_size)\n', (53522, 53534), True, 'import numpy as np\n'), ((55474, 55522), 'numpy.arange', 'np.arange', (['(0)', '(object_size[1] - image_size[1])', '(-1)'], {}), '(0, object_size[1] - image_size[1], -1)\n', (55483, 55522), True, 'import numpy as np\n'), ((55824, 55884), 'numpy.arange', 'np.arange', (['image_size[1]', '(object_size[1] - image_size[1])', '(-1)'], {}), '(image_size[1], object_size[1] - image_size[1], 
-1)\n', (55833, 55884), True, 'import numpy as np\n'), ((56616, 56656), 'numpy.arange', 'np.arange', (['image_size[1]', 'object_size[1]'], {}), '(image_size[1], object_size[1])\n', (56625, 56656), True, 'import numpy as np\n'), ((56958, 57014), 'numpy.arange', 'np.arange', (['image_size[1]', '(object_size[1] - image_size[1])'], {}), '(image_size[1], object_size[1] - image_size[1])\n', (56967, 57014), True, 'import numpy as np\n'), ((58330, 58369), 'numpy.ceil', 'np.ceil', (['(raster_point_list.shape[0] / 2)'], {}), '(raster_point_list.shape[0] / 2)\n', (58337, 58369), True, 'import numpy as np\n'), ((61796, 61832), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (61803, 61832), True, 'import numpy as np\n'), ((61887, 61915), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (61894, 61915), True, 'import numpy as np\n'), ((61974, 62010), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (61981, 62010), True, 'import numpy as np\n'), ((62069, 62114), 'numpy.ceil', 'np.ceil', (['(object_size[1] - image_size[1] * 0.5)'], {}), '(object_size[1] - image_size[1] * 0.5)\n', (62076, 62114), True, 'import numpy as np\n'), ((63181, 63220), 'numpy.ceil', 'np.ceil', (['(raster_point_list.shape[0] / 2)'], {}), '(raster_point_list.shape[0] / 2)\n', (63188, 63220), True, 'import numpy as np\n'), ((72162, 72196), 'numpy.flip', 'np.flip', (['raster_point_list'], {'axis': '(0)'}), '(raster_point_list, axis=0)\n', (72169, 72196), True, 'import numpy as np\n'), ((73979, 74008), 'numpy.ceil', 'np.ceil', (['(kernel_size[0] * 0.5)'], {}), '(kernel_size[0] * 0.5)\n', (73986, 74008), True, 'import numpy as np\n'), ((75020, 75045), 'numpy.mod', 'np.mod', (['kernel_size[1]', '(2)'], {}), '(kernel_size[1], 2)\n', (75026, 75045), True, 'import numpy as np\n'), ((75256, 75281), 'numpy.mod', 'np.mod', (['kernel_size[0]', '(2)'], {}), '(kernel_size[0], 2)\n', 
(75262, 75281), True, 'import numpy as np\n'), ((84206, 84232), 'numpy.ceil', 'np.ceil', (['(image_size[0] / 2)'], {}), '(image_size[0] / 2)\n', (84213, 84232), True, 'import numpy as np\n'), ((84249, 84275), 'numpy.ceil', 'np.ceil', (['(image_size[1] / 2)'], {}), '(image_size[1] / 2)\n', (84256, 84275), True, 'import numpy as np\n'), ((84656, 84677), 'numpy.sum', 'np.sum', (['powerSpectrum'], {}), '(powerSpectrum)\n', (84662, 84677), True, 'import numpy as np\n'), ((85596, 85610), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (85598, 85610), False, 'from llops import iFt, Ft\n'), ((85661, 85675), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (85663, 85675), False, 'from llops import iFt, Ft\n'), ((86303, 86325), 'numpy.sqrt', 'np.sqrt', (['powerSpectrum'], {}), '(powerSpectrum)\n', (86310, 86325), True, 'import numpy as np\n'), ((87359, 87373), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (87361, 87373), False, 'from llops import iFt, Ft\n'), ((87424, 87438), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (87426, 87438), False, 'from llops import iFt, Ft\n'), ((99869, 99882), 'llops.cos', 'yp.cos', (['inner'], {}), '(inner)\n', (99875, 99882), True, 'import llops as yp\n'), ((2024, 2039), 'numpy.max', 'np.max', (['sigma_x'], {}), '(sigma_x)\n', (2030, 2039), True, 'import numpy as np\n'), ((14093, 14118), 'llops.cast', 'yp.cast', (['pos', '"""complex32"""'], {}), "(pos, 'complex32')\n", (14100, 14118), True, 'import llops as yp\n'), ((16437, 16447), 'llops.Ft', 'Ft', (['static'], {}), '(static)\n', (16439, 16447), False, 'from llops import iFt, Ft\n'), ((28129, 28149), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28139, 28149), True, 'import numpy as np\n'), ((28200, 28220), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28210, 28220), True, 'import numpy as np\n'), ((28292, 28312), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (28302, 28312), True, 
'import numpy as np\n'), ((34719, 34738), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (34729, 34738), True, 'import numpy as np\n'), ((34924, 34949), 'numpy.asarray', 'np.asarray', (['object_update'], {}), '(object_update)\n', (34934, 34949), True, 'import numpy as np\n'), ((42332, 42354), 'numpy.abs', 'np.abs', (['innerKernelMap'], {}), '(innerKernelMap)\n', (42338, 42354), True, 'import numpy as np\n'), ((47525, 47561), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (47532, 47561), True, 'import numpy as np\n'), ((47620, 47648), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (47627, 47648), True, 'import numpy as np\n'), ((47711, 47747), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (47718, 47747), True, 'import numpy as np\n'), ((47810, 47855), 'numpy.ceil', 'np.ceil', (['(object_size[1] - image_size[1] * 0.5)'], {}), '(object_size[1] - image_size[1] * 0.5)\n', (47817, 47855), True, 'import numpy as np\n'), ((47964, 48000), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (47971, 48000), True, 'import numpy as np\n'), ((48059, 48104), 'numpy.ceil', 'np.ceil', (['(object_size[1] - image_size[1] * 0.5)'], {}), '(object_size[1] - image_size[1] * 0.5)\n', (48066, 48104), True, 'import numpy as np\n'), ((48167, 48203), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (48174, 48203), True, 'import numpy as np\n'), ((48275, 48303), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (48282, 48303), True, 'import numpy as np\n'), ((49534, 49570), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (49541, 49570), True, 'import numpy as np\n'), ((49572, 49608), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 1.5))'], 
{}), '(image_size[0] * (row + 1.5))\n', (49579, 49608), True, 'import numpy as np\n'), ((51343, 51377), 'numpy.flip', 'np.flip', (['raster_point_list'], {'axis': '(0)'}), '(raster_point_list, axis=0)\n', (51350, 51377), True, 'import numpy as np\n'), ((51738, 51766), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5)'], {}), '(image_size[0] * 0.5)\n', (51745, 51766), True, 'import numpy as np\n'), ((51925, 51953), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (51932, 51953), True, 'import numpy as np\n'), ((54375, 54403), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5)'], {}), '(image_size[0] * 0.5)\n', (54382, 54403), True, 'import numpy as np\n'), ((54696, 54724), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5)'], {}), '(image_size[0] * 0.5)\n', (54703, 54724), True, 'import numpy as np\n'), ((59134, 59168), 'numpy.flip', 'np.flip', (['raster_point_list'], {'axis': '(0)'}), '(raster_point_list, axis=0)\n', (59141, 59168), True, 'import numpy as np\n'), ((63985, 64019), 'numpy.flip', 'np.flip', (['raster_point_list'], {'axis': '(0)'}), '(raster_point_list, axis=0)\n', (63992, 64019), True, 'import numpy as np\n'), ((69025, 69061), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (69032, 69061), True, 'import numpy as np\n'), ((69120, 69148), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (69127, 69148), True, 'import numpy as np\n'), ((69211, 69247), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (69218, 69247), True, 'import numpy as np\n'), ((69310, 69355), 'numpy.ceil', 'np.ceil', (['(object_size[1] - image_size[1] * 0.5)'], {}), '(object_size[1] - image_size[1] * 0.5)\n', (69317, 69355), True, 'import numpy as np\n'), ((69464, 69500), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (69471, 69500), True, 'import numpy as 
np\n'), ((69559, 69604), 'numpy.ceil', 'np.ceil', (['(object_size[1] - image_size[1] * 0.5)'], {}), '(object_size[1] - image_size[1] * 0.5)\n', (69566, 69604), True, 'import numpy as np\n'), ((69667, 69703), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (69674, 69703), True, 'import numpy as np\n'), ((69775, 69803), 'numpy.ceil', 'np.ceil', (['(image_size[1] * 0.5)'], {}), '(image_size[1] * 0.5)\n', (69782, 69803), True, 'import numpy as np\n'), ((71034, 71070), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 0.5))'], {}), '(image_size[0] * (row + 0.5))\n', (71041, 71070), True, 'import numpy as np\n'), ((71072, 71108), 'numpy.ceil', 'np.ceil', (['(image_size[0] * (row + 1.5))'], {}), '(image_size[0] * (row + 1.5))\n', (71079, 71108), True, 'import numpy as np\n'), ((74087, 74116), 'numpy.ceil', 'np.ceil', (['(kernel_size[1] * 0.5)'], {}), '(kernel_size[1] * 0.5)\n', (74094, 74116), True, 'import numpy as np\n'), ((74866, 74891), 'numpy.mod', 'np.mod', (['kernel_size[1]', '(2)'], {}), '(kernel_size[1], 2)\n', (74872, 74891), True, 'import numpy as np\n'), ((74986, 75011), 'numpy.mod', 'np.mod', (['kernel_size[1]', '(2)'], {}), '(kernel_size[1], 2)\n', (74992, 75011), True, 'import numpy as np\n'), ((75102, 75127), 'numpy.mod', 'np.mod', (['kernel_size[0]', '(2)'], {}), '(kernel_size[0], 2)\n', (75108, 75127), True, 'import numpy as np\n'), ((75222, 75247), 'numpy.mod', 'np.mod', (['kernel_size[0]', '(2)'], {}), '(kernel_size[0], 2)\n', (75228, 75247), True, 'import numpy as np\n'), ((79564, 79581), 'numpy.size', 'np.size', (['indicies'], {}), '(indicies)\n', (79571, 79581), True, 'import numpy as np\n'), ((85892, 85906), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (85894, 85906), False, 'from llops import iFt, Ft\n'), ((85961, 85975), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (85963, 85975), False, 'from llops import iFt, Ft\n'), ((86406, 86420), 'llops.Ft', 'Ft', 
(['blurKernel'], {}), '(blurKernel)\n', (86408, 86420), False, 'from llops import iFt, Ft\n'), ((99890, 99903), 'llops.sin', 'yp.sin', (['inner'], {}), '(inner)\n', (99896, 99903), True, 'import llops as yp\n'), ((13095, 13120), 'llops.min', 'yp.min', (['_positions[:, ax]'], {}), '(_positions[:, ax])\n', (13101, 13120), True, 'import llops as yp\n'), ((16474, 16484), 'llops.Ft', 'Ft', (['static'], {}), '(static)\n', (16476, 16484), False, 'from llops import iFt, Ft\n'), ((42376, 42398), 'numpy.abs', 'np.abs', (['innerKernelMap'], {}), '(innerKernelMap)\n', (42382, 42398), True, 'import numpy as np\n'), ((50825, 50874), 'numpy.flip', 'np.flip', (['raster_point_list[0:midpoint, :]'], {'axis': '(0)'}), '(raster_point_list[0:midpoint, :], axis=0)\n', (50832, 50874), True, 'import numpy as np\n'), ((50991, 51039), 'numpy.flip', 'np.flip', (['raster_point_list[midpoint:, :]'], {'axis': '(0)'}), '(raster_point_list[midpoint:, :], axis=0)\n', (50998, 51039), True, 'import numpy as np\n'), ((55552, 55602), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5 + row * image_size[0])'], {}), '(image_size[0] * 0.5 + row * image_size[0])\n', (55559, 55602), True, 'import numpy as np\n'), ((55914, 55964), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5 + row * image_size[0])'], {}), '(image_size[0] * 0.5 + row * image_size[0])\n', (55921, 55964), True, 'import numpy as np\n'), ((56686, 56736), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5 + row * image_size[0])'], {}), '(image_size[0] * 0.5 + row * image_size[0])\n', (56693, 56736), True, 'import numpy as np\n'), ((57044, 57094), 'numpy.ceil', 'np.ceil', (['(image_size[0] * 0.5 + row * image_size[0])'], {}), '(image_size[0] * 0.5 + row * image_size[0])\n', (57051, 57094), True, 'import numpy as np\n'), ((58616, 58665), 'numpy.flip', 'np.flip', (['raster_point_list[0:midpoint, :]'], {'axis': '(0)'}), '(raster_point_list[0:midpoint, :], axis=0)\n', (58623, 58665), True, 'import numpy as np\n'), ((58782, 58830), 'numpy.flip', 
'np.flip', (['raster_point_list[midpoint:, :]'], {'axis': '(0)'}), '(raster_point_list[midpoint:, :], axis=0)\n', (58789, 58830), True, 'import numpy as np\n'), ((63467, 63516), 'numpy.flip', 'np.flip', (['raster_point_list[0:midpoint, :]'], {'axis': '(0)'}), '(raster_point_list[0:midpoint, :], axis=0)\n', (63474, 63516), True, 'import numpy as np\n'), ((63633, 63681), 'numpy.flip', 'np.flip', (['raster_point_list[midpoint:, :]'], {'axis': '(0)'}), '(raster_point_list[midpoint:, :], axis=0)\n', (63640, 63681), True, 'import numpy as np\n'), ((87131, 87145), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (87133, 87145), False, 'from llops import iFt, Ft\n'), ((87186, 87200), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (87188, 87200), False, 'from llops import iFt, Ft\n'), ((18941, 18972), 'numpy.asarray', 'np.asarray', (['kernel_zeroed.shape'], {}), '(kernel_zeroed.shape)\n', (18951, 18972), True, 'import numpy as np\n'), ((101292, 101313), 'llops.conj', 'yp.conj', (['sum_exp_y[i]'], {}), '(sum_exp_y[i])\n', (101299, 101313), True, 'import llops as yp\n'), ((101410, 101431), 'llops.conj', 'yp.conj', (['sum_exp_x[i]'], {}), '(sum_exp_x[i])\n', (101417, 101431), True, 'import llops as yp\n'), ((102771, 102789), 'llops.conj', 'yp.conj', (['sum_exp_y'], {}), '(sum_exp_y)\n', (102778, 102789), True, 'import llops as yp\n'), ((102886, 102904), 'llops.conj', 'yp.conj', (['sum_exp_x'], {}), '(sum_exp_x)\n', (102893, 102904), True, 'import llops as yp\n'), ((18908, 18921), 'numpy.mean', 'np.mean', (['p[0]'], {}), '(p[0])\n', (18915, 18921), True, 'import numpy as np\n'), ((18923, 18936), 'numpy.mean', 'np.mean', (['p[1]'], {}), '(p[1])\n', (18930, 18936), True, 'import numpy as np\n'), ((86846, 86860), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (86848, 86860), False, 'from llops import iFt, Ft\n'), ((86917, 86931), 'llops.Ft', 'Ft', (['blurKernel'], {}), '(blurKernel)\n', (86919, 86931), False, 'from llops import iFt, Ft\n')] |
"""
Module used to train the noise remover model,save it to HDF5 format, and later use it.
"""
import glob
import PIL
import cv2
import numpy as np
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.activations import relu, sigmoid
import config_tf
import consts
from base_model import ModelNotLoadedError, BaseTFModel, singleton, ModelNotBuiltError
@singleton
class ImageLoader:
    """Singleton that loads and caches the training/validation image sets."""
    def __init__(self):
        # Caches; None until load_training_validation() is called.
        self._X_train: np.ndarray = None
        self._X_valid: np.ndarray = None
    @staticmethod
    def _load_images(path: str) -> np.ndarray:
        """
        Load images from computer, resize them, convert to grayscale
        and reshape them to fit into numpy arrays.
        Args:
            path (str): The path to a folder containing the images to be loaded.
        Returns:
            np.ndarray: array containing the images.
        """
        X_imgs = []
        for image_path in glob.glob(path + '\\*'):
            try:
                # convert to grayscale and resize to predefined image size
                img = PIL.Image.open(image_path).convert('L').resize(consts.IMAGE_SIZE)
            except PIL.UnidentifiedImageError:
                # skip files that are not readable images
                continue
            img_arr = np.asarray(img).astype(np.float32)  # convert to numpy array
            img_arr = img_arr / 255  # change scale to be between 0.0 and 1.0
            X_imgs.append(img_arr)
        return np.array(X_imgs)
    def load_training_validation(self):
        """
        Load training and validation sets from computer, and transform them
        to fit the model specifications.
        """
        # BUGFIX: the original `if self._X_train and self._X_valid:` raises
        # ValueError ("truth value of an array with more than one element is
        # ambiguous") once the caches hold numpy arrays; test against None.
        if self._X_train is not None and self._X_valid is not None:
            # images are already loaded
            return
        # load training images
        self._X_train = self._load_images(consts.TRAIN_NON_CATEGORICAL_PATH)
        print(f'Finished loading {len(self._X_train)} training images.')
        # load validation images
        self._X_valid = self._load_images(consts.VALIDATION_NON_CATEGORICAL_PATH)
        print(f'Finished loading {len(self._X_valid)} validation images.')
    @property
    def X_train(self):
        return self._X_train
    @property
    def X_valid(self):
        return self._X_valid
class DenoisingAutoencoder(BaseTFModel):
    """
    Convolutional denoising autoencoder: builds, trains, persists and
    applies a model that removes Gaussian noise from grayscale images.
    """
    LR = 1e-3
    EPOCHS = 2
    BATCH_SIZE = 128
    MODEL_NAME = 'noise_remover.h5'
    def __init__(self):
        super().__init__()
        self._image_loader: ImageLoader = ImageLoader()
    @staticmethod
    def _add_gaussian_noise(X_imgs: np.ndarray) -> np.ndarray:
        """
        Apply Gaussian noise to images.
        Args:
            X_imgs (np.ndarray): The images (provided as np arrays of the shape
            defined in consts) for which to apply gaussian noise algorithm.
        Returns:
            np.ndarray: Images after adding noise with added grayscale color channel.
        """
        width, height = consts.IMAGE_SIZE
        noisy_batch = []
        for clean_img in X_imgs:
            noise = np.random.random((width, height, 1)).astype(np.float32)
            # blend: 0.75 * image + 0.25 * (0.25 * noise)
            blended = cv2.addWeighted(clean_img, 0.75, 0.25 * noise, 0.25, 0)
            # add grayscale color channel
            noisy_batch.append(np.expand_dims(blended, axis=-1))
        return np.array(noisy_batch, dtype=np.float32)
    def build_model(self) -> None:
        """
        Build and compile a model which acts as an autoencoder that removes
        image noise. First, we decrease the number of features of the
        image, to try and capture the most important parts of the image,
        and then we scale the image back to it's original size.
        """
        def conv(n_filters, activation=relu):
            # 3x3 same-padding convolution used throughout the network
            return Conv2D(filters=n_filters, kernel_size=(3, 3),
                          activation=activation, padding='same')
        def pool():
            # 2x2 downsampling step
            return MaxPooling2D(pool_size=(2, 2), padding='same')
        # encoder - reducing image features
        feature_extractor = Sequential([
            InputLayer(input_shape=consts.IMAGE_SIZE + (1,)),
            conv(256),
            conv(128),
            pool(),
            conv(64),
            conv(32),
            pool(),
            conv(32),
            pool(),
        ])
        # decoder - upscaling back to original size and trying to recreate
        # the original image, without the noise
        reconstructor = Sequential([
            InputLayer(input_shape=(consts.IMAGE_SIZE[0] // 8, consts.IMAGE_SIZE[1] // 8, 32)),
            conv(32),
            UpSampling2D((2, 2)),
            conv(64),
            conv(128),
            UpSampling2D((2, 2)),
            conv(256),
            UpSampling2D((2, 2)),
            conv(1, activation=sigmoid),
        ])
        # chain encoder and decoder into the full autoencoder, then compile
        # it to minimize binary crossentropy loss
        autoencoder = Sequential([feature_extractor, reconstructor])
        autoencoder.compile(loss=binary_crossentropy,
                            optimizer=Adam(learning_rate=self.LR),
                            metrics=['accuracy'])
        self._model = autoencoder
        self._model_built = True
    def train_model(self) -> None:
        """
        Train the autoencoder with the train and validation sets of images
        and log training progress to disk.
        Raises:
            ModelNotBuiltError: when trying to train an un built model.
        """
        if not self._model_built:
            raise ModelNotBuiltError('The model has to be built before training it.')
        # fetch (and if needed load) the training and validation data
        self._image_loader.load_training_validation()
        clean_train = self._image_loader.X_train
        clean_valid = self._image_loader.X_valid
        # corrupt the inputs; the clean images serve as training targets
        noisy_train = self._add_gaussian_noise(clean_train)
        noisy_valid = self._add_gaussian_noise(clean_valid)
        self._model.fit(noisy_train, clean_train,
                        epochs=self.EPOCHS,
                        verbose=1,
                        batch_size=self.BATCH_SIZE,
                        validation_data=(noisy_valid, clean_valid))
    def save_model(self) -> None:
        """Save the model to disk as a HDF5 file."""
        self._model.save(self.MODEL_NAME)
    def load_model(self) -> None:
        """Load the model from disk into memory."""
        self._model = load_model(self.MODEL_NAME)
        self._model_loaded = True
    def denoise_image(self, img: np.array) -> np.array:
        """
        Use the denoising autoencoder in order to remove noise from the image.
        Args:
            img (np.array): The image to denoise (image shape needs to be the
            size defined in consts, yet the image does not have to include a
            grayscale color channel).
        Returns:
            np.array: The denoised image as np array of shape `(*consts.IMAGE_SIZE, 1)`.
        Raises:
            ModelNotLoadedError: If the autoencoder was not loaded before
            trying to denoise image.
            ValueError: In case the specified image has incorrect shape.
        """
        if not self._model_loaded:
            raise ModelNotLoadedError('You have to load the model before'
                                      ' trying to denoise image')
        accepted_shapes = (consts.IMAGE_SIZE, consts.IMAGE_SIZE + (1,))
        if img.shape not in accepted_shapes:
            raise ValueError(f'Image shape must be {consts.IMAGE_SIZE} '
                             f'or {consts.IMAGE_SIZE + (1,)}')
        if img.max() > 1.0:
            # assuming that if values are not between 0.0 and 1.0,
            # they are in range 0 to 255, therefore rescale image to fit into model
            img = img / 255
        # reshape to model input shape and wrap in a single-image batch
        batch = np.expand_dims(img.reshape(consts.IMAGE_SIZE + (1,)), axis=0)
        # perform denoising
        prediction = self._model(batch)
        # squeeze the batch dimension out, keep the grayscale channel
        return np.expand_dims(prediction.numpy().squeeze(), axis=-1)
    def evaluate(self, images):
        """Evaluate the model and calculate accuracy and loss."""
        return self._model.evaluate(images)
| [
"PIL.Image.open",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.random.random",
"base_model.ModelNotLoadedError",
"numpy.asarray",
"base_model.ModelNotBuiltError",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"cv2.a... | [((1129, 1152), 'glob.glob', 'glob.glob', (["(path + '\\\\*')"], {}), "(path + '\\\\*')\n", (1138, 1152), False, 'import glob\n'), ((1617, 1633), 'numpy.array', 'np.array', (['X_imgs'], {}), '(X_imgs)\n', (1625, 1633), True, 'import numpy as np\n'), ((3623, 3670), 'numpy.array', 'np.array', (['gaussian_noise_imgs'], {'dtype': 'np.float32'}), '(gaussian_noise_imgs, dtype=np.float32)\n', (3631, 3670), True, 'import numpy as np\n'), ((5688, 5718), 'tensorflow.keras.models.Sequential', 'Sequential', (['[encoder, decoder]'], {}), '([encoder, decoder])\n', (5698, 5718), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((7381, 7408), 'tensorflow.keras.models.load_model', 'load_model', (['self.MODEL_NAME'], {}), '(self.MODEL_NAME)\n', (7391, 7408), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((8889, 8916), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (8903, 8916), True, 'import numpy as np\n'), ((3389, 3441), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(0.75)', '(0.25 * gaussian)', '(0.25)', '(0)'], {}), '(img, 0.75, 0.25 * gaussian, 0.25, 0)\n', (3404, 3441), False, 'import cv2\n'), ((6422, 6489), 'base_model.ModelNotBuiltError', 'ModelNotBuiltError', (['"""The model has to be built before training it."""'], {}), "('The model has to be built before training it.')\n", (6440, 6489), False, 'from base_model import ModelNotLoadedError, BaseTFModel, singleton, ModelNotBuiltError\n'), ((8176, 8261), 'base_model.ModelNotLoadedError', 'ModelNotLoadedError', (['"""You have to load the model before trying to denoise image"""'], {}), "('You have to load the model before trying to denoise image'\n )\n", (8195, 8261), False, 'from base_model import ModelNotLoadedError, BaseTFModel, singleton, ModelNotBuiltError\n'), ((3523, 3560), 'numpy.expand_dims', 'np.expand_dims', (['gaussian_img'], {'axis': '(-1)'}), '(gaussian_img, axis=-1)\n', (3537, 3560), True, 'import numpy as np\n'), 
((4144, 4192), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(consts.IMAGE_SIZE + (1,))'}), '(input_shape=consts.IMAGE_SIZE + (1,))\n', (4154, 4192), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4206, 4278), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=256, kernel_size=(3, 3), activation=relu, padding='same')\n", (4212, 4278), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4292, 4364), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=128, kernel_size=(3, 3), activation=relu, padding='same')\n", (4298, 4364), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4378, 4424), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (4390, 4424), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4438, 4509), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=64, kernel_size=(3, 3), activation=relu, padding='same')\n", (4444, 4509), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4523, 4594), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=32, kernel_size=(3, 3), activation=relu, padding='same')\n", (4529, 4594), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4608, 4654), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], 
{'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (4620, 4654), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4668, 4739), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=32, kernel_size=(3, 3), activation=relu, padding='same')\n", (4674, 4739), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4753, 4799), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (4765, 4799), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((4981, 5068), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(consts.IMAGE_SIZE[0] // 8, consts.IMAGE_SIZE[1] // 8, 32)'}), '(input_shape=(consts.IMAGE_SIZE[0] // 8, consts.IMAGE_SIZE[1] // \n 8, 32))\n', (4991, 5068), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5077, 5148), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=32, kernel_size=(3, 3), activation=relu, padding='same')\n", (5083, 5148), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5162, 5182), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (5174, 5182), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5196, 5267), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=64, kernel_size=(3, 3), activation=relu, padding='same')\n", (5202, 5267), False, 'from tensorflow.keras.layers import 
InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5281, 5353), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=128, kernel_size=(3, 3), activation=relu, padding='same')\n", (5287, 5353), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5367, 5387), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (5379, 5387), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5401, 5473), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'activation': 'relu', 'padding': '"""same"""'}), "(filters=256, kernel_size=(3, 3), activation=relu, padding='same')\n", (5407, 5473), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5487, 5507), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (5499, 5507), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5521, 5594), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1)', 'kernel_size': '(3, 3)', 'activation': 'sigmoid', 'padding': '"""same"""'}), "(filters=1, kernel_size=(3, 3), activation=sigmoid, padding='same')\n", (5527, 5594), False, 'from tensorflow.keras.layers import InputLayer, Conv2D, MaxPooling2D, UpSampling2D\n'), ((5968, 5995), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'self.LR'}), '(learning_rate=self.LR)\n', (5972, 5995), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1428, 1443), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1438, 1443), True, 'import numpy as np\n'), ((3306, 3342), 'numpy.random.random', 'np.random.random', (['(width, height, 1)'], {}), '((width, height, 1))\n', (3322, 3342), True, 'import numpy as np\n'), ((1268, 1294), 
'PIL.Image.open', 'PIL.Image.open', (['image_path'], {}), '(image_path)\n', (1282, 1294), False, 'import PIL\n')] |
"""
License
-------
Copyright (C) 2021 - <NAME>
You can use this software, redistribute it, and/or modify it under the
terms of the Creative Commons Attribution 4.0 International Public License.
Explanation
-----------
This module contains the statistical model of the COVID-19 vaccination campaign
described in assets/model_explanation.html. Moreover, it also includes functions
to sample the model's parameter space.
"""
import numpy as np
import pandas as pd
import time
import datetime
import functools
from collections import defaultdict
import argparse
from argparse import RawTextHelpFormatter
from plot import plot_model_results
def run_single_realization(
    p_pro, p_anti, pressure, tau, nv_0, nv_max, max_day_number, N
):
    """
    Run a single realization of the statistical model of vaccination campaigns.
    This single run corresponds to simulating the evolution of the vaccination campaign
    as a function of time. See the assets/model_explanation.html for details on the model.
    Parameters
    ----------
    p_pro : float
        The probability that a certain person belongs to the pro-vaccines group
    p_anti : float
        The probability that a specific person belongs to the anti-vaccines group
    pressure : float
        Strength of the social pressure effect
    tau : float
        Duplication time of the weekly arriving vaccines
    nv_0 : float
        Initial stock of vaccines, measured as a fraction over the population size
    nv_max : float
        Maximum weekly delivery capacity, measured as a fraction over the population size
    max_day_number : int
        Number of days that are going to be simulated
    N : int
        The population size
    Returns
    -------
    Dictionary (key:string, value:list)
        Dictionary with different data collected as a function of the day number
    """
    assert p_pro + p_anti <= 1.0
    n_pro = int(p_pro * N)
    n_agnostics = int((1 - (p_pro + p_anti)) * N)

    def weekly_supply(t):
        # exponentially growing weekly delivery (doubles every tau weeks),
        # capped at nv_max; both measured as fractions of the population
        return min(nv_0 * np.exp(np.log(2) * t / (tau * 7)), nv_max) * N

    stock = 0
    total_received = 0
    n_vaccinated = 0
    n_waiting = n_pro
    vaccinated_pct = []
    daily_per_million = []
    received_pct = []
    stock_pct = []
    for day in range(max_day_number):
        # ------ add arriving vaccines to the stock (deliveries land weekly) ------
        arriving = int(weekly_supply(day)) if day % 7 == 0 else 0
        assert arriving >= 0
        stock += arriving
        total_received += arriving
        # ------ apply vaccines ------
        # The probability of having a vaccine available is taken over the whole
        # population (doses are spread thin), not only over the people waiting.
        # Since we work in weekly cycles and only ~2 days a week allow
        # vaccination, the per-day probability is scaled by ~2/7.
        avail_prob = (2.0 / 7.0) * stock / N
        newly_vaccinated = np.random.poisson(n_waiting * avail_prob)
        # cannot apply more doses than are in stock nor vaccinate more
        # people than are currently waiting
        newly_vaccinated = min(newly_vaccinated, stock, n_waiting)
        n_vaccinated += newly_vaccinated
        n_waiting -= newly_vaccinated
        stock -= newly_vaccinated
        vaccinated_fraction = n_vaccinated / N
        # ------ convert agnostics (social pressure effect) ------
        converted = np.random.poisson(n_agnostics * (vaccinated_fraction * pressure))
        # cannot convert more agnostics than remain
        converted = min(converted, n_agnostics)
        n_agnostics -= converted
        n_waiting += converted
        vaccinated_pct.append(vaccinated_fraction * 100)
        daily_per_million.append(newly_vaccinated * 1e6 / N)
        received_pct.append(total_received * 100 / N)
        stock_pct.append(stock * 100 / N)
    return {
        "people_vaccinated_per_hundred": vaccinated_pct,
        "daily_vaccinations_per_million": daily_per_million,
        "cum_number_vac_received_per_hundred": received_pct,
        "vaccines_in_stock_per_hundred": stock_pct,
    }
@functools.lru_cache(maxsize=10)
def run_sampling(params, start_date, end_date, CI, N, max_running_time=None):
    """
    Sample the model's parameter space. For that, the model is run for
    each input combination of parameters.
    Parameters
    ----------
    params : tuple of tuples
        Each of the tuples contain a combination of model parameters
        (p_pro, p_anti, pressure, tau, nv_0, nv_max, max_day_number).
        See run_single_realization for details. Must be tuples (not lists)
        because the results are memoized via lru_cache, which requires
        hashable arguments.
    start_date : datetime.datetime
        Starting date
    end_date : datetime.datetime
        The last date at which the model run stops
    CI : float
        Value of the quantile used for establishing the confidence intervals
    N : int
        The population size
    max_running_time : float, optional
        Wall-clock budget in seconds; when exceeded, the sampling loop stops
        early and only the finished runs are aggregated. None disables it.
    Returns
    -------
    Dictionary of dictionaries
        Each dictionary key corresponds to the different quantities returned by run_single_realization.
        Each of the values is another dictionary of lists that contains the mean of the quantity, its upper
        and lower confidence intervals, and the dates associated with each list index.
        The extra key "number_finished_samples" holds the number of Monte
        Carlo runs that actually completed.
    """
    starting_time = time.time()
    dates = pd.date_range(start_date, end_date, freq="1d")
    max_days = len(dates)
    data = defaultdict(list)
    number_finished_samples = 0
    for p_pro, p_anti, pressure, tau, nv_0, nv_max in params:
        data_ = run_single_realization(
            p_pro, p_anti, pressure, tau, nv_0, nv_max, max_days, N
        )
        # merge a dict into a dict of lists (one row per Monte Carlo run)
        for k, v in data_.items():
            data[k].append(v)
        number_finished_samples += 1
        elapsed_time = time.time() - starting_time
        if max_running_time is not None and elapsed_time > max_running_time:
            break
    # we work with numpy arrays since Dash Store cannot handle DataFrames
    data = {k: {"dates": dates, "samples": np.vstack(v)} for k, v in data.items()}
    # Note: the average is over a time window, but samples are not mixed here
    for k in ["daily_vaccinations_per_million"]:
        v = data[k]["samples"]
        df = pd.DataFrame(np.vstack(v).T, index=dates)
        # The model simulates the dynamics of the application of a single dosis, but actually
        # (most of) those who got a first dosis will get a second one ~30 days later. Since such second
        # doses are included in the daily_vaccinations_per_million from the real-world data,
        # we must also include them in the model results. For that, we shift the original applied
        # doses by 30 days and add the two DataFrames together.
        # The fact that all the second doses are appended after all the first ones
        # doesn't matter since afterward we will reindex to compute a moving average
        shifted_df = pd.DataFrame(
            np.vstack(v).T, index=dates + datetime.timedelta(days=30)
        )
        df = df.add(shifted_df, fill_value=0.0)
        # keep one data point every 7 days, matching the weekly cadence of the real-world data
        df = df.reindex(pd.date_range(start=start_date, end=end_date, freq="7d"))
        # do not call df.index.values, because that transforms Timestamps to numpy.datetime, and plotly seems to prefer Timestamps
        data[k]["dates"] = df.index
        data[k]["samples"] = df.values.T
    # get confidence intervals for each date, computed across samples
    data_CI = defaultdict(dict)
    for k in data.keys():
        samples = data[k]["samples"]
        quantiles = np.quantile(samples, [(1 - CI)/2., (1 + CI)/2.], axis=0)
        data_CI[k]["upper"] = quantiles[1]
        data_CI[k]["lower"] = quantiles[0]
        data_CI[k]["mean"] = samples.mean(axis=0)
        data_CI[k]["dates"] = data[k]["dates"]
    data_CI["number_finished_samples"] = number_finished_samples
    return data_CI
def sample_param_combinations(
    p_pro_bounds,
    p_anti_bounds,
    pressure_bounds,
    tau_bounds,
    nv_0_bounds,
    nv_max_bounds,
    n_rep,
):
    """
    Draw `n_rep` random parameter combinations, each component sampled
    uniformly within the corresponding bounds.

    Combinations with p_pro + p_anti > 1 are rejected and redrawn. If the
    number of rejections exceeds 10 * n_rep, the p_pro and p_anti bounds are
    considered mutually incompatible and (None, None) is returned.

    Parameters
    ----------
    p_pro_bounds : 2D-tuple of floats
        Lower and upper bound for the probability of belonging to the pro-vaccines group
    p_anti_bounds : 2D-tuple of floats
        Lower and upper bound for the probability of belonging to the anti-vaccines group
    pressure_bounds : 2D-tuple of floats
        Lower and upper bound for the strength of the social pressure effect
    tau_bounds : 2D-tuple of floats
        Lower and upper bound for the duplication time of the weekly arriving vaccines
    nv_0_bounds : 2D-tuple of floats
        Lower and upper bound for the initial stock of vaccines (fraction of population)
    nv_max_bounds : 2D-tuple of floats
        Lower and upper bound for the maximum weekly delivery capacity (fraction of population)
    n_rep : int
        Number of parameter combinations to draw

    Returns
    -------
    Tuple of tuples
        Each inner tuple is (p_pro, p_anti, pressure, tau, nv_0, nv_max).
    Tuple
        The probability that a person belongs to the agnostics group,
        one entry per combination.
    """
    combos = []
    agnostics_probs = []
    rejections = 0
    while len(combos) < n_rep:
        pro = np.random.uniform(p_pro_bounds[0], p_pro_bounds[1])
        anti = np.random.uniform(p_anti_bounds[0], p_anti_bounds[1])
        # rejection sampling enforces the constraint p_pro + p_anti <= 1
        if pro + anti > 1.0:
            rejections += 1
            if rejections > n_rep * 10:
                # too many rejections: the requested bounds for p_pro and
                # p_anti are mutually incompatible, so abort the sampling
                return None, None
            continue
        press = np.random.uniform(pressure_bounds[0], pressure_bounds[1])
        tau_value = np.random.uniform(tau_bounds[0], tau_bounds[1])
        nv_0_value = np.random.uniform(nv_0_bounds[0], nv_0_bounds[1])
        nv_max_value = np.random.uniform(nv_max_bounds[0], nv_max_bounds[1])
        # tuples keep the result hashable so it can feed @functools.lru_cache
        combos.append((pro, anti, press, tau_value, nv_0_value, nv_max_value))
        agnostics_probs.append(1 - (pro + anti))
    return tuple(combos), tuple(agnostics_probs)
def run_model(
    # population parameters
    p_pro_bounds,
    p_anti_bounds,
    pressure_bounds,
    # vaccination parameters
    tau_bounds,
    nv_0_bounds,
    nv_max_bounds,
    # sampling parameters
    CI,
    n_rep,
    N,
    date_range,
    max_running_time=None,
):
    """Sample model parameters, run the Monte Carlo sampling and summarize.

    Parameters
    ----------
    p_pro_bounds, p_anti_bounds : 2-tuple of floats
        Bounds (percent, 0-100) for the probability that a person belongs
        to the pro- / anti-vaccines group.
    pressure_bounds : 2-tuple of floats
        Bounds for the strength of the social pressure effect.
    tau_bounds : 2-tuple of floats
        Bounds for the duplication time of the weekly arriving vaccines.
    nv_0_bounds, nv_max_bounds : 2-tuple of floats
        Bounds (percent of the population) for the initial vaccine stock and
        the maximum weekly delivery capacity.
    CI : float
        Confidence-interval quantile, in percent (0-100).
    n_rep : int
        Number of Monte Carlo runs.
    N : int
        Population size.
    date_range : dict
        Must contain the keys "start_date" and "end_date".
    max_running_time : float or None, optional
        Maximum allowed computation time in seconds (None means unlimited).

    Returns
    -------
    model_results : dict or None
        Output of ``run_sampling``, or None when the bounds are incompatible.
    msg_error : str
        Empty on success, otherwise a human-readable error message.
    msg_agnostics_pct : str
        Message reporting the estimated percentage of agnostics.
    """
    # default output messages
    msg_agnostics_pct = "Agnostics: "  # fixed user-facing typo ("Agnosticts")
    msg_error = ""
    # some sliders use values 0-100, so percentages are rescaled into [0, 1]
    params_combinations, p_soft_no_values = sample_param_combinations(
        np.array(p_pro_bounds) / 100,
        np.array(p_anti_bounds) / 100,
        np.array(pressure_bounds),
        np.array(tau_bounds),
        np.array(nv_0_bounds) / 100,
        np.array(nv_max_bounds) / 100,
        n_rep,
    )
    if params_combinations is not None:
        # evaluate the agnostics population from the pro and anti vaccines samples
        p_soft_no_values = 100 * np.array(p_soft_no_values)
        a = max(np.mean(p_soft_no_values) - np.std(p_soft_no_values), 0)
        b = np.mean(p_soft_no_values) + np.std(p_soft_no_values)
        a_str = "{0:.0f}".format(a)
        b_str = "{0:.0f}".format(b)
        # if the uncertainty interval is smaller than 1%, report one value instead of the interval
        if abs(a - b) < 1:
            msg_agnostics_pct += a_str + "%"
        else:
            msg_agnostics_pct += a_str + " - " + b_str + "%"
    else:
        # fixed user-facing typo ("pertentages" -> "percentages")
        msg_error = "ERROR: The percentages of pro- and anti-vaccines are simultaneously too high. Please reduce them."
        return None, msg_error, msg_agnostics_pct
    model_results = run_sampling(
        params_combinations,
        date_range["start_date"],
        date_range["end_date"],
        CI / 100,
        N,
        max_running_time,
    )
    if max_running_time is not None:
        number_finished_samples = model_results["number_finished_samples"]
        if number_finished_samples < len(params_combinations):
            msg_error = f"ERROR: Maximum computation time of {max_running_time}s exceeded. Only {number_finished_samples} of the desired {len(params_combinations)} Monte Carlo runs were performed."
    return model_results, msg_error, msg_agnostics_pct
class SplitArgsStr(argparse.Action):
    """argparse action: split a comma-separated option into a list of strings.

    A bare value (no comma) is duplicated so downstream code can always
    treat the destination as a [lower, upper] pair.
    """

    def __call__(self, parser, namespace, values_str, option_string=None):
        parts = values_str.split(",")
        # single value -> replicate it into a two-item list
        setattr(namespace, self.dest, parts * 2 if len(parts) == 1 else parts)
class SplitArgsFloat(argparse.Action):
    """argparse action: split a comma-separated option into a list of floats.

    A bare value (no comma) is duplicated so downstream code can always
    treat the destination as a [lower, upper] pair.
    """

    def __call__(self, parser, namespace, values_str, option_string=None):
        parts = [float(token) for token in values_str.split(",")]
        if len(parts) == 1:
            # single value -> replicate it into a two-item list
            parts = parts + parts
        setattr(namespace, self.dest, parts)
def main():
    """Command-line entry point: parse bounds, run the model, show the plot."""
    description = """
    This program performs a Monte Carlo sampling of a statistical model of the
    COVID-19 vaccination campaign (you can find a detailed explanation of
    the model in assets/model_explanation.html).
    In each Monte Carlo run, the value of each parameter is drawn from a uniform
    probability distribution. The bounds of each distribution are defined in the
    command line call as comma-separated strings for each parameter. If instead
    of a comma-separated string, a single value is given, that parameter will
    assume in every Monte Carlo run exactly that specific value.
    When the sampling is complete, the results are automatically rendered as an
    interactive plot in your default internet browser.
    Example call:
    'python model.py --pro=30,40 --anti=17,40 --pressure=0.02,0.025 --dupl_time=3,4 --init_stock=0.2,0.24 --max_delivery=10,10 --date_range=2020-12-30,2021-12-1'
    Author: <NAME>.
    Related links:
    - The author's website: https://www.davidfcastellanos.com
    - The source code: https://github.com/kastellane/COVID19-Vaccination-Model
    - An interactive web app version: https://covid19-vaccination-app.davidfcastellanos.com
    - An associated blog post: https://www.davidfcastellanos.com/covid-19-vaccination-model
    """
    parser = argparse.ArgumentParser(
        description=description, formatter_class=RawTextHelpFormatter
    )
    # FIX: the options below used to declare required=True together with a
    # string default. argparse ignores the default of a required option, and
    # the split-action is never applied to defaults anyway, so the documented
    # defaults were dead. The options are now optional with already-converted
    # defaults; every previously valid invocation still works unchanged.
    parser.add_argument(
        "--pro",
        type=str,
        help="comma-separated upper and lower bounds for the probability that a certain person belongs to the pro-vaccines group",
        default=[30.0, 40.0],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--anti",
        type=str,
        help="comma-separated upper and lower bounds for the probability that a specific person belongs to the anti-vaccines group",
        default=[30.0, 40.0],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--pressure",
        type=str,
        help="comma-separated upper and lower bounds for the strength of the social pressure effect",
        default=[0.02, 0.025],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--dupl_time",
        type=str,
        help="comma-separated upper and lower bounds for the duplication time of the weekly arriving vaccines",
        default=[3.0, 4.0],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--init_stock",
        type=str,
        help="comma-separated upper and lower bounds for the initial stock of vaccines, measured as a percentage of the population size",
        default=[0.2, 0.2],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--max_delivery",
        type=str,
        help="comma-separated upper and lower bounds for the maximum weekly delivery capacity, measured as a percentage over the population size",
        default=[10.0, 10.0],
        action=SplitArgsFloat,
    )
    parser.add_argument(
        "--mc_samples",
        type=int,
        help="number of Monte Carlo samples (optional)",
        default=100,
    )
    parser.add_argument(
        "--date_range",
        type=str,
        help="comma-separated starting and ending dates (optional)",
        default=["2020-12-30", "2021-12-1"],
        action=SplitArgsStr,
    )
    parser.add_argument(
        "--CI",
        type=float,
        help="value of the quantile used for establishing the confidence intervals",
        default=0.95,
    )
    args = vars(parser.parse_args())
    # population parameters
    p_pro_bounds = args["pro"]
    p_anti_bounds = args["anti"]
    pressure_bounds = args["pressure"]
    # vaccination parameters
    tau_bounds = args["dupl_time"]
    nv_0_bounds = args["init_stock"]
    nv_max_bounds = args["max_delivery"]
    # sampling parameters
    n_rep = args["mc_samples"]
    N = 50000
    start_date = args["date_range"][0]
    end_date = args["date_range"][1]
    CI = args["CI"]
    date_range = dict(start_date=start_date, end_date=end_date)
    model_results, msg_error, msg_agnostics_pct = run_model(
        # population parameters
        p_pro_bounds,
        p_anti_bounds,
        pressure_bounds,
        # vaccination parameters
        tau_bounds,
        nv_0_bounds,
        nv_max_bounds,
        # sampling parameters
        CI,
        n_rep,
        N,
        date_range,
    )
    if msg_error != "":
        print(msg_error)
    else:
        fig = plot_model_results(model_results, CI)
        # plot_country_data(fig, selected_countries, country_data)
        fig.show(renderer="browser")
    return
# Run the CLI entry point only when the module is executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"numpy.random.poisson",
"numpy.std",
"numpy.log",
"numpy.array",
"numpy.quantile",
"collections.defaultdict",
"plot.plot_model_results",
"numpy.vstack",
"numpy.random.uniform",
"functools.lru_cache",
"datetime.timedelta",
"time.time",
"pandas.date... | [((4919, 4950), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (4938, 4950), False, 'import functools\n'), ((6055, 6066), 'time.time', 'time.time', ([], {}), '()\n', (6064, 6066), False, 'import time\n'), ((6080, 6126), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {'freq': '"""1d"""'}), "(start_date, end_date, freq='1d')\n", (6093, 6126), True, 'import pandas as pd\n'), ((6165, 6182), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6176, 6182), False, 'from collections import defaultdict\n'), ((8293, 8310), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (8304, 8310), False, 'from collections import defaultdict\n'), ((16017, 16108), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'RawTextHelpFormatter'}), '(description=description, formatter_class=\n RawTextHelpFormatter)\n', (16040, 16108), False, 'import argparse\n'), ((3452, 3501), 'numpy.random.poisson', 'np.random.poisson', (['(n_waiting * proc_vac_available)'], {}), '(n_waiting * proc_vac_available)\n', (3469, 3501), True, 'import numpy as np\n'), ((4009, 4058), 'numpy.random.poisson', 'np.random.poisson', (['(n_agnostics * prob_change_mind)'], {}), '(n_agnostics * prob_change_mind)\n', (4026, 4058), True, 'import numpy as np\n'), ((8394, 8456), 'numpy.quantile', 'np.quantile', (['samples', '[(1 - CI) / 2.0, (1 + CI) / 2.0]'], {'axis': '(0)'}), '(samples, [(1 - CI) / 2.0, (1 + CI) / 2.0], axis=0)\n', (8405, 8456), True, 'import numpy as np\n'), ((10453, 10504), 'numpy.random.uniform', 'np.random.uniform', (['p_pro_bounds[0]', 'p_pro_bounds[1]'], {}), '(p_pro_bounds[0], p_pro_bounds[1])\n', (10470, 10504), True, 'import numpy as np\n'), ((10522, 10575), 'numpy.random.uniform', 'np.random.uniform', (['p_anti_bounds[0]', 'p_anti_bounds[1]'], {}), '(p_anti_bounds[0], p_anti_bounds[1])\n', (10539, 10575), True, 'import numpy as 
np\n'), ((12278, 12303), 'numpy.array', 'np.array', (['pressure_bounds'], {}), '(pressure_bounds)\n', (12286, 12303), True, 'import numpy as np\n'), ((12313, 12333), 'numpy.array', 'np.array', (['tau_bounds'], {}), '(tau_bounds)\n', (12321, 12333), True, 'import numpy as np\n'), ((19305, 19342), 'plot.plot_model_results', 'plot_model_results', (['model_results', 'CI'], {}), '(model_results, CI)\n', (19323, 19342), False, 'from plot import plot_model_results\n'), ((6567, 6578), 'time.time', 'time.time', ([], {}), '()\n', (6576, 6578), False, 'import time\n'), ((6809, 6821), 'numpy.vstack', 'np.vstack', (['v'], {}), '(v)\n', (6818, 6821), True, 'import numpy as np\n'), ((7942, 7998), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""7d"""'}), "(start=start_date, end=end_date, freq='7d')\n", (7955, 7998), True, 'import pandas as pd\n'), ((11092, 11149), 'numpy.random.uniform', 'np.random.uniform', (['pressure_bounds[0]', 'pressure_bounds[1]'], {}), '(pressure_bounds[0], pressure_bounds[1])\n', (11109, 11149), True, 'import numpy as np\n'), ((11168, 11215), 'numpy.random.uniform', 'np.random.uniform', (['tau_bounds[0]', 'tau_bounds[1]'], {}), '(tau_bounds[0], tau_bounds[1])\n', (11185, 11215), True, 'import numpy as np\n'), ((11235, 11284), 'numpy.random.uniform', 'np.random.uniform', (['nv_0_bounds[0]', 'nv_0_bounds[1]'], {}), '(nv_0_bounds[0], nv_0_bounds[1])\n', (11252, 11284), True, 'import numpy as np\n'), ((11306, 11359), 'numpy.random.uniform', 'np.random.uniform', (['nv_max_bounds[0]', 'nv_max_bounds[1]'], {}), '(nv_max_bounds[0], nv_max_bounds[1])\n', (11323, 11359), True, 'import numpy as np\n'), ((12201, 12223), 'numpy.array', 'np.array', (['p_pro_bounds'], {}), '(p_pro_bounds)\n', (12209, 12223), True, 'import numpy as np\n'), ((12239, 12262), 'numpy.array', 'np.array', (['p_anti_bounds'], {}), '(p_anti_bounds)\n', (12247, 12262), True, 'import numpy as np\n'), ((12343, 12364), 'numpy.array', 'np.array', 
(['nv_0_bounds'], {}), '(nv_0_bounds)\n', (12351, 12364), True, 'import numpy as np\n'), ((12380, 12403), 'numpy.array', 'np.array', (['nv_max_bounds'], {}), '(nv_max_bounds)\n', (12388, 12403), True, 'import numpy as np\n'), ((12589, 12615), 'numpy.array', 'np.array', (['p_soft_no_values'], {}), '(p_soft_no_values)\n', (12597, 12615), True, 'import numpy as np\n'), ((12701, 12726), 'numpy.mean', 'np.mean', (['p_soft_no_values'], {}), '(p_soft_no_values)\n', (12708, 12726), True, 'import numpy as np\n'), ((12729, 12753), 'numpy.std', 'np.std', (['p_soft_no_values'], {}), '(p_soft_no_values)\n', (12735, 12753), True, 'import numpy as np\n'), ((7034, 7046), 'numpy.vstack', 'np.vstack', (['v'], {}), '(v)\n', (7043, 7046), True, 'import numpy as np\n'), ((7725, 7737), 'numpy.vstack', 'np.vstack', (['v'], {}), '(v)\n', (7734, 7737), True, 'import numpy as np\n'), ((12632, 12657), 'numpy.mean', 'np.mean', (['p_soft_no_values'], {}), '(p_soft_no_values)\n', (12639, 12657), True, 'import numpy as np\n'), ((12660, 12684), 'numpy.std', 'np.std', (['p_soft_no_values'], {}), '(p_soft_no_values)\n', (12666, 12684), True, 'import numpy as np\n'), ((7755, 7782), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (7773, 7782), False, 'import datetime\n'), ((2043, 2052), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2049, 2052), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import random
import torch
from transformers import pipeline
from sentence_transformers import SentenceTransformer
import warnings
# Silence the (noisy) warnings emitted by the transformers / sentence-transformers stacks.
warnings.filterwarnings('ignore')
# backend model for zero shot object categorizer
# NOTE(review): no model name is pinned, so the pipeline downloads the Hugging
# Face default zero-shot model on first use — confirm this is intended.
classifier_zero_shot = pipeline("zero-shot-classification")
def zero_shot_object_categorizer(text, classifier = classifier_zero_shot):
    '''
    Categorize a piece of text with a zero-shot text classifier.

    The classifier scores the text against a fixed list of candidate
    categories; the highest-scoring category is reported in the returned
    sentence.

    Arguments:
        text         The text that will be categorized
        classifier   A zero shot text classifier pipeline from Hugging Face

    Requirements:
        A hugging face zero shot classifier pipeline. eg:
        classifier_zero_shot = pipeline("zero-shot-classification")
    '''
    candidate_labels = ['animal', 'food', 'fruit', 'car', 'boat', 'airplane', 'appliance', 'electronic', 'accessory', 'furniture', 'kitchen', 'cutlery', 'crockery', 'person', 'fish', 'instrument', 'tool', 'sports equipment', 'vehicle', 'holy place', 'power tool']
    result = classifier(text, candidate_labels)
    # pick the label with the highest classification score
    best_label = result['labels'][np.argmax(result['scores'])]
    return f'this is a picture of {text}, a type of {best_label}'
# backend for masked language modelling based solutions
# roberta-large fills a single <mask> token with its top predictions.
unmasker = pipeline('fill-mask', model = 'roberta-large')
def MLM_object_categorizer(text, unmasker = unmasker):
    """Categorize the given text via masked language modelling.

    The category is whatever token the model predicts for the mask in
    "<text> is a type of <mask>", skipping a prediction that merely
    echoes the input text itself.

    Arguments:
        text       The text that will be categorized
        unmasker   A text unmasker pipeline from Hugging Face

    Requirements:
        A hugging face text unmasker pipeline. eg:
        unmasker = pipeline('fill-mask', model = 'roberta-large')
    """
    predictions = unmasker(f'{text} is a type of <mask>')
    candidates = [pred['token_str'].lstrip() for pred in predictions]
    # drop a prediction that just repeats the input
    if text in candidates:
        candidates.remove(text)
    return f'this is a picture of {text}, a type of {candidates[0]}'
# (object, category) example pairs used to prompt and anchor the categorizers below
object_categorization_data = pd.read_csv('object_categorizer_dataset.csv')
def random_sample_n_rows(data, n):
    """Randomly sample n distinct rows from data and join them as one string.

    Each sampled row (object, category) becomes the sentence
    "<object> is a type of <category>"; the sentences are joined with ", ".

    Arguments:
        data    A pandas DataFrame whose first two columns are object and category
        n       The number of rows to sample (must be <= len(data))

    Requirements:
        The object_categorization_data that is contained in this folder as:
        'object_categorizer_dataset.csv'
    """
    indexes = random.sample(range(0, len(data)), n)
    rows = []
    for i in indexes:
        row = data.iloc[i]
        # use positional .iloc access: `row[0]` label-based lookup with an
        # integer position on a Series is deprecated in modern pandas
        rows.append(f'{row.iloc[0]} is a type of {row.iloc[1]}')
    return ', '.join(rows)
def random_sampling_MLM_object_categorizer(text, n = 8, unmasker = unmasker, data = object_categorization_data):
    """Categorize text via masked language modelling with random prompting.

    The mask-filling prompt is prefixed with n randomly sampled
    (object, category) sentences from the dataset, which is meant to steer
    the model toward a better category label. Predictions that merely echo
    the input are skipped.

    Arguments:
        text       The text that will be categorized
        n          Number of dataset rows sampled for the prompt
        unmasker   A text unmasker pipeline from Hugging Face
        data       A pandas dataframe of (object, category) pairs

    Requirements:
        A hugging face text unmasker pipeline. eg:
        unmasker = pipeline('fill-mask', model = 'roberta-large')
        The object_categorization_data that is contained in this folder as:
        'object_categorizer_dataset.csv'
        The random_sample_n_rows function contained in this script
    """
    prompt = random_sample_n_rows(data, n) + f' {text} is a type of <mask>'
    candidates = [pred['token_str'].lstrip() for pred in unmasker(prompt)]
    if text in candidates:
        candidates.remove(text)
    return f'this is a picture of {text}, a type of {candidates[0]}'
# backend for SBERT object categorizer and SBERT sampling
# (sentence-level embedding model used for similarity lookups below)
SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
def make_data_embeddings(df, SBERT = SBERT):
    """Encode every (object, category) row of df with SBERT.

    Each row is turned into the sentence "<object> is a type of <category>"
    and the whole batch is encoded in a single call.

    Arguments:
        df      A pandas dataframe whose first two columns are object and category
        SBERT   An instance of the transformer SBERT

    Returns:
        A torch tensor with one embedding row per dataframe row.

    Requirements:
        An instance of SBERT eg:
        SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
    """
    sentences = []
    for i in range(len(df)):
        row = df.iloc[i]
        # use positional .iloc access: `row[0]` integer-position lookup on a
        # Series is deprecated in modern pandas
        sentences.append(f"{row.iloc[0]} is a type of {row.iloc[1]}")
    embeddings = torch.from_numpy(SBERT.encode(sentences))
    return embeddings
# Precompute SBERT embeddings for every dataset row once at import time;
# reused by all similarity lookups below.
data_embeddings = make_data_embeddings(object_categorization_data)
def find_nearest_row(text, embeddings, SBERT = SBERT):
    """Return the index of the embedding most similar to the embedded text.

    The text is embedded as "<text> is a type of" and compared with every
    row of `embeddings` by cosine similarity; the index of the best match
    is returned as a plain int.

    Arguments:
        text         The text that will be compared to the embeddings
        embeddings   SBERT embeddings of the corpus the text is compared to
        SBERT        An instance of SBERT

    Requirements:
        A torch tensor containing SBERT embeddings of the object_categorization_data eg:
        data_embeddings = make_data_embeddings(object_categorization_data)
        An instance of SBERT eg:
        SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
    """
    embedded_text = torch.from_numpy(SBERT.encode(f"{text} is a type of"))
    # Vectorized cosine similarity: one matrix-vector product replaces the
    # per-row Python loop, and the query norm is computed once instead of
    # once per row.
    similarities = (embeddings @ embedded_text) / (
        embeddings.norm(dim=1) * embedded_text.norm()
    )
    return torch.argmax(similarities).item()
# states the category is that of the row that is nearest to the given text
def SBERT_object_categorizer(text, embeddings = data_embeddings, df = object_categorization_data):
    """Assign a category to text via SBERT similarity against df.

    Finds the dataset row most similar to the text (via find_nearest_row
    and the precomputed embeddings) and reports that row's category.

    Arguments:
        text         The text that will be categorized
        embeddings   The SBERT embeddings of the rows of df
        df           The object_categorization_dataset

    Requirements:
        The object_categorization_data that is contained in this folder as:
        'object_categorizer_dataset.csv'
        A torch tensor containing SBERT embeddings of the object_categorization_data eg:
        data_embeddings = make_data_embeddings(object_categorization_data)
        An instance of SBERT eg:
        SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
    """
    index = find_nearest_row(text, embeddings)
    # .iat: fast positional scalar access; the previous chained
    # `df.iloc[index][1]` relied on deprecated positional Series indexing
    category = df.iat[index, 1]
    return f'this is a picture of {text}, a type of {category}'
def SBERT_similarity_sampler(text, n = 8, embeddings = data_embeddings, df = object_categorization_data):
    """Return the text of the n rows of df most similar to the given text.

    Similarity is Euclidean distance between SBERT embeddings; the n rows
    with the smallest distance are rendered as "<object> is a type of
    <category>" sentences joined with ", ".

    Arguments:
        text         The text for which similar rows will be found
        n            The number of rows that will be found
        embeddings   The SBERT embeddings of the rows of df
        df           A pandas dataframe

    Requirements:
        The object_categorization_data that is contained in this folder as:
        'object_categorizer_dataset.csv'
        A torch tensor containing SBERT embeddings of the object_categorization_data eg:
        data_embeddings = make_data_embeddings(object_categorization_data)
        An instance of SBERT eg:
        SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
    """
    embedded_text = torch.from_numpy(SBERT.encode(f"{text} is a type of"))
    # One vectorized cdist call replaces the previous per-row Python loop
    # (same Euclidean distances, computed in a single batched operation).
    distances = torch.cdist(embeddings, embedded_text.unsqueeze(0)).squeeze(1)
    n_best_indexes = torch.topk(distances, n, largest=False)[1].tolist()
    rows = []
    for i in n_best_indexes:
        row = df.iloc[i]
        # use positional .iloc access: `row[0]` integer-position lookup on a
        # Series is deprecated in modern pandas
        rows.append(f'{row.iloc[0]} is a type of {row.iloc[1]}')
    return ', '.join(rows)
# uses MLM to determine category but also uses SBERT to find prompts similar to given text
def SBERT_MLM_object_categorizer(text, unmasker = unmasker, embeddings = data_embeddings, df = object_categorization_data):
    """Categorize text via masked language modelling with SBERT prompting.

    The mask-filling prompt is prefixed with the dataset rows most similar
    to the text (per SBERT_similarity_sampler), then the category is read
    off the model's top mask prediction, skipping a prediction that merely
    echoes the input.

    NOTE(review): the embeddings/df parameters are accepted but the sampler
    is invoked with its module-level defaults — confirm this is intended.

    Arguments:
        text         The text that will be categorized
        unmasker     A text unmasker pipeline from Hugging Face
        embeddings   The SBERT embeddings of the rows of df
        df           A pandas dataframe

    Requirements:
        A hugging face text unmasker pipeline. eg:
        unmasker = pipeline('fill-mask', model = 'roberta-large')
        The object_categorization_data that is contained in this folder as:
        'object_categorizer_dataset.csv'
        A torch tensor containing SBERT embeddings of the object_categorization_data eg:
        data_embeddings = make_data_embeddings(object_categorization_data)
        An instance of SBERT eg:
        SBERT = SentenceTransformer('paraphrase-mpnet-base-v2')
    """
    prompt = SBERT_similarity_sampler(text) + f' {text} is a type of <mask>'
    candidates = [pred['token_str'].lstrip() for pred in unmasker(prompt)]
    if text in candidates:
        candidates.remove(text)
    return f'this is a picture of {text}, a type of {candidates[0]}'
| [
"sentence_transformers.SentenceTransformer",
"pandas.read_csv",
"torch.topk",
"numpy.argmax",
"torch.argmax",
"transformers.pipeline",
"torch.norm",
"warnings.filterwarnings",
"torch.dot"
] | [((170, 203), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (193, 203), False, 'import warnings\n'), ((278, 314), 'transformers.pipeline', 'pipeline', (['"""zero-shot-classification"""'], {}), "('zero-shot-classification')\n", (286, 314), False, 'from transformers import pipeline\n'), ((1396, 1440), 'transformers.pipeline', 'pipeline', (['"""fill-mask"""'], {'model': '"""roberta-large"""'}), "('fill-mask', model='roberta-large')\n", (1404, 1440), False, 'from transformers import pipeline\n'), ((2269, 2314), 'pandas.read_csv', 'pd.read_csv', (['"""object_categorizer_dataset.csv"""'], {}), "('object_categorizer_dataset.csv')\n", (2280, 2314), True, 'import pandas as pd\n'), ((4394, 4441), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""paraphrase-mpnet-base-v2"""'], {}), "('paraphrase-mpnet-base-v2')\n", (4413, 4441), False, 'from sentence_transformers import SentenceTransformer\n'), ((1237, 1261), 'numpy.argmax', 'np.argmax', (["out['scores']"], {}), "(out['scores'])\n", (1246, 1261), True, 'import numpy as np\n'), ((6252, 6291), 'torch.dot', 'torch.dot', (['embeddings[i]', 'embedded_text'], {}), '(embeddings[i], embedded_text)\n', (6261, 6291), False, 'import torch\n'), ((6362, 6385), 'torch.argmax', 'torch.argmax', (['distances'], {}), '(distances)\n', (6374, 6385), False, 'import torch\n'), ((6293, 6318), 'torch.norm', 'torch.norm', (['embeddings[i]'], {}), '(embeddings[i])\n', (6303, 6318), False, 'import torch\n'), ((6319, 6344), 'torch.norm', 'torch.norm', (['embedded_text'], {}), '(embedded_text)\n', (6329, 6344), False, 'import torch\n'), ((8820, 8859), 'torch.topk', 'torch.topk', (['distances', 'n'], {'largest': '(False)'}), '(distances, n, largest=False)\n', (8830, 8859), False, 'import torch\n')] |
import numpy as np
from matplotlib import __version__ as mpl_version
from matplotlib import get_backend
from matplotlib.path import Path
from matplotlib.pyplot import close, subplots
from matplotlib.widgets import LassoSelector
from numpy import asanyarray, asarray, max, min, swapaxes
from packaging import version
from .utils import figure, ioff, nearest_idx
# Public API of this module, re-exported via star imports.
__all__ = [
    "heatmap_slicer",
    "zoom_factory",
    "panhandler",
    "image_segmenter",
]
def heatmap_slicer(
    X,
    Y,
    heatmaps,
    slices="horizontal",
    heatmap_names=None,
    max_cols=None,
    cmap=None,
    vmin=None,
    vmax=None,
    figsize=(18, 9),
    linecolor="k",
    labels=("X", "Y"),
    interaction_type="move",
    fig=None,
):
    """
    Compare horizontal and/or vertical slices across multiple arrays.

    Moving (or clicking) the mouse over a heatmap draws a slice indicator on
    every heatmap and plots the corresponding 1D traces on shared line axes.

    Parameters
    ----------
    X, Y : 1D array
        Coordinates of the heatmap grid.
    heatmaps : array_like
        must be 2-D or 3-D. If 3-D the last two axes should be (X,Y)
    slices : {'horizontal', 'vertical', 'both'}
        Direction to draw slice on heatmap. both will draw horizontal and vertical traces on the same
        plot.
    heatmap_names : (String, String, ...)
        An iterable with the names of the heatmaps. If provided it must have as many names as there are heatmaps
    max_cols : int, optional - not working yet :(
        Maximum number of columns to allow
    cmap : str or Colormap, optional
        A Colormap instance or registered colormap name. The colormap maps the C values to colors.
    vmin, vmax : float, optional
        The colorbar range. If None, suitable min/max values are automatically chosen by the Normalize instance.
    figsize : tuple, optional
        figure size to pass to `plt.subplots`
    linecolor : color, optional
        Color of the slice indicator lines.
    labels : (string, string), optional
        Axis labels applied to the heatmap axes.
    interaction_type : str
        Update on mouse movement or mouse click. Options are {'move','click'}
    fig : matplotlib figure, optional
        figure to use for the heatmap_slicer. Useful when embedding into a gui.
        If you are embedding into a gui make sure you set up the gui canvas first
        and then pass the figure to this function

    Returns
    -------
    fig : matplotlib figure
    ax : tuple of axes
    """
    # Decide which slice directions are active and where the line-trace axes
    # live (negative indices count from the end of the axes array).
    horiz = vert = False
    if slices == "both":
        num_line_axes = 2
        horiz_axis = -2
        vert_axis = -1
        horiz = vert = True
    else:
        horiz_axis = -1
        vert_axis = -1
        num_line_axes = 1
        if slices == "horizontal":
            horiz = True
        elif slices == "vertical":
            vert = True
        else:
            raise ValueError("Valid options for slices are {horizontal, vertical, both}")
    heatmaps = asarray(heatmaps)
    if heatmap_names is None:
        heatmap_names = [f"heatmap_{i}" for i in range(heatmaps.shape[0])]
    # Normalize to a 3-D stack (n_heatmaps, rows, cols) and validate names.
    if heatmaps.ndim == 3:
        num_axes = num_line_axes + heatmaps.shape[0]
        if type(heatmap_names) is str or (len(heatmap_names) != heatmaps.shape[0]):
            raise ValueError("need to provide at least as many heatmap_names as heatmaps")
    elif heatmaps.ndim == 2:
        heatmaps = heatmaps.reshape(1, *heatmaps.shape)
        if type(heatmap_names) is str:
            heatmap_names = [heatmap_names]
        num_axes = num_line_axes + 1
    else:
        raise ValueError(f"heatmaps must be 2D or 3D but is {heatmaps.ndim}D")
    if fig is None:
        fig, axes = subplots(1, num_axes, figsize=figsize)
    else:
        axes = fig.subplots(1, num_axes)
    hlines = []
    vlines = []
    init_idx = 0
    axes[0].set_ylabel(labels[1])
    X = asarray(X)
    Y = asarray(Y)
    # mpl pcolormesh from version 3.3+ handles len(X), len(Y) equal to Z shape
    # differently than <2. (Unquestionably better, but different enough to justify a shim)
    # https://github.com/matplotlib/matplotlib/pull/16258
    mpl_gr_33 = version.parse(mpl_version) >= version.parse("3.3")
    if mpl_gr_33:
        shading = "auto"
    else:
        shading = "flat"
    # Cell-center coordinates, used when X/Y describe cell edges ("flat").
    x_centered = X[:-1] + (X[1:] - X[:-1]) / 2
    y_centered = Y[:-1] + (Y[1:] - Y[:-1]) / 2
    # Draw each heatmap and seed the slice-indicator + data-trace artists.
    for i, ax in enumerate(axes[:-num_line_axes]):
        ax.pcolormesh(X, Y, heatmaps[i], cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
        ax.set_xlabel(labels[0])
        ax.set_title(heatmap_names[i])
        hmap_shape = asanyarray(heatmaps[i]).shape
        if i > 0:
            ax.set_yticklabels([])
        if horiz:
            # same_shape records whether X matches the heatmap's column count
            # (centers) or is one longer (edges).
            same_shape = X.shape[0] == hmap_shape[1]
            if same_shape:
                x = X
            else:
                x = x_centered
            data_line = axes[horiz_axis].plot(
                x, heatmaps[i, init_idx, :], label=f"{heatmap_names[i]}"
            )[0]
            hlines.append((same_shape, ax.axhline(Y[init_idx], color=linecolor), data_line))
        if vert:
            same_shape = Y.shape[0] == hmap_shape[0]
            if same_shape:
                y = Y
            else:
                y = y_centered
            data_line = axes[vert_axis].plot(
                y, heatmaps[i, :, init_idx], label=f"{heatmap_names[i]}"
            )[0]
            vlines.append((same_shape, ax.axvline(X[init_idx], color=linecolor), data_line))
    # Shared y-range for all line traces (numpy min/max over the full stack).
    minimum = min(heatmaps)
    maximum = max(heatmaps)
    if vert:
        axes[vert_axis].set_title("Vertical")
        axes[vert_axis].set_ylim([minimum, maximum])
        axes[vert_axis].legend()
    if horiz:
        axes[horiz_axis].set_title("Horizontal")
        axes[horiz_axis].set_ylim([minimum, maximum])
        axes[horiz_axis].legend()

    def _gen_idxs(orig, centered, same_shape, event_data):
        """
        Map an event coordinate to (coordinate array, data index, display index),
        accounting for the pre/post matplotlib-3.3 pcolormesh conventions.

        is there a better way? probably, but this gets the job done
        so here we are...
        """
        if same_shape:
            data_idx = nearest_idx(orig, event_data)
            if mpl_gr_33:
                disp_idx = nearest_idx(orig, event_data)
                arr = orig
            else:
                disp_idx = nearest_idx(centered, event_data)
                arr = centered
        else:
            disp_idx = nearest_idx(centered, event_data)
            data_idx = nearest_idx(centered, event_data)
            arr = centered
        return arr, data_idx, disp_idx

    def update_lines(event):
        """Mouse callback: move the slice indicators and refresh the traces."""
        if event.inaxes in axes[:-num_line_axes]:
            y = None
            for i, (same_shape, display_line, data_line) in enumerate(hlines):
                if y is None:
                    # resolve indices once; all heatmaps share the same grid
                    y, data_idx, disp_idx = _gen_idxs(Y, y_centered, same_shape, event.ydata)
                display_line.set_ydata(y[disp_idx])
                data_line.set_ydata(heatmaps[i, data_idx])
            x = None
            for i, (same_shape, display_line, data_line) in enumerate(vlines):
                if x is None:
                    x, data_idx, disp_idx = _gen_idxs(X, x_centered, same_shape, event.xdata)
                display_line.set_xdata(x[disp_idx])
                data_line.set_ydata(heatmaps[i, :, data_idx])
            fig.canvas.draw_idle()

    if interaction_type == "move":
        fig.canvas.mpl_connect("motion_notify_event", update_lines)
    elif interaction_type == "click":
        fig.canvas.mpl_connect("button_press_event", update_lines)
    else:
        # invalid mode: close the figure we may have just created, then raise
        close(fig)
        raise ValueError(
            f"{interaction_type} is not a valid option for interaction_type, valid options are 'click' or 'move'"
        )
    return fig, axes
# based on https://gist.github.com/tacaswell/3144287
def zoom_factory(ax, base_scale=1.1):
    """
    Add ability to zoom with the scroll wheel.

    parameters
    ----------
    ax : matplotlib axes object
        axis on which to implement scroll to zoom
    base_scale : float
        how much zoom on each tick of scroll wheel

    returns
    -------
    disconnect_zoom : function
        call this to disconnect the scroll listener
    """

    def _span(lim):
        return lim[1] - lim[0]

    fig = ax.get_figure()  # get the figure of interest
    fig.canvas.capture_scroll = True
    # an interactive backend may conceivably lack a toolbar, so guard access
    has_toolbar = hasattr(fig.canvas, "toolbar") and fig.canvas.toolbar is not None
    if has_toolbar:
        toolbar = fig.canvas.toolbar
        toolbar.push_current()
    # remember the starting view so zooming out can never exceed it
    orig_xlim = ax.get_xlim()
    orig_ylim = ax.get_ylim()
    orig_yrange = _span(orig_ylim)
    orig_xrange = _span(orig_xlim)
    orig_center = ((orig_xlim[0] + orig_xlim[1]) / 2, (orig_ylim[0] + orig_ylim[1]) / 2)

    def _on_scroll(event):
        if event.inaxes is not ax:
            return
        cur_xlim = ax.get_xlim()
        cur_ylim = ax.get_ylim()
        xdata = event.xdata  # cursor x location
        ydata = event.ydata  # cursor y location
        if event.button == "up":
            scale_factor = base_scale  # zoom in
        elif event.button == "down":
            scale_factor = 1 / base_scale  # zoom out
        else:
            scale_factor = 1  # should never happen
        # shrink/grow the view around the cursor position
        new_xlim = [
            xdata - (xdata - cur_xlim[0]) / scale_factor,
            xdata + (cur_xlim[1] - xdata) / scale_factor,
        ]
        new_ylim = [
            ydata - (ydata - cur_ylim[0]) / scale_factor,
            ydata + (cur_ylim[1] - ydata) / scale_factor,
        ]
        new_yrange = _span(new_ylim)
        new_xrange = _span(new_xlim)
        # clamp: never zoom out beyond the original view, re-centered
        if abs(new_yrange) > abs(orig_yrange):
            new_ylim = orig_center[1] - new_yrange / 2, orig_center[1] + new_yrange / 2
        if abs(new_xrange) > abs(orig_xrange):
            new_xlim = orig_center[0] - new_xrange / 2, orig_center[0] + new_xrange / 2
        ax.set_xlim(new_xlim)
        ax.set_ylim(new_ylim)
        if has_toolbar:
            toolbar.push_current()
        ax.figure.canvas.draw_idle()  # force re-draw

    cid = fig.canvas.mpl_connect("scroll_event", _on_scroll)

    def disconnect_zoom():
        fig.canvas.mpl_disconnect(cid)

    # return the disconnect function
    return disconnect_zoom
class panhandler:
    """
    Enable panning a plot with any mouse button.
    button determines which button will be used (default right click)
    Left: 1
    Middle: 2
    Right: 3
    """
    def __init__(self, fig, button=3):
        self.fig = fig
        self._id_drag = None  # cid of the motion_notify_event callback, if connected
        self._xypress = []  # (axes, index) pairs currently being panned
        self.button = button
        self.fig.canvas.mpl_connect("button_press_event", self.press)
        self.fig.canvas.mpl_connect("button_release_event", self.release)
    def _cancel_action(self):
        # Forget any in-progress pan and stop listening for mouse motion.
        self._xypress = []
        if self._id_drag:
            self.fig.canvas.mpl_disconnect(self._id_drag)
            self._id_drag = None
    def press(self, event):
        # Start panning every pannable axes when the configured button is pressed.
        if event.button != self.button:
            self._cancel_action()
            return
        x, y = event.x, event.y
        self._xypress = []
        for i, a in enumerate(self.fig.get_axes()):
            if (
                x is not None
                and y is not None
                and a.in_axes(event)
                and a.get_navigate()
                and a.can_pan()
            ):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                self._id_drag = self.fig.canvas.mpl_connect("motion_notify_event", self._mouse_move)
    def release(self, event):
        # BUG FIX: the original called _cancel_action() *first*, which cleared
        # self._xypress, so the a.end_pan() loop below never ran and axes were
        # left in a panning state (it then also redundantly disconnected a None
        # cid).  End the active pans first, then cancel.
        for a, _ind in self._xypress:
            a.end_pan()
        self._cancel_action()
    def _mouse_move(self, event):
        for a, _ind in self._xypress:
            # safer to use a fixed button rather than the current one:
            # multiple buttons can get pressed during motion...
            a.drag_pan(1, event.key, event.x, event.y)
        self.fig.canvas.draw_idle()
import matplotlib.cm as cm
from matplotlib.colors import TABLEAU_COLORS, XKCD_COLORS, to_rgba_array
class image_segmenter:
    """
    Manually segment an image with the lasso selector.

    Displays the image in a new figure, overlays a per-class RGBA mask, and
    lets the user paint (or erase) class regions by drawing lasso loops.
    The integer class mask is available as ``self.mask``.
    """
    def __init__(
        self,
        img,
        nclasses=1,
        mask=None,
        mask_colors=None,
        mask_alpha=0.75,
        figsize=(10, 10),
        cmap="viridis",
    ):
        """
        parameters
        ----------
        img : array_like
            A valid argument to imshow
        nclasses : int, default 1
        mask: arraylike, optional
            If you want to pre-seed the mask
        mask_colors : None, color, or array of colors, optional
            the colors to use for each class. Unselected regions will always be totally transparent
        mask_alpha : float, default .75
            The alpha values to use for selected regions. This will always override the alpha values
            in mask_colors if any were passed
        figsize : (float, float), optional
            passed to plt.figure
        cmap : 'string'
            the colormap to use if img has shape (X,Y)
        """
        # ensure mask colors is iterable and the same length as the number of classes
        # choose colors from default color cycle?
        self.mask_alpha = mask_alpha
        if mask_colors is None:
            # this will break if there are more than 10 classes
            if nclasses <= 10:
                self.mask_colors = to_rgba_array(list(TABLEAU_COLORS)[:nclasses])
            else:
                # up to 949 classes. Hopefully that is always enough....
                self.mask_colors = to_rgba_array(list(XKCD_COLORS)[:nclasses])
        else:
            self.mask_colors = to_rgba_array(np.atleast_1d(mask_colors))
            # should probably check the shape here
        # force a uniform alpha over all class colors (RGBA last channel)
        self.mask_colors[:, -1] = self.mask_alpha
        self._img = np.asarray(img)
        if mask is None:
            # class 0 means "unlabeled"; mask has the image's (H, W) shape
            self.mask = np.zeros(self._img.shape[:2])
        else:
            self.mask = mask
        # RGBA overlay rendered on top of the image
        self._overlay = np.zeros((*self._img.shape[:2], 4))
        self.nclasses = nclasses
        # paint any pre-seeded mask into the overlay (class i -> color i-1)
        for i in range(nclasses + 1):
            idx = self.mask == i
            if i == 0:
                self._overlay[idx] = [0, 0, 0, 0]
            else:
                self._overlay[idx] = self.mask_colors[i - 1]
        # ioff / figure / get_backend / LassoSelector come from module-level
        # matplotlib imports outside this chunk; ioff suppresses immediate display
        with ioff:
            self.fig = figure(figsize=figsize)
            self.ax = self.fig.gca()
            self.displayed = self.ax.imshow(self._img)
            self._mask = self.ax.imshow(self._overlay)
        # NOTE(review): LassoSelector's `lineprops` kwarg was renamed `props`
        # in Matplotlib 3.5 and removed in 3.7 -- confirm the pinned version.
        lineprops = {"color": "black", "linewidth": 1, "alpha": 0.8}
        # blitting is broken under the ipympl (Jupyter widget) backend
        useblit = False if "ipympl" in get_backend().lower() else True
        self.lasso = LassoSelector(self.ax, self._onselect, lineprops=lineprops, useblit=useblit)
        self.lasso.set_visible(True)
        # flat (x, y) coordinates of every pixel, used for point-in-polygon tests
        pix_x = np.arange(self._img.shape[0])
        pix_y = np.arange(self._img.shape[1])
        xv, yv = np.meshgrid(pix_y, pix_x)
        self.pix = np.vstack((xv.flatten(), yv.flatten())).T
        # attach pan (any-button) and scroll-zoom helpers defined in this module
        self.ph = panhandler(self.fig)
        self.disconnect_zoom = zoom_factory(self.ax)
        self.current_class = 1  # class painted by the next lasso selection
        self.erasing = False    # when True, lassoed pixels are reset to class 0
    def _onselect(self, verts):
        # Lasso callback: rasterize the drawn polygon onto the pixel grid and
        # paint (or erase) the enclosed pixels in both mask and overlay.
        self.verts = verts
        p = Path(verts)
        self.indices = p.contains_points(self.pix, radius=0).reshape(self.mask.shape)
        if self.erasing:
            self.mask[self.indices] = 0
            self._overlay[self.indices] = [0, 0, 0, 0]
        else:
            self.mask[self.indices] = self.current_class
            self._overlay[self.indices] = self.mask_colors[self.current_class - 1]
        self._mask.set_data(self._overlay)
        self.fig.canvas.draw_idle()
    def _ipython_display_(self):
        # Render the interactive canvas when the object is displayed in Jupyter.
        display(self.fig.canvas)
| [
"matplotlib.path.Path",
"matplotlib.widgets.LassoSelector",
"numpy.asarray",
"matplotlib.get_backend",
"numpy.max",
"numpy.asanyarray",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.min",
"numpy.meshgrid",
"packaging.version.parse",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.a... | [((3322, 3339), 'numpy.asarray', 'asarray', (['heatmaps'], {}), '(heatmaps)\n', (3329, 3339), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((4218, 4228), 'numpy.asarray', 'asarray', (['X'], {}), '(X)\n', (4225, 4228), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((4237, 4247), 'numpy.asarray', 'asarray', (['Y'], {}), '(Y)\n', (4244, 4247), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((5847, 5860), 'numpy.min', 'min', (['heatmaps'], {}), '(heatmaps)\n', (5850, 5860), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((5875, 5888), 'numpy.max', 'max', (['heatmaps'], {}), '(heatmaps)\n', (5878, 5888), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((4036, 4074), 'matplotlib.pyplot.subplots', 'subplots', (['(1)', 'num_axes'], {'figsize': 'figsize'}), '(1, num_axes, figsize=figsize)\n', (4044, 4074), False, 'from matplotlib.pyplot import close, subplots\n'), ((4492, 4518), 'packaging.version.parse', 'version.parse', (['mpl_version'], {}), '(mpl_version)\n', (4505, 4518), False, 'from packaging import version\n'), ((4522, 4542), 'packaging.version.parse', 'version.parse', (['"""3.3"""'], {}), "('3.3')\n", (4535, 4542), False, 'from packaging import version\n'), ((14774, 14789), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (14784, 14789), True, 'import numpy as np\n'), ((14938, 14973), 'numpy.zeros', 'np.zeros', (['(*self._img.shape[:2], 4)'], {}), '((*self._img.shape[:2], 4))\n', (14946, 14973), True, 'import numpy as np\n'), ((15605, 15681), 'matplotlib.widgets.LassoSelector', 'LassoSelector', (['self.ax', 'self._onselect'], {'lineprops': 'lineprops', 'useblit': 'useblit'}), '(self.ax, self._onselect, lineprops=lineprops, useblit=useblit)\n', (15618, 15681), False, 'from matplotlib.widgets import LassoSelector\n'), ((15736, 15765), 'numpy.arange', 'np.arange', (['self._img.shape[0]'], {}), '(self._img.shape[0])\n', (15745, 
15765), True, 'import numpy as np\n'), ((15782, 15811), 'numpy.arange', 'np.arange', (['self._img.shape[1]'], {}), '(self._img.shape[1])\n', (15791, 15811), True, 'import numpy as np\n'), ((15829, 15854), 'numpy.meshgrid', 'np.meshgrid', (['pix_y', 'pix_x'], {}), '(pix_y, pix_x)\n', (15840, 15854), True, 'import numpy as np\n'), ((16141, 16152), 'matplotlib.path.Path', 'Path', (['verts'], {}), '(verts)\n', (16145, 16152), False, 'from matplotlib.path import Path\n'), ((4951, 4974), 'numpy.asanyarray', 'asanyarray', (['heatmaps[i]'], {}), '(heatmaps[i])\n', (4961, 4974), False, 'from numpy import asanyarray, asarray, max, min, swapaxes\n'), ((7864, 7874), 'matplotlib.pyplot.close', 'close', (['fig'], {}), '(fig)\n', (7869, 7874), False, 'from matplotlib.pyplot import close, subplots\n'), ((14840, 14869), 'numpy.zeros', 'np.zeros', (['self._img.shape[:2]'], {}), '(self._img.shape[:2])\n', (14848, 14869), True, 'import numpy as np\n'), ((14624, 14650), 'numpy.atleast_1d', 'np.atleast_1d', (['mask_colors'], {}), '(mask_colors)\n', (14637, 14650), True, 'import numpy as np\n'), ((15552, 15565), 'matplotlib.get_backend', 'get_backend', ([], {}), '()\n', (15563, 15565), False, 'from matplotlib import get_backend\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
from keras.preprocessing import image
from keras.layers import Dense
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import BatchNormalization
from keras.layers import Dropout
class Ui_MainWindow(object):
    """Qt UI for a Gujarati character recognition demo.

    Builds a window with an image preview, a Browse button (pick an image),
    a Classify button (load a saved CNN from model.json/model.h5 and predict),
    and a Training button (build, train and save the CNN from image folders).
    """
    def setupUi(self, MainWindow):
        """Create all widgets, position them, and connect the button signals."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.BrowseImage = QtWidgets.QPushButton(self.centralwidget)
        self.BrowseImage.setGeometry(QtCore.QRect(160, 370, 151, 51))
        self.BrowseImage.setObjectName("BrowseImage")
        # preview area for the selected image
        self.imageLbl = QtWidgets.QLabel(self.centralwidget)
        self.imageLbl.setGeometry(QtCore.QRect(200, 80, 361, 261))
        self.imageLbl.setFrameShape(QtWidgets.QFrame.Box)
        self.imageLbl.setText("")
        self.imageLbl.setObjectName("imageLbl")
        # title banner
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(110, 20, 621, 20))
        font = QtGui.QFont()
        font.setFamily("Courier New")
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.Classify = QtWidgets.QPushButton(self.centralwidget)
        self.Classify.setGeometry(QtCore.QRect(160, 450, 151, 51))
        self.Classify.setObjectName("Classify")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(430, 370, 111, 16))
        self.label.setObjectName("label")
        self.Training = QtWidgets.QPushButton(self.centralwidget)
        self.Training.setGeometry(QtCore.QRect(400, 450, 151, 51))
        self.Training.setObjectName("Training")
        # read-only-ish text box used to show the recognized class / status
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(400, 390, 211, 51))
        self.textEdit.setObjectName("textEdit")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.BrowseImage.clicked.connect(self.loadImage)# open a file dialog and preview the image
        self.Classify.clicked.connect(self.classifyFunction)# load the saved model and predict
        self.Training.clicked.connect(self.trainingFunction)  # build, train and save the CNN
    def retranslateUi(self, MainWindow):
        """Set the (translatable) display text of every widget."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.BrowseImage.setText(_translate("MainWindow", "Browse Image"))
        self.label_2.setText(_translate("MainWindow", "GUJARATI CHARACTER RECOGNITION USING CNN"))
        self.Classify.setText(_translate("MainWindow", "Classify"))
        self.label.setText(_translate("MainWindow", "Recognized Class"))
        self.Training.setText(_translate("MainWindow", "Training"))
    def loadImage(self):
        """Ask the user for an image file, remember its path, and preview it."""
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Select Image", "", "Image Files (*.png *.jpg *jpeg *.bmp);;All Files (*)") # Ask for file
        if fileName: # If the user gives a file
            print(fileName)
            self.file=fileName  # reused by classifyFunction
            pixmap = QtGui.QPixmap(fileName) # Setup pixmap with the provided image
            pixmap = pixmap.scaled(self.imageLbl.width(), self.imageLbl.height(), QtCore.Qt.KeepAspectRatio) # Scale pixmap
            self.imageLbl.setPixmap(pixmap) # Set the pixmap onto the label
            self.imageLbl.setAlignment(QtCore.Qt.AlignCenter) # Align the label to center
    def classifyFunction(self):
        """Load the serialized CNN and classify the previously browsed image.

        Reads model.json + model.h5 from the working directory and writes the
        predicted class name into the text box.  Requires loadImage to have
        been used first (self.file is set there).
        """
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk");
        # class names in the order the model was trained on (45 classes,
        # matching the Dense(45) output layer built in trainingFunction)
        label=["sunna","ek","das","be","tran","char","panc","cha","sat","at","nav","ALA","ANA","B","BHA","CH","CHH","D","DA","DH","DHA","F","G","GH","GNA","H","J","JH","K","KH","KSH","L","M","N","P","R","S","SH","SHH","T","TA","TH","THA","V","Y"]
        #label=["fifty","fivehundred","hundred","ten","twenty","twohundred"]
        path2=self.file
        print(path2)
        # the network expects 128x128 inputs with a leading batch dimension
        test_image = image.load_img(path2, target_size = (128, 128))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis = 0)
        result = loaded_model.predict(test_image)
        fresult=np.max(result)  # highest class probability (computed but not displayed)
        label2=label[result.argmax()]
        print(label2)
        self.textEdit.setText(label2)
    def trainingFunction(self):
        """Build a small CNN, train it from the directories on disk, and save it.

        Saves the architecture to model.json and the weights to model.h5,
        which classifyFunction later reloads.
        """
        self.textEdit.setText("Training under process...")
        #basic cnn: 5 conv/pool blocks followed by a dense classifier
        model = Sequential()
        model.add(Conv2D(32, kernel_size = (3, 3), activation='relu', input_shape=(128,128, 1)))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(BatchNormalization())
        model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(BatchNormalization())
        model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(BatchNormalization())
        model.add(Conv2D(96, kernel_size=(3,3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(BatchNormalization())
        model.add(Conv2D(32, kernel_size=(3,3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(45, activation = 'softmax'))  # 45 output classes
        model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
        # NOTE(review): the training generator uses rescale=None while the
        # validation generator below rescales by 1/255 -- the two pipelines
        # normalize pixel values differently; confirm this is intended.
        train_datagen = ImageDataGenerator(rescale = None,
                                           shear_range = 0.2,
                                           zoom_range = 0.2,
                                           horizontal_flip = True)
        test_datagen = ImageDataGenerator(rescale = 1./255)
        # NOTE(review): hard-coded absolute dataset paths below.
        training_set = train_datagen.flow_from_directory('D:/python/dl programs/pyqtt-gui/Code-15/Dataset/train',
                                                         target_size = (128, 128),
                                                         batch_size = 8,
                                                         class_mode = 'categorical')
        #print(test_datagen);
        labels = (training_set.class_indices)
        print(labels)
        test_set = test_datagen.flow_from_directory('D:/python/dl programs/pyqtt-gui/Code-15/Dataset/val',
                                                    target_size = (128, 128),
                                                    batch_size = 8,
                                                    class_mode = 'categorical')
        labels2 = (test_set.class_indices)
        print(labels2)
        #self.textEdit.setText(labels2)
        # fit_generator is deprecated in modern Keras (use fit) -- kept as-is
        model.fit_generator(training_set,
                            steps_per_epoch = 100,
                            epochs = 10,
                            validation_data = test_set,
                            validation_steps = 125)
        # Part 3 - Making new predictions
        model_json=model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")
        self.textEdit.setText("Saved model to disk")
if __name__ == "__main__":
    import sys

    # Bootstrap the Qt application, attach the generated UI to a fresh main
    # window, and hand control to the Qt event loop until the window closes.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(qt_app.exec_())
| [
"keras.preprocessing.image.img_to_array",
"keras.layers.Conv2D",
"keras.preprocessing.image.ImageDataGenerator",
"PyQt5.QtWidgets.QApplication",
"keras.layers.Dense",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QTextEdit",
"numpy.max",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWid... | [((8485, 8517), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (8507, 8517), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8535, 8558), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (8556, 8558), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((625, 654), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (642, 654), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((740, 781), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (761, 781), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((930, 966), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (946, 966), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1197, 1233), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1213, 1233), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1314, 1327), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1325, 1327), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1555, 1596), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1576, 1596), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1733, 1769), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1749, 1769), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1900, 1941), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1921, 1941), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2081, 2120), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2100, 2120), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((2315, 2345), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (2333, 2345), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2523, 2555), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (2543, 2555), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2702, 2751), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (2739, 2751), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3636, 3759), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', '"""Select Image"""', '""""""', '"""Image Files (*.png *.jpg *jpeg *.bmp);;All Files (*)"""'], {}), "(None, 'Select Image', '',\n 'Image Files (*.png *.jpg *jpeg *.bmp);;All Files (*)')\n", (3673, 3759), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4423, 4457), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (4438, 4457), False, 'from keras.models import model_from_json\n'), ((4973, 5018), 'keras.preprocessing.image.load_img', 'image.load_img', (['path2'], {'target_size': '(128, 128)'}), '(path2, target_size=(128, 128))\n', (4987, 5018), False, 'from keras.preprocessing import image\n'), ((5050, 5080), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (5068, 5080), False, 'from keras.preprocessing import image\n'), ((5102, 5136), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (5116, 5136), True, 'import numpy as np\n'), ((5214, 5228), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (5220, 5228), True, 'import numpy as np\n'), ((5454, 5466), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5464, 5466), False, 'from keras.models import Sequential\n'), ((6605, 6696), 
'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': 'None', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=None, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (6623, 6696), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6854, 6891), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (6872, 6891), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((819, 850), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(370)', '(151)', '(51)'], {}), '(160, 370, 151, 51)\n', (831, 850), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1001, 1032), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(80)', '(361)', '(261)'], {}), '(200, 80, 361, 261)\n', (1013, 1032), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1267, 1297), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(20)', '(621)', '(20)'], {}), '(110, 20, 621, 20)\n', (1279, 1297), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1631, 1662), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(450)', '(151)', '(51)'], {}), '(160, 450, 151, 51)\n', (1643, 1662), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1801, 1832), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(430)', '(370)', '(111)', '(16)'], {}), '(430, 370, 111, 16)\n', (1813, 1832), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1976, 2007), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(400)', '(450)', '(151)', '(51)'], {}), '(400, 450, 151, 51)\n', (1988, 2007), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2155, 2186), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(400)', '(390)', '(211)', '(51)'], {}), '(400, 390, 211, 51)\n', (2167, 2186), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2379, 2406), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(800)', '(26)'], {}), '(0, 0, 800, 
26)\n', (2391, 2406), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3899, 3922), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['fileName'], {}), '(fileName)\n', (3912, 3922), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5485, 5561), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(128, 128, 1)'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=(128, 128, 1))\n", (5491, 5561), False, 'from keras.layers import Conv2D\n'), ((5582, 5612), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5594, 5612), False, 'from keras.layers import MaxPooling2D\n'), ((5631, 5651), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5649, 5651), False, 'from keras.layers import BatchNormalization\n'), ((5671, 5720), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (5677, 5720), False, 'from keras.layers import Conv2D\n'), ((5739, 5769), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5751, 5769), False, 'from keras.layers import MaxPooling2D\n'), ((5788, 5808), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5806, 5808), False, 'from keras.layers import BatchNormalization\n'), ((5828, 5877), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (5834, 5877), False, 'from keras.layers import Conv2D\n'), ((5896, 5926), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5908, 5926), False, 'from keras.layers import MaxPooling2D\n'), ((5945, 5965), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5963, 5965), False, 'from keras.layers import BatchNormalization\n'), ((5985, 
6034), 'keras.layers.Conv2D', 'Conv2D', (['(96)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(96, kernel_size=(3, 3), activation='relu')\n", (5991, 6034), False, 'from keras.layers import Conv2D\n'), ((6053, 6083), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6065, 6083), False, 'from keras.layers import MaxPooling2D\n'), ((6102, 6122), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6120, 6122), False, 'from keras.layers import BatchNormalization\n'), ((6142, 6191), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), activation='relu')\n", (6148, 6191), False, 'from keras.layers import Conv2D\n'), ((6210, 6240), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6222, 6240), False, 'from keras.layers import MaxPooling2D\n'), ((6259, 6279), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6277, 6279), False, 'from keras.layers import BatchNormalization\n'), ((6299, 6311), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6306, 6311), False, 'from keras.layers import Dropout\n'), ((6331, 6340), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6338, 6340), False, 'from keras.layers import Flatten\n'), ((6360, 6389), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (6365, 6389), False, 'from keras.layers import Dense\n'), ((6409, 6421), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (6416, 6421), False, 'from keras.layers import Dropout\n'), ((6441, 6472), 'keras.layers.Dense', 'Dense', (['(45)'], {'activation': '"""softmax"""'}), "(45, activation='softmax')\n", (6446, 6472), False, 'from keras.layers import Dense\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from cvxopt import matrix, solvers
from datetime import datetime, date
import quandl
# Ticker universe for the portfolio study.
assets = ['AAPL', # Apple
          'KO', # Coca-Cola
          'DIS', # Disney
          'XOM', # Exxon Mobil
          'JPM', # JPMorgan Chase
          'MCD', # McDonald's
          'WMT'] # Walmart
# Pull 2015-2017 adjusted closes from Quandl's WIKI dataset and assemble one
# price column per ticker (the authtoken placeholder must be replaced).
hist_data = pd.concat(
    {asset: quandl.get('wiki/' + asset, start_date='2015-01-01', end_date='2017-12-31', authtoken='<PASSWORD>')['Adj. Close']
     for asset in assets},
    axis=1,
)
# Daily log returns: log(P_t / P_{t-1}); the first row is NaN and is dropped.
hist_return = np.log(hist_data / hist_data.shift()).dropna()
# Sample statistics of the daily log returns: mean vector (as a one-column
# frame named 'mu'), covariance matrix, and correlation matrix.
hist_mean = hist_return.mean(axis=0).to_frame('mu')
hist_cov = hist_return.cov()
hist_corr = hist_return.corr()
print(hist_mean.transpose())
print(hist_cov)
print(hist_corr)
# Monte-Carlo sample of random fully-invested portfolios (weights sum to 1).
n_portfolios = 5000
# arrays holding the annualized return / volatility of each random portfolio
port_returns = np.zeros(n_portfolios)
port_stdevs = np.zeros(n_portfolios)
# hoisted loop invariants; DataFrame.as_matrix() was removed in pandas 1.0,
# so use to_numpy() instead
mu_vec = hist_mean.to_numpy()
ann_vol = np.sqrt(250)  # 250 business days per year
for i in range(n_portfolios):
    w = np.random.rand(len(assets)) # random weights
    w = w / w.sum() # weights sum to 1
    # annualized return and volatility of this random portfolio
    port_returns[i] = (np.dot(w.T, mu_vec) * 250).item()
    port_stdevs[i] = np.sqrt(np.dot(w.T, np.dot(hist_cov, w))) * ann_vol
plt.plot(port_stdevs, port_returns, 'o', markersize=6)
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.title('Return and Standard Deviation of Randomly Generated Portfolios')
plt.show()
# Global Minimum Variance (GMV) portfolio -- closed form:
#   w_gmv = (Sigma^-1 1) / (1' Sigma^-1 1)
# BUG FIX: the original negated np.linalg.inv(hist_cov).  The sign cancels in
# every ratio that uses hist_cov_inv below, so outputs were unchanged, but the
# plain inverse is the correct textbook quantity and far less confusing.
hist_cov_inv = np.linalg.inv(hist_cov)
one_vec = np.ones(len(assets))
w_gmv = np.dot(hist_cov_inv, one_vec) / np.dot(one_vec, np.dot(hist_cov_inv, one_vec))
w_gmv_df = pd.DataFrame(data=w_gmv).transpose()
w_gmv_df.columns = assets
# annualized volatility of the GMV portfolio (250 business days)
stdev_gmv = np.sqrt(np.dot(w_gmv.T, np.dot(hist_cov, w_gmv))) * np.sqrt(250)
print(w_gmv_df)
print(stdev_gmv)
# Global Minimum Variance (GMV) -- numerical, as a quadratic program:
#   minimize w' Sigma w  subject to  1'w = 1
# DataFrame.as_matrix() was removed in pandas 1.0 -> to_numpy();
# np.asscalar() was removed in NumPy 1.23 -> ndarray.item().
P = matrix(hist_cov.to_numpy())
q = matrix(np.zeros((len(assets), 1)))
A = matrix(1.0, (1, len(assets)))  # equality constraint: sum of weights
b = matrix(1.0)
w_gmv_v2 = np.array(solvers.qp(P, q, A=A, b=b)['x'])
w_gmv_df_v2 = pd.DataFrame(w_gmv_v2).transpose()
w_gmv_df_v2.columns = assets
stdev_gmv_v2 = np.sqrt(np.dot(w_gmv_v2.T, np.dot(hist_cov, w_gmv_v2))) * np.sqrt(250)
print(w_gmv_df_v2)
print(stdev_gmv_v2.item())
# Maximum-return portfolio -- closed form two-fund solution for target mu_o.
# np.asscalar() was removed in NumPy 1.23 -> use ndarray.item();
# DataFrame.as_matrix() was removed in pandas 1.0 -> use to_numpy();
# np.matrix is deprecated -> plain ndarrays.
mu_o = hist_mean.to_numpy().max().item()  # highest single-asset mean (MCD)
mu_vec = hist_mean.to_numpy()  # (n_assets, 1) column of daily mean returns
# scalars of the symmetric 2x2 system from classic mean-variance algebra
a_mm = np.dot(mu_vec.T, np.dot(hist_cov_inv, mu_vec)).item()
a_m1 = np.dot(mu_vec.T, np.dot(hist_cov_inv, one_vec)).item()
a_11 = np.dot(one_vec.T, np.dot(hist_cov_inv, one_vec)).item()
A = np.array([[a_mm, a_m1], [a_m1, a_11]])
B = np.hstack([mu_vec, one_vec.reshape(len(assets), 1)])
y = np.array([[mu_o], [1.0]])
w_max_ret = np.dot(np.dot(np.dot(hist_cov_inv, B), np.linalg.inv(A)), y)
w_max_ret_df = pd.DataFrame(w_max_ret).T
w_max_ret_df.columns = assets
print(w_max_ret_df)
# Maximum-return portfolio -- numerical QP with two equality constraints:
#   minimize w' Sigma w  subject to  mu'w = mu_o  and  1'w = 1
# DataFrame.as_matrix() was removed in pandas 1.0 -> use to_numpy().
P = matrix(hist_cov.to_numpy())
q = matrix(np.zeros((len(assets), 1)))
A = matrix(np.hstack([hist_mean.to_numpy(), one_vec.reshape(len(assets), 1)]).transpose())
b = matrix([mu_o, 1])
w_max_ret_v2 = np.array(solvers.qp(P, q, A=A, b=b)['x'])
w_max_ret_df_v2 = pd.DataFrame(w_max_ret_v2).transpose()
w_max_ret_df_v2.columns = assets
print(w_max_ret_df_v2)
# Efficient frontier: solve the constrained QP for a grid of target returns.
# DataFrame.as_matrix() was removed in pandas 1.0 -> to_numpy();
# np.asscalar() was removed in NumPy 1.23 -> ndarray.item().
N = 100
ef_left = hist_mean.to_numpy().min().item()   # minimum single-asset mean return
ef_right = hist_mean.to_numpy().max().item()  # maximum single-asset mean return
target_returns = np.linspace(ef_left, ef_right, N) # N target returns
mu_vec = hist_mean.to_numpy()
optimal_weights = [ solvers.qp(P, q, A=A, b=matrix([t,1]))['x'] for t in target_returns ] # QP solver
ef_returns = [ (np.dot(w.T, mu_vec) * 250).item() for w in optimal_weights ] # annualized
ef_risks = [ np.sqrt(np.dot(w.T, np.dot(hist_cov, w)) * 250).item() for w in optimal_weights ]
plt.plot(port_stdevs, port_returns, 'o', markersize=6, label='Candidate Market Portfolio')
# the original 'y-o' fmt colour was silently overridden by color='green';
# drop the fmt colour so the spec is unambiguous (rendered colour unchanged)
plt.plot(ef_risks, ef_returns, '-o', color='green', markersize=8, label='Efficient Frontier')
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.title('Efficient Frontier and Candidate Portfolios')
plt.legend(loc='best')
plt.show()
# Allocation transition: how the optimal weights shift along the frontier.
# NOTE(review): optimal_weights holds cvxopt matrices; relies on pandas
# accepting them in the DataFrame constructor -- confirm with pinned versions.
transition_data = pd.DataFrame(optimal_weights)
transition_data.columns = assets
# only the first 50 target returns are stacked
plt.stackplot(range(50), transition_data.iloc[:50,:].T, labels=assets) # the other half has negative weights
plt.legend(loc='upper left')
plt.margins(0, 0)
plt.title('Allocation Transition Matrix')
plt.show()
# Maximum Sharpe-ratio (tangency) portfolio -- closed form:
#   w ~ Sigma^-1 (mu - r_f);  normalized so the weights sum to 1.
# DataFrame.as_matrix() was removed in pandas 1.0 -> use to_numpy().
r_f = 0.01  # annual risk-free rate
excess = hist_mean.to_numpy() - r_f / 250  # daily excess mean returns (hoisted)
w_sharpe = np.dot(hist_cov_inv, excess) / np.dot(one_vec, np.dot(hist_cov_inv, excess))
w_sharpe_df = pd.DataFrame(w_sharpe).T
w_sharpe_df.columns = assets
mu_sharpe = np.dot(w_sharpe.T, hist_mean.to_numpy()) * 250  # annualized return
stdev_sharpe = np.sqrt(np.dot(w_sharpe.T, np.dot(hist_cov, w_sharpe))) * np.sqrt(250)
sharpe_ratio = (mu_sharpe - r_f) / stdev_sharpe
print(w_sharpe_df)
print(mu_sharpe)
print(stdev_sharpe)
print(sharpe_ratio)
from scipy.optimize import minimize
# Maximum Sharpe -- numerical: maximize the Sharpe ratio by minimizing its
# negative under the full-investment constraint, starting from the GMV weights.
# DataFrame.as_matrix() was removed in pandas 1.0 -> use to_numpy().
mu_ann = hist_mean.to_numpy() * 250  # annualized mean returns (hoisted)
fun = lambda w: -1 * np.dot(w.T, mu_ann - r_f) / np.sqrt(np.dot(w.T, np.dot(hist_cov * 250, w)))
cons = ({'type': 'eq', 'fun': lambda w: np.dot(w.T, one_vec) - 1})
res = minimize(fun, w_gmv, method='SLSQP', constraints=cons)
w_sharpe_v2 = res['x']
w_sharpe_v2_df = pd.DataFrame(w_sharpe_v2).T
w_sharpe_v2_df.columns = assets
mu_sharpe_v2 = np.dot(w_sharpe_v2.T, hist_mean.to_numpy()) * 250
stdev_sharpe_v2 = np.sqrt(np.dot(w_sharpe_v2.T, np.dot(hist_cov, w_sharpe_v2))) * np.sqrt(250)
# BUG FIX: the original computed sharpe_ratio_v2 from the closed-form
# quantities (mu_sharpe / stdev_sharpe) instead of the *_v2 results, so the
# printed "numerical" Sharpe ratio was just a copy of the analytical one.
sharpe_ratio_v2 = (mu_sharpe_v2 - r_f) / stdev_sharpe_v2
print(w_sharpe_v2_df)
print(mu_sharpe_v2)
print(stdev_sharpe_v2)
print(sharpe_ratio_v2) | [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"numpy.dot",
"cvxopt.matrix",
"pandas.DataFrame",
"scipy.optimize.minimize",
"matplotlib.pyplot.title",
"cvxopt.solvers... | [((694, 722), 'pandas.concat', 'pd.concat', (['hist_data'], {'axis': '(1)'}), '(hist_data, axis=1)\n', (703, 722), True, 'import pandas as pd\n'), ((1191, 1213), 'numpy.zeros', 'np.zeros', (['n_portfolios'], {}), '(n_portfolios)\n', (1199, 1213), True, 'import numpy as np\n'), ((1228, 1250), 'numpy.zeros', 'np.zeros', (['n_portfolios'], {}), '(n_portfolios)\n', (1236, 1250), True, 'import numpy as np\n'), ((1675, 1729), 'matplotlib.pyplot.plot', 'plt.plot', (['port_stdevs', 'port_returns', '"""o"""'], {'markersize': '(6)'}), "(port_stdevs, port_returns, 'o', markersize=6)\n", (1683, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1763), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Expected Volatility"""'], {}), "('Expected Volatility')\n", (1740, 1763), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1793), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {}), "('Expected Return')\n", (1774, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1869), 'matplotlib.pyplot.title', 'plt.title', (['"""Return and Standard Deviation of Randomly Generated Portfolios"""'], {}), "('Return and Standard Deviation of Randomly Generated Portfolios')\n", (1803, 1869), True, 'import matplotlib.pyplot as plt\n'), ((1870, 1880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1878, 1880), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2457), 'cvxopt.matrix', 'matrix', (['(1.0)'], {}), '(1.0)\n', (2452, 2457), False, 'from cvxopt import matrix, solvers\n'), ((3579, 3596), 'cvxopt.matrix', 'matrix', (['[mu_o, 1]'], {}), '([mu_o, 1])\n', (3585, 3596), False, 'from cvxopt import matrix, solvers\n'), ((3965, 3998), 'numpy.linspace', 'np.linspace', (['ef_left', 'ef_right', 'N'], {}), '(ef_left, ef_right, N)\n', (3976, 3998), True, 'import numpy as np\n'), ((4348, 4443), 'matplotlib.pyplot.plot', 'plt.plot', (['port_stdevs', 'port_returns', '"""o"""'], {'markersize': '(6)', 'label': '"""Candidate Market 
Portfolio"""'}), "(port_stdevs, port_returns, 'o', markersize=6, label=\n 'Candidate Market Portfolio')\n", (4356, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4538), 'matplotlib.pyplot.plot', 'plt.plot', (['ef_risks', 'ef_returns', '"""y-o"""'], {'color': '"""green"""', 'markersize': '(8)', 'label': '"""Efficient Frontier"""'}), "(ef_risks, ef_returns, 'y-o', color='green', markersize=8, label=\n 'Efficient Frontier')\n", (4447, 4538), True, 'import matplotlib.pyplot as plt\n'), ((4534, 4567), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Expected Volatility"""'], {}), "('Expected Volatility')\n", (4544, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4568, 4597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {}), "('Expected Return')\n", (4578, 4597), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4654), 'matplotlib.pyplot.title', 'plt.title', (['"""Efficient Frontier and Candidate Portfolios"""'], {}), "('Efficient Frontier and Candidate Portfolios')\n", (4607, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4655, 4677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4665, 4677), True, 'import matplotlib.pyplot as plt\n'), ((4678, 4688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4686, 4688), True, 'import matplotlib.pyplot as plt\n'), ((4708, 4737), 'pandas.DataFrame', 'pd.DataFrame', (['optimal_weights'], {}), '(optimal_weights)\n', (4720, 4737), True, 'import pandas as pd\n'), ((4892, 4920), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (4902, 4920), True, 'import matplotlib.pyplot as plt\n'), ((4921, 4938), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (4932, 4938), True, 'import matplotlib.pyplot as plt\n'), ((4939, 4980), 'matplotlib.pyplot.title', 'plt.title', (['"""Allocation Transition Matrix"""'], {}), "('Allocation Transition Matrix')\n", (4948, 4980), 
True, 'import matplotlib.pyplot as plt\n'), ((4981, 4991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4989, 4991), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5780), 'scipy.optimize.minimize', 'minimize', (['fun', 'w_gmv'], {'method': '"""SLSQP"""', 'constraints': 'cons'}), "(fun, w_gmv, method='SLSQP', constraints=cons)\n", (5734, 5780), False, 'from scipy.optimize import minimize\n'), ((542, 645), 'quandl.get', 'quandl.get', (["('wiki/' + asset)"], {'start_date': '"""2015-01-01"""', 'end_date': '"""2017-12-31"""', 'authtoken': '"""<PASSWORD>"""'}), "('wiki/' + asset, start_date='2015-01-01', end_date='2017-12-31',\n authtoken='<PASSWORD>')\n", (552, 645), False, 'import quandl\n'), ((1946, 1969), 'numpy.linalg.inv', 'np.linalg.inv', (['hist_cov'], {}), '(hist_cov)\n', (1959, 1969), True, 'import numpy as np\n'), ((2009, 2038), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'one_vec'], {}), '(hist_cov_inv, one_vec)\n', (2015, 2038), True, 'import numpy as np\n'), ((2244, 2256), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (2251, 2256), True, 'import numpy as np\n'), ((2662, 2674), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (2669, 2674), True, 'import numpy as np\n'), ((2700, 2725), 'numpy.asscalar', 'np.asscalar', (['stdev_gmv_v2'], {}), '(stdev_gmv_v2)\n', (2711, 2725), True, 'import numpy as np\n'), ((2779, 2796), 'numpy.max', 'np.max', (['hist_mean'], {}), '(hist_mean)\n', (2785, 2796), True, 'import numpy as np\n'), ((3197, 3217), 'numpy.matrix', 'np.matrix', (['[mu_o, 1]'], {}), '([mu_o, 1])\n', (3206, 3217), True, 'import numpy as np\n'), ((3308, 3331), 'pandas.DataFrame', 'pd.DataFrame', (['w_max_ret'], {}), '(w_max_ret)\n', (3320, 3331), True, 'import pandas as pd\n'), ((5184, 5206), 'pandas.DataFrame', 'pd.DataFrame', (['w_sharpe'], {}), '(w_sharpe)\n', (5196, 5206), True, 'import pandas as pd\n'), ((5371, 5383), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (5378, 5383), True, 'import numpy as np\n'), ((5821, 
5846), 'pandas.DataFrame', 'pd.DataFrame', (['w_sharpe_v2'], {}), '(w_sharpe_v2)\n', (5833, 5846), True, 'import pandas as pd\n'), ((6029, 6041), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (6036, 6041), True, 'import numpy as np\n'), ((1563, 1575), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (1570, 1575), True, 'import numpy as np\n'), ((2049, 2070), 'numpy.transpose', 'np.transpose', (['one_vec'], {}), '(one_vec)\n', (2061, 2070), True, 'import numpy as np\n'), ((2072, 2101), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'one_vec'], {}), '(hist_cov_inv, one_vec)\n', (2078, 2101), True, 'import numpy as np\n'), ((2115, 2139), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'w_gmv'}), '(data=w_gmv)\n', (2127, 2139), True, 'import pandas as pd\n'), ((2478, 2504), 'cvxopt.solvers.qp', 'solvers.qp', (['P', 'q'], {'A': 'A', 'b': 'b'}), '(P, q, A=A, b=b)\n', (2488, 2504), False, 'from cvxopt import matrix, solvers\n'), ((2525, 2547), 'pandas.DataFrame', 'pd.DataFrame', (['w_gmv_v2'], {}), '(w_gmv_v2)\n', (2537, 2547), True, 'import pandas as pd\n'), ((3140, 3159), 'numpy.array', 'np.array', (['hist_mean'], {}), '(hist_mean)\n', (3148, 3159), True, 'import numpy as np\n'), ((3246, 3269), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'B'], {}), '(hist_cov_inv, B)\n', (3252, 3269), True, 'import numpy as np\n'), ((3272, 3288), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (3285, 3288), True, 'import numpy as np\n'), ((3620, 3646), 'cvxopt.solvers.qp', 'solvers.qp', (['P', 'q'], {'A': 'A', 'b': 'b'}), '(P, q, A=A, b=b)\n', (3630, 3646), False, 'from cvxopt import matrix, solvers\n'), ((3671, 3697), 'pandas.DataFrame', 'pd.DataFrame', (['w_max_ret_v2'], {}), '(w_max_ret_v2)\n', (3683, 3697), True, 'import pandas as pd\n'), ((2216, 2239), 'numpy.dot', 'np.dot', (['hist_cov', 'w_gmv'], {}), '(hist_cov, w_gmv)\n', (2222, 2239), True, 'import numpy as np\n'), ((2631, 2657), 'numpy.dot', 'np.dot', (['hist_cov', 'w_gmv_v2'], {}), '(hist_cov, w_gmv_v2)\n', 
(2637, 2657), True, 'import numpy as np\n'), ((5340, 5366), 'numpy.dot', 'np.dot', (['hist_cov', 'w_sharpe'], {}), '(hist_cov, w_sharpe)\n', (5346, 5366), True, 'import numpy as np\n'), ((5695, 5715), 'numpy.dot', 'np.dot', (['w.T', 'one_vec'], {}), '(w.T, one_vec)\n', (5701, 5715), True, 'import numpy as np\n'), ((5995, 6024), 'numpy.dot', 'np.dot', (['hist_cov', 'w_sharpe_v2'], {}), '(hist_cov, w_sharpe_v2)\n', (6001, 6024), True, 'import numpy as np\n'), ((1539, 1558), 'numpy.dot', 'np.dot', (['hist_cov', 'w'], {}), '(hist_cov, w)\n', (1545, 1558), True, 'import numpy as np\n'), ((4070, 4084), 'cvxopt.matrix', 'matrix', (['[t, 1]'], {}), '([t, 1])\n', (4076, 4084), False, 'from cvxopt import matrix, solvers\n'), ((5628, 5653), 'numpy.dot', 'np.dot', (['(hist_cov * 250)', 'w'], {}), '(hist_cov * 250, w)\n', (5634, 5653), True, 'import numpy as np\n'), ((2853, 2884), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'hist_mean'], {}), '(hist_cov_inv, hist_mean)\n', (2859, 2884), True, 'import numpy as np\n'), ((2934, 2963), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'one_vec'], {}), '(hist_cov_inv, one_vec)\n', (2940, 2963), True, 'import numpy as np\n'), ((3014, 3043), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'one_vec'], {}), '(hist_cov_inv, one_vec)\n', (3020, 3043), True, 'import numpy as np\n'), ((3091, 3120), 'numpy.dot', 'np.dot', (['hist_cov_inv', 'one_vec'], {}), '(hist_cov_inv, one_vec)\n', (3097, 3120), True, 'import numpy as np\n'), ((3509, 3528), 'numpy.array', 'np.array', (['hist_mean'], {}), '(hist_mean)\n', (3517, 3528), True, 'import numpy as np\n'), ((4291, 4310), 'numpy.dot', 'np.dot', (['hist_cov', 'w'], {}), '(hist_cov, w)\n', (4297, 4310), True, 'import numpy as np\n')] |
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import random
# Constants
# HIDDEN_LAYER_NUM = number of hidden layers (documented here but not defined below)
# LEARNING_RATE = gradient-descent learning rate
# NET_DEEP_ARRAY = neuron count per network layer (index 0 corresponds to the input layer X)
# DEFAULT_TRAIN_TIMES = default number of training iterations
LEARNING_RATE = 1.2
NET_DEEP_ARRAY = []
DEFAULT_TRAIN_TIMES = 5000
# RANDOM_SEED = seed for the random number generator
RANDOM_SEED = 2021
# Logistic sigmoid activation.
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x))."""
    return 1.0 / (1.0 + np.exp(-x))
# Main driver for the deep (multi-layer) neural network.
def deep_neural_network(X, Y
                        , net_deep_array=None, learning_rate=LEARNING_RATE
                        , train_times=DEFAULT_TRAIN_TIMES, random_seed=RANDOM_SEED):
    """Train a fully-connected network (tanh hidden layers, sigmoid output)
    with batch gradient descent, plotting the cost curve at the end.

    Args:
        X: input matrix of shape (features, samples).
        Y: label row vector aligned with X's columns.
        net_deep_array: neurons per layer; index 0 is overwritten with the
            input dimension. Defaults to one hidden layer of 6 units.
        learning_rate: gradient-descent step size.
        train_times: number of training iterations.
        random_seed: seed for numpy's global RNG (weight initialization).

    Returns:
        dict with trained weights 'W', biases 'b', the recorded cost curve
        ('x' iterations, 'y' costs) and the resolved 'net_deep_array'.
    """
    # Plot setup (the cost curve itself is plotted after training).
    plt.title("week4 深层神经网络")
    plt.xlabel("x/times")
    plt.ylabel("损失值(越小越好)")
    plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels
    plt.rcParams['axes.unicode_minus'] = False  # must be set manually together with the above
    x = []
    y = []
    # Bug fix: the original default was a shared mutable list ([0, 6, 1])
    # that was mutated below via net_deep_array[0] = ..., leaking state
    # across calls and mutating caller-supplied lists. Copy instead.
    if net_deep_array is None:
        net_deep_array = [0, 6, 1]
    else:
        net_deep_array = list(net_deep_array)
    # Basic parameters.
    net_deep = len(net_deep_array)
    net_deep_array[0] = X.shape[0]  # layer 0 holds the input dimension
    numpy.random.seed(random_seed)
    m = X.shape[1]  # number of training samples
    W, b = initial_parameters(net_deep_array)
    # Per-layer caches for the forward pass (index 0 = input).
    Z = [0] * net_deep
    A = [0] * net_deep
    A[0] = X
    # Training iterations.
    for i in range(0, train_times, 1):
        # Forward pass through every layer.
        for L in range(1, net_deep, 1):
            activate = 'tanh'
            # The last layer uses the sigmoid activation.
            if L == net_deep - 1:
                activate = 'sigmoid'
            forward_parameter = forward_propagation(A[L - 1], {
                'W': W[L],
                'b': b[L],
            }, activate)
            Z[L] = forward_parameter.get('Z')
            A[L] = forward_parameter.get('A')
            assert (Z[L].shape == (net_deep_array[L], m))
        # Cost of the current predictions.
        cost_value = cost(A[net_deep - 1], Y)
        if i % 20 == 0:
            x.append(i)
            y.append(cost_value)
        if i % 500 == 0:
            print("第", i, "次迭代,成本值为:", np.squeeze(cost_value))
        # Backward pass (gradient descent), from the output layer inwards.
        dAL = 0
        for L in range(net_deep - 1, 0, -1):
            if L == net_deep - 1:
                dAL = -np.divide(Y, A[net_deep - 1]) + np.divide(1 - Y, 1 - A[net_deep - 1])
                # Bug fix: the sigmoid derivative is s(z) * (1 - s(z)); the
                # original computed sigmoid(1 - Z), which is a different
                # quantity. (backward_propagation elsewhere in this file
                # already uses the correct form.)
                sig = sigmoid(Z[net_deep - 1])
                dZL = dAL * sig * (1 - sig)
            else:
                # tanh'(z) = 1 - tanh(z)^2
                dZL = dAL * (1 - (np.tanh(Z[L]) * np.tanh(Z[L])))
            dWL = (1 / m) * (np.dot(dZL, A[L - 1].T))
            dbL = (1 / m) * np.sum(dZL, axis=1, keepdims=True)
            # Gradient propagated to the next (shallower) layer.
            dAL = np.dot(W[L].T, dZL)
            # Parameter update.
            W[L] = W[L] - learning_rate * dWL
            b[L] = b[L] - learning_rate * dbL
    plt.plot(x, y, color='orange')
    plt.show()
    parameter = {
        'W': W,
        'b': b,
        'x': x,
        'y': y,
        'net_deep_array': net_deep_array,
    }
    return parameter
# Forward propagation for a single layer.
def forward_propagation(A_Last, parameter, activate='tanh'):
    """Compute one layer's linear step plus activation.

    Args:
        A_Last: activations of the previous layer.
        parameter: dict with weight matrix 'W' and bias 'b'.
        activate: activation name, 'tanh' or 'sigmoid'.

    Returns:
        dict with pre-activation 'Z' and activation 'A'.

    Raises:
        ValueError: for an unsupported activation name. (Bug fix: the
        original left 'A' unbound in that case, raising an obscure
        UnboundLocalError at the return statement.)
    """
    W = parameter.get('W')
    b = parameter.get('b')
    Z = np.dot(W, A_Last) + b
    if activate == 'tanh':
        A = np.tanh(Z)
    elif activate == 'sigmoid':
        A = sigmoid(Z)
    else:
        raise ValueError("unsupported activation: " + str(activate))
    return {
        'Z': Z,
        'A': A,
    }
# Backward propagation (sigmoid derivative step).
def backward_propagation(dA, Z, A_Last_T, W):
    """Compute dZ = dA * sigmoid'(Z) and return it.

    Bug fix: the original computed dZ and then discarded it (implicitly
    returning None), which made the function useless to any caller.
    A_Last_T and W are currently unused; they are kept for interface
    compatibility.
    NOTE(review): this helper appears unused -- the training loop inlines
    its own gradient computation. Confirm before relying on it.
    """
    dZ = dA * sigmoid(Z) * (1 - sigmoid(Z))
    return dZ
# Cost of the network's predictions.
def cost(A, Y):
    """Cross-entropy-style cost between predictions A and labels Y.

    Both inputs are squeezed to 1-D; the absolute value is taken twice
    (element-wise and on the final scalar) exactly as in the original,
    so the result is always non-negative.
    """
    A = np.squeeze(A)
    Y = np.squeeze(Y)
    assert (A.shape[0] == Y.shape[0])
    m = A.shape[0]
    # Element-wise cross entropy; multiplication by Y / (1 - Y) selects
    # the appropriate log term per element.
    term = Y * np.log(A) + (1 - Y) * np.log(1 - A)
    term = np.abs(term)
    cost_ret = np.sum(term) * (-1 / m)
    return float(np.abs(cost_ret))
# Initialize the W and b parameters for every layer.
def initial_parameters(net_deep_array):
    """Create small random weight matrices and zero bias vectors.

    Note: index 0 is also populated (its weight shape wraps around to
    net_deep_array[-1]) even though layer 0 is the input layer; this is
    kept so the global RNG consumes the same number of draws as before.
    """
    layer_count = len(net_deep_array)
    W = []
    b = []
    for layer in range(layer_count):
        weight = np.random.randn(net_deep_array[layer], net_deep_array[layer - 1]) * 0.01
        bias = np.zeros(shape=(net_deep_array[layer], 1))
        W.append(weight)
        b.append(bias)
    return W, b
# Evaluate the trained deep network on held-out data.
def test_network(X, Y, parameter):
    """Run a forward pass with trained parameters, then print the rounded
    predictions, the cost and the accuracy percentage."""
    W = parameter.get('W')
    b = parameter.get('b')
    net_deep_array = parameter.get('net_deep_array')
    net_deep = len(net_deep_array)
    A = X
    # Forward pass through every layer; sigmoid on the output layer.
    for layer in range(1, net_deep, 1):
        activation = 'sigmoid' if layer == net_deep - 1 else 'tanh'
        fwd = forward_propagation(A, {
            'W': W[layer],
            'b': b[layer],
        }, activation)
        A = fwd.get('A')
    cost_value = cost(A, Y)
    print(numpy.around(np.squeeze(A), 1))
    print(Y)
    m = A.shape[1]
    # Threshold the output probabilities at 0.5 to obtain hard labels.
    for col in range(0, A.shape[1]):
        A[0, col] = 1 if A[0, col] > 0.5 else 0
    print("成本cost=" + str(cost_value))
    print("准确性: " + str(float(np.sum((A == Y)) * 100 / m)) + "%")
# Generate synthetic data.
# num = number of samples
def get_number(num):
    """Create `num` random samples with three integer features and a
    binary label: 0 when exp(x) + 3*y + z < 500, otherwise 1."""
    X = [[], [], []]
    Y = []
    for _ in range(num):
        x = random.randint(-5, 15)
        y = random.randint(0, 150)
        z = random.randint(0, 150)
        score = np.exp(x) + 3 * y + z
        label = 0 if score < 500 else 1
        X[0].append(x)
        X[1].append(y)
        X[2].append(z)
        Y.append(label)
    return X, Y
if __name__ == '__main__':
    # Number of training iterations for the training set
    # Build the synthetic training data
    data_X, data_Y = get_number(5000)
    data_X = np.array(data_X)
    data_Y = np.array(data_Y)
    print(data_X.shape)
    print(data_Y.shape)
    parameter = deep_neural_network(data_X, data_Y, train_times=5000)
    # Evaluate accuracy on a fresh, small test set
    test_X, test_Y = get_number(15)
    test_X = np.array(test_X)
    test_Y = np.array(test_Y)
    test_network(test_X, test_Y, parameter=parameter)
    plt.title("week4 深层神经网络")
    plt.xlabel("x/times")
    plt.ylabel("损失值(越小越好)")
    plt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels
    plt.rcParams['axes.unicode_minus'] = False # these two lines must be set manually
    x = parameter.get('x')
    y = parameter.get('y')
    plt.plot(x, y, color='orange')
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.tanh",
"numpy.squeeze",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.maximum",
"numpy.random.randn",
"random.randint",
"numpy.... | [((665, 690), 'matplotlib.pyplot.title', 'plt.title', (['"""week4 深层神经网络"""'], {}), "('week4 深层神经网络')\n", (674, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x/times"""'], {}), "('x/times')\n", (705, 716), True, 'import matplotlib.pyplot as plt\n'), ((721, 744), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""损失值(越小越好)"""'], {}), "('损失值(越小越好)')\n", (731, 744), True, 'import matplotlib.pyplot as plt\n'), ((2751, 2781), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""orange"""'}), "(x, y, color='orange')\n", (2759, 2781), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2796), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2794, 2796), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3410), 'numpy.squeeze', 'np.squeeze', (['A'], {}), '(A)\n', (3407, 3410), True, 'import numpy as np\n'), ((3419, 3432), 'numpy.squeeze', 'np.squeeze', (['Y'], {}), '(Y)\n', (3429, 3432), True, 'import numpy as np\n'), ((3754, 3777), 'numpy.maximum', 'np.maximum', (['temp', '(-temp)'], {}), '(temp, -temp)\n', (3764, 3777), True, 'import numpy as np\n'), ((3832, 3863), 'numpy.maximum', 'np.maximum', (['cost_ret', '(-cost_ret)'], {}), '(cost_ret, -cost_ret)\n', (3842, 3863), True, 'import numpy as np\n'), ((5931, 5947), 'numpy.array', 'np.array', (['data_X'], {}), '(data_X)\n', (5939, 5947), True, 'import numpy as np\n'), ((5961, 5977), 'numpy.array', 'np.array', (['data_Y'], {}), '(data_Y)\n', (5969, 5977), True, 'import numpy as np\n'), ((6168, 6184), 'numpy.array', 'np.array', (['test_X'], {}), '(test_X)\n', (6176, 6184), True, 'import numpy as np\n'), ((6198, 6214), 'numpy.array', 'np.array', (['test_Y'], {}), '(test_Y)\n', (6206, 6214), True, 'import numpy as np\n'), ((6274, 6299), 'matplotlib.pyplot.title', 'plt.title', (['"""week4 深层神经网络"""'], {}), "('week4 深层神经网络')\n", (6283, 6299), True, 'import matplotlib.pyplot as plt\n'), ((6304, 6325), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""x/times"""'], {}), "('x/times')\n", (6314, 6325), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""损失值(越小越好)"""'], {}), "('损失值(越小越好)')\n", (6340, 6353), True, 'import matplotlib.pyplot as plt\n'), ((6531, 6561), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""orange"""'}), "(x, y, color='orange')\n", (6539, 6561), True, 'import matplotlib.pyplot as plt\n'), ((6566, 6576), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6574, 6576), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3099), 'numpy.dot', 'np.dot', (['W', 'A_Last'], {}), '(W, A_Last)\n', (3088, 3099), True, 'import numpy as np\n'), ((3143, 3153), 'numpy.tanh', 'np.tanh', (['Z'], {}), '(Z)\n', (3150, 3153), True, 'import numpy as np\n'), ((3793, 3805), 'numpy.sum', 'np.sum', (['temp'], {}), '(temp)\n', (3799, 3805), True, 'import numpy as np\n'), ((4126, 4164), 'numpy.zeros', 'np.zeros', ([], {'shape': '(net_deep_array[L], 1)'}), '(shape=(net_deep_array[L], 1))\n', (4134, 4164), True, 'import numpy as np\n'), ((5279, 5301), 'random.randint', 'random.randint', (['(-5)', '(15)'], {}), '(-5, 15)\n', (5293, 5301), False, 'import random\n'), ((5314, 5336), 'random.randint', 'random.randint', (['(0)', '(150)'], {}), '(0, 150)\n', (5328, 5336), False, 'import random\n'), ((5349, 5371), 'random.randint', 'random.randint', (['(0)', '(150)'], {}), '(0, 150)\n', (5363, 5371), False, 'import random\n'), ((419, 429), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (425, 429), True, 'import numpy as np\n'), ((2614, 2633), 'numpy.dot', 'np.dot', (['W[L].T', 'dZL'], {}), '(W[L].T, dZL)\n', (2620, 2633), True, 'import numpy as np\n'), ((3677, 3686), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (3683, 3686), True, 'import numpy as np\n'), ((3714, 3727), 'numpy.log', 'np.log', (['(1 - A)'], {}), '(1 - A)\n', (3720, 3727), True, 'import numpy as np\n'), ((4048, 4105), 'numpy.random.randn', 'np.random.randn', 
(['net_deep_array[L]', 'net_deep_array[L - 1]'], {}), '(net_deep_array[L], net_deep_array[L - 1])\n', (4063, 4105), True, 'import numpy as np\n'), ((4847, 4860), 'numpy.squeeze', 'np.squeeze', (['A'], {}), '(A)\n', (4857, 4860), True, 'import numpy as np\n'), ((2056, 2078), 'numpy.squeeze', 'np.squeeze', (['cost_value'], {}), '(cost_value)\n', (2066, 2078), True, 'import numpy as np\n'), ((2484, 2507), 'numpy.dot', 'np.dot', (['dZL', 'A[L - 1].T'], {}), '(dZL, A[L - 1].T)\n', (2490, 2507), True, 'import numpy as np\n'), ((2537, 2571), 'numpy.sum', 'np.sum', (['dZL'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZL, axis=1, keepdims=True)\n', (2543, 2571), True, 'import numpy as np\n'), ((5387, 5396), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5393, 5396), True, 'import numpy as np\n'), ((2248, 2285), 'numpy.divide', 'np.divide', (['(1 - Y)', '(1 - A[net_deep - 1])'], {}), '(1 - Y, 1 - A[net_deep - 1])\n', (2257, 2285), True, 'import numpy as np\n'), ((2216, 2245), 'numpy.divide', 'np.divide', (['Y', 'A[net_deep - 1]'], {}), '(Y, A[net_deep - 1])\n', (2225, 2245), True, 'import numpy as np\n'), ((2422, 2435), 'numpy.tanh', 'np.tanh', (['Z[L]'], {}), '(Z[L])\n', (2429, 2435), True, 'import numpy as np\n'), ((2438, 2451), 'numpy.tanh', 'np.tanh', (['Z[L]'], {}), '(Z[L])\n', (2445, 2451), True, 'import numpy as np\n'), ((5097, 5111), 'numpy.sum', 'np.sum', (['(A == Y)'], {}), '(A == Y)\n', (5103, 5111), True, 'import numpy as np\n')] |
from data_utils.data_manager import DataManager
from prototype.prototype import Prototype
from embedding.embeddings import Embeddings
from embedding.embeddings_service import EmbeddingsService
#from embedding.elmo_embeddings import ElmoEmbeddings
from embedding.bert_embeddings import BertEmbeddings
from embedding.embeddings_cache import EmbeddingsCache
import numpy as np
import torch
from spacy.lang.en.stop_words import STOP_WORDS
from classifiers.binary_classifier import BinaryClassifier, CLASSIFIER_TYPE
#from classifiers.ulmfit_classifier import UlmfitClassifier
from classifiers.bert_classifier import BertClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
from embedding.embeddings_service import TextSpan
import collections
from enum import Enum
import datetime
import argparse
import random
from classifiers.bert_custom_models import BertPoolType
from classifiers.bert_classifier import BertTrainConfig
class RATIONALE_REP(Enum):
    """Granularity at which rationale annotations are aggregated into vectors."""
    SINGLETON = 1 #This has a single representation for all rationales
    PER_CLASS = 2 #This has a representation for positives and a representation for negatives
    PER_INSTANCE = 3 #This experiment has a representation for each rationale
class MODEL_TYPE(Enum):
    """Classifier family selected by the experiment driver (see --model flag).

    NOTE(review): PA/LR/SVM presumably map to CLASSIFIER_TYPE variants of
    BinaryClassifier -- confirm against classifiers.binary_classifier.
    """
    PROTOTYPE = 1
    PA = 2
    LR = 3
    SVM = 4
    ULMFIT = 5
    BERT = 6
    RBSVM = 7
class WORD_REP(Enum):
    """Word-embedding representation options (NA = no embeddings)."""
    NA = 0
    W2V = 1
    ONE_HOT = 2
    ELMO = 3
    BERT = 4
random_gen = np.random.RandomState(3333)
def rationale_representation(train_set, emb_service, aggregate=True, freq_words=None):
    """Embed the rationale spans of a training set.

    For every instance, each rationale span fully contained in a sentence
    becomes a TextSpan over that sentence's whitespace tokens.  The span
    embeddings of each instance are combined by `emb_service`; if
    `aggregate` is True the centroid over instances is returned, otherwise
    the list of per-instance vectors.
    """
    per_instance_vecs = []
    for inst in train_set:
        text = inst["text"]
        spans = inst["spans"]
        rationale_spans = []
        for sent_start, sent_end in inst["sents"]:
            for span_start, span_end in spans:
                # Keep only rationales fully contained in this sentence.
                if sent_start <= span_start and sent_end >= span_end:
                    # Token offsets: count whitespace tokens before/inside the span.
                    begin = len(text[sent_start:span_start].split())
                    end = begin + len(text[span_start:span_end].split())
                    rationale_spans.append(
                        TextSpan(text[sent_start:sent_end].split(), begin=begin, end=end))
        if rationale_spans:
            per_instance_vecs.append(
                emb_service.represent_spans(rationale_spans, use_words=freq_words))
    if aggregate:
        return emb_service.centroid(per_instance_vecs)
    return per_instance_vecs
def instance_split(inst):
    """Split an instance into one whitespace-tokenized TextSpan per sentence."""
    text = inst["text"]
    sentences = inst["sents"]
    if not sentences:
        print('NOTICE: no sentences in inst', inst['rid'])
    return [TextSpan(text[start:end].split()) for start, end in sentences]
def instance_split_with_rationales(inst, down_weight, up_weight=1):
    """Split an instance into per-sentence TextSpans with per-token weights.

    Tokens covered by a rationale span get `up_weight`; all remaining
    tokens get `down_weight`.
    """
    text = inst["text"]
    result = []
    for sent_start, sent_end in inst["sents"]:
        tokens = text[sent_start:sent_end].split()
        weights = [down_weight] * len(tokens)
        for span_start, span_end in inst["spans"]:
            # Only rationales fully contained in this sentence contribute.
            if sent_start <= span_start and sent_end >= span_end:
                first = len(text[sent_start:span_start].split())
                last = first + len(text[span_start:span_end].split())
                for idx in range(first, last):
                    weights[idx] = up_weight
        result.append(TextSpan(tokens, weights=weights))
    return result
def text_representation(train_set, emb_service, use_words):
    """Centroid embedding of the full texts (as sentence spans) of train_set."""
    instance_vecs = []
    for inst in train_set:
        text = inst["text"]
        sentence_spans = [TextSpan(text[start:end].split())
                          for start, end in inst["sents"]]
        instance_vecs.append(
            emb_service.represent_spans(sentence_spans, use_words=use_words))
    return emb_service.centroid(instance_vecs)
def sent_lens(data_set):
    """Bucket sentence lengths (whitespace-token counts) of the data set.

    Returns a 5-tuple: counts of sentences with <=30, <=46, <=62, <=94
    tokens, and longer ones.
    """
    limits = (30, 46, 62, 94)
    buckets = [0, 0, 0, 0, 0]
    for inst in data_set:
        text = inst["text"]
        for start, end in inst["sents"]:
            length = len(text[start:end].split())
            for idx, limit in enumerate(limits):
                if length <= limit:
                    buckets[idx] += 1
                    break
            else:
                buckets[4] += 1
    return tuple(buckets)
def sents_per_instance(data_set):
    """Print the sorted per-instance sentence counts (for quick inspection)."""
    counts = [len(inst["sents"]) for inst in data_set]
    print(sorted(counts))
def print_instances(instances):
    """Pretty-print each instance: id, text, label and its rationale texts."""
    for inst in instances:
        print ("RID " + str(inst["rid"]))
        text = inst["text"]
        print ("Text: " + text)
        print ("Label: " + str(inst["label"]))
        for rationale in inst["spans"]:
            print ("Rationale: " + text[rationale[0]:rationale[1]])
        print ("\n")
def predict_random(instances):
    """Random baseline: predict label 1 with probability 0.5 per instance."""
    draws = random_gen.random_sample(len(instances))
    return [1 if d >= 0.5 else 0 for d in draws]
def evaluate(predictions, labels):
    """Return (accuracy, F1 of the positive class)."""
    _, _, f1_scores, _ = precision_recall_fscore_support(labels, predictions)
    accuracy = accuracy_score(labels, predictions)
    return accuracy, f1_scores[1]  # index 1 == the positive class
def get_word_doc_counts(instances):
    """Document frequency: for each word, the number of instances containing it."""
    doc_counts = collections.Counter()
    for inst in instances:
        doc_counts.update(set(inst["text"].split()))
    return doc_counts
def get_frequent_words(instances, min_freq):
    """Return the set of words whose document frequency is >= min_freq.

    Iterates counts in descending order, so it can stop at the first word
    below the threshold.
    """
    doc_counts = get_word_doc_counts(instances)
    frequent = set()
    for word, count in doc_counts.most_common():
        if count < min_freq:
            break
        frequent.add(word)
    return frequent
def inst2text_with_capitals(inst):
    """Join the instance's sentences, each capitalized, into one string."""
    text = inst["text"]
    return ' '.join(text[start:end].capitalize() for start, end in inst["sents"])
def inst2sents(inst):
    """Return the raw sentence strings of an instance."""
    text = inst["text"]
    return [text[start:end] for start, end in inst["sents"]]
def read_word_counts(word_counts_file):
    """Read a tab-separated word/count file into a dict of word -> float count."""
    print('Reading word counts from:'+word_counts_file)
    counts = {}
    with open(word_counts_file, 'r') as handle:
        for raw_line in handle:
            segments = raw_line.strip().split('\t')
            counts[segments[0]] = float(segments[1])
    return counts
def get_idfs(word_counts, doc_num):
    """Smoothed idf values, shifted so the minimum is exactly 1.0.

    Uses the scikit-learn smoothing: idf(t) = log((1 + n) / (1 + df(t))) + 1,
    then subtracts (min_idf - 1) from every value.
    """
    idfs = {word: np.log((1 + float(doc_num)) / (1 + float(count))) + 1
            for word, count in word_counts.items()}
    min_idf = min(idfs.values())
    print('Min idf: %.2f. Fixing to 1.0' % min_idf)
    for word in idfs:
        idfs[word] = idfs[word] - min_idf + 1
    print('Finished computing idf values for %d words.' % len(idfs))
    return idfs
def dump_predictions(pred_file, dataset_name, data, dev_predictions, dev_pos_predictions_scores):
    """Append per-instance positive-class scores for a dataset to a file.

    Bug fix: the original opened the global `args.pred_file`, silently
    ignoring the `pred_file` parameter (and breaking any caller passing a
    different path). Missing scores default to 0.

    NOTE(review): `dev_predictions` is currently unused; kept for interface
    compatibility.
    """
    if dev_pos_predictions_scores is None:
        dev_pos_predictions_scores = [0] * len(data)
    with open(pred_file, 'a') as f:
        f.write('DATASET: ' + dataset_name + '\n')
        for inst, score in zip(data, dev_pos_predictions_scores):
            f.write('%s\t%f\n' % (inst['rid'], score))
def train_and_evaluate(cls, data_manager, train_sample, pos_train_sample, neg_train_sample, emb_service, freq_words, debug_text):
    """Train the globally-selected model on the sample and evaluate it on data_manager's data.

    NOTE(review): relies on module-level globals set elsewhere before the
    call: `model` (MODEL_TYPE), `rationale_rep` (RATIONALE_REP),
    `bias_strength` and the parsed CLI `args` -- confirm they are defined.

    :param cls: pre-constructed classifier instance (unused for PROTOTYPE).
    :param data_manager: provides evaluation instances via get_data().
    :param train_sample: full labeled training sample (with rationale spans).
    :param pos_train_sample: positive subset of train_sample.
    :param neg_train_sample: negative subset of train_sample.
    :param emb_service: embedding service used to build span representations.
    :param freq_words: optional word whitelist for the representations.
    :param debug_text: PROTOTYPE only -- if not None, score this text, print
        per-token weights and terminate the process via sys.exit(0).
    :return: (accuracy, positive-class F1, dev predictions, dev positive
        scores or None when the model does not expose them).
    """
    if model == MODEL_TYPE.PROTOTYPE:
        # Build rationale bias vectors at the configured granularity.
        rationale_vecs = []
        if rationale_rep == RATIONALE_REP.SINGLETON :
            rationale_vecs.append(rationale_representation(train_sample, emb_service, freq_words=freq_words))
        if rationale_rep == RATIONALE_REP.PER_CLASS :
            rationale_vecs.append(rationale_representation(neg_train_sample, emb_service, freq_words=freq_words))
            rationale_vecs.append(rationale_representation(pos_train_sample, emb_service, freq_words=freq_words))
        if rationale_rep == RATIONALE_REP.PER_INSTANCE :
            rationale_vecs = rationale_representation(train_sample, emb_service, aggregate=False, freq_words =freq_words)
        #Adjust the bias(rationale representation) by removing the general text representation
        text_vec = text_representation(train_sample, emb_service, use_words=freq_words)
        rationale_vecs = [emb_service.add_vecs(rationale_vec, -text_vec) for rationale_vec in rationale_vecs]
        prototype = Prototype(emb_service)
        pos_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in pos_train_sample]
        pos_prototype = prototype.build_prototype(pos_vecs)
        neg_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in neg_train_sample]
        neg_prototype = prototype.build_prototype(neg_vecs)
        #The id of each class prototype is the label of the class
        #neg_prototype has label 0
        #pos_prototype has label 1
        class_prototypes = [neg_prototype,pos_prototype]
        # if args.text != None: # for debug
        if debug_text != None:
            # NOTE(review): this branch uses args.text for the representation
            # but debug_text when printing per-token weights -- verify both
            # always hold the same value.
            text_vec, weights = emb_service.represent_span(use_words=freq_words, text=args.text.split(), begin=0, end=-1, bias_vecs=rationale_vecs, return_weights=True, weights=None)
            weights = [w**bias_strength for w in weights]
            # if norm:
            #     norm2 = np.sqrt(sum([w**2 for w in weights]))
            #     weights = [w/norm2 for w in weights]
            text_pred = prototype.apply_prototype([text_vec], class_prototypes)[0]
            # print('Text: %s' % args.text)
            # print('Weights: %s' % ' '.join([str(w) for w in weights]))
            print('Prediction: %d' % text_pred)
            print('\n'.join(['%s\t%.8f' % (word, weight) for (word, weight) in zip(debug_text.split(), weights)]))
            import sys
            sys.exit(0)
        else:
            dev_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in data_manager.get_data()]
            dev_predictions, dev_pos_predictions_scores = prototype.apply_prototype(dev_vecs, class_prototypes)
            dev_labels = np.array([inst["label"] for inst in data_manager.get_data()])
            acc,f1 = evaluate(dev_predictions,dev_labels)
    elif model == MODEL_TYPE.RBSVM :
        # Rationale-biased vector classifier: per-class bias vectors, then cls.
        rationale_vecs = []
        rationale_vecs.append(rationale_representation(neg_train_sample, emb_service, freq_words=freq_words))
        rationale_vecs.append(rationale_representation(pos_train_sample, emb_service, freq_words=freq_words))
        #Adjust the bias(rationale representation) by removing the general text representation
        text_vec = text_representation(train_sample, emb_service, use_words=freq_words)
        rationale_vecs = [emb_service.add_vecs(rationale_vec, -text_vec) for rationale_vec in rationale_vecs]
        pos_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in pos_train_sample]
        neg_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in neg_train_sample]
        cls.clear(remember_train_std_if_supported=True)
        cls.add_positive_instances(pos_vecs)
        cls.add_negative_instances(neg_vecs)
        cls.train()
        dev_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words, bias_vecs = rationale_vecs) for inst in data_manager.get_data()]
        dev_predictions = cls.predict(dev_vecs)
        dev_pos_predictions_scores = None # TODO: extract this info if possible
        dev_labels = np.array([inst["label"] for inst in data_manager.get_data()])
        acc,f1 = evaluate(dev_predictions,dev_labels)
    #model != MODEL_TYPE.PROTOTYPE :
    elif model == MODEL_TYPE.BERT:
        label_list = [0,1]
        label2id = {label : i for i, label in enumerate(label_list)}
        train_examples = [inst2sents(inst) for inst in train_sample]
        train_labels = [inst["label"] for inst in train_sample]
        if args.bert_rationale_weight > 0:
            # for every word in input sentences the label would be '1' if the word was marked as rationale and '0' otherwise
            train_token_label_ids = [[text_span.weights for text_span in
                                      # multi-label rationales
                                      # instance_split_with_rationales(inst,down_weight=0, up_weight=label2id[inst["label"]]+1)] for inst in train_sample]
                                      instance_split_with_rationales(inst,down_weight=0, up_weight=1)] for inst in train_sample]
            # dev_token_label_ids = [[text_span.weights for text_span in instance_split_with_rationales(inst,down_weight=0)] for inst in data_manager.get_data()]
        else:
            train_token_label_ids = None
            # dev_token_label_ids = None
        train_config = BertTrainConfig(num_train_epochs=args.epochs, learning_rate=args.lr, upper_dropout=args.dropout,
                                       max_seq_length=args.bert_max_seq_len)
        bert_pool_type = BertPoolType[args.bert_pool_type]
        cls.train(bert_pool_type, train_examples, train_labels, label_list=label_list, label2id=label2id, train_token_label_ids=train_token_label_ids,
                  train_config=train_config, rationale_weight=args.bert_rationale_weight, text_weight=args.bert_text_weight,
                  two_berts=args.two_berts, detach_weights=args.bert_detach_weights, learn_weights=args.bert_learn_weights,
                  bert_independent_rationales=args.bert_independent_rationales, shallow_fine_tuning=not args.bert_deep_fine_tuning)
        dev_examples = [inst2sents(inst) for inst in data_manager.get_data()]
        dev_labels = np.array([inst["label"] for inst in data_manager.get_data()])
        dev_predictions, dev_pos_predictions_scores = cls.predict(dev_examples)
        acc,f1 = evaluate(dev_predictions, dev_labels)
    elif model == MODEL_TYPE.ULMFIT:
        train_pos_texts = [inst2text_with_capitals(inst) for inst in pos_train_sample]
        train_neg_texts = [inst2text_with_capitals(inst) for inst in neg_train_sample]
        dev_pos_texts = [inst2text_with_capitals(inst) for inst in data_manager.get_data() if inst["label"]==1]
        dev_neg_texts = [inst2text_with_capitals(inst) for inst in data_manager.get_data() if inst["label"]==0]
        cls.set_train_data(train_pos_texts, train_neg_texts)
        dev_labels = cls.set_valid_data(dev_pos_texts, dev_neg_texts)
        # print('NOT DUMPING TRAIN/DEV DATA!!!!!')
        cls.train()
        dev_predictions, dev_pos_predictions_scores = cls.predict()
        acc,f1 = evaluate(dev_predictions, dev_labels)
    else:
        # Remaining vector classifiers (PA / LR / SVM); args.nrfd > 0 enables
        # non-rationale down-weighting of tokens.
        cls.clear(remember_train_std_if_supported=True)
        if args.nrfd > 0 :
            pos_vecs = [emb_service.represent_spans(instance_split_with_rationales(inst, args.nrfd), use_words=freq_words) for inst in pos_train_sample]
            neg_vecs = [emb_service.represent_spans(instance_split_with_rationales(inst, args.nrfd), use_words=freq_words) for inst in neg_train_sample]
        else :
            pos_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words) for inst in pos_train_sample]
            neg_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words) for inst in neg_train_sample]
        dev_vecs = [emb_service.represent_spans(instance_split(inst), use_words=freq_words) for inst in data_manager.get_data()]
        dev_labels = np.array([inst["label"] for inst in data_manager.get_data()])
        cls.add_positive_instances(pos_vecs)
        cls.add_negative_instances(neg_vecs)
        cls.train()
        dev_predictions = cls.predict(dev_vecs)
        dev_pos_predictions_scores = None # TODO: extract this info if possible
        acc,f1 = evaluate(dev_predictions, dev_labels)
    return acc, f1, dev_predictions, dev_pos_predictions_scores
if __name__ == '__main__':
    # Few-shot text-classification experiment driver: parses the CLI, builds
    # an embedding service + classifier, then runs `ep_iter_count` resampled
    # train/evaluate iterations for each episode (train-set) size.
    print('\n'+str(datetime.datetime.now()))
    #np.random.seed(98634975)
    torch.manual_seed(744875692)
    # ---------------- command-line interface ----------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", help="Random seed", type=int, default=98634975)
    parser.add_argument("--norm", help="Normalize the embeddings", type=bool, default=True)
    parser.add_argument("--sow", help="Set-of-words representation (only zero/one counts of words in a given text)", type=bool, default=False)
    parser.add_argument("--model", help="The type of model", type=str, choices=[i.name for i in MODEL_TYPE])
    parser.add_argument("--bert_pretrained_name", help="The pretrained BERT model to load", type=str, default='bert-base-uncased')
    parser.add_argument("--bert_pool_type", help="The way sentences are pooled together in the BERT model", type=str, choices=[i.name for i in BertPoolType])
    parser.add_argument("--bert_rationale_weight", help="Weight of rationales in bert training (0 means none)", type=float, default=0.0)
    parser.add_argument("--bert_text_weight", help="Weight of instance labels loss in bert training (0 means none)", type=float, default=1.0)
    parser.add_argument("--two_berts", dest='two_berts', help="Use a separate bert for label and rationale predictions", action='store_true')
    parser.add_argument("--one_bert", dest='two_berts', help="Use a single bert for both label and rationale predictions", action='store_false')
    parser.add_argument("--bert_detach_weights", dest='bert_detach_weights', help="Bert token/sent weights are not learned from instance labels", action='store_true')
    parser.add_argument("--bert_attach_weights", dest='bert_detach_weights', help="Bert token/sent weights are learned (also) from instance labels", action='store_false')
    parser.add_argument("--bert_learn_weights", help="Learn sentence weights from instance labels (like attention)", action='store_true')
    parser.add_argument("--bert_independent_rationales", help="Rationales will not be used for weighted averaging in classification", action='store_true')
    parser.add_argument("--bert_max_seq_len", help="Texts longer than that (in terms of word-piece count) will be truncated", type=int, default=48)
    parser.add_argument("--bert_deep_fine_tuning", help="Fine tune a linear layer on top of a pooling layer for sentence classification", action='store_true')
    parser.add_argument("--rep", help="The type of the representation", type=str, choices=[i.name for i in WORD_REP])
    parser.add_argument("--bias", help="The strength of the rationale", type=int)
    parser.add_argument("--min_word_count", help="The min count for a word to be considered", type=int, default=0)
    parser.add_argument("--output", help="The output file for the experiments", type=str)
    parser.add_argument("--embeddings", help="The path to the embeddings file", type=str, default=None)
    parser.add_argument("--idf", help="Apply inverse-doc-frequence weights", type=bool, default=False)
    parser.add_argument("--idf_file", help="The path to the inverse-doc-frequence file. If None, then the train set is used.", type=str, default=None)
    parser.add_argument("--elmo_cache", help="The path to the elmo cache file", type=str, default=None)
    parser.add_argument("--ep_iter_count", help="The number of experiment iterations per episode", type=int, default=30)
    parser.add_argument("--ep_sizes", help="List of the training sizes to be used", type=str, default="2 6 10 20 60 200 400")
    parser.add_argument("--nrfd",help="The weight discount for rationale classifiers", type=float,default=0.0)
    parser.add_argument("--data_dir", help="The path to input files", type=str, default="data")
    parser.add_argument("--dataset", help="Name of dataset to run the experiments on", type=str, default=None)
    parser.add_argument("--store_dir", help="The path to store files (e.g. ulmfit data, bert models)", type=str, default=None)
    parser.add_argument("--text", help="Input text to classify for debug (instead of running on dev/test sets)", type=str, default=None)
    parser.add_argument("--pred_file", help="Output file to dump classifier predictions to", type=str, default=None)
    parser.add_argument("--epochs", help="Number of training epochs (currently used only for bert)", type=int, default=3)
    parser.add_argument("--lr",help="Learning rate for training (currently used only for bert)", type=float,default=5e-6)
    parser.add_argument("--dropout",help="Probability to drop", type=float,default=0.1)
    parser.add_argument("--no_sent_split", help="Ignore sentence split in dataset (treats instance as one continguous text", action='store_true')
    args = parser.parse_args()
    # Seed python/numpy RNGs from the CLI; note torch was seeded with a fixed
    # constant above, so torch randomness does NOT follow --seed.
    random.seed(args.seed)
    np.random.seed(args.seed)
    #torch.manual_seed(args.seed)
    num_iter_per_episode = args.ep_iter_count
    print('two_berts', args.two_berts)
    print('num_iter_per_episode: %d' % num_iter_per_episode)
    rationale_rep = RATIONALE_REP.PER_CLASS
    model = MODEL_TYPE[args.model]
    #model = MODEL_TYPE.PROTOTYPE
    word_rep = WORD_REP[args.rep] if args.rep is not None else None
    #word_rep = WORD_REP.W2V
    norm = args.norm
    #norm = 1
    sow = args.sow
    bias_strength = args.bias
    #bias_strength = 6
    word_feature_min_freq = args.min_word_count
    #word_feature_min_freq = 0
    # Results log is opened in append mode; optional per-prediction dump file.
    output = open(args.output, "a")
    predictions_output = open(args.pred_file, 'w') if args.pred_file != None else None
    #output = open("experiments.txt", "w")
    nrfd_string = "" if args.nrfd == 0 else "_nrfd_"+str(args.nrfd)
    idf_string = "" if args.idf == False else "_idf"
    data_dir = args.data_dir
    conf = ("Experiment Setup:\nmodel = " + str(model)+ "\nword representation = "+str(word_rep)+" norm = "+str(norm)
            +"\nbias_strength = "+str(bias_strength)+"\nword_feature_min_freq = " + str(word_feature_min_freq))
    # NOTE(review): `id` shadows the Python builtin; it is the experiment tag
    # written into the results log.
    id = (str(model)+"_"+str(word_rep)+"_n_"+str(int(norm))+"_b_"+str(bias_strength)+"_wfc_"+str(word_feature_min_freq)+nrfd_string+idf_string)
    print (id)
    output.write("\n\n\n")
    output.write(conf)
    output.write("\n")
    output.write(id)
    output.write("\n")
    cache = None
    stats_counts = False
    # print('Stats counting is ON!')
    # ---------------- embedding service ----------------
    # ULMFIT/BERT models embed internally, so no external embedding service.
    if model == MODEL_TYPE.ULMFIT or model == MODEL_TYPE.BERT:
        emb_service = None
        print("NOTICE: Embedding service is not used with model type", model)
    else:
        idfs = None
        if args.idf == True and args.idf_file != None:
            # For now, this is not word doc counts, but simply word counts
            counts = read_word_counts(args.idf_file)
            idfs = get_idfs(counts, sum(counts.values()))
        if word_rep == WORD_REP.ONE_HOT or word_rep == WORD_REP.W2V:
            # embedding_path = '/data7/DrWatson/embeddings/google/GoogleNews-vectors-negative300.bin.onewords.txt'
            embeddings = Embeddings(args.embeddings, normalize=norm, one_hot=(word_rep==WORD_REP.ONE_HOT), stats_count=stats_counts) # embeddings file is used also for one-hot just to read the vocabulary
        elif word_rep == WORD_REP.ELMO:
            # cache = EmbeddingsCache('/users/Oren.Melamud/data/fewshot/movie_review_elmo_cache.bin')
            cache = EmbeddingsCache(args.elmo_cache)
            embeddings = ElmoEmbeddings(normalize=norm, cache=cache)
        elif word_rep == WORD_REP.BERT:
            embeddings = BertEmbeddings(cache_dir=args.store_dir+'/bert/', stats_count=stats_counts)
        emb_service = EmbeddingsService(embeddings, normalize=norm, bias_strength=bias_strength, stopwords=STOP_WORDS,
                                          sow_representation=args.sow, idfs=idfs)
        print("Finished initializing embedding service")
    # Episode sizes, e.g. "2 6 10" -> [2, 6, 10] training examples per episode.
    episodes = [int(s) for s in args.ep_sizes.strip().split()]
    print('Episode sizes:', episodes)
    # ---------------- classifier ----------------
    # NOTE(review): RBSVM and SVM map to the same classifier type here.
    cls = None
    if model == MODEL_TYPE.LR :
        cls = BinaryClassifier(CLASSIFIER_TYPE.LR)
    elif model == MODEL_TYPE.PA :
        cls = BinaryClassifier(CLASSIFIER_TYPE.PA)
    elif model == MODEL_TYPE.RBSVM :
        cls = BinaryClassifier(CLASSIFIER_TYPE.SVM)
    elif model == MODEL_TYPE.SVM :
        cls = BinaryClassifier(CLASSIFIER_TYPE.SVM)
    elif model == MODEL_TYPE.ULMFIT :
        cls = UlmfitClassifier(args.ulmfit_dir)
    elif model == MODEL_TYPE.BERT:
        cls = BertClassifier(pretrained_bert=args.bert_pretrained_name, num_labels=2)
    data_manager = DataManager(data_dir+"/"+args.dataset+".train.json",
                               data_dir+"/"+args.dataset+".dev.json",
                               data_dir+"/"+args.dataset+".test.json",
                               0, dev = False, sent_split = not args.no_sent_split)
    print('Using dataset: ', args.dataset)
    #experiment = {}
    print('\n'+str(datetime.datetime.now()))
    # ---------------- main experiment loop ----------------
    for ep_sz in episodes :
        f1_scores_per_ep = []
        acc_scores_per_ep = []
        #get num_iter_per_episode samples
        #train_samples_per_episode = []
        output.write(str(ep_sz)+" examples for training")
        output.write("\n")
        if cls != None :
            cls.clear()
        for n_iter in range(num_iter_per_episode):
            pos_train_sample = []
            neg_train_sample = []
            train_sample = data_manager.get_train_sample(ep_sz)
            train_sample_ids = [ts["rid"] for ts in train_sample]
            print("TRAIN-SET SIZE: "+str(ep_sz)+"\tITER: " + str(n_iter))
            output.write("Train_sample: " + id +" " +str(ep_sz) + " "+ str(train_sample_ids))
            output.write("\n")
            # for ONE_HOT we use freq_words always to restrict the vocab only to the words in the train-set (all other words are meaningless in one-hot)
            freq_words = get_frequent_words(train_sample, word_feature_min_freq) if (word_rep==WORD_REP.ONE_HOT or word_feature_min_freq > 1) else None
            if args.idf == True and args.idf_file == None:
                # No external idf file: estimate idfs from this train sample.
                word_counts = get_word_doc_counts(train_sample)
                idfs = get_idfs(word_counts, len(train_sample))
                emb_service.set_idfs(idfs)
            # Split the sampled train set by label.
            for ts in train_sample :
                if ts["label"] == 0 :
                    neg_train_sample.append(ts)
                else :
                    pos_train_sample.append(ts)
            acc = None
            f1 = None
            if len(pos_train_sample) > 0 and len(neg_train_sample) > 0 :
                acc,f1, dev_predictions, dev_pos_predictions_scores = train_and_evaluate(cls, data_manager, train_sample, pos_train_sample, neg_train_sample, emb_service, freq_words, args.text)
            else :
                # Degenerate sample (only one class drawn): fall back to
                # random predictions so the iteration still yields scores.
                print("Ep size " + str(ep_sz) + " sample number " + str(n_iter) + " random class assignment")
                dev_predictions = predict_random(data_manager.get_data())
                dev_pos_predictions_scores = None
                dev_labels = np.array([inst["label"] for inst in data_manager.get_data()])
                acc,f1 = evaluate(dev_predictions, dev_labels)
            f1_scores_per_ep.append(f1)
            acc_scores_per_ep.append(acc)
            #output.write('iter acc ' + str(acc) + ' f1 ' + str(f1) + '\n')
            print('iter acc ' + str(acc) + ' f1 ' + str(f1) + '\n')
            if args.pred_file != None:
                dump_predictions(predictions_output, args.dataset, data_manager.get_data(), dev_predictions, dev_pos_predictions_scores)
        # Per-episode-size summary: raw scores plus mean/stdev.
        output.write("F1: " + id + " " +str(ep_sz) + " " + str(f1_scores_per_ep))
        output.write("\n")
        output.write("Accuracy: " + id + " " +str(ep_sz) + " " + str(acc_scores_per_ep))
        output.write("\n")
        #experiment[str(ep_sz)] = train_samples_per_episode
        output.write("Avg F1 " + id + " " +str(ep_sz) + " " + str(np.average(np.array(f1_scores_per_ep))) + " stdev " +str(np.std(f1_scores_per_ep))+"\n")
        output.write ("Avg accuracy " + id + " " +str(ep_sz) + " " + str(np.average(np.array(acc_scores_per_ep))) +" stdev " +str(np.std(acc_scores_per_ep)) + "\n\n" )
        print ("Average dev F1 score for episode of size " + str(ep_sz) + " " + str(np.average(np.array(f1_scores_per_ep))) + " stdev " + str(np.std(f1_scores_per_ep)) + " accuracy " + str(np.average(np.array(acc_scores_per_ep))) +" stdev " + str(np.std(acc_scores_per_ep)) )
    if stats_counts:
        print('total_toks=%d, unk_toks=%d, unk_ratio=%.3f' % (emb_service.embeddings.total_toks, emb_service.embeddings.unks, emb_service.embeddings.get_unk_ratio()))
    if cache != None:
        cache.close()
    print('\n'+str(datetime.datetime.now())+'\n')
    output.close()
    if predictions_output != None:
        predictions_output.close()
| [
"classifiers.bert_classifier.BertClassifier",
"embedding.embeddings.Embeddings",
"prototype.prototype.Prototype",
"numpy.array",
"classifiers.bert_classifier.BertTrainConfig",
"sys.exit",
"data_utils.data_manager.DataManager",
"numpy.random.RandomState",
"argparse.ArgumentParser",
"numpy.random.se... | [((1471, 1498), 'numpy.random.RandomState', 'np.random.RandomState', (['(3333)'], {}), '(3333)\n', (1492, 1498), True, 'import numpy as np\n'), ((5599, 5651), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5630, 5651), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((5661, 5696), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5675, 5696), False, 'from sklearn.metrics import accuracy_score\n'), ((5800, 5821), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (5819, 5821), False, 'import collections\n'), ((17054, 17082), 'torch.manual_seed', 'torch.manual_seed', (['(744875692)'], {}), '(744875692)\n', (17071, 17082), False, 'import torch\n'), ((17101, 17126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17124, 17126), False, 'import argparse\n'), ((21685, 21707), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (21696, 21707), False, 'import random\n'), ((21712, 21737), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (21726, 21737), True, 'import numpy as np\n'), ((25456, 25662), 'data_utils.data_manager.DataManager', 'DataManager', (["(data_dir + '/' + args.dataset + '.train.json')", "(data_dir + '/' + args.dataset + '.dev.json')", "(data_dir + '/' + args.dataset + '.test.json')", '(0)'], {'dev': '(False)', 'sent_split': '(not args.no_sent_split)'}), "(data_dir + '/' + args.dataset + '.train.json', data_dir + '/' +\n args.dataset + '.dev.json', data_dir + '/' + args.dataset +\n '.test.json', 0, dev=False, sent_split=not args.no_sent_split)\n", (25467, 25662), False, 'from data_utils.data_manager import DataManager\n'), ((9253, 9275), 'prototype.prototype.Prototype', 'Prototype', (['emb_service'], {}), '(emb_service)\n', (9262, 9275), False, 'from prototype.prototype 
import Prototype\n'), ((24525, 24665), 'embedding.embeddings_service.EmbeddingsService', 'EmbeddingsService', (['embeddings'], {'normalize': 'norm', 'bias_strength': 'bias_strength', 'stopwords': 'STOP_WORDS', 'sow_representation': 'args.sow', 'idfs': 'idfs'}), '(embeddings, normalize=norm, bias_strength=bias_strength,\n stopwords=STOP_WORDS, sow_representation=args.sow, idfs=idfs)\n', (24542, 24665), False, 'from embedding.embeddings_service import EmbeddingsService\n'), ((24931, 24967), 'classifiers.binary_classifier.BinaryClassifier', 'BinaryClassifier', (['CLASSIFIER_TYPE.LR'], {}), '(CLASSIFIER_TYPE.LR)\n', (24947, 24967), False, 'from classifiers.binary_classifier import BinaryClassifier, CLASSIFIER_TYPE\n'), ((10773, 10784), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10781, 10784), False, 'import sys\n'), ((23905, 24017), 'embedding.embeddings.Embeddings', 'Embeddings', (['args.embeddings'], {'normalize': 'norm', 'one_hot': '(word_rep == WORD_REP.ONE_HOT)', 'stats_count': 'stats_counts'}), '(args.embeddings, normalize=norm, one_hot=word_rep == WORD_REP.\n ONE_HOT, stats_count=stats_counts)\n', (23915, 24017), False, 'from embedding.embeddings import Embeddings\n'), ((25016, 25052), 'classifiers.binary_classifier.BinaryClassifier', 'BinaryClassifier', (['CLASSIFIER_TYPE.PA'], {}), '(CLASSIFIER_TYPE.PA)\n', (25032, 25052), False, 'from classifiers.binary_classifier import BinaryClassifier, CLASSIFIER_TYPE\n'), ((13847, 13985), 'classifiers.bert_classifier.BertTrainConfig', 'BertTrainConfig', ([], {'num_train_epochs': 'args.epochs', 'learning_rate': 'args.lr', 'upper_dropout': 'args.dropout', 'max_seq_length': 'args.bert_max_seq_len'}), '(num_train_epochs=args.epochs, learning_rate=args.lr,\n upper_dropout=args.dropout, max_seq_length=args.bert_max_seq_len)\n', (13862, 13985), False, 'from classifiers.bert_classifier import BertTrainConfig\n'), ((16993, 17016), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17014, 17016), False, 
'import datetime\n'), ((24247, 24279), 'embedding.embeddings_cache.EmbeddingsCache', 'EmbeddingsCache', (['args.elmo_cache'], {}), '(args.elmo_cache)\n', (24262, 24279), False, 'from embedding.embeddings_cache import EmbeddingsCache\n'), ((25104, 25141), 'classifiers.binary_classifier.BinaryClassifier', 'BinaryClassifier', (['CLASSIFIER_TYPE.SVM'], {}), '(CLASSIFIER_TYPE.SVM)\n', (25120, 25141), False, 'from classifiers.binary_classifier import BinaryClassifier, CLASSIFIER_TYPE\n'), ((25773, 25796), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25794, 25796), False, 'import datetime\n'), ((24414, 24491), 'embedding.bert_embeddings.BertEmbeddings', 'BertEmbeddings', ([], {'cache_dir': "(args.store_dir + '/bert/')", 'stats_count': 'stats_counts'}), "(cache_dir=args.store_dir + '/bert/', stats_count=stats_counts)\n", (24428, 24491), False, 'from embedding.bert_embeddings import BertEmbeddings\n'), ((25191, 25228), 'classifiers.binary_classifier.BinaryClassifier', 'BinaryClassifier', (['CLASSIFIER_TYPE.SVM'], {}), '(CLASSIFIER_TYPE.SVM)\n', (25207, 25228), False, 'from classifiers.binary_classifier import BinaryClassifier, CLASSIFIER_TYPE\n'), ((29336, 29361), 'numpy.std', 'np.std', (['acc_scores_per_ep'], {}), '(acc_scores_per_ep)\n', (29342, 29361), True, 'import numpy as np\n'), ((29635, 29658), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29656, 29658), False, 'import datetime\n'), ((28888, 28912), 'numpy.std', 'np.std', (['f1_scores_per_ep'], {}), '(f1_scores_per_ep)\n', (28894, 28912), True, 'import numpy as np\n'), ((29050, 29075), 'numpy.std', 'np.std', (['acc_scores_per_ep'], {}), '(acc_scores_per_ep)\n', (29056, 29075), True, 'import numpy as np\n'), ((25364, 25435), 'classifiers.bert_classifier.BertClassifier', 'BertClassifier', ([], {'pretrained_bert': 'args.bert_pretrained_name', 'num_labels': '(2)'}), '(pretrained_bert=args.bert_pretrained_name, num_labels=2)\n', (25378, 25435), False, 'from 
classifiers.bert_classifier import BertClassifier\n'), ((29289, 29316), 'numpy.array', 'np.array', (['acc_scores_per_ep'], {}), '(acc_scores_per_ep)\n', (29297, 29316), True, 'import numpy as np\n'), ((28842, 28868), 'numpy.array', 'np.array', (['f1_scores_per_ep'], {}), '(f1_scores_per_ep)\n', (28850, 28868), True, 'import numpy as np\n'), ((29004, 29031), 'numpy.array', 'np.array', (['acc_scores_per_ep'], {}), '(acc_scores_per_ep)\n', (29012, 29031), True, 'import numpy as np\n'), ((29231, 29255), 'numpy.std', 'np.std', (['f1_scores_per_ep'], {}), '(f1_scores_per_ep)\n', (29237, 29255), True, 'import numpy as np\n'), ((29184, 29210), 'numpy.array', 'np.array', (['f1_scores_per_ep'], {}), '(f1_scores_per_ep)\n', (29192, 29210), True, 'import numpy as np\n')] |
import numpy as np
from ..util.backend_functions import backend as bd
from .diffractive_element import DOE
from ..util.image_handling import convert_graymap_image_to_hsvmap_image, rescale_img_to_custom_coordinates
from PIL import Image
from pathlib import Path
"""
MPL 2.0 License
Copyright (c) 2022, <NAME>
All rights reserved.
"""
class ApertureFromImage(DOE):
    # Diffractive optical element whose transmittance function is defined by
    # image files: an amplitude mask (graymap) and/or a phase mask.
    def __init__(self, amplitude_mask_path= None, phase_mask_path= None, image_size = None, phase_mask_format = 'hsv', amplitude_mask_extent = [0,1], simulation = None):
        """
        Load the image at "amplitude_mask_path" as a numpy graymap array representing
        the amplitude transmittance of the aperture, and optionally the image at
        "phase_mask_path" as the phase of the transmittance.

        The images are centered on the plane and their physical size is specified by
        image_size = (float, float). If image_size isn't specified, the image fills
        the entire aperture plane.

        phase_mask_format: 'hsv' (hue channel encodes phase) or 'graymap'
        (converted to an hsv map first).
        amplitude_mask_extent: [min, max] range the gray values are rescaled to.
        NOTE(review): mutable default `[0,1]` — harmless here since it is only
        read, never mutated.
        """
        # `bd` is the numpy/cupy backend chosen at runtime; re-import so this
        # module picks up the backend selected after import time.
        global bd
        from ..util.backend_functions import backend as bd
        self.simulation = simulation
        self.amplitude_mask_path = amplitude_mask_path
        self.phase_mask_path = phase_mask_path
        self.image_size = image_size
        self.phase_mask_format = phase_mask_format
        # Default: fully transparent aperture (scalar transmittance of 1).
        t = 1.
        if self.amplitude_mask_path != None:
            #load the amplitude_mask image
            img = Image.open(Path(self.amplitude_mask_path))
            img = img.convert("RGB")
            # Resample the image onto the simulation grid (Nx x Ny samples
            # spanning extent_x x extent_y).
            rescaled_img = rescale_img_to_custom_coordinates(img, self.image_size , simulation.extent_x,simulation.extent_y, simulation.Nx, simulation.Ny)
            imgRGB = np.asarray(rescaled_img) / 255.0
            # RGB -> gray luminance (ITU-R BT.601 weights).
            t = 0.2990 * imgRGB[:, :, 0] + 0.5870 * imgRGB[:, :, 1] + 0.1140 * imgRGB[:, :, 2]
            # Flip vertically: image row order is top-to-bottom, the simulation
            # y axis presumably increases upward — TODO confirm.
            t = bd.array(np.flip(t, axis = 0))
            # Linearly map gray values [0, 1] to the requested amplitude range.
            t = t*(amplitude_mask_extent[1] - amplitude_mask_extent[0]) + amplitude_mask_extent[0]
        if self.phase_mask_path != None:
            from matplotlib.colors import rgb_to_hsv
            #load the phase_mask image
            img = Image.open(Path(self.phase_mask_path))
            img = img.convert("RGB")
            if self.phase_mask_format == 'graymap':
                img = convert_graymap_image_to_hsvmap_image(img)
            rescaled_img = rescale_img_to_custom_coordinates(img, self.image_size , simulation.extent_x,simulation.extent_y, simulation.Nx, simulation.Ny)
            imgRGB = np.asarray(rescaled_img) / 255.0
            # Hue channel (h in [0, 1]) encodes the phase.
            h = rgb_to_hsv( np.moveaxis(np.array([imgRGB[:, :, 0],imgRGB[:, :, 1],imgRGB[:, :, 2]]) , 0, -1))[:,:,0]
            # Map hue [0, 1] to phase [-pi, pi], flipped vertically like above.
            phase_mask = bd.flip(bd.array(h) * 2 * bd.pi - bd.pi, axis = 0)
            # Combine amplitude and phase into a complex transmittance.
            t = t*bd.exp(1j * phase_mask)
        self.t = t

    def get_transmittance(self, xx, yy, λ):
        # Transmittance is precomputed in __init__ and independent of the
        # sampled coordinates and wavelength.
        return self.t
| [
"numpy.flip",
"numpy.array",
"numpy.asarray",
"pathlib.Path"
] | [((1393, 1423), 'pathlib.Path', 'Path', (['self.amplitude_mask_path'], {}), '(self.amplitude_mask_path)\n', (1397, 1423), False, 'from pathlib import Path\n'), ((1639, 1663), 'numpy.asarray', 'np.asarray', (['rescaled_img'], {}), '(rescaled_img)\n', (1649, 1663), True, 'import numpy as np\n'), ((1793, 1811), 'numpy.flip', 'np.flip', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (1800, 1811), True, 'import numpy as np\n'), ((2079, 2105), 'pathlib.Path', 'Path', (['self.phase_mask_path'], {}), '(self.phase_mask_path)\n', (2083, 2105), False, 'from pathlib import Path\n'), ((2455, 2479), 'numpy.asarray', 'np.asarray', (['rescaled_img'], {}), '(rescaled_img)\n', (2465, 2479), True, 'import numpy as np\n'), ((2532, 2593), 'numpy.array', 'np.array', (['[imgRGB[:, :, 0], imgRGB[:, :, 1], imgRGB[:, :, 2]]'], {}), '([imgRGB[:, :, 0], imgRGB[:, :, 1], imgRGB[:, :, 2]])\n', (2540, 2593), True, 'import numpy as np\n')] |
import json
import numpy as np
import torch
from classifier.classifier_getter import get_classifier
from dataset import loader
from embedding.embedding import get_embedding
from tools.tool import parse_args, print_args, set_seed
def to_tensor(data, cuda, exclude_keys=None):
    '''
    Convert every value in `data` into an int64 torch.Tensor, in place.

    Args:
        data: dict whose values are numpy arrays.
        cuda: GPU device index, or -1 to keep tensors on the CPU.
        exclude_keys: keys whose values are left untouched (default: none).

    Returns:
        The same dict, mutated in place.
    '''
    # BUGFIX: the default used to be the mutable literal `[]`, which is a
    # single shared object across all calls; `None` + local fallback avoids
    # that classic pitfall.
    if exclude_keys is None:
        exclude_keys = []
    for key in data.keys():
        if key in exclude_keys:
            continue
        data[key] = torch.from_numpy(data[key]).to(torch.int64)
        if cuda != -1:
            data[key] = data[key].cuda(cuda)
    return data
return data
def Print_Attention(file_path, vocab, model, args):
    """
    Dump the generator's per-token attention weights for each example in
    `file_path` to 'output_attention.json'.

    Args:
        file_path: JSON-lines file; each line must contain 'text' (a list of
            tokens) and 'label' keys.
        vocab: vocabulary whose `itos` list maps indices to token strings.
        model: dict of modules; only model['G'] (the encoder) is used.
        args: parsed arguments; args.cuda selects the GPU (-1 for CPU).
    """
    model['G'].eval()
    # vocab.itos is a list of tokens; list.index(word) recovers the token id
    # (O(vocab) per lookup — acceptable for this small debug dump).
    word2id = vocab.itos
    max_len = 20  # every example is padded/truncated to this length

    # Read the raw JSON-lines examples (file handle closed deterministically).
    with open(file_path, 'r') as f:
        data = [json.loads(line) for line in f]

    output = {'text': [temp['text'] for temp in data]}

    # Map tokens to vocabulary indices; out-of-vocabulary tokens are dropped.
    for i, temp in enumerate(data):
        tem = [word2id.index(word) for word in temp['text'] if word in word2id]
        data[i]['text'] = np.array(tem)
        data[i]['text_len'] = max_len

    # Batch the examples, padding/truncating each index sequence to max_len.
    data2 = {'text': [], 'text_len': [], 'label': []}
    for temp in data:
        # BUGFIX: the guard used to be `< 200`, which made torch.zeros()
        # receive a negative size for sequences of length 21..199.
        if temp['text'].shape[0] < max_len:
            zero = torch.zeros(max_len - temp['text'].shape[0])
            temp['text'] = np.concatenate((temp['text'], zero))
        else:
            temp['text'] = temp['text'][:max_len]
        data2['text'].append(temp['text'])
        data2['text_len'].append(temp['text_len'])
        data2['label'].append(temp['label'])
    data2['text'] = np.array(data2['text'])
    data2['text_len'] = np.array(data2['text_len'])
    data2['label'] = np.array(data2['label'])

    query = to_tensor(data2, args.cuda)
    query['is_support'] = False

    # Forward pass; XQ_inputD holds the per-token attention weights.
    XQ, XQ_inputD, XQ_avg = model['G'](query, flag='query')
    output['attention'] = [XQ_inputD[i].cpu().detach().numpy().tolist()
                           for i in range(len(data))]

    output_file_path = 'output_attention.json'
    with open(output_file_path, 'w') as f_w:
        f_w.write(json.dumps(output))
        f_w.flush()
def main_attention():
    """Load a trained generator checkpoint and dump its attention weights."""
    args = parse_args()
    print_args(args)
    set_seed(args.seed)

    # Load the dataset splits; only the vocabulary is consumed below.
    train_data, val_data, test_data, vocab = loader.load_dataset(args)

    # Assemble the model components into the expected dict layout.
    ebd_G, ebd_D = get_embedding(vocab, args)
    model = {'G': ebd_G, 'D': ebd_D, 'clf': get_classifier(ebd_G.ebd_dim, args)}

    # Restore only the generator weights from a fixed checkpoint; the
    # discriminator/classifier checkpoints are intentionally not loaded.
    best_path = '../bin/tmp-runs/16116280768954578/18'
    model['G'].load_state_dict(torch.load(best_path + '.G'))

    file_path = r'../data/attention_data.json'
    Print_Attention(file_path, vocab, model, args)
"tools.tool.parse_args",
"tools.tool.set_seed",
"dataset.loader.load_dataset",
"json.loads",
"torch.load",
"json.dumps",
"embedding.embedding.get_embedding",
"torch.from_numpy",
"tools.tool.print_args",
"numpy.array",
"numpy.concatenate",
"classifier.classifier_getter.get_classifier",
"torch... | [((1678, 1701), 'numpy.array', 'np.array', (["data2['text']"], {}), "(data2['text'])\n", (1686, 1701), True, 'import numpy as np\n'), ((1726, 1753), 'numpy.array', 'np.array', (["data2['text_len']"], {}), "(data2['text_len'])\n", (1734, 1753), True, 'import numpy as np\n'), ((1775, 1799), 'numpy.array', 'np.array', (["data2['label']"], {}), "(data2['label'])\n", (1783, 1799), True, 'import numpy as np\n'), ((2267, 2279), 'tools.tool.parse_args', 'parse_args', ([], {}), '()\n', (2277, 2279), False, 'from tools.tool import parse_args, print_args, set_seed\n'), ((2285, 2301), 'tools.tool.print_args', 'print_args', (['args'], {}), '(args)\n', (2295, 2301), False, 'from tools.tool import parse_args, print_args, set_seed\n'), ((2307, 2326), 'tools.tool.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (2315, 2326), False, 'from tools.tool import parse_args, print_args, set_seed\n'), ((2389, 2414), 'dataset.loader.load_dataset', 'loader.load_dataset', (['args'], {}), '(args)\n', (2408, 2414), False, 'from dataset import loader\n'), ((2483, 2509), 'embedding.embedding.get_embedding', 'get_embedding', (['vocab', 'args'], {}), '(vocab, args)\n', (2496, 2509), False, 'from embedding.embedding import get_embedding\n'), ((2529, 2569), 'classifier.classifier_getter.get_classifier', 'get_classifier', (["model['G'].ebd_dim", 'args'], {}), "(model['G'].ebd_dim, args)\n", (2543, 2569), False, 'from classifier.classifier_getter import get_classifier\n'), ((1123, 1136), 'numpy.array', 'np.array', (['tem'], {}), '(tem)\n', (1131, 1136), True, 'import numpy as np\n'), ((2179, 2197), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (2189, 2197), False, 'import json\n'), ((2657, 2685), 'torch.load', 'torch.load', (["(best_path + '.G')"], {}), "(best_path + '.G')\n", (2667, 2685), False, 'import torch\n'), ((754, 770), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (764, 770), False, 'import json\n'), ((1355, 1394), 'torch.zeros', 'torch.zeros', 
(["(20 - temp['text'].shape[0])"], {}), "(20 - temp['text'].shape[0])\n", (1366, 1394), False, 'import torch\n'), ((1422, 1458), 'numpy.concatenate', 'np.concatenate', (["(temp['text'], zero)"], {}), "((temp['text'], zero))\n", (1436, 1458), True, 'import numpy as np\n'), ((451, 478), 'torch.from_numpy', 'torch.from_numpy', (['data[key]'], {}), '(data[key])\n', (467, 478), False, 'import torch\n')] |
import os
from glob import glob
import h5py
import numpy as np
import pandas as pd
import re
import xarray as xr
from pathlib import Path
from tqdm import tqdm
from brainio_base.stimuli import StimulusSet
from brainio_base.assemblies import NeuronRecordingAssembly
from brainio_collection.packaging import package_stimulus_set, package_data_assembly
storage_location = ("C:/Users/hsuen/Desktop/bigData/brainscore_img_elec_time_70hz150/")
# This needs to return a stimulus set
# Which will be one row per image
# Three columns, image ID and image filename and (additional) the label
def collect_stimuli(stimuli_directory):
    """
    Build the StimulusSet for the experiment.

    Reads 'stimgroups.npy' (per-image category labels) and 'stimsequence.npy'
    (image ids stored as byte strings, e.g. b'V12') from `stimuli_directory`
    and returns a StimulusSet with one row per image:
    image_id, image_file_name (<dir>/stimuli/<id>.jpg), image_number
    (presentation order), and label.
    """
    labels = np.load(stimuli_directory + 'stimgroups.npy')
    stim_sequence = np.load(stimuli_directory + 'stimsequence.npy')

    # Idiom fix: iterate the paired arrays directly instead of range(len(...)).
    rows = []
    for number, (raw_id, label) in enumerate(zip(stim_sequence, labels)):
        image_id = raw_id.decode('UTF-8')
        rows.append({
            'image_id': image_id,
            'image_file_name': stimuli_directory + "stimuli/" + image_id + ".jpg",
            'image_number': number,
            'label': label,
        })

    # Convert to a StimulusSet so the packaging functions accept it.
    stimuli = StimulusSet(pd.DataFrame(rows))
    # Packaging requires an image_id -> file path mapping as an attribute.
    stimuli.image_paths = {key: stimuli['image_file_name'][i]
                           for i, key in enumerate(stimuli['image_id'])}
    return stimuli
# pass into this function the stimuli object that you obtain from the above function
# stimuli is a Pandas DataFrame
# also pass into this function the neural response file (neural_responses.npy)
def load_responses(response_file, stimuli):
    # Build a NeuronRecordingAssembly from the saved neural responses.
    # `response_file` is actually the data directory; the responses are read
    # from <dir>/neural_responses.npy and electrode locations from
    # <dir>/brodmann_areas.npy.
    # Assumes neural_responses has shape (presentations, electrodes, 32 time
    # bins) to match the coords below — TODO confirm against the .npy files.
    neural_response_file = response_file + "neural_responses.npy"
    neural_responses = np.load(neural_response_file)
    brodmann_file = response_file + "brodmann_areas.npy"
    brodmann_locations = np.load(brodmann_file)
    # NOTE(review): the per-presentation image_id lookup below scans the
    # stimuli frame once per presentation (quadratic, but run only once).
    assembly = xr.DataArray(neural_responses,
                            coords={
                                'image_num': ('presentation', list(range(neural_responses.shape[0]))),
                                'image_id': ('presentation',
                                             [stimuli['image_id'][stimuli['image_number'] == num].values[0]
                                              for num in range(neural_responses.shape[0])]),
                                'region': ('neuroid', brodmann_locations),
                                # right now puts value "brodmann" area for all coords
                                'neuroid_id': ('neuroid', list(range(neural_responses.shape[1]))),
                                # Time axis: 32 bins; starts/ends are in ms
                                # (31.25 ms steps over 1 s), while 'time' is
                                # normalized to [0, 1] — presumably seconds.
                                'time': ('time_bin', np.linspace(0, 1, 32)),
                                'time_bin_start': ('time_bin', np.arange(0, 1000, 31.25)),
                                'time_bin_end': ('time_bin', np.arange(31.25, 1001, 31.25))
                            },
                            dims=['presentation', 'neuroid', 'time_bin'])
    assembly = NeuronRecordingAssembly(assembly)
    # Enforce the canonical dimension order expected by packaging.
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')
    return assembly
def main():
    """Collect stimuli and neural responses, then package both for brainio."""
    identifier = 'aru.Kuzovkin2018'

    stimuli = collect_stimuli(storage_location)
    stimuli.name = identifier

    assembly = load_responses(storage_location, stimuli)
    assembly.name = identifier

    print("Packaging stimuli")
    package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.name,
                         bucket_name="brainio.contrib")

    print("Packaging assembly")
    package_data_assembly(assembly, assembly_identifier=assembly.name,
                          stimulus_set_identifier=stimuli.name,
                          bucket_name="brainio.contrib")
if __name__ == '__main__':
main()
| [
"brainio_base.stimuli.StimulusSet",
"brainio_base.assemblies.NeuronRecordingAssembly",
"numpy.linspace",
"pandas.DataFrame",
"brainio_collection.packaging.package_data_assembly",
"numpy.load",
"numpy.arange",
"brainio_collection.packaging.package_stimulus_set"
] | [((643, 688), 'numpy.load', 'np.load', (["(stimuli_directory + 'stimgroups.npy')"], {}), "(stimuli_directory + 'stimgroups.npy')\n", (650, 688), True, 'import numpy as np\n'), ((728, 775), 'numpy.load', 'np.load', (["(stimuli_directory + 'stimsequence.npy')"], {}), "(stimuli_directory + 'stimsequence.npy')\n", (735, 775), True, 'import numpy as np\n'), ((1259, 1280), 'pandas.DataFrame', 'pd.DataFrame', (['stimuli'], {}), '(stimuli)\n', (1271, 1280), True, 'import pandas as pd\n'), ((1392, 1412), 'brainio_base.stimuli.StimulusSet', 'StimulusSet', (['stimuli'], {}), '(stimuli)\n', (1403, 1412), False, 'from brainio_base.stimuli import StimulusSet\n'), ((1978, 2007), 'numpy.load', 'np.load', (['neural_response_file'], {}), '(neural_response_file)\n', (1985, 2007), True, 'import numpy as np\n'), ((2091, 2113), 'numpy.load', 'np.load', (['brodmann_file'], {}), '(brodmann_file)\n', (2098, 2113), True, 'import numpy as np\n'), ((3207, 3240), 'brainio_base.assemblies.NeuronRecordingAssembly', 'NeuronRecordingAssembly', (['assembly'], {}), '(assembly)\n', (3230, 3240), False, 'from brainio_base.assemblies import NeuronRecordingAssembly\n'), ((3569, 3671), 'brainio_collection.packaging.package_stimulus_set', 'package_stimulus_set', (['stimuli'], {'stimulus_set_identifier': 'stimuli.name', 'bucket_name': '"""brainio.contrib"""'}), "(stimuli, stimulus_set_identifier=stimuli.name,\n bucket_name='brainio.contrib')\n", (3589, 3671), False, 'from brainio_collection.packaging import package_stimulus_set, package_data_assembly\n'), ((3729, 3868), 'brainio_collection.packaging.package_data_assembly', 'package_data_assembly', (['assembly'], {'assembly_identifier': 'assembly.name', 'stimulus_set_identifier': 'stimuli.name', 'bucket_name': '"""brainio.contrib"""'}), "(assembly, assembly_identifier=assembly.name,\n stimulus_set_identifier=stimuli.name, bucket_name='brainio.contrib')\n", (3750, 3868), False, 'from brainio_collection.packaging import package_stimulus_set, 
package_data_assembly\n'), ((2879, 2900), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(32)'], {}), '(0, 1, 32)\n', (2890, 2900), True, 'import numpy as np\n'), ((2966, 2991), 'numpy.arange', 'np.arange', (['(0)', '(1000)', '(31.25)'], {}), '(0, 1000, 31.25)\n', (2975, 2991), True, 'import numpy as np\n'), ((3055, 3084), 'numpy.arange', 'np.arange', (['(31.25)', '(1001)', '(31.25)'], {}), '(31.25, 1001, 31.25)\n', (3064, 3084), True, 'import numpy as np\n')] |
import torch
import argparse
import random
import pandas as pd
import numpy as np
from gensim.models.word2vec import Word2Vec
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, recall_score, roc_auc_score, precision_score
from trainer import Trainer
from utils.util import config_parser, blocks_to_index, save_result
def prepare_input(df, train_s, val_s, test_s, run, w2v):
    """Split the indexed data into train/val/test frames and stash them on the
    global ``args`` object for the Trainer.

    Args:
        df (pd.DataFrame): Parsed problem data with 'sid', 'blocks_seq', 'label'.
        train_s, val_s, test_s: Collections of student ids for each split.
        run (int): 0-based index of the current resample run (logging only).
        w2v: Trained gensim word vectors used to index the AST blocks.

    Returns:
        tuple[int, int, int]: Sizes of the train, validation and test splits.

    Side effects:
        Sets ``args.train_data``, ``args.val_data`` and ``args.test_data``.
    """
    print("\n-------------- Prepare input --------------")
    index_df = blocks_to_index(df, w2v)
    # Accumulate rows in plain lists; DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and appending row-by-row is quadratic.
    train_rows, val_rows, test_rows = [], [], []
    for _, row in index_df.iterrows():
        cur_id = row['sid']
        record = {'sid': cur_id, 'code': row['blocks_seq'],
                  'label': int(row['label'])}
        if cur_id in train_s:
            train_rows.append(record)
        elif cur_id in val_s:
            val_rows.append(record)
        elif cur_id in test_s:
            test_rows.append(record)
        # ids in none of the splits are silently dropped (as before)
    columns = ['sid', 'code', 'label']
    train_df = pd.DataFrame(train_rows, columns=columns)
    val_df = pd.DataFrame(val_rows, columns=columns)
    test_df = pd.DataFrame(test_rows, columns=columns)
    print("Data is ready for the [{}] resample run".format(run + 1))
    args.train_data = train_df
    args.val_data = val_df
    args.test_data = test_df
    return len(train_rows), len(val_rows), len(test_rows)
if __name__ == '__main__':
    # Fixed CLI configuration for this experiment; swap in the commented call
    # below to parse real command-line arguments instead.
    args = config_parser().parse_args('--name ASTNN --lang Snap --batch 20 --epochs 50'.split())
    # args = config_parser().parse_args()
    # assumes config_parser stores '--lang' as args.language — TODO confirm
    all_data = pd.read_pickle('./data/{}/parsed_prob.pkl'.format(args.language))
    semester = all_data['semester'].unique().tolist()
    # Sort semesters by trailing digit then leading character — presumably
    # chronological order; verify against the semester naming scheme.
    semester.sort(key=lambda x: (-int(x[-1]), x[0]), reverse=True)
    # One result row per test semester plus an aggregate 'overall' row.
    res = pd.DataFrame(index=semester+['overall'],
                 columns=['Accuracy', 'AUC', 'Precision', 'Recall', 'F1_score', 'Confusion_matrix'])
    all_true, all_pred = [], []
    # Incremental evaluation: train on all earlier semesters, test on the next.
    for test_idx in range(1, len(semester)):
        test_semester = semester[test_idx]
        train_semester = semester[:test_idx]
        print("\ntest semester:", test_semester, "train semester:", train_semester)
        data = all_data.loc[all_data['semester'].isin(train_semester + [test_semester]), ]
        train_val_sid = np.array(data.loc[data['semester'].isin(train_semester), 'sid'].values.tolist())
        test_sid = data.loc[data['semester'].isin([test_semester]), 'sid'].values.tolist()
        # ---- load SnapJava w2v
        word2vec = Word2Vec.load('./embeddings/w2v_SnapJava_{}'.format(args.embedding_dim)).wv
        # # ---- load all semester w2v
        # word2vec = Word2Vec.load('./embeddings/w2v_{}_{}'.format(args.language, args.embedding_dim)).wv
        # # ---- load semester-specific w2v
        # word2vec = Word2Vec.load(
        #     './embeddings/w2v_{}_{}_{}'.format(args.language, args.embedding_dim, test_semester)).wv
        max_tokens = word2vec.vectors.shape[0]
        # Extra all-zero row at index max_tokens serves as the padding/unknown
        # token embedding.
        embeddings = np.zeros((max_tokens + 1, args.embedding_dim), dtype="float32")
        embeddings[:max_tokens] = word2vec.vectors
        args.pretrained_embedding, args.vocab_size = embeddings, max_tokens + 1
        """
        for each semester fold, run 5-cv
        """
        best_acc = 0.0
        pred, actual = [], []
        kf = KFold(n_splits=5, shuffle=True)
        for run_id, (train_idx, val_idx) in enumerate(kf.split(train_val_sid)):
            train_sid = train_val_sid[train_idx].tolist()
            val_sid = train_val_sid[val_idx].tolist()
            # prepare_input stores the splits on the global 'args' object,
            # which Trainer reads below.
            train_num, val_num, test_num = prepare_input(df=data, run=run_id,
                                                         train_s=train_sid, val_s=val_sid, test_s=test_sid,
                                                         w2v=word2vec)
            print("training size:", train_num, "validation size:", val_num, "testing size:", test_num)
            trainer = Trainer(args)
            # NOTE(review): evaluation uses trainer.val_data as test_inputs,
            # not a held-out test set — confirm this is intended.
            test_pred, test_actual = trainer.run(info=test_semester, test_inputs=trainer.val_data)
            cur_acc = accuracy_score(test_actual, test_pred)
            # record best run among 10-cv
            if cur_acc > best_acc:
                best_acc = cur_acc
                pred, actual = test_pred, test_actual
        # save best result for each semester
        res = save_result(res, test_semester, pred, actual)
        res.to_csv('./result/res_{}_{}.csv'.format(args.name, args.language))
        # append predictions and labels
        all_true.extend(actual)
        all_pred.extend(pred)
    # save overall result
    res = save_result(res, 'overall', all_pred, all_true)
    res.to_csv('./result/res_{}_{}.csv'.format(args.name, args.language))
| [
"trainer.Trainer",
"sklearn.metrics.accuracy_score",
"numpy.zeros",
"utils.util.blocks_to_index",
"pandas.DataFrame",
"utils.util.config_parser",
"sklearn.model_selection.KFold",
"utils.util.save_result"
] | [((513, 537), 'utils.util.blocks_to_index', 'blocks_to_index', (['df', 'w2v'], {}), '(df, w2v)\n', (528, 537), False, 'from utils.util import config_parser, blocks_to_index, save_result\n'), ((554, 600), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['sid', 'code', 'label']"}), "(columns=['sid', 'code', 'label'])\n", (566, 600), True, 'import pandas as pd\n'), ((614, 660), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['sid', 'code', 'label']"}), "(columns=['sid', 'code', 'label'])\n", (626, 660), True, 'import pandas as pd\n'), ((675, 721), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['sid', 'code', 'label']"}), "(columns=['sid', 'code', 'label'])\n", (687, 721), True, 'import pandas as pd\n'), ((1993, 2123), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "(semester + ['overall'])", 'columns': "['Accuracy', 'AUC', 'Precision', 'Recall', 'F1_score', 'Confusion_matrix']"}), "(index=semester + ['overall'], columns=['Accuracy', 'AUC',\n 'Precision', 'Recall', 'F1_score', 'Confusion_matrix'])\n", (2005, 2123), True, 'import pandas as pd\n'), ((4804, 4851), 'utils.util.save_result', 'save_result', (['res', '"""overall"""', 'all_pred', 'all_true'], {}), "(res, 'overall', all_pred, all_true)\n", (4815, 4851), False, 'from utils.util import config_parser, blocks_to_index, save_result\n'), ((3204, 3267), 'numpy.zeros', 'np.zeros', (['(max_tokens + 1, args.embedding_dim)'], {'dtype': '"""float32"""'}), "((max_tokens + 1, args.embedding_dim), dtype='float32')\n", (3212, 3267), True, 'import numpy as np\n'), ((3531, 3562), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)'}), '(n_splits=5, shuffle=True)\n', (3536, 3562), False, 'from sklearn.model_selection import KFold\n'), ((4540, 4585), 'utils.util.save_result', 'save_result', (['res', 'test_semester', 'pred', 'actual'], {}), '(res, test_semester, pred, actual)\n', (4551, 4585), False, 'from utils.util import config_parser, blocks_to_index, 
save_result\n'), ((1652, 1667), 'utils.util.config_parser', 'config_parser', ([], {}), '()\n', (1665, 1667), False, 'from utils.util import config_parser, blocks_to_index, save_result\n'), ((4139, 4152), 'trainer.Trainer', 'Trainer', (['args'], {}), '(args)\n', (4146, 4152), False, 'from trainer import Trainer\n'), ((4274, 4312), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_actual', 'test_pred'], {}), '(test_actual, test_pred)\n', (4288, 4312), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, recall_score, roc_auc_score, precision_score\n')] |
"""
Metropolis Hastings example with simple model
Inspired by <NAME>'s blog post
https://twiecki.io/blog/2015/11/10/mcmc-sampling/
"""
import numpy as np
import scipy.stats as stats
# Fixed seed so the synthetic data set is reproducible across runs.
np.random.seed(0)
# 1000 samples from N(mu=30, sd=1); flatten() is defensive — rvs(size=1000)
# already returns a 1-D array.
N_mu_30_sd_1_data = stats.norm.rvs(loc=30, scale=1, size=1000).flatten()
def mh_sampler(data, samples=4, mu_init=.5, proposal_width=.5, mu_prior_mu=0, mu_prior_sd=1., offset=20):
"""Basic Metropolis Hasting Sampler. Optimized a little."""
mu_current = mu_init
posterior = []
prior_logpdf = stats.norm(mu_prior_mu, mu_prior_sd).logpdf
likelihood_logpdf = stats.norm(offset, 1).logpdf # transform the inputs by subtracting the mean
data = np.array(data) # for safety reasons
for _ in range(samples):
# Suggest new position
mu_proposal = np.random.normal(mu_current, proposal_width)
# Compute likelihood by multiplying probabilities of each data point
likelihood_current = likelihood_logpdf(data - mu_current).sum()
likelihood_proposal = likelihood_logpdf(data - mu_proposal).sum()
# Compute prior probability of current and proposed mu
prior_current = prior_logpdf(mu_current)
prior_proposal = prior_logpdf(mu_proposal)
p_current = likelihood_current + prior_current
p_proposal = likelihood_proposal + prior_proposal
# Accept proposal?
p_accept = np.exp(p_proposal - p_current)
# Usually would include prior probability, which we neglect here for simplicity
accept = np.random.rand() < p_accept
if accept:
# Update position
mu_current = mu_proposal
posterior.append(mu_current)
return np.array(posterior)
| [
"numpy.random.normal",
"numpy.random.rand",
"scipy.stats.norm",
"scipy.stats.norm.rvs",
"numpy.exp",
"numpy.array",
"numpy.random.seed"
] | [((185, 202), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (199, 202), True, 'import numpy as np\n'), ((669, 683), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (677, 683), True, 'import numpy as np\n'), ((1684, 1703), 'numpy.array', 'np.array', (['posterior'], {}), '(posterior)\n', (1692, 1703), True, 'import numpy as np\n'), ((225, 267), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(30)', 'scale': '(1)', 'size': '(1000)'}), '(loc=30, scale=1, size=1000)\n', (239, 267), True, 'import scipy.stats as stats\n'), ((513, 549), 'scipy.stats.norm', 'stats.norm', (['mu_prior_mu', 'mu_prior_sd'], {}), '(mu_prior_mu, mu_prior_sd)\n', (523, 549), True, 'import scipy.stats as stats\n'), ((581, 602), 'scipy.stats.norm', 'stats.norm', (['offset', '(1)'], {}), '(offset, 1)\n', (591, 602), True, 'import scipy.stats as stats\n'), ((788, 832), 'numpy.random.normal', 'np.random.normal', (['mu_current', 'proposal_width'], {}), '(mu_current, proposal_width)\n', (804, 832), True, 'import numpy as np\n'), ((1382, 1412), 'numpy.exp', 'np.exp', (['(p_proposal - p_current)'], {}), '(p_proposal - p_current)\n', (1388, 1412), True, 'import numpy as np\n'), ((1519, 1535), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1533, 1535), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import re
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import pandas as pd
import seaborn as sns
from itertools import cycle
from scipy.cluster.hierarchy import dendrogram, linkage
# Adjust matplotlib backend for snakemake/cluster
try:
    plt.figure()
# If no interactive display is available (e.g. snakemake/cluster jobs),
# fall back to the non-interactive Agg backend.
except:  # NOTE(review): bare except also hides unrelated errors — consider narrowing
    import matplotlib
    matplotlib.use('Agg')
# Project utilities; the import path differs between packaged and direct runs.
try:
    import libs.utils as ut
except ModuleNotFoundError:
    import utils as ut
COLORS = [
'#1F78B4', '#33A02C', '#E31A1C', '#FF7F00', '#6A3D9A', # dark
'#A6CEE3', '#B2DF8A', '#FB9A99', '#FDBF6F', '#CAB2D6', #light
'#62A3CB', '#72BF5B', '#EF5A5A', '#FE9F37', '#9A77B8', # medium
'#FFFF99', '#B15928', #ugly
]
TICK_FONTSIZE = 12
LABEL_FONTSIZE = 16
def get_colors(n, cmap='gist_rainbow', scale=0.85, alternating=True):
    """Return an endless cycle of ``n`` hex color strings sampled from a
    matplotlib colormap.

    Args:
        n (int): Number of distinct colors to generate.
        cmap (str): Name of the matplotlib colormap to sample.
        scale (float): Brightness factor applied to each RGB channel (<1 darkens).
        alternating (bool): If True, interleave the first and second half of the
            palette so neighboring colors differ more strongly.

    Returns:
        itertools.cycle: Infinite iterator over '#rrggbb' strings.
    """
    def to_hex(rgb_row, factor):
        # Scale each channel, clamp to [0, 255] and format as a hex triplet.
        channels = np.clip(rgb_row * factor, 0, 255).astype(int)
        return '#{:02x}{:02x}{:02x}'.format(*channels)
    colormap = plt.get_cmap(cmap)
    # Sample n evenly spaced RGB rows (drop the alpha channel).
    rgb_rows = colormap(np.arange(0, 1, 1 / n))[:, :-1] * 255
    palette = np.apply_along_axis(to_hex, axis=1, arr=rgb_rows, factor=scale)
    if alternating:
        first_half, second_half = np.array_split(palette, 2)
        palette = np.full(n, '#000000', dtype='U7')
        np.place(palette, np.arange(n) % 2 == 0, first_half)
        np.place(palette, np.arange(n) % 2 == 1, second_half)
    return cycle(palette)
def _get_col_order(assignment):
clusters, cluster_cnt = np.unique(assignment, return_counts=True)
col_order = np.array([], dtype=int)
for cl_idx in np.argsort(cluster_cnt)[::-1]:
cols = [i for i, j in enumerate(assignment) \
if j == clusters[cl_idx]]
col_order = np.append(col_order, cols)
return col_order
def plot_raw_data(data_in, data_raw_in=pd.DataFrame(), out_file=None,
            assignment=np.array([]), metric='correlation', row_cl=True):
    """Plot the (learned) mutation matrix as an annotated heatmap.

    Args:
        data_in (pd.DataFrame): Learned/denoised data (rows: mutations,
            columns: cells), values in [0, 1].
        data_raw_in (pd.DataFrame): Raw observed data with the same layout.
            If given, disagreements are annotated: 'o' = called 0 but observed
            1, 'x' = called 1 but observed 0, '-' = missing observation.
        out_file (str): If given, save the figure there; otherwise show it.
        assignment: Cluster assignment per cell. If non-empty, columns are
            reordered by cluster size and a cluster color bar is drawn.
        metric (str): Unused; kept for backward compatibility.
        row_cl (bool): If True, order rows by hierarchical clustering.
    """
    data = data_in.copy()
    data_raw = data_raw_in.copy()
    height = int(data.shape[0] // 5)
    width = int(data.shape[1] // 7.5)
    # NOTE: this figure is never drawn into (sns.clustermap creates its own);
    # kept to preserve the existing behavior of plt.show()/plt.close().
    fig, ax = plt.subplots(figsize=(width, height))
    # BUGFIX: default to no cluster color bar. Previously 'cluster_cols' was
    # only bound inside the branch below, so an empty 'assignment' raised a
    # NameError at the sns.clustermap call.
    cluster_cols = None
    if len(assignment) > 0:
        col_order = _get_col_order(assignment)
        clusters, cl_cnt = np.unique(assignment, return_counts=True)
        # Generate extra colors only if there are more clusters than presets.
        if clusters.size > len(COLORS):
            colors = get_colors(clusters.size - len(COLORS))
        col_map = {}
        for i, j in enumerate(clusters[np.argsort(cl_cnt)[::-1]]):
            try:
                col_map[j] = COLORS[i]
            except IndexError:
                col_map[j] = next(colors)
        col_dict = np.full(data_in.shape[1], '#ffffff', dtype='<U7')
        for i, cl in enumerate(col_order):
            col_dict[i] = col_map[assignment[cl]]
        cluster_cols = pd.Series(col_dict, name='clusters', index=col_order)
        # Reorder the columns (and raw data, if present) by cluster.
        data.columns = np.arange(data_in.shape[1])
        data = data[col_order]
        if not data_raw.empty:
            data_raw.columns = np.arange(data_raw_in.shape[1])
            data_raw = data_raw[col_order]
            x_labels = data_raw_in.columns[col_order]
        else:
            x_labels = data_in.columns[col_order]
    else:
        x_labels = data_in.columns
    if row_cl:
        # Hierarchical row ordering; NaNs are replaced by a sentinel (3).
        Z = linkage(data.fillna(3), 'complete')
        row_order = dendrogram(Z, truncate_mode=None)['leaves']
        data = data.iloc[row_order]
        if not data_raw.empty:
            data_raw = data_raw.iloc[row_order]
    else:
        row_order = np.arange(data.shape[0])
    if not data_raw.empty:
        # Mark disagreements between the learned calls and the raw data.
        annot = pd.DataFrame(
            np.full(data_raw.shape, '', dtype=str),
            index=data.index, columns=data.columns
        )
        if data.min().min() < 0:
            annot[(data.round() == -1) & (data_raw == 1)] = 'o'
        else:
            annot[(data.round() == 0) & (data_raw == 1)] = 'o'
        annot[(data.round() == 1) & (data_raw == 0)] = 'x'
        annot[data_raw.isnull()] = '-'
    else:
        annot = False
    # Binary colormap; values > 1 shown green, NaNs grey.
    cmap = plt.get_cmap('Reds', 2)
    cmap.set_over('green')
    cmap.set_bad('grey')
    cm = sns.clustermap(
        data, annot=annot, square=False, vmin=0, vmax=1, cmap=cmap, fmt='',
        linewidths=0, linecolor='lightgray', col_colors=cluster_cols,
        col_cluster=False, row_cluster=False #, col_colors_ratio=0.15
    )
    cm.cax.set_visible(False)
    cm.ax_row_dendrogram.set_visible(False)
    cm.ax_heatmap.spines['top'].set_visible(True)
    cm.ax_heatmap.spines['right'].set_visible(True)
    cm.ax_heatmap.spines['bottom'].set_visible(True)
    cm.ax_heatmap.spines['left'].set_visible(True)
    cm.ax_heatmap.set_yticks(np.arange(0.5, data.shape[0], 1))
    cm.ax_heatmap.set_xticks(np.arange(0.5, data.shape[1], 1))
    cm.ax_heatmap.set_xticklabels(x_labels, rotation=90, fontsize=8)
    cm.ax_heatmap.set_yticklabels(data_in.index, fontsize=8)
    cm.gs.set_width_ratios([0, 0, 1])
    cm.gs.set_height_ratios([0, 0, 0.05, 0.95])
    cm.gs.update(left=0, bottom=0.00, right=1, top=1)
    if not out_file:
        plt.show()
    elif data.shape[0] < 50:
        # Lower the export dpi for bigger matrices to limit file size.
        cm.savefig(out_file, dpi=300)
    elif data.shape[0] < 100:
        cm.savefig(out_file, dpi=200)
    else:
        cm.savefig(out_file, dpi=100)
    plt.close()
plt.close()
def plot_traces(results, out_file=None, burn_in=0):
    """Plot the MCMC traces (alpha, cluster count, posterior, likelihood and
    optionally error rates / PSRF) of one or more chains.

    Args:
        results (list[dict]): One result dict per chain, each with at least
            'ML', 'DP_alpha', 'assignments', 'MAP' and 'burn_in'; optionally
            'FP'/'FN' and 'PSRF'/'PSRF_cutoff'.
        out_file (str): If given, save the figure there; otherwise show it.
        burn_in (int): Unused here; the per-chain 'burn_in' entry is used
            inside _add_chain_traces instead.

    NOTE(review): assumes 'results' is non-empty — 'step_no' and
    'chain_result' are only bound inside the loop over chains, and the PSRF
    panel reads the *last* chain's 'PSRF' entries. Confirm this is intended
    for multi-chain runs.
    """
    # Number of subplot rows depends on which optional traces are present.
    no_rows = 6
    if 'FP' in results[0].keys():
        no_rows += 2
        errors = True
    else:
        errors = False
    if 'PSRF' in results[0].keys():
        no_rows += 1
        psrf = True
    else:
        psrf = False
    fig = plt.figure(figsize=(10, no_rows * 2))
    gs = GridSpec(no_rows, 1)
    # Fixed panels: alpha, cluster count, log-posterior, log-likelihood.
    ax = {0: fig.add_subplot(gs[0, 0]),
          1: fig.add_subplot(gs[1, 0]),
          2: fig.add_subplot(gs[2:4, 0]),
          3: fig.add_subplot(gs[4:6, 0])}
    if errors:
        ax[4] = fig.add_subplot(gs[6, 0])
        ax[5] = fig.add_subplot(gs[7, 0])
    for chain, chain_result in enumerate(results):
        try:
            color = COLORS[chain]
        except IndexError:
            # More chains than preset colors: lazily create a color generator
            # on first overflow and reuse it afterwards.
            try:
                color = next(colors)
            except NameError:
                missing_cols = len(results) - len(COLORS)
                colors = get_colors(missing_cols)
                color = next(colors)
        _add_chain_traces(chain_result, ax, color)
        step_no = chain_result['ML'].size + 1
    if psrf:
        ax[6] = fig.add_subplot(gs[no_rows - 1, 0])
        # PSRF is only recorded at certain steps; all others stay NaN.
        psrf_val = np.full(step_no, np.nan)
        for step_i, psrf_i in chain_result['PSRF']:
            psrf_val[step_i] = psrf_i
        ax[6].plot(np.arange(step_no), psrf_val, 'rx')
        ax[6].set_ylabel('PSRF', fontsize=LABEL_FONTSIZE)
        ax[6].axhline(1, ls='-', c='black')
        ax[6].axhline(chain_result['PSRF_cutoff'], ls=':', c='red')
    # Add x-axis label and tick labels below last plot, remove from others
    tick_dist = int(np.floor(step_no // 10 / 100) * 100)
    tick_pos = [tick_dist * i for i in range(0, 11, 1)]
    last_ax = max(ax.keys())
    for ax_id, ax_obj in ax.items():
        ax_obj.set_xlim(-step_no * 0.05, step_no * 1.05)
        ax_obj.set_xticks(tick_pos)
        if ax_id == last_ax:
            ax_obj.set_xticklabels([str(i) for i in tick_pos])
            ax_obj.set_xlabel('MCMC steps', fontsize=LABEL_FONTSIZE)
        else:
            ax_obj.set_xticklabels([])
    stdout_fig(fig, out_file)
def _add_chain_traces(data, ax, color, alpha=0.4, std_fkt=2.576):
    """Draw the trace lines of one MCMC chain onto the shared axes dict.

    Args:
        data (dict): Result dict of one chain ('DP_alpha', 'assignments',
            'MAP', 'ML', 'burn_in', optionally 'FN'/'FP').
        ax (dict): Axes created by plot_traces, keyed by panel index.
        color (str): Hex color used for this chain's lines.
        alpha (float): Line transparency.
        std_fkt (float): Half-width of the y-limits in units of the posterior
            std (2.576 corresponds to a ~99% normal interval).
    """
    burn_in = data['burn_in']
    # Panel 0: DP concentration parameter trace with posterior-mean line.
    a_mean, a_std = ut._get_posterior_avg(data['DP_alpha'][burn_in:])
    ax[0].plot(data['DP_alpha'], color, alpha=alpha)
    ax[0].set_ylabel('DPMM\nalpha', fontsize=LABEL_FONTSIZE)
    ax[0].axhline(a_mean, ls='--', c=color)
    ax[0].set_ylim(a_mean - std_fkt * a_std, a_mean + std_fkt * a_std)
    # Panel 1: number of distinct (non-NaN) clusters per step.
    cl = [np.sum(~np.isnan(np.unique(i))) for i in data['assignments']]
    cl_mean, cl_std = ut._get_posterior_avg(cl[burn_in:])
    ax[1].plot(cl, color, alpha=alpha)
    ax[1].axhline(cl_mean, ls='--', c=color)
    ax[1].set_ylim(cl_mean - std_fkt * cl_std, cl_mean + std_fkt * cl_std)
    # NOTE(review): the three lines below repeat the plot/axhline just drawn —
    # presumably redundant; confirm before removing.
    ax[1].plot(cl, color, alpha=alpha)
    ax[1].axhline(cl_mean, ls='--', c=color)
    ax[1].set_ylabel('Cluster\nnumber', fontsize=LABEL_FONTSIZE)
    # Panels 2-3: log posterior and log likelihood; a 2-D 'MAP' array holds
    # one trace per sub-chain, each drawn in its own preset color.
    if data['MAP'].shape[0] != data['MAP'].size:
        for i, MAP in enumerate(data['MAP']):
            ax[2].plot(MAP, COLORS[i+1], alpha=alpha)
            ax[3].plot(data['ML'][i], COLORS[i+1], alpha=alpha)
    else:
        ax[2].plot(data['MAP'], color, alpha=alpha)
        ax[3].plot(data['ML'], color, alpha=alpha)
    ax[2].set_ylabel('Log a posteriori', fontsize=LABEL_FONTSIZE)
    ax[3].set_ylabel('Log likelihood', fontsize=LABEL_FONTSIZE)
    # Optional panels 4-5: learned false-negative / false-positive rates.
    if 4 in ax:
        FN_mean, FN_std = ut._get_posterior_avg(data['FN'][burn_in:])
        ax[4].plot(data['FN'].round(4), color, alpha=alpha)
        # ax[4].set_ylim(FN_mean - std_fkt * FN_std, FN_mean + std_fkt * FN_std)
        ax[4].set_ylabel('FN error', fontsize=LABEL_FONTSIZE)
        ax[4].axhline(FN_mean, ls='--', c=color)
    if 5 in ax:
        FP_mean, FP_std = ut._get_posterior_avg(data['FP'][burn_in:])
        ax[5].plot(data['FP'].round(4), color, alpha=alpha)
        # ax[5].set_ylim(FP_mean - std_fkt * FP_std, FP_mean + std_fkt * FP_std)
        ax[5].set_ylabel('FP error', fontsize=LABEL_FONTSIZE)
        ax[5].axhline(FP_mean, ls='--', c=color)
    # Mark the end of the burn-in phase on every panel.
    if burn_in > 0:
        for ax_id, ax_obj in ax.items():
            ax_obj.axvline(burn_in, c=color)
def plot_similarity(data, out_file=None, attachments=None):
    """Plot the pairwise cell-cell similarity matrix as an annotated heatmap.

    Args:
        data: Square similarity matrix (array or DataFrame), values in [0, 1].
        out_file (str): If given, save the figure there; otherwise show it.
        attachments: Optional cluster assignment per cell. If given, cells are
            reordered so that members of the same cluster are adjacent.
    """
    cmap = 'OrRd'
    # Scale the figure with the matrix, clamped to a sane range.
    fig, ax = plt.subplots(figsize=np.clip(np.array(data.shape) * 0.3, 1, 50))
    # Idiom fix: was 'not isinstance(attachments, type(None))'.
    if attachments is not None:
        col_order = _get_col_order(attachments)
        data = pd.DataFrame(data)
        # Reorder both axes so same-cluster cells sit next to each other.
        data = data[col_order]
        data = data.reindex(col_order)
    # Return value unused — dropped the dead 'hm = ...' assignment.
    sns.heatmap(
        data, ax=ax, annot_kws={'size': 6}, annot=True, fmt='.2f',
        linewidths=.5, square=True, linecolor='lightgray',
        cmap=cmap, cbar_kws={'shrink': .5}, vmin=0, vmax=1,
    )
    ax.set_ylabel('Cell', fontsize=LABEL_FONTSIZE)
    ax.set_xlabel('Cell', fontsize=LABEL_FONTSIZE)
    ax.set_title('Pairwise Similarity Matrix', fontsize=LABEL_FONTSIZE)
    # Lower the export dpi for large matrices to keep file sizes reasonable.
    if data.shape[0] < 50:
        stdout_fig(fig, out_file)
    elif data.shape[0] < 100:
        stdout_fig(fig, out_file, dpi=200)
    else:
        stdout_fig(fig, out_file, dpi=100)
def color_tree_nodes(tree_file, clusters, out_dir='', transpose=True,
        prefix='colored'):
    """Color the nodes of a graphviz tree file by cluster assignment and write
    the result to a new '.gv' file (rendering it to PNG if graphviz is
    installed).

    Args:
        tree_file (str): Path to the input '.gv' file.
        clusters: Cluster label per node; tuple entries are treated as
            doublets and colored white.
        out_dir (str): Output directory for the recolored file.
        transpose (bool): If True, color cell nodes ('sXX'); otherwise color
            mutation nodes (1-based indices).
        prefix (str): Suffix appended to the output file name.
    """
    # Strip the closing brace so node attributes can be appended.
    with open(tree_file, 'r') as f_in:
        gv_raw = f_in.read().rstrip('}')
    # If more than one 'circle' shape occurs, turn the last one into a square
    # — presumably to distinguish a special node; confirm against the writer.
    if len(re.findall('circle', gv_raw)) > 1:
        circle_pos = gv_raw.rfind('circle')
        gv_raw = gv_raw[:circle_pos] + 'square' + gv_raw[circle_pos+6:]
    # Tuple assignments mark doublets; map them to the sentinel -1.
    clusters = [-1 if isinstance(i, tuple) else i for i in clusters]
    colors = get_colors(np.unique(clusters).size)
    cluster_cols = {i: next(colors) for i in np.unique(clusters)}
    # White for doublet cells
    cluster_cols[-1] = '#ffffff'
    if transpose:
        for cell, cluster in enumerate(clusters):
            gv_raw += f's{cell:02d} [fillcolor="{cluster_cols[cluster]}"];\n'
    else:
        for mut, cluster in enumerate(clusters):
            gv_raw += f'{mut+1} [fillcolor="{cluster_cols[cluster]}"];\n'
    gv_raw += '}'
    out_file = os.path.join(
        out_dir,
        os.path.basename(tree_file).replace('.gv', f'__{prefix}.gv')
    )
    with open(out_file, 'w') as f_out:
        f_out.write(gv_raw)
    # Best-effort rendering: silently skipped if graphviz is unavailable.
    try:
        from graphviz import render
        render('dot', 'png', out_file)
    except:
        pass
def stdout_fig(fig, out_file, dpi=300):
    """Show the figure interactively, or save it to ``out_file``, then close it.

    Args:
        fig: The matplotlib figure (or compatible grid object) to emit.
        out_file (str): Target path; if falsy, the figure is shown instead.
        dpi (int): Resolution used when saving.
    """
    if out_file:
        # Objects without subplots_adjust (e.g. clustermap grids) are fine.
        try:
            fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9)
        except AttributeError:
            pass
        fig.savefig(out_file, dpi=dpi)
    else:
        # tight_layout is likewise optional for grid-like objects.
        try:
            fig.tight_layout()
        except AttributeError:
            pass
        plt.show()
    plt.close()
def load_txt(path):
    """Load a space-separated integer assignment vector from ``path``.

    Supports two formats: a tab-separated table with an 'Assignment' column
    (the vector is read from its first row), or a plain file whose whole
    content is the space-separated vector.

    Args:
        path (str): Path of the file to read.

    Returns:
        list[int]: The parsed assignment vector.
    """
    try:
        df = pd.read_csv(path, sep='\t', index_col=False)
        x = df.at[0, 'Assignment'].strip().split(' ')
    except (ValueError, KeyError):
        # BUGFIX: a file without an 'Assignment' column raises KeyError (not
        # ValueError) at df.at[...], so the plain-text fallback never ran.
        with open(path, 'r') as f:
            x = f.read().strip().split(' ')
    return [int(i) for i in x]
if __name__ == '__main__':
    # Library module: nothing runs directly, so just print a placeholder.
    print('Here be dragons...')
| [
"numpy.clip",
"pandas.read_csv",
"numpy.array_split",
"numpy.array",
"numpy.argsort",
"numpy.arange",
"graphviz.render",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"pandas.DataFrame",
"itertools.cycle",
"matplotlib.use",
"seaborn.clustermap",
"numpy.floor",
"seaborn.heatm... | [((322, 334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (332, 334), True, 'import matplotlib.pyplot as plt\n'), ((996, 1014), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (1008, 1014), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1412), 'itertools.cycle', 'cycle', (['colors'], {}), '(colors)\n', (1404, 1412), False, 'from itertools import cycle\n'), ((1475, 1516), 'numpy.unique', 'np.unique', (['assignment'], {'return_counts': '(True)'}), '(assignment, return_counts=True)\n', (1484, 1516), True, 'import numpy as np\n'), ((1537, 1560), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1545, 1560), True, 'import numpy as np\n'), ((1812, 1826), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1824, 1826), True, 'import pandas as pd\n'), ((1866, 1878), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1874, 1878), True, 'import numpy as np\n'), ((2067, 2104), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (2079, 2104), True, 'import matplotlib.pyplot as plt\n'), ((4078, 4101), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Reds"""', '(2)'], {}), "('Reds', 2)\n", (4090, 4101), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4355), 'seaborn.clustermap', 'sns.clustermap', (['data'], {'annot': 'annot', 'square': '(False)', 'vmin': '(0)', 'vmax': '(1)', 'cmap': 'cmap', 'fmt': '""""""', 'linewidths': '(0)', 'linecolor': '"""lightgray"""', 'col_colors': 'cluster_cols', 'col_cluster': '(False)', 'row_cluster': '(False)'}), "(data, annot=annot, square=False, vmin=0, vmax=1, cmap=cmap,\n fmt='', linewidths=0, linecolor='lightgray', col_colors=cluster_cols,\n col_cluster=False, row_cluster=False)\n", (4179, 4355), True, 'import seaborn as sns\n'), ((5311, 5322), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5320, 5322), True, 'import matplotlib.pyplot as plt\n'), ((5624, 5661), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, no_rows * 2)'}), '(figsize=(10, no_rows * 2))\n', (5634, 5661), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5691), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['no_rows', '(1)'], {}), '(no_rows, 1)\n', (5679, 5691), False, 'from matplotlib.gridspec import GridSpec\n'), ((7541, 7590), 'utils._get_posterior_avg', 'ut._get_posterior_avg', (["data['DP_alpha'][burn_in:]"], {}), "(data['DP_alpha'][burn_in:])\n", (7562, 7590), True, 'import utils as ut\n'), ((7915, 7950), 'utils._get_posterior_avg', 'ut._get_posterior_avg', (['cl[burn_in:]'], {}), '(cl[burn_in:])\n', (7936, 7950), True, 'import utils as ut\n'), ((9871, 10055), 'seaborn.heatmap', 'sns.heatmap', (['data'], {'ax': 'ax', 'annot_kws': "{'size': 6}", 'annot': '(True)', 'fmt': '""".2f"""', 'linewidths': '(0.5)', 'square': '(True)', 'linecolor': '"""lightgray"""', 'cmap': 'cmap', 'cbar_kws': "{'shrink': 0.5}", 'vmin': '(0)', 'vmax': '(1)'}), "(data, ax=ax, annot_kws={'size': 6}, annot=True, fmt='.2f',\n linewidths=0.5, square=True, linecolor='lightgray', cmap=cmap, cbar_kws\n ={'shrink': 0.5}, vmin=0, vmax=1)\n", (9882, 10055), True, 'import seaborn as sns\n'), ((369, 390), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (383, 390), False, 'import matplotlib\n'), ((1197, 1222), 'numpy.array_split', 'np.array_split', (['colors', '(2)'], {}), '(colors, 2)\n', (1211, 1222), True, 'import numpy as np\n'), ((1240, 1273), 'numpy.full', 'np.full', (['n', '"""#000000"""'], {'dtype': '"""U7"""'}), "(n, '#000000', dtype='U7')\n", (1247, 1273), True, 'import numpy as np\n'), ((1579, 1602), 'numpy.argsort', 'np.argsort', (['cluster_cnt'], {}), '(cluster_cnt)\n', (1589, 1602), True, 'import numpy as np\n'), ((1722, 1748), 'numpy.append', 'np.append', (['col_order', 'cols'], {}), '(col_order, cols)\n', (1731, 1748), True, 'import numpy as np\n'), ((2209, 2250), 'numpy.unique', 'np.unique', (['assignment'], {'return_counts': 
'(True)'}), '(assignment, return_counts=True)\n', (2218, 2250), True, 'import numpy as np\n'), ((2593, 2642), 'numpy.full', 'np.full', (['data_in.shape[1]', '"""#ffffff"""'], {'dtype': '"""<U7"""'}), "(data_in.shape[1], '#ffffff', dtype='<U7')\n", (2600, 2642), True, 'import numpy as np\n'), ((2776, 2829), 'pandas.Series', 'pd.Series', (['col_dict'], {'name': '"""clusters"""', 'index': 'col_order'}), "(col_dict, name='clusters', index=col_order)\n", (2785, 2829), True, 'import pandas as pd\n'), ((2854, 2881), 'numpy.arange', 'np.arange', (['data_in.shape[1]'], {}), '(data_in.shape[1])\n', (2863, 2881), True, 'import numpy as np\n'), ((3489, 3513), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (3498, 3513), True, 'import numpy as np\n'), ((4714, 4746), 'numpy.arange', 'np.arange', (['(0.5)', 'data.shape[0]', '(1)'], {}), '(0.5, data.shape[0], 1)\n', (4723, 4746), True, 'import numpy as np\n'), ((4777, 4809), 'numpy.arange', 'np.arange', (['(0.5)', 'data.shape[1]', '(1)'], {}), '(0.5, data.shape[1], 1)\n', (4786, 4809), True, 'import numpy as np\n'), ((5113, 5123), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5121, 5123), True, 'import matplotlib.pyplot as plt\n'), ((6483, 6507), 'numpy.full', 'np.full', (['step_no', 'np.nan'], {}), '(step_no, np.nan)\n', (6490, 6507), True, 'import numpy as np\n'), ((8759, 8802), 'utils._get_posterior_avg', 'ut._get_posterior_avg', (["data['FN'][burn_in:]"], {}), "(data['FN'][burn_in:])\n", (8780, 8802), True, 'import utils as ut\n'), ((9097, 9140), 'utils._get_posterior_avg', 'ut._get_posterior_avg', (["data['FP'][burn_in:]"], {}), "(data['FP'][burn_in:])\n", (9118, 9140), True, 'import utils as ut\n'), ((9771, 9789), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (9783, 9789), True, 'import pandas as pd\n'), ((11579, 11609), 'graphviz.render', 'render', (['"""dot"""', '"""png"""', 'out_file'], {}), "('dot', 'png', out_file)\n", (11585, 11609), False, 'from graphviz import 
render\n'), ((11798, 11808), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11806, 11808), True, 'import matplotlib.pyplot as plt\n'), ((12001, 12012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12010, 12012), True, 'import matplotlib.pyplot as plt\n'), ((12057, 12101), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'index_col': '(False)'}), "(path, sep='\\t', index_col=False)\n", (12068, 12101), True, 'import pandas as pd\n'), ((2976, 3007), 'numpy.arange', 'np.arange', (['data_raw_in.shape[1]'], {}), '(data_raw_in.shape[1])\n', (2985, 3007), True, 'import numpy as np\n'), ((3299, 3332), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['Z'], {'truncate_mode': 'None'}), '(Z, truncate_mode=None)\n', (3309, 3332), False, 'from scipy.cluster.hierarchy import dendrogram, linkage\n'), ((3584, 3622), 'numpy.full', 'np.full', (['data_raw.shape', '""""""'], {'dtype': 'str'}), "(data_raw.shape, '', dtype=str)\n", (3591, 3622), True, 'import numpy as np\n'), ((6617, 6635), 'numpy.arange', 'np.arange', (['step_no'], {}), '(step_no)\n', (6626, 6635), True, 'import numpy as np\n'), ((6919, 6948), 'numpy.floor', 'np.floor', (['(step_no // 10 / 100)'], {}), '(step_no // 10 / 100)\n', (6927, 6948), True, 'import numpy as np\n'), ((10635, 10663), 're.findall', 're.findall', (['"""circle"""', 'gv_raw'], {}), "('circle', gv_raw)\n", (10645, 10663), False, 'import re\n'), ((10881, 10900), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (10890, 10900), True, 'import numpy as np\n'), ((10953, 10972), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (10962, 10972), True, 'import numpy as np\n'), ((888, 916), 'numpy.clip', 'np.clip', (['(col * scale)', '(0)', '(255)'], {}), '(col * scale, 0, 255)\n', (895, 916), True, 'import numpy as np\n'), ((11390, 11417), 'os.path.basename', 'os.path.basename', (['tree_file'], {}), '(tree_file)\n', (11406, 11417), False, 'import os\n'), ((1299, 1311), 'numpy.arange', 
'np.arange', (['n'], {}), '(n)\n', (1308, 1311), True, 'import numpy as np\n'), ((1356, 1368), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1365, 1368), True, 'import numpy as np\n'), ((2415, 2433), 'numpy.argsort', 'np.argsort', (['cl_cnt'], {}), '(cl_cnt)\n', (2425, 2433), True, 'import numpy as np\n'), ((7848, 7860), 'numpy.unique', 'np.unique', (['i'], {}), '(i)\n', (7857, 7860), True, 'import numpy as np\n'), ((9622, 9642), 'numpy.array', 'np.array', (['data.shape'], {}), '(data.shape)\n', (9630, 9642), True, 'import numpy as np\n'), ((1085, 1107), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n)'], {}), '(0, 1, 1 / n)\n', (1094, 1107), True, 'import numpy as np\n')] |
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
from keras import backend as K
import numpy as np
class BFNN:
    def __init__(self, nodes, layers, weights, threshold, rate):
        """
        Constructor for a BFNN (binary feedforward neural network).
        Builds the underlying tf.keras model immediately and overwrites its
        random weights with the ones given in 'weights'.
        Parameters:
            'nodes' - a set of strings denoting node labels
            'layers' - a nested list manually separating the layers of the nodes
            'weights' - a dictionary mapping each node pair (i, j) to
                        its weight (a float); absent pairs get weight 0
            'threshold' - a single activation threshold (a float) shared by
                          every neuron
            'rate' - the learning rate (a float)
        """
        self.nodes = nodes
        self.weights = weights
        self.layers = layers
        self.threshold = threshold # TODO: support for separate threshold
                                   # for each neuron; not strictly needed,
                                   # but mentioned in paper
        self.rate = rate
        # Create the net and manually set its weights.
        self.tfnet = self.make_net()
        self._set_weights()
    def make_net(self):
        """
        Build a tf.keras Model whose layer widths mirror 'self.layers' and
        whose Dense layers all use the binary ReLU activation.
        Returns:
            A tf.keras.Model with one Dense layer per non-input layer of the
            graph topology; weights are still random until _set_weights().
        """
        # Dynamically construct layered net based on our graph topology
        layer_widths = [len(layer) for layer in self.layers]
        input_layer = Input(shape=(layer_widths[0],))
        x = input_layer #input_layer.output
        # Hidden layers (all but the first and last widths).
        for width in layer_widths[1:-1]:
            #x = Binary_Relu_Layer(width, self.threshold)(x)
            x = Dense(width, activation=self._binary_relu)(x)
        #output_layer = Binary_Relu_Layer(layer_widths[-1], self.threshold)(x)
        output_layer = Dense(layer_widths[-1], activation=self._binary_relu)(x)
        return tf.keras.Model(inputs=input_layer, outputs=output_layer)
def _binary_relu(self, x):
"""
A binary rectified linear (ReLU) activation function.
Technically, we are using ReLU for the activation, and then we
binarize the output (and keep it non-negative again using ReLU).
But tensorflow conflates activation and output, so we combine the two here.
This function just checks whether ReLU applied to the input
vector 'x' exceeds 'self.thresholds'. If so, we return a vector
of 1's, otherwise we return a vector of 0's.
"""
thres_tensor = np.full(x.get_shape()[0], self.threshold)
activation = tf.keras.activations.relu(x)
return tf.keras.activations.relu(tf.sign(tf.subtract(tf.keras.activations.relu(x), thres_tensor)))
    def _set_weights(self):
        """
        Helper function to set the weights of 'self.tfnet' to all 0's,
        except for those weights mentioned in 'self.weights'.
        The Keras-initialized random kernels are overwritten layer by layer;
        the bias vectors are kept as-is.
        """
        for i in range(1, len(self.layers)):
            # Start from an all-zero (prev_width x cur_width) kernel matrix.
            zero = np.array([0.0 for j in range(len(self.layers[i]))])
            new_weights = np.array([zero] * len(self.layers[i-1]))
            # Within this layer, set the weights that are mentioned in self.weights.
            for n1, n2 in self.weights.keys():
                if n1 in self.layers[i-1] and n2 in self.layers[i]:
                    index_n1 = self.layers[i-1].index(n1)
                    index_n2 = self.layers[i].index(n2)
                    new_weights[index_n1][index_n2] = self.weights[(n1, n2)]
            # Keep the existing bias; only replace the kernel.
            bias = self.tfnet.layers[i].get_weights()[1]
            self.tfnet.layers[i].set_weights([new_weights, bias])
def _get_activation(self, xvec, layer1, layer2):
"""
Helper function to get the activation output of 'layer' that results
from passing 'xvec' into it.
See:
https://stackoverflow.com/questions/36812256/accessing-neural-network-weights-and-neuron-activations
"""
get_nth_layer_output = K.function([self.tfnet.layers[layer1].output], # input
[self.tfnet.layers[layer2].output]) # output
inp = np.asarray([np.asarray(xvec)])
layer_output = get_nth_layer_output(inp)[0][0]
return list(layer_output)
def reachable(self, signal):
"""
Function to get the set of nodes that are reachable from 'signal',
in the sense of graph-reachability.
"""
result = set()
# Perform DFS on each node, and put the visited nodes in the result set.
stack = list(signal)
while stack != []:
curr = stack.pop()
if curr not in result:
result.add(curr)
for (e, w) in self.weights.items():
if e[0] == curr:
next = e[1]
stack.append(next)
return result
def propagate(self, signal):
"""
Function to get the propagation of a signal 'signal'.
We configure the net with the nodes in 'signal' all active,
then forward-propagate these activations through the net.
We return the resulting set of nodes that are active.
Parameters:
'signal' - a 'set' of neurons to be initially active
"""
# Note:
# Tensorflow is not designed to deal with multiple layers at once
# (it can only consider propagations of a single layer at a time).
# So we need to do the propagation layer by layer. We start with
# the first layer, propagate its signals to get the active neurons
# in the next layer, etc.
result = set(signal)
for i in range(1, len(self.layers)):
layer1 = self.layers[i-1]
layer2 = self.layers[i]
# We get the nodes activated in the next layer by this layer (along with
# the original signal, since the signal may include neurons at this layer).
xvec = [1.0 if (e in signal) or (e in result) else 0.0 for e in layer1]
next_activation = self._get_activation(xvec, i-1, i)
# Update result with both active neurons from the current layer
# as well as the newly activated neurons from the next layer.
result.update(set([layer2[k] for k in range(len(layer2))
if next_activation[k] == 1.0]))
# HELPFUL DEBUGGING OUTPUT:
# print(f"layer1: {layer1}, layer2: {layer2}, input: {xvec}, output: {next_activation}")
# print(f"current prop = {result}")
return result
def hebb_update(self, signal):
"""
Function to perform one round of Hebbian learning.
We propagate 'signal', and increase each weight W_ij by
ΔW_ij = self.rate * x_i * x_j
We then return the resulting net.
"""
# First, populate new weights with every possible edge (including
# those edges with weight 0).
new_weights = self.weights.copy()
for i in range(1, len(self.layers)):
layer1 = self.layers[i-1]
layer2 = self.layers[i]
for n1 in layer1:
for n2 in layer2:
if (n1, n2) not in new_weights.keys():
new_weights[(n1, n2)] = 0.0
# We now increase every edge (by self.rate) if it was within
# the propagation of 'signal'
prop = self.propagate(signal)
for n1, n2 in new_weights.keys():
if n1 in prop and n2 in prop:
new_weights[(n1, n2)] += self.rate
# Finally, we filter out all of the '0' edges from the dictionary
# (for prettiness, mostly)
new_weights = {k : v for k , v in new_weights.items() if v != 0.0}
return BFNN(self.nodes, self.layers, new_weights, self.threshold, self.rate)
    def backprop_update(self, signal):
        """
        Function to perform one round of backpropagation.

        Not yet implemented; this is a placeholder so the interface is
        already in place.
        """
        # FUTURE FUNCTIONALITY: intentionally a no-op for now.
        pass
def __str__(self):
"""
String function for pretty printing
TODO: Also make a function that gives us a pretty network
diagram version of the neural net.
"""
result = ""
result += "BFNN\n"
result += f"T = {self.threshold} ; rate = {self.rate}\n"
result += f"Nodes: {self.nodes}\n"
result += f"Layers: {self.layers}\n"
result += f"Weights: {self.weights}\n"
return result | [
"tensorflow.keras.layers.Input",
"numpy.asarray",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.activations.relu",
"tensorflow.keras.Model",
"keras.backend.function"
] | [((1365, 1396), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(layer_widths[0],)'}), '(shape=(layer_widths[0],))\n', (1370, 1396), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((1797, 1853), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), '(inputs=input_layer, outputs=output_layer)\n', (1811, 1853), True, 'import tensorflow as tf\n'), ((2529, 2557), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['x'], {}), '(x)\n', (2554, 2557), True, 'import tensorflow as tf\n'), ((3917, 4004), 'keras.backend.function', 'K.function', (['[self.tfnet.layers[layer1].output]', '[self.tfnet.layers[layer2].output]'], {}), '([self.tfnet.layers[layer1].output], [self.tfnet.layers[layer2].\n output])\n', (3927, 4004), True, 'from keras import backend as K\n'), ((1725, 1778), 'tensorflow.keras.layers.Dense', 'Dense', (['layer_widths[-1]'], {'activation': 'self._binary_relu'}), '(layer_widths[-1], activation=self._binary_relu)\n', (1730, 1778), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((1568, 1610), 'tensorflow.keras.layers.Dense', 'Dense', (['width'], {'activation': 'self._binary_relu'}), '(width, activation=self._binary_relu)\n', (1573, 1610), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((4086, 4102), 'numpy.asarray', 'np.asarray', (['xvec'], {}), '(xvec)\n', (4096, 4102), True, 'import numpy as np\n'), ((2624, 2652), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['x'], {}), '(x)\n', (2649, 2652), True, 'import tensorflow as tf\n')] |
import torch
from .torchpoints import ball_query_partial_dense
import numpy as np
import numba
from typing import List
@numba.jit(nopython=True)
def _grow_proximity_core(neighbours, min_cluster_size):
    """Cluster points by flood-filling precomputed neighbour lists.

    Parameters
    ----------
    neighbours : 2D integer array [num_points, nsample]
        Row i holds the neighbour indices of point i, padded with -1
        when fewer than nsample neighbours were found.
    min_cluster_size : int
        Clusters with fewer points than this are discarded.

    Returns
    -------
    list of list of int -- one list of point indices per kept cluster.
    """
    num_points = int(neighbours.shape[0])
    visited = np.zeros((num_points,), dtype=numba.types.bool_)
    clusters = []
    for i in range(num_points):
        if visited[i]:
            continue
        # Depth-first flood fill starting from point i.
        cluster = []
        queue = []
        visited[i] = True
        queue.append(i)
        cluster.append(i)
        while len(queue):
            k = queue.pop()
            k_neighbours = neighbours[k]
            for nei in k_neighbours:
                if nei.item() == -1:
                    # -1 marks the end of the padded neighbour row.
                    break
                if not visited[nei]:
                    visited[nei] = True
                    queue.append(nei.item())
                    cluster.append(nei.item())
        if len(cluster) >= min_cluster_size:
            clusters.append(cluster)
    return clusters
def grow_proximity(pos, batch, nsample=16, radius=0.02, min_cluster_size=32):
    """Cluster points purely by spatial proximity.

    The radius neighbour search runs on the input tensor's device while
    the cluster assignment itself runs on CPU.
    """
    assert pos.shape[0] == batch.shape[0]
    neighbour_idx = ball_query_partial_dense(radius, nsample, pos, pos, batch, batch)[0]
    neighbours = neighbour_idx.cpu().numpy()
    return _grow_proximity_core(neighbours, min_cluster_size)
def region_grow(
    pos, labels, batch, ignore_labels=(), nsample=16, radius=0.02, min_cluster_size=32
) -> List[torch.Tensor]:
    """Region growing clustering algorithm proposed in
    PointGroup: Dual-Set Point Grouping for 3D Instance Segmentation
    https://arxiv.org/pdf/2004.01658.pdf
    for instance segmentation

    Parameters
    ----------
    pos: torch.Tensor [N, 3]
        Location of the points
    labels: torch.Tensor [N,]
        labels of each point
    batch: torch.Tensor [N,]
        batch index of each point
    ignore_labels:
        Labels that should be ignored, no region growing will be performed
        on those.  (BUGFIX: the default is now an immutable tuple instead
        of a mutable list literal.)
    nsample:
        maximum number of neighbours to consider
    radius:
        radius for the neighbour search
    min_cluster_size:
        Number of points above which a cluster is considered valid

    Returns
    -------
    list of 1-D index tensors, one per cluster, indexing into ``pos``.
    """
    assert labels.dim() == 1
    assert pos.dim() == 2
    assert pos.shape[0] == labels.shape[0]

    clusters = []
    ind = torch.arange(0, pos.shape[0])
    for l in torch.unique(labels):
        if l in ignore_labels:
            continue

        # Build clusters for a given label (ignore other points)
        label_mask = labels == l
        local_ind = ind[label_mask]

        # Remap batch to a continuous 0..B-1 sequence so the neighbour
        # search sees a dense batch index.
        label_batch = batch[label_mask]
        unique_in_batch = torch.unique(label_batch)
        remaped_batch = torch.empty_like(label_batch)
        for new, old in enumerate(unique_in_batch):
            mask = label_batch == old
            remaped_batch[mask] = new

        # Proximity-based growing restricted to this semantic label.
        label_clusters = grow_proximity(
            pos[label_mask, :],
            remaped_batch,
            nsample=nsample,
            radius=radius,
            min_cluster_size=min_cluster_size,
        )

        # Remap cluster-local indices back to original coordinates.
        for cluster in label_clusters:
            cluster = torch.tensor(cluster).to(pos.device)
            clusters.append(local_ind[cluster])
    return clusters
| [
"torch.unique",
"torch.empty_like",
"torch.tensor",
"numpy.zeros",
"numba.jit",
"torch.arange"
] | [((122, 146), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (131, 146), False, 'import numba\n'), ((259, 307), 'numpy.zeros', 'np.zeros', (['(num_points,)'], {'dtype': 'numba.types.bool_'}), '((num_points,), dtype=numba.types.bool_)\n', (267, 307), True, 'import numpy as np\n'), ((2304, 2324), 'torch.unique', 'torch.unique', (['labels'], {}), '(labels)\n', (2316, 2324), False, 'import torch\n'), ((2353, 2382), 'torch.arange', 'torch.arange', (['(0)', 'pos.shape[0]'], {}), '(0, pos.shape[0])\n', (2365, 2382), False, 'import torch\n'), ((2712, 2737), 'torch.unique', 'torch.unique', (['label_batch'], {}), '(label_batch)\n', (2724, 2737), False, 'import torch\n'), ((2762, 2791), 'torch.empty_like', 'torch.empty_like', (['label_batch'], {}), '(label_batch)\n', (2778, 2791), False, 'import torch\n'), ((3302, 3323), 'torch.tensor', 'torch.tensor', (['cluster'], {}), '(cluster)\n', (3314, 3323), False, 'import torch\n')] |
# vim: set fdm=indent:
'''
___
/ | ____ ___ ____ _____ ____ ____
/ /| | / __ `__ \/ __ `/_ / / __ \/ __ \
/ ___ |/ / / / / / /_/ / / /_/ /_/ / / / /
/_/ |_/_/ /_/ /_/\__,_/ /___/\____/_/ /_/
______ __
/ ____/___ ________ _________ ______/ /_
/ /_ / __ \/ ___/ _ \/ ___/ __ `/ ___/ __/
/ __/ / /_/ / / / __/ /__/ /_/ (__ ) /_
/_/ \____/_/ \___/\___/\__,_/____/\__/
___ __ __
/ | _____________ / /__ _________ _/ /_____ _____
/ /| |/ ___/ ___/ _ \/ / _ \/ ___/ __ `/ __/ __ \/ ___/
/ ___ / /__/ /__/ __/ / __/ / / /_/ / /_/ /_/ / /
/_/ |_\___/\___/\___/_/\___/_/ \__,_/\__/\____/_/
GITHUB:
https://github.com/aws-samples/simple-forecat-solution/
USAGE:
streamlit run -- ./app.py --local-dir LOCAL_DIR [--landing-page-url URL]
OPTIONS:
--local-dir LOCAL_DIR /path/to/ a local directory from which the UI
will look for files.
--landing-page-url URL URL of the AFA landing page
'''
import os
import sys
import io
import glob
import time
import datetime
import base64
import pathlib
import textwrap
import argparse
import re
import json
import logging
import gzip
import gc
import boto3
import numpy as np
import pandas as pd
import awswrangler as wr
import streamlit as st
import plotly.express as pex
import plotly.graph_objects as go
import cloudpickle
import gzip
from collections import OrderedDict, deque, namedtuple
from concurrent import futures
from urllib.parse import urlparse
from toolz.itertoolz import partition_all
from botocore.exceptions import ClientError
from sspipe import p, px
from streamlit import session_state as state
from textwrap import dedent
from stqdm import stqdm
from afa import (load_data, resample, run_pipeline, run_cv_select,
calc_smape, calc_wape,
make_demand_classification, process_forecasts, make_perf_summary,
make_health_summary, GROUP_COLS, EXP_COLS)
from lambdamap import LambdaExecutor, LambdaFunction
from awswrangler.exceptions import NoFilesFound
from streamlit import caching
from streamlit.uploaded_file_manager import UploadedFile
from streamlit.script_runner import RerunException
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
from joblib import Parallel, delayed
from humanfriendly import format_timespan
ST_STATIC_PATH = pathlib.Path(st.__path__[0]).joinpath("static")
ST_DOWNLOADS_PATH = ST_STATIC_PATH.joinpath("downloads")
LAMBDAMAP_FUNC = "AfaLambdaMapFunction"
LOCAL_DIR = "/home/ec2-user/SageMaker"
if not os.path.exists(ST_DOWNLOADS_PATH):
ST_DOWNLOADS_PATH.mkdir()
FREQ_MAP = OrderedDict(Daily="D", Weekly="W-MON", Monthly="MS")
FREQ_MAP_AFC = OrderedDict(Daily="D", Weekly="W", Monthly="M")
FREQ_MAP_LONG = {
"D": "Daily", "W-MON": "Weekly", "W": "Weekly", "M": "Monthly",
"MS": "Monthly"
}
FREQ_MAP_PD = {
"D": "D",
"W": "W-MON",
"W-SUN": "W-MON",
"W-MON": "W-MON",
"M": "MS",
"MS": "MS"
}
METRIC = "smape"
MAX_LAMBDAS = 1000
def validate(df):
    """Check that a demand dataframe contains the expected columns.

    Returns (df, msgs, is_valid_file) where ``msgs`` maps "errors" and
    "warnings" to lists of markdown strings and ``is_valid_file`` is True
    iff no errors were found.
    """
    # Every expected column must be present in the dataframe.
    errors = [f"missing **{col}** column" for col in EXP_COLS if col not in df]
    msgs = {
        "errors": errors,
        "warnings": []
    }
    return df, msgs, len(errors) == 0
@st.cache
def load_file(path):
    """Read a (possibly gzipped) demand CSV, keeping timestamps as strings."""
    if path.endswith(".csv.gz"):
        compression = "gzip"
    elif path.endswith(".csv"):
        compression = None
    else:
        # Only plain or gzipped CSV inputs are supported.
        raise NotImplementedError
    return pd.read_csv(path, dtype={"timestamp": str}, compression=compression)
def _sum(y):
if np.all(pd.isnull(y)):
return np.nan
return np.nansum(y)
def _resample(df2, freq):
df2 = df2.groupby(["channel", "family", "item_id"]) \
.resample(freq) \
.demand \
.sum(min_count=1)
return df2
def process_data(df, freq, chunksize=None):
    """Resample the raw demand dataframe to `freq` in parallel chunks.

    Groups by (channel, family, item_id), resamples each group's demand
    series, and reassembles a flat dataframe indexed by timestamp (with
    the index name cleared).  NOTE: mutates `df` (sets its index).
    """
    df["timestamp"] = pd.DatetimeIndex(df["timestamp"])
    df.set_index("timestamp", inplace=True)

    groups = df.groupby(["channel", "family", "item_id"], sort=False)

    if chunksize is None:
        chunksize = min(groups.ngroups, 1000)

    n_chunks = int(np.ceil(groups.ngroups / chunksize))
    resampled = []

    # Work chunk-by-chunk so the progress bar reflects real progress.
    for chunk in stqdm(partition_all(chunksize, groups), total=n_chunks, desc="Progress"):
        chunk_frames = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
        resampled.extend(chunk_frames)

    out = pd.concat(resampled) \
            .reset_index(["channel", "family", "item_id"])
    out.index.name = None

    return out
class StreamlitExecutor(LambdaExecutor):
    """Custom LambdaExecutor to display a progress bar in the app.
    """

    def map(self, func, payloads, local_mode=False):
        """Submit `func` once per payload and return the list of futures.

        Parameters
        ----------
        func : callable
            Function to run for each payload.
        payloads : list of dict
            Each dict carries "args" (tuple) and "kwargs" (dict) entries.
        local_mode : bool
            If True, run `func` in-process; otherwise wrap it as an AWS
            Lambda invocation via LambdaFunction.
        """
        if local_mode:
            f = func
        else:
            f = LambdaFunction(func, self._client, self._lambda_arn)
        ex = self._executor
        # NOTE(review): the loop variable `p` shadows the module-level
        # `sspipe.p` import inside this comprehension; harmless here.
        wait_for = [ex.submit(f, *p["args"], **p["kwargs"]) for p in payloads]
        return wait_for
def display_progress(wait_for, desc=None):
    """Render a stqdm progress bar tracking a list of futures.

    Polls every 250ms until every future in `wait_for` is done, then
    flushes any remaining progress to the bar.
    """
    pbar = stqdm(desc=desc, total=len(wait_for))
    reported = 0
    completed = sum(f.done() for f in wait_for)

    while completed != len(wait_for):
        pbar.update(completed - reported)
        reported = completed
        completed = sum(f.done() for f in wait_for)
        time.sleep(0.25)

    # Account for futures that finished since the last poll.
    pbar.update(completed - reported)

    return
def run_lambdamap(df, horiz, freq):
    """Generate forecasts for every timeseries in `df` via AWS Lambda.

    Parameters
    ----------
    df : pd.DataFrame
        Demand history (one row per timestamp per series).
    horiz : int
        Forecast horizon length.
    freq : str
        Forecast frequency code; translated through FREQ_MAP_PD.

    Returns
    -------
    list of futures, one per (channel, family, item_id) series.
    """
    payloads = []
    freq = FREQ_MAP_PD[freq]

    # Cross-validation stride depends on the frequency (weekly data has
    # more periods, so a larger stride keeps the CV cost bounded).
    if freq[0] == "W":
        cv_periods = None
        cv_stride = 2
    elif freq[0] == "M":
        cv_periods = None
        cv_stride = 1
    else:
        raise NotImplementedError

    # Resample the dataset to the forecast frequency before running
    # lambdamap.  (Removed unused local imports of `toolz.partition` and
    # `tqdm` that were never referenced.)
    start = time.time()
    df2 = get_df_resampled(df, freq)
    print(f"completed in {format_timespan(time.time()-start)}")

    groups = df2.groupby(GROUP_COLS, as_index=False, sort=False)

    # One payload per series.
    for _, dd in groups:
        payloads.append(
            {"args": (dd, horiz, freq),
             "kwargs": {"metric": "smape",
                        "cv_periods": cv_periods, "cv_stride": cv_stride}})

    # Launch jobs with at most MAX_LAMBDAS concurrent workers.
    executor = StreamlitExecutor(max_workers=min(MAX_LAMBDAS, len(payloads)),
                                 lambda_arn=LAMBDAMAP_FUNC)
    wait_for = executor.map(run_cv_select, payloads)
    display_progress(wait_for, "🔥 Generating forecasts")

    return wait_for
def get_df_resampled(df, freq):
    """Resample `df` to frequency `freq` in parallel chunks.

    The resampled dataframe is cached into the report state under
    ``["data"]["df2"]`` and returned.
    """
    groups = df.groupby(["channel", "family", "item_id"], sort=False)
    chunksize = min(1000, groups.ngroups)
    total = int(np.ceil(float(groups.ngroups) / chunksize))
    all_results = []

    for chunk in stqdm(partition_all(chunksize, groups), total=total,
        desc="Batch Preparation Progress"):
        results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
        all_results.extend(results)

    df2 = pd.concat(all_results) \
            .reset_index(["channel", "family", "item_id"])
    # BUGFIX: previously df2 was immediately recomputed serially via
    # `_resample(df, freq)`, which discarded all of the parallel work above.
    df2.index.name = None

    state["report"]["data"]["df2"] = df2

    return df2
def display_ag_grid(df, auto_height=False, paginate=False,
    comma_cols=None, selection_mode=None, use_checkbox=False):
    """Render `df` as an interactive AgGrid table.

    Parameters
    ----------
    df : pd.DataFrame
    auto_height : bool
    paginate : bool
    comma_cols : tuple or list, optional
        Numeric columns to apply comma thousands separator.
    selection_mode : str, optional
        AgGrid selection mode (e.g. "single"); enables row selection.
    use_checkbox : bool
        Show selection checkboxes (only used when `selection_mode` is set).
    """
    gb = GridOptionsBuilder.from_dataframe(df)
    gb.configure_auto_height(auto_height)
    gb.configure_pagination(enabled=paginate)

    if selection_mode is not None:
        gb.configure_selection(selection_mode=selection_mode,
            use_checkbox=use_checkbox)

    comma_renderer = JsCode(textwrap.dedent("""
        function(params) {
            return params.value
                .toString()
                .split( /(?=(?:\d{3})+(?:\.|$))/g ).join( "," )
        }
    """))

    # BUGFIX: guard the default `comma_cols=None`, which previously raised
    # a TypeError when iterated.
    for col in (comma_cols or ()):
        gb.configure_column(col, cellRenderer=comma_renderer)

    response = AgGrid(df, gridOptions=gb.build(), allow_unsafe_jscode=True)

    return response
def valid_launch_freqs():
    """Return the forecast frequencies allowed for the loaded dataset.

    Forecasts may only be generated at the data's native frequency or
    coarser; daily forecasting is currently disabled entirely.
    """
    data_freq = state.report["data"]["freq"]
    valid_freqs = ["D", "W", "M"]

    if data_freq in ("D",):
        # don't allow daily forecasting yet
        return valid_freqs[1:]
    if data_freq in ("W", "W-MON",):
        return valid_freqs[1:]
    if data_freq in ("M", "MS",):
        return valid_freqs[2:]
    raise NotImplementedError
def create_presigned_url(s3_path, expiration=3600):
    """Generate a presigned URL to share an S3 object

    :param s3_path: full "s3://bucket/key" path of the object
    :param expiration: Time in seconds for the presigned URL to remain valid

    :return: Presigned URL as string. If error, returns None.
    """
    parsed = urlparse(s3_path, allow_fragments=False)
    bucket_name = parsed.netloc
    object_name = parsed.path.strip("/")

    # Generate a presigned URL for the S3 object
    s3_client = boto3.client('s3')
    try:
        return s3_client.generate_presigned_url(
            'get_object',
            Params={'Bucket': bucket_name, 'Key': object_name},
            ExpiresIn=expiration)
    except ClientError as e:
        logging.error(e)
        return None
def make_df_backtests(df_results, parallel=False):
    """Expand df_results to a "long" dataframe with the columns:
    channel, family, item_id, timestamp, demand, backtest.
    """
    def _flatten(dd):
        # Each cell holds a list of per-window arrays; flatten them all
        # into single 1-D arrays.
        timestamps = np.hstack(dd["ts_cv"].apply(np.hstack))
        actuals = np.hstack(dd["y_cv"].apply(np.hstack))
        preds = np.hstack(dd["yp_cv"].apply(np.hstack))
        return pd.DataFrame(
            {"timestamp": timestamps, "demand": actuals, "backtest": preds})

    groups = df_results.query("rank == 1") \
                       .groupby(["channel", "family", "item_id"],
                                as_index=True, sort=False)

    if parallel:
        df_backtests = groups.parallel_apply(_flatten)
    else:
        df_backtests = groups.apply(_flatten)

    df_backtests["timestamp"] = pd.DatetimeIndex(df_backtests["timestamp"])

    return df_backtests.reset_index(["channel", "family", "item_id"])
def save_report(report_fn):
    """Pickle the current report state and upload it to S3.

    The report is gzip-pickled to /tmp, uploaded to the configured
    s3_afa_reports_path, and a presigned download link is displayed.
    No-op when no report (or no input data) has been loaded yet.
    """
    if "report" not in state or "name" not in state["report"]:
        return
    if "path" not in state["report"]["data"]:
        st.warning(textwrap.dedent(f"""
            Warning: unable to save report, no input data was loaded.
        """))
        return
    start = time.time()
    with st.spinner(":hourglass_flowing_sand: Saving Report ..."):
        now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        local_path = f'/tmp/{report_fn}'
        # save the report locally
        cloudpickle.dump(state["report"], gzip.open(local_path, "wb"))
        # upload the report to s3
        s3_path = \
            f'{state["report"]["afa"]["s3_afa_reports_path"]}/{report_fn}'
        parsed_url = urlparse(s3_path, allow_fragments=False)
        bucket = parsed_url.netloc
        key = parsed_url.path.strip("/")
        s3_client = boto3.client("s3")
        try:
            response = s3_client.upload_file(local_path, bucket, key)
            signed_url = create_presigned_url(s3_path)
            st.info(textwrap.dedent(f"""
                The report can be downloaded [here]({signed_url}).
            """))
        except ClientError as e:
            logging.error(e)
    st.text(f"(completed in {format_timespan(time.time() - start)})")
    return
def make_df_reports(bucket, prefix):
    """List report object basenames under s3://bucket/prefix as a dataframe."""
    s3 = boto3.client("s3")
    pages = s3.get_paginator("list_objects_v2").paginate(Bucket=bucket, Prefix=prefix)
    keys = [obj['Key'] for page in pages for obj in page['Contents']]
    df = pd.DataFrame()
    # Keep only the basename of each S3 key.
    df["filename"] = [os.path.basename(k) for k in keys]
    return df
#
# Panels
#
def make_mask(df, channel, family, item_id):
    """Boolean mask of rows matching all three keys (case-insensitive).

    An empty string for any key yields an all-False mask.
    """
    # Only filter when all three keys are non-empty.
    if channel == "" or family == "" or item_id == "":
        return np.zeros(len(df), dtype=bool)
    matches = df["channel"].str.upper() == channel.upper()
    matches &= df["family"].str.upper() == family.upper()
    matches &= df["item_id"].str.upper() == item_id.upper()
    return matches
@st.cache
def make_downloads(df_pred, df_results):
    """Write the forecast and results dataframes into the downloads folder.

    Returns (pred_fn, results_fn) gzip CSV paths.

    NOTE(review): the `df_pred`/`df_results` parameters are not read; the
    function writes `state.df_pred`/`state.df_results` instead — the
    arguments presumably only key the `st.cache` memoization.  Confirm.
    """
    pred_fn = os.path.join(ST_DOWNLOADS_PATH,
                           f"{state.uploaded_file.name}_fcast.csv.gz")
    results_fn = os.path.join(ST_DOWNLOADS_PATH,
                              f"{state.uploaded_file.name}_results.csv.gz")
    state.df_pred.to_csv(pred_fn, index=False, compression="gzip")
    state.df_results.to_csv(results_fn, index=False, compression="gzip")
    return pred_fn, results_fn
def _info(s):
    """Show `s` (dedented) in a streamlit info box."""
    st.info(dedent(s))
def _success(s):
    """Show `s` (dedented) in a streamlit success box."""
    st.success(dedent(s))
def _write(s):
    """Write `s` (dedented) as streamlit markdown."""
    st.write(dedent(s))
def panel_create_report(expanded=True):
    """Display the 'Load Data' panel.

    Renders a form to pick an uploaded demand file and its frequency,
    validates the file on submit, and stores the processed dataframe
    plus metadata into ``state.report["data"]``.
    """
    def _load_data(path):
        # Infer compression from the file extension.
        if path.endswith(".csv"):
            compression = None
        elif path.endswith(".csv.gz"):
            compression = "gzip"
        else:
            raise NotImplementedError
        df = pd.read_csv(path,
            dtype={"timestamp": str, "channel": str, "family": str,
                   "item_id": str}, compression=compression)
        return df
    default_name = state["report"].get("name", None)
    file_path = state["report"]["data"].get("path", None)
    freq = state["report"]["data"].get("freq", None)
    st.markdown("## Create Report")
    with st.beta_expander("⬆️ Load + Validate Data", expanded=expanded):
        st.write(f"""Step 1 – Create a new forecast report by selecting an uploaded
        file containing the demand history for your use-case. You must also specify
        the frequency of the demand (e.g. _Daily_, _Weekly_, or _Monthly_). Demand
        history files are uploaded using the [SageMaker Notebook interface]({state["landing_page_url"]})""")
        now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        btn_refresh_files = st.button("Refresh Files", help="Refresh the _File_ selector with recently uploaded files.")
        with st.form("create_report_form"):
            report_name = st.text_input("Report Name (optional)",
                help="You may optionally give this report a name, otherwise one will be automatically generated.")
            _cols = st.beta_columns([3,1])
            with _cols[0]:
                fn = file_selectbox(
                    "File (.csv or .csv.gz files)", args.local_dir,
                    help="This file contains the demand history as either a `.csv` or `.csv.gz` file.")
            with _cols[1]:
                freq = st.selectbox("Frequency", list(s for s in FREQ_MAP.values() if s != 'D'),
                    format_func=lambda s: FREQ_MAP_LONG[s],
                    help="This input file must contain demand history at a _daily_, _weekly_, or _monthly_ frequency.")
            btn_validate = st.form_submit_button("Load & Validate")
        if btn_validate:
            start = time.time()
            if fn is None:
                # No file selected: explain the workflow and halt the rerun.
                st.error(textwrap.dedent("""
                **Error**
                No files were selected.
                1. Upload your file(s).
                2. Click the **Refresh Files** button.
                3. Select the file from the dropdown box.
                4. Select the **Frequency**.
                5. Click the **Validate** button.
                ####
                """))
                st.stop()
            if report_name == "":
                # Auto-generate a timestamped report name.
                now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                report_name = f"AfaReport_{now_str}"
            if report_name != "" and re.match(r"^[A-Za-z0-9-_]*$", report_name) is None:
                st.error(dedent("""
                The report name may only contain:
                - uppercase letters
                - lowercase letters
                - numbers
                - dashes ('-')
                - underscores ('_')
                ####
                """))
            else:
                # temporarily load the file for validation and store it in state
                # iff the data is valid
                with st.spinner(":hourglass_flowing_sand: Validating file ..."):
                    df, msgs, is_valid_file = validate(_load_data(fn))#.drop(["timestamp", "channel"], axis=1))
                if is_valid_file:
                    with st.spinner(":hourglass_flowing_sand: Processing file ..."):
                        state.report["name"] = report_name
                        state.report["data"]["path"] = fn
                        state.report["data"]["sz_bytes"] = os.path.getsize(fn)
                        state.report["data"]["freq"] = freq
                        # impute missing dates from the validated dataframe, this
                        # will fill in the missing timestamps with null demand values
                        # state.report["data"]["df"] = \
                        #     load_data(df, impute_freq=state.report["data"]["freq"])
                        state.report["data"]["df"] = \
                            process_data(df,state.report["data"]["freq"])
                        state.report["data"]["is_valid"] = True
                        # clear any existing data health check results, this forces
                        # a rechecking of data health
                        state.report["data"]["df_health"] = None
                    st.text(f"(completed in {format_timespan(time.time() - start)})")
                else:
                    # Surface each validation error as a markdown bullet.
                    err_bullets = "\n".join("- " + s for s in msgs["errors"])
                    st.error(f"**Validation failed**\n\n{err_bullets}")
    if state.report["data"].get("is_valid", False):
        _success(f"""
            `{os.path.basename(state.report["data"]["path"])}` is **valid**
        """)
    return
def panel_load_report(expanded=True):
    """Display the 'Load Report' panel.

    Lets the user pick a previously saved ``.pkl.gz`` report from the
    local filesystem and restores it into ``state["report"]``.
    """
    def format_func(s):
        # Human-readable labels for the source radio buttons.
        if s == "local":
            return "Local Filesystem"
        elif s == "s3":
            return "☁️ S3"
    s3 = boto3.client("s3")
    st.markdown("## Load Report")
    with st.beta_expander("📂 Load Report", expanded=expanded):
        st.write(f"""Optional – Alternatively, you can load a previously-generated
report. Report files must have the `.pkl.gz` file extension and can be uploaded
using the [SageMaker Notebook interface]({state["landing_page_url"]}).""")
        report_source = st.radio("Source", ["local"], format_func=format_func)
        _cols = st.beta_columns([3,1])
        with _cols[0]:
            if report_source == "local":
                fn = file_selectbox("File", os.path.join(args.local_dir),
                    globs=("*.pkl.gz",))
            elif report_source == "s3":
                # S3 loading not implemented yet (radio only offers "local").
                pass
            else:
                raise NotImplementedError
            load_report_btn = st.button("Load", key="load_report_btn")
        with _cols[1]:
            st.write("##")
            st.button("Refresh Files", key="refresh_report_files_btn")
        if load_report_btn:
            start = time.time()
            with st.spinner(":hourglass_flowing_sand: Loading Report ..."):
                # Reports are gzip-pickled dictionaries (see save_report).
                state["report"] = cloudpickle.load(gzip.open(fn, "rb"))
            st.text(f"(completed in {format_timespan(time.time() - start)})")
            state["prev_state"] = "report_loaded"
    return
def panel_data_health():
    """Display the 'Data Health' panel.

    Computes (and caches in state) a per-series health summary of the
    loaded dataframe, then renders summary counts, the covered timespan,
    and a box plot of timeseries lengths.  No-op until data is loaded.
    """
    df = state.report["data"].get("df", None)
    df_health = state.report["data"].get("df_health", None)
    freq = state.report["data"].get("freq", None)
    if df is None:
        return
    st.header("Data Health")
    with st.beta_expander("❤️ Data Health", expanded=True):
        st.write(f"""Step 2 – Inspect the characteristics of the dataset
        for irregularities prior to generating any forecasts. For example,
        missing channels, families, item IDs; or unusually short/long
        timeseries lengths.""")
        with st.spinner("Performing data health check ..."):
            start = time.time()
            # check iff required
            if df_health is None:
                df_health = make_health_summary(df, state.report["data"]["freq"])
                # save the health check results
                state.report["data"]["df_health"] = df_health
                # calc. ranked series by demand
                state.report["data"]["df_ranks"] = \
                    df.groupby(["channel", "family", "item_id"]) \
                      .agg({"demand": sum}) \
                      .sort_values(by="demand", ascending=False)
            num_series = df_health.shape[0]
            num_channels = df_health["channel"].nunique()
            num_families = df_health["family"].nunique()
            num_item_ids = df_health["item_id"].nunique()
            first_date = df_health['timestamp_min'].dt.strftime('%Y-%m-%d').min()
            last_date = df_health['timestamp_max'].dt.strftime('%Y-%m-%d').max()
            # Pick the period unit used to express the dataset duration.
            if freq == 'D':
                duration_unit = 'D'
                duration_str = 'days'
            elif freq in ("W", "W-MON",):
                duration_unit = 'W'
                duration_str = 'weeks'
            elif freq in ("M", "MS",):
                duration_unit = 'M'
                duration_str = 'months'
            else:
                raise NotImplementedError
            duration = pd.Timestamp(last_date).to_period(duration_unit) - \
                       pd.Timestamp(first_date).to_period(duration_unit)
            pc_missing = \
                df_health["demand_missing_dates"].sum() / df_health["demand_len"].sum()
            with st.beta_container():
                _cols = st.beta_columns(3)
                with _cols[0]:
                    st.markdown("#### Summary")
                    st.text(textwrap.dedent(f"""
                    No. series:\t{num_series}
                    No. channels:\t{num_channels}
                    No. families:\t{num_families}
                    No. item IDs:\t{num_item_ids}
                    """))
                with _cols[1]:
                    st.markdown("#### Timespan")
                    st.text(f"Frequency:\t{FREQ_MAP_LONG[freq]}\n"
                            f"Duration:\t{duration.n} {duration_str}\n"
                            f"First date:\t{first_date}\n"
                            f"Last date:\t{last_date}\n")
                            #f"% missing:\t{int(np.round(pc_missing*100,0))}")
                with _cols[2]:
                    st.markdown("#### Timeseries Lengths")
                    fig = pex.box(df_health, x="demand_nonnull_count", height=160)
                    fig.update_layout(
                        margin={"t": 5, "b": 0, "r": 0, "l": 0},
                        xaxis_title=duration_str,
                        height=100
                    )
                    st.plotly_chart(fig, use_container_width=True)
        st.text(f"(completed in {format_timespan(time.time() - start)})")
    return
def panel_launch():
    """Display the 'Launch' panel.

    Renders the horizon/frequency/backend form and, on submit, runs the
    forecasting pipeline either in-process ("local") or fanned out over
    AWS Lambda ("lambdamap"), storing the results in state.report["afa"].
    No-op until the data and its health check are available.
    """
    def _format_func(short):
        # Human-readable labels for the backend selectbox.
        if short == "local":
            s = " Local"
        if short == "lambdamap":
            s = "AWS Lambda"
        return s
    df = state.report["data"].get("df", None)
    df_health = state.report["data"].get("df_health", None)
    horiz = state.report["afa"].get("horiz", None)
    freq = state.report["afa"].get("freq", None)
    if df is None or df_health is None:
        return
    st.header("Statistical Forecasts")
    with st.beta_expander("🚀 Launch", expanded=True):
        st.write(f"""Step 3 – Generate forecasts by training and evaluating 75+
        configurations of [statistical forecasting
        models](https://otexts.com/fpp3/) for each timeseries in
        parallel using AWS Lambda. A forecast at the desired _horizon length_ and
        _frequency_ is then generated using the each individual timeseries' best model.
        This process typically completes at a rate of 500–1,000 timeseries/min.
        """)
        with st.form("afa_form"):
            with st.beta_container():
                _cols = st.beta_columns(3)
                with _cols[0]:
                    horiz = st.number_input("Horizon Length", value=1, min_value=1)
                with _cols[1]:
                    freq = st.selectbox("Forecast Frequency", valid_launch_freqs(), 0,
                        format_func=lambda s: FREQ_MAP_LONG[s])
                with _cols[2]:
                    backend = st.selectbox("Compute Backend",
                        ["lambdamap"], 0, _format_func)
            btn_launch = st.form_submit_button("Launch")
        if btn_launch:
            start = time.time()
            # save form data
            state.report["afa"]["freq"] = freq
            state.report["afa"]["horiz"] = horiz
            state.report["afa"]["backend"] = backend
            df = state.report["data"]["df"]
            freq_in = state.report["data"]["freq"]
            freq_out = state.report["afa"]["freq"]
            if backend == "local":
                wait_for = \
                    run_pipeline(df, freq_in, freq_out, metric=METRIC,
                        cv_stride=2, backend="futures", horiz=horiz)
                display_progress(wait_for, "🔥 Generating forecasts")
                raw_results = [f.result() for f in futures.as_completed(wait_for)]
            elif backend == "lambdamap":
                with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
                    all_raw_results = []
                    groups = df.groupby(["channel", "family", "item_id"], sort=False)
                    chunksize = min(5000, groups.ngroups)
                    # divide the dataset into chunks
                    df["grp"] = groups.ngroup() % int(np.ceil(groups.ngroups / chunksize))
                    groups = df.groupby("grp", sort=False)
                    total = df["grp"].nunique()
                    # Launch each chunk through lambdamap and gather results.
                    for _, dd in stqdm(groups, total=total, desc="Overall Progress"):
                        wait_for = run_lambdamap(dd, horiz, freq_out)
                        raw_results = [f.result() for f in futures.as_completed(wait_for)]
                        all_raw_results.extend(raw_results)
                    raw_results = all_raw_results
            else:
                raise NotImplementedError
            with st.spinner("⏳ Calculating results ..."):
                # generate the results and predictions as dataframes
                df_results, df_preds, df_model_dist, best_err, naive_err = \
                    process_forecasts(wait_for, METRIC)
                # generate the demand classifcation info
                df_demand_cln = make_demand_classification(df, freq_in)
            # save results and forecast data
            state.report["afa"]["df_results"] = df_results
            state.report["afa"]["df_preds"] = df_preds
            state.report["afa"]["df_demand_cln"] = df_demand_cln
            state.report["afa"]["df_model_dist"] = df_model_dist
            state.report["afa"]["best_err"] = best_err
            state.report["afa"]["naive_err"] = naive_err
            state.report["afa"]["job_duration"] = time.time() - start
        job_duration = state.report["afa"].get("job_duration", None)
        if job_duration:
            st.text(f"(completed in {format_timespan(job_duration)})")
    return
def panel_accuracy():
    """Render the "Forecast Summary" panel for the statistical (AFA) forecasts.

    Reads the demand data and AFA results from the global ``state`` and
    displays: the run parameters, the demand-classification split, a pie
    chart of winning model types, and the overall accuracy (100 - error)
    versus the naive baseline.  Renders nothing until the forecast job has
    populated ``state``.
    """
    df = state.report["data"].get("df", None)
    df_demand_cln = state.report["afa"].get("df_demand_cln", None)
    df_results = state.report["afa"].get("df_results", None)
    df_model_dist = state["report"]["afa"].get("df_model_dist", None)
    best_err = state["report"]["afa"].get("best_err", None)
    naive_err = state["report"]["afa"].get("naive_err", None)
    horiz = state.report["afa"].get("horiz", None)
    freq_out = state.report["afa"].get("freq", None)
    # silently skip rendering until the required results exist
    if df is None or df_results is None or df_model_dist is None:
        return
    def _calc_metrics(dd, metric="smape"):
        # flatten the per-window backtest arrays of one group and score
        # them with the selected error metric
        if metric == "smape":
            metric_func = calc_smape
        elif metric == "wape":
            metric_func = calc_wape
        else:
            raise NotImplementedError
        ys = np.hstack(dd["y_cv"].apply(np.hstack))
        yp = np.hstack(dd["yp_cv"].apply(np.hstack))
        return metric_func(ys, yp)
    # one error value per (channel, family, item_id) timeseries; the
    # groupby-apply result column is named None until renamed to METRIC
    df_acc = df_results.groupby(["channel", "family", "item_id"], as_index=False, sort=True) \
        .apply(lambda dd: _calc_metrics(dd, METRIC)) \
        .rename({None: METRIC}, axis=1)
    with st.beta_expander("🎯 Forecast Summary", expanded=True):
        _write(f"""
        Step 4 – The forecast error is calculated as the [symmetric
        mean absolute percentage error
        (SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)
        via sliding window backtesting. Forecast _accuracy_ is calculated as
        `100-SMAPE` and is averaged across all timeseries to give the _overall
        accuracy_. The overall accuracy of the best naive models is used as a baseline.
        The _classification_ distribution indicates the percentage timeseries
        that have a _short_, _medium_, or _continuous_ lifecycle. The _Best Models_ chart
        shows the distribution of each model type that were selected as the best model
        across the dataset.
        """)
        # left-join the observed category fractions onto the fixed category
        # order so missing categories show as 0%
        df_cln = pd.DataFrame({"category": ["short", "medium", "continuous"]})
        df_cln = df_cln.merge(
            df_demand_cln["category"]
                .value_counts(normalize=True)
                .reset_index()
                .rename({"index": "category", "category": "frac"}, axis=1),
            on="category", how="left"
        )
        df_cln = df_cln.fillna(0.0)
        df_cln["frac"] *= 100
        df_cln["frac"] = df_cln["frac"].astype(int)
        _cols = st.beta_columns(3)
        with _cols[0]:
            st.markdown("#### Parameters")
            st.text(f"Horiz. Length:\t{horiz}\n"
                f"Frequency:\t{FREQ_MAP_LONG[freq_out]}")
            st.markdown("#### Classification")
            st.text(f"Short:\t\t{df_cln.iloc[0]['frac']} %\n"
                f"Medium:\t\t{df_cln.iloc[1]['frac']} %\n"
                f"Continuous:\t{df_cln.iloc[2]['frac']} %")
        with _cols[1]:
            st.markdown("#### Best Models")
            # only chart model types that actually won at least once
            df_model_dist = df_model_dist.query("perc > 0")
            labels = df_model_dist["model_type"].values
            values = df_model_dist["perc"].values
            fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.40)])
            fig.update(layout_showlegend=False)
            fig.update_layout(
                margin={"t": 0, "b": 0, "r": 20, "l": 20},
                width=200,
                height=150,
            )
            #fig.update_traces(textinfo="percent+label", texttemplate="%{label} – %{percent:.1%f}")
            fig.update_traces(textinfo="percent+label")
            st.plotly_chart(fig)
        # accuracy = 100 * (1 - mean error); naive baseline for comparison
        acc_val = (1 - np.nanmean(df_acc[METRIC])) * 100.
        acc_naive = (1 - naive_err.err_mean) * 100.
        with _cols[2]:
            st.markdown("#### Overall Accuracy")
            st.markdown(
                f"<div style='font-size:36pt;font-weight:bold'>{acc_val:.0f}%</div>"
                f"({np.clip(acc_val - acc_naive, 0, None):.0f}% increase vs. naive)",
                unsafe_allow_html=True)
    return
@st.cache()
def make_df_top(df, df_results, groupby_cols, dt_start, dt_stop, cperc_thresh,
        metric="smape"):
    """Summarize accuracy for the top-demand groups (statistical forecasts).

    Parameters
    ----------
    df : pd.DataFrame
        Historic demand, date-indexed, with ``demand`` and the
        ``groupby_cols`` columns.
    df_results : pd.DataFrame
        AFA backtest results containing ``rank``, ``ts_cv``, ``y_cv`` and
        ``yp_cv`` columns.
    groupby_cols : list of str
        Subset of ["channel", "family", "item_id"] to aggregate by.
    dt_start, dt_stop : str or datetime-like
        Inclusive period over which demand and errors are computed.
    cperc_thresh : float
        Keep only the largest groups covering this cumulative percentage
        of total demand.
    metric : str
        Either "smape" or "wape".

    Returns
    -------
    tuple of pd.DataFrame
        ``(df_grp, df_grp_summary)`` — the per-group table and a
        single-row summary.
    """
    def calc_period_metrics(dd, dt_start, dt_stop):
        """Calculate the error metric of one group over [dt_start, dt_stop]."""
        dt_start = pd.Timestamp(dt_start)
        dt_stop = pd.Timestamp(dt_stop)
        # flatten sliding-window backtests, keep samples inside the period
        ts = np.hstack(dd["ts_cv"].apply(np.hstack))
        ix = (ts >= dt_start) & (ts <= dt_stop)
        ys = np.hstack(dd["y_cv"].apply(np.hstack))[ix]
        yp = np.hstack(dd["yp_cv"].apply(np.hstack))[ix]
        if metric == "smape":
            error = calc_smape(ys, yp)
        elif metric == "wape":
            error = calc_wape(ys, yp)
        else:
            raise NotImplementedError
        return error
    metric_name = f"{metric}_mean"
    # FIX: rename_axis returns a copy; the original mutated df.index.name in
    # place, which mutates the @st.cache-cached input dataframe (triggers
    # streamlit's CachedObjectMutationWarning and corrupts the cache entry).
    df = df.rename_axis("timestamp")
    dt_start = pd.Timestamp(dt_start).strftime("%Y-%m-%d")
    dt_stop = pd.Timestamp(dt_stop).strftime("%Y-%m-%d")
    df2 = df.query(f"timestamp >= '{dt_start}' and timestamp <= '{dt_stop}'")
    total_demand = df2["demand"].sum()
    # calculate per-group demand %
    df_grp_demand = \
        df2.groupby(groupby_cols, as_index=False, sort=False) \
            .agg({"demand": sum})
    df_grp_demand["perc"] = df_grp_demand["demand"] / total_demand * 100
    # score only the best (rank == 1) model of each group; the apply result
    # column is named None until renamed to metric_name
    df_grp_metrics = \
        df_results.query("rank == 1") \
            .groupby(groupby_cols, as_index=False, sort=False) \
            .apply(lambda dd: calc_period_metrics(dd, dt_start, dt_stop)) \
            .pipe(pd.DataFrame) \
            .rename({None: metric_name}, axis=1) \
            .reset_index()
    df_grp_metrics["accuracy"] = 100 * (1-df_grp_metrics[metric_name])
    df_grp_metrics.drop(["index", metric_name], axis=1, inplace=True)
    # combine, sort by demand, and truncate at the cumulative-% threshold
    df_grp = df_grp_demand \
        .merge(df_grp_metrics, on=groupby_cols, how="left") \
        .sort_values(by="demand", ascending=False)
    df_grp["cperc"] = df_grp["perc"].cumsum()
    df_grp = df_grp.query(f"cperc <= {cperc_thresh}")
    df_grp.rename({"perc": "% total demand", "accuracy": "% accuracy"}, axis=1, inplace=True)
    df_grp.drop("cperc", axis=1, inplace=True)
    # calc. single summary row (nanmean skips groups with no backtest data)
    df_grp_summary = df_grp.agg({"demand": sum, "% accuracy": np.nanmean})
    df_grp_summary["% total demand"] = np.round(100 * df_grp_summary["demand"] / total_demand, 1)
    df_grp_summary = pd.DataFrame(df_grp_summary).T[["demand", "% total demand", "% accuracy"]]
    df_grp_summary.insert(0, "group by", ", ".join(groupby_cols))
    df_grp_summary["% accuracy"] = df_grp_summary["% accuracy"].round(0)
    df_grp["demand"] = df_grp["demand"].round(0)
    df_grp["% total demand"] = df_grp["% total demand"].round(1)
    df_grp["% accuracy"] = df_grp["% accuracy"].round(0)
    df_grp.insert(0, "rank", np.arange(df_grp.shape[0]) + 1)
    df_grp_summary["demand"] = df_grp_summary["demand"].round(0)
    df_grp_summary["% total demand"] = df_grp_summary["% total demand"].round(1)
    return df_grp, df_grp_summary
@st.cache()
def make_ml_df_top(df, df_backtests, groupby_cols, dt_start, dt_stop, cperc_thresh, metric):
    """Summarize accuracy for the top-demand groups (ML/Amazon Forecast).

    Parameters
    ----------
    df : pd.DataFrame
        Historic demand, date-indexed, with ``demand`` and the
        ``groupby_cols`` columns.
    df_backtests : pd.DataFrame
        AFC backtests with ``timestamp``, ``target_value`` (actuals) and
        ``demand`` (forecast) columns.
    groupby_cols : list of str
        Subset of ["channel", "family", "item_id"] to aggregate by.
    dt_start, dt_stop : str or datetime-like
        Inclusive period over which demand and errors are computed.
    cperc_thresh : float
        Keep only the largest groups covering this cumulative percentage
        of total demand.
    metric : str
        Either "smape" or "wape".

    Returns
    -------
    tuple of pd.DataFrame
        ``(df_grp, df_grp_summary)`` — the per-group table and a
        single-row summary.
    """
    def calc_period_metrics(dd, dt_start, dt_stop):
        """Calculate the error metric of one group over [dt_start, dt_stop]."""
        dt_start = pd.Timestamp(dt_start)
        dt_stop = pd.Timestamp(dt_stop)
        ts = dd["timestamp"]
        ix = (ts >= dt_start) & (ts <= dt_stop)
        ys = dd["target_value"][ix]
        yp = dd["demand"][ix]
        if metric == "smape":
            error = calc_smape(ys, yp)
        elif metric == "wape":
            error = calc_wape(ys, yp)
        else:
            raise NotImplementedError
        return error
    # FIX: rename_axis returns a copy; the original mutated df.index.name in
    # place, which mutates the @st.cache-cached input dataframe (triggers
    # streamlit's CachedObjectMutationWarning and corrupts the cache entry).
    df = df.rename_axis("timestamp")
    dt_start = pd.Timestamp(dt_start).strftime("%Y-%m-%d")
    dt_stop = pd.Timestamp(dt_stop).strftime("%Y-%m-%d")
    df2 = df.query(f"timestamp >= '{dt_start}' and timestamp <= '{dt_stop}'")
    total_demand = df2["demand"].sum()
    # calculate per-group demand %
    df_grp_demand = \
        df2.groupby(groupby_cols, as_index=False, sort=False) \
            .agg({"demand": sum})
    df_grp_demand["perc"] = df_grp_demand["demand"] / total_demand * 100
    # per-group backtest error; the apply result column is named None until
    # renamed to the metric name
    df_grp_metrics = \
        df_backtests.groupby(groupby_cols, as_index=False, sort=False) \
            .apply(lambda dd: calc_period_metrics(dd, dt_start, dt_stop)) \
            .rename({None: metric}, axis=1)
    df_grp_metrics["accuracy"] = 100 * (1-df_grp_metrics[metric])
    df_grp_metrics.drop(metric, axis=1, inplace=True)
    # combine, sort by demand, and truncate at the cumulative-% threshold
    df_grp = df_grp_demand \
        .merge(df_grp_metrics, on=groupby_cols, how="left") \
        .sort_values(by="demand", ascending=False)
    df_grp["cperc"] = df_grp["perc"].cumsum()
    df_grp = df_grp.query(f"cperc <= {cperc_thresh}")
    df_grp.rename({"perc": "% total demand", "accuracy": "% accuracy"}, axis=1, inplace=True)
    df_grp.drop("cperc", axis=1, inplace=True)
    # calc. single summary row (nanmean skips groups with no backtest data)
    df_grp_summary = df_grp.agg({"demand": sum, "% accuracy": np.nanmean})
    df_grp_summary["% total demand"] = np.round(100 * df_grp_summary["demand"] / total_demand, 1)
    df_grp_summary = pd.DataFrame(df_grp_summary).T[["demand", "% total demand", "% accuracy"]]
    df_grp_summary.insert(0, "group by", ", ".join(groupby_cols))
    df_grp_summary["% accuracy"] = df_grp_summary["% accuracy"].round(0)
    df_grp["demand"] = df_grp["demand"].round(0)
    df_grp["% total demand"] = df_grp["% total demand"].round(1)
    df_grp["% accuracy"] = df_grp["% accuracy"].round(0)
    df_grp.insert(0, "rank", np.arange(df_grp.shape[0]) + 1)
    df_grp_summary["demand"] = df_grp_summary["demand"].round(0)
    df_grp_summary["% total demand"] = df_grp_summary["% total demand"].round(1)
    return df_grp, df_grp_summary
def panel_top_performers():
    """Render the "Top Performers" panel for the statistical (AFA) forecasts.

    Lets the user filter by group-by columns, date range, and cumulative
    demand percentage; shows the resulting summary and group tables and
    offers a gzipped-CSV export via a presigned S3 URL.  Renders nothing
    until the forecast job has populated ``state``.
    """
    df = state.report["data"].get("df", None)
    df_demand_cln = state.report["afa"].get("df_demand_cln", None)
    df_results = state.report["afa"].get("df_results", None)
    horiz = state.report["afa"].get("horiz", None)
    freq_out = state.report["afa"].get("freq", None)
    if df is None or df_results is None:
        return
    with st.beta_expander("🏆 Top Performers", expanded=True):
        _write(f"""
        Step 5 – Inspect the forecast
        accuracy of individual channels, families, and item IDs (and each subset
        combination therein) for specific time periods and for groups of items
        that cover a given percentage of total demand. For example, you can inspect
        the accuracy for the smaller subset of items that cover 80% of demand in
        the most recent six-month period.
        """)
        st.write("#### Filters")
        _cols = st.beta_columns([2,1,1])
        # date pickers are bounded by the span of the historic demand
        dt_min = df.index.min()
        dt_max = df.index.max()
        with _cols[0]:
            groupby_cols = st.multiselect("Group By",
                ["channel", "family", "item_id"], ["channel", "family", "item_id"])
        with _cols[1]:
            dt_start = st.date_input("Start", value=dt_min, min_value=dt_min, max_value=dt_max)
        with _cols[2]:
            dt_stop = st.date_input("Stop", value=dt_max, min_value=dt_min, max_value=dt_max)
        cperc_thresh = st.slider("Percentage of total demand",
            step=5, value=80, format="%d%%")
        dt_start = dt_start.strftime("%Y-%m-%d")
        dt_stop = dt_stop.strftime("%Y-%m-%d")
        start = time.time()
        with st.spinner("Processing top performers ..."):
            # cached aggregation — see make_df_top
            df_grp, df_grp_summary = \
                make_df_top(df, df_results, groupby_cols, dt_start, dt_stop, cperc_thresh, METRIC)
        st.write("#### Group Summary")
        with st.spinner("Loading **Summary** table"):
            display_ag_grid(df_grp_summary, auto_height=True,
                comma_cols=("demand",))
        st.write("#### Groups")
        with st.spinner("Loading **Groups** table ..."):
            display_ag_grid(df_grp, paginate=True, comma_cols=("demand",))
        st.text(f"(completed in {format_timespan(time.time() - start)})")
        if st.button("Export"):
            with st.spinner(":hourglass_flowing_sand: Exporting **Top Performers** ..."):
                start = time.time()
                # write the dataframe to s3
                now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                basename = os.path.basename(state["report"]["data"]["path"])
                s3_afa_export_path = state["report"]["afa"]["s3_afa_export_path"]
                s3_path = f'{s3_afa_export_path}/{basename}_{now_str}_afa-top-performers.csv.gz'
                wr.s3.to_csv(df_grp, s3_path, compression="gzip", index=False)
                # generate presigned s3 url for user to download
                signed_url = create_presigned_url(s3_path)
                st.info(textwrap.dedent(f"""
                Download the top performers file [here]({signed_url})
                `(completed in {format_timespan(time.time() - start)})`
                """))
    return
def panel_visualization():
    """Render the "Visualization" panel for the statistical (AFA) forecasts.

    Plots historic ("actual"), forecast, and mean-backtest demand for a
    single (channel, family, item_id) timeseries selected via a form.
    Renders nothing until the forecast job has populated ``state``.
    """
    df = state.report["data"].get("df", None)
    df_results = state.report["afa"].get("df_results", None)
    df_preds = state.report["afa"].get("df_preds", None)
    if df is None or df_results is None or df_preds is None:
        return
    freq = state.report["afa"]["freq"]
    horiz = state.report["afa"]["horiz"]
    start = time.time()
    # rank series by total demand so the default selection is the largest one
    df_top = df.groupby(["channel", "family", "item_id"], as_index=False) \
        .agg({"demand": sum}) \
        .sort_values(by="demand", ascending=False)
    # "" entries act as an "any" wildcard in make_mask
    channel_vals = [""] + sorted(df_results["channel"].unique())
    family_vals = [""] + sorted(df_results["family"].unique())
    item_id_vals = [""] + sorted(df_results["item_id"].unique())
    channel_index = channel_vals.index(df_top["channel"].iloc[0])
    family_index = family_vals.index(df_top["family"].iloc[0])
    item_id_index = item_id_vals.index(df_top["item_id"].iloc[0])
    with st.beta_expander("👁️ Visualization", expanded=True):
        _write(f"""
        Step 6 – Plot the historic, backtest, and forecasted demand for each
        timeseries.
        """)
        with st.form("viz_form"):
            st.markdown("#### Filter By")
            _cols = st.beta_columns(3)
            with _cols[0]:
                channel_choice = st.selectbox("Channel", channel_vals, index=channel_index)
            with _cols[1]:
                family_choice = st.selectbox("Family", family_vals, index=family_index)
            with _cols[2]:
                item_id_choice = st.selectbox("Item ID", item_id_vals, index=item_id_index)
            viz_form_button = st.form_submit_button("Apply")
        if viz_form_button:
            pass
        results_mask = \
            make_mask(df_results, channel_choice, family_choice, item_id_choice)
        pred_mask = \
            make_mask(df_preds, channel_choice, family_choice, item_id_choice)
        df_plot = df_preds[pred_mask]
        if len(df_plot) > 0:
            # display the line chart: actuals vs. forecast
            y = df_plot.query("type == 'actual'")["demand"]
            y_ts = df_plot.query("type == 'actual'")["timestamp"]
            yp = df_plot.query("type == 'fcast'")["demand"]
            yp_ts = df_plot.query("type == 'fcast'")["timestamp"]
            fig = go.Figure()
            fig.add_trace(go.Scatter(
                x=y_ts, y=y, mode='lines', name="actual",
                fill="tozeroy", line={"width": 3}
            ))
            fig.add_trace(go.Scatter(
                x=yp_ts, y=yp, mode='lines', name="forecast",
                fill="tozeroy", line={"width": 3}
            ))
            # overlay the mean of the sliding-window backtests for the
            # best (rank == 1) model of the selected series
            dd = df_results[results_mask].query("rank == 1").iloc[0]
            df_backtest = \
                pd.DataFrame({"yp": np.hstack(dd['yp_cv'])},
                    index=pd.DatetimeIndex(np.hstack(dd["ts_cv"]))) \
                .sort_index() \
                .resample(FREQ_MAP_PD[freq]) \
                .apply(np.nanmean)
            fig.add_trace(go.Scatter(x=df_backtest.index, y=df_backtest.yp, mode="lines",
                name="backtest (mean)", line_dash="dot", line_color="black"))
            fig.update_layout(
                margin={"t": 0, "b": 0, "r": 0, "l": 0},
                height=250,
                legend={"orientation": "h", "yanchor": "bottom", "y": 1.0, "xanchor":"left", "x": 0.0}
            )
            fig.update_xaxes(
                rangeslider_visible=True,
            )
            # default the visible x-range to ~8 horizons ending at the last
            # forecast timestamp, clamped to the start of the actuals
            initial_range = pd.date_range(end=yp_ts.max(), periods=horiz*8, freq=freq)
            initial_range = [max(initial_range[0], y_ts.min()), initial_range[-1]]
            fig["layout"]["xaxis"].update(range=initial_range)
            st.plotly_chart(fig, use_container_width=True)
        plot_duration = time.time() - start
        st.text(f"(completed in {format_timespan(plot_duration)})")
    return
def download_afc_files():
    """Fetch and assemble the Amazon Forecast (AFC) job outputs from S3.

    Reads the job-status JSON to locate the export prefix, then loads the
    processed forecast file, the accuracy-metrics file, and the backtest
    forecasts, normalising them into the dataframes used by the UI panels.

    Returns
    -------
    tuple of pd.DataFrame
        ``(df_preds, df_results, df_backtests)``.
    """
    df = state["report"]["data"]["df"]
    status_dict = parse_s3_json(state.report["afc"]["status_json_s3_path"])
    s3_export_path = status_dict["s3_export_path"]
    prefix = status_dict["prefix"]
    horiz = state["report"]["afc"]["horiz"]
    freq = state["report"]["afc"]["freq"]
    preds_s3_prefix = \
        f'{s3_export_path}/{prefix}/{prefix}_processed.csv'
    results_s3_prefix = \
        f'{s3_export_path}/{prefix}/accuracy-metrics-values/Accuracy_{prefix}_*.csv'
    backtests_s3_prefix = \
        f'{s3_export_path}/{prefix}/forecasted-values/Forecasts_{prefix}_BacktestExportJob_*.csv'
    _df_preds = wr.s3.read_csv(preds_s3_prefix,
        dtype={"channel": str, "family": str, "item_id": str})
    _preds = []
    for _, dd in _df_preds.groupby(["channel", "family", "item_id"], as_index=False, sort=False):
        dd.sort_values(by="timestamp", ascending=True, inplace=True)
        # drop the leading timestep when the export holds one more period
        # than the requested horizon
        if dd.shape[0] > horiz:
            dd = dd.iloc[1:,:]
        _preds.append(dd)
    df_preds = pd.concat(_preds)
    df_preds["type"] = "fcast"
    df_preds["timestamp"] = pd.DatetimeIndex(df_preds["timestamp"])
    df_actual = state["report"]["data"].get("df2", None)
    if df_actual is None:
        df_actual = get_df_resampled(df, freq)
    # append the historic ("actual") rows to the forecast rows.
    # FIX: use pd.concat — DataFrame.append was deprecated in pandas 1.4
    # and removed in pandas 2.0; behavior is otherwise identical.
    df_preds = pd.concat([
        df_preds,
        df_actual
            .reset_index()
            .rename({"index": "timestamp"}, axis=1)
            .assign(type='actual')])
    df_preds["channel"] = df_preds["channel"].str.upper()
    df_preds["family"] = df_preds["family"].str.upper()
    df_preds["item_id"] = df_preds["item_id"].str.upper()
    freq = FREQ_MAP_PD[state.report["afc"]["freq"]]
    df_results = wr.s3.read_csv(results_s3_prefix,
        dtype={"channel": str, "family": str, "item_id": str})
    # Amazon Forecast stores the composite key as "channel@@family@@item_id"
    df_results[["channel", "family", "item_id"]] = \
        df_results["item_id"].str.split("@@", expand=True)
    df_backtests = \
        wr.s3.read_csv(backtests_s3_prefix)
    df_backtests[["channel", "family", "item_id"]] = \
        df_backtests["item_id"].str.split("@@", expand=True)
    df_backtests["timestamp"] = pd.DatetimeIndex(df_backtests["backtestwindow_end_time"])
    # clip negative quantiles to zero; use the (rounded) p50 as the forecast
    df_backtests["p10"] = np.clip(df_backtests["p10"], 0, None)
    df_backtests["demand"] = np.round(np.clip(df_backtests["p50"], 0, None), 0)
    df_backtests["target_value"] = df_backtests["target_value"].round(0)
    df_backtests = df_backtests[["timestamp", "channel", "family", "item_id",
        "demand", "p10", "p90", "target_value"]]
    df_backtests.sort_values(by=["channel", "family", "item_id", "timestamp"],
        inplace=True)
    return df_preds, df_results, df_backtests
def panel_downloads():
    """Render the "Export Forecasts" panel.

    Offers gzipped-CSV exports (via presigned S3 URLs) of the statistical
    (AFA) forecasts/backtests and, when available, the machine-learning
    (AFC) forecasts/backtests, followed by an expander documenting the
    export file formats.  Renders nothing until at least one forecast set
    exists in ``state``.
    """
    df = state.report["data"].get("df", None)
    df_results = state.report["afa"].get("df_results", None)
    df_preds = state.report["afa"].get("df_preds", None)
    df_afc_results = state.report["afc"].get("df_results", None)
    df_afc_preds = state.report["afc"].get("df_preds", None)
    if df is None or df_results is None or (df_preds is None and df_afc_preds is None):
        return
    with st.beta_expander("⬇️ Export Forecasts", expanded=True):
        _write(f"""
        Export the forecasts and backtests as `.csv.gz` files.
        """)
        # use cached forecast files if previously generated
        afa_forecasts_s3_path = state.report["afa"].get("forecasts_s3_path", None)
        afa_backtests_s3_path = state.report["afa"].get("backtests_s3_path", None)
        afc_forecasts_s3_path = state.report["afc"].get("forecasts_s3_path", None)
        afc_backtests_s3_path = state.report["afc"].get("backtests_s3_path", None)
        export_forecasts_btn = \
            st.button("Export Statistical Forecasts", key="afa_export_forecast_btn")
        if export_forecasts_btn:
            start = time.time()
            s3_afa_export_path = state["report"]["afa"]["s3_afa_export_path"]
            with st.spinner(":hourglass_flowing_sand: Exporting Forecasts ..."):
                # export the forecast file to s3 if it doesnt exist
                if afa_forecasts_s3_path is None:
                    # timestamped filename makes each export unique
                    now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                    basename = os.path.basename(state["report"]["data"]["path"])
                    afa_forecasts_s3_path = \
                        f'{s3_afa_export_path}/{basename}_{now_str}_afa-forecasts.csv.gz'
                    wr.s3.to_csv(df_preds, afa_forecasts_s3_path,
                        compression="gzip", index=False)
                    # cache the path so re-clicks reuse the same file
                    state["report"]["afa"]["forecasts_s3_path"] = \
                        afa_forecasts_s3_path
                else:
                    pass
                forecasts_signed_url = create_presigned_url(afa_forecasts_s3_path)
                st.markdown("#### Statistical Forecasts")
                st.markdown("####")
                st.info(textwrap.dedent(f"""
                Download the forecasts file [here]({forecasts_signed_url})
                `(completed in {format_timespan(time.time()-start)})`.
                """))
            with st.spinner(":hourglass_flowing_sand: Exporting Backtests ..."):
                # export the forecast file to s3 if it doesnt exist
                if afa_backtests_s3_path is None:
                    now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                    basename = os.path.basename(state["report"]["data"]["path"])
                    afa_backtests_s3_path = \
                        f'{s3_afa_export_path}/{basename}_{now_str}_afa-backtests.csv.gz'
                    # NOTE(review): keep_cols appears unused below — confirm
                    keep_cols = \
                        ["channel", "family", "item_id", "model_type",
                        "smape_mean"]
                    df_backtests = df_results[GROUP_COLS + ["y_cv", "yp_cv"]].copy()
                    # convert df_results to csv-friendly backtests (arrays -> lists)
                    df_backtests["y_cv"] = df_backtests["y_cv"].apply(lambda xs: xs.tolist())
                    df_backtests["yp_cv"] = df_backtests["yp_cv"].apply(lambda xs: xs.tolist())
                    df_backtests.rename(
                        {"y_cv": "bt_actuals", "yp_cv": "bt_forecast"}, axis=1,
                        inplace=True)
                    wr.s3.to_csv(df_backtests, afa_backtests_s3_path,
                        compression="gzip", index=False)
                    state["report"]["afa"]["backtests_s3_path"] = \
                        afa_backtests_s3_path
                else:
                    pass
                backtests_signed_url = create_presigned_url(afa_backtests_s3_path)
                st.info(textwrap.dedent(f"""
                Download the forecast backtests file [here]({backtests_signed_url})
                `(completed in {format_timespan(time.time()-start)})`.
                """))
        # re-read the AFC frames in case the ML job finished since panel start
        df_afc_preds = state["report"]["afc"].get("df_preds", None)
        df_afc_results = state["report"]["afc"].get("df_results", None)
        export_afc_forecasts_btn = \
            st.button("Export Machine Learning Forecasts",
                key="afc_export_forecast_btn")
        if export_afc_forecasts_btn:
            # NOTE(review): execution continues after this message — an early
            # `return` (or guard) may be intended here; confirm before changing.
            if df_afc_preds is None or df_afc_results is None:
                st.info("Machine learning forecasts are not yet ready.")
            s3_afc_export_path = state["report"]["afc"]["s3_afc_export_path"]
            if state.report["afc"].get("status_json_s3_path", None):
                st.markdown("#### Machine Learning Forecasts")
                st.markdown("####")
                status_dict = parse_s3_json(state.report["afc"]["status_json_s3_path"])
                prefix = status_dict["prefix"]
                s3_export_path = status_dict["s3_export_path"]
                # read the raw amazon forecast files from here
                preds_s3_prefix = \
                    f'{s3_export_path}/{prefix}/{prefix}_processed.csv'
                results_s3_prefix = \
                    f'{s3_export_path}/{prefix}/accuracy-metrics-values/Accuracy_{prefix}_*.csv'
                start = time.time()
                if afc_forecasts_s3_path is None:
                    try:
                        df_preds = state["report"]["afc"]["df_preds"]
                        # forecasts are exported as non-negative integers
                        df_preds["demand"] = np.ceil(df_preds["demand"].clip(0).fillna(0.0))
                        now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                        basename = os.path.basename(state["report"]["data"]["path"])
                        afc_forecasts_path = \
                            f"{s3_afc_export_path}/{basename}_{now_str}_afc-forecasts.csv.gz"
                        wr.s3.to_csv(
                            df_preds[["timestamp","channel", "family", "item_id", "demand", "type"]],
                            afc_forecasts_path, compression="gzip", index=False)
                        state["report"]["afc"]["forecasts_s3_path"] = afc_forecasts_path
                        afc_forecasts_s3_path = afc_forecasts_path
                    except NoFilesFound:
                        # best-effort: the export files may not exist yet
                        pass
                else:
                    pass
                forecasts_signed_url = create_presigned_url(afc_forecasts_s3_path)
                st.info(textwrap.dedent(f"""
                Download the forecasts file [here]({forecasts_signed_url})
                `(completed in {format_timespan(time.time()-start)})`.
                """))
                start = time.time()
                if afc_backtests_s3_path is None:
                    try:
                        df_backtests = state["report"]["afc"]["df_backtests"]
                        now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                        basename = os.path.basename(state["report"]["data"]["path"])
                        afc_backtests_s3_path = \
                            f"{s3_afc_export_path}/{basename}_{now_str}_afc-backtests.csv.gz"
                        wr.s3.to_csv(
                            df_backtests.rename({"demand": "bt_forecasts", "target_value": "bt_actuals"}, axis=1)
                                .drop(["p10", "p90"], axis=1),
                            afc_backtests_s3_path, compression="gzip", index=False)
                        state["report"]["afc"]["backtests_s3_path"] = afc_backtests_s3_path
                    except NoFilesFound:
                        # best-effort: the export files may not exist yet
                        pass
                else:
                    pass
                backtests_signed_url = create_presigned_url(afc_backtests_s3_path)
                st.info(textwrap.dedent(f"""
                Download the forecast backtests file [here]({backtests_signed_url})
                `(completed in {format_timespan(time.time()-start)})`.
                """))
    with st.beta_expander("ℹ️ Export File Formats", expanded=True):
        st.write(dedent("""
        #### Common columns
        - `timestamp` – String, date of the demand, in the format `YYYY-mm-dd` (e.g. "2020-12-25")
        - `channel` – String, the originating store or platform of the demand (e.g. Website, Store-22)
        - `family` – String, the category of the item (e.g. Shirts)
        - `item_id` – String, the unique item identifier/SKU code (e.g. SKU29292)
        - `demand` – Numeric, the demand amount of the item, which must be >= 0 (e.g. 413)
        - `type` – String
        - "actual" when `demand` is the historic demand
        - "fcast" when `demand` is the forecasted demand
        #### Statistical Forecasts
        - Forecasts file columns
        - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`
        - Backtests file columns
        - `channel`, `family`, `item_id`
        - `bt_actuals` – list of sliding window actuals, each window is the
        length of the forecast horizon.
        - `bt_forecast` – list of sliding window forecasts, each window
        is the length of the forecast horizon and has a 1:1 correspondence
        with the windows in `bt_actuals`.
        - The `timestamp` column is omitted to reduce the file size,
        however, the first and last sliding windows correspond to the
        first and last timestamps of the historic demand, respectively.
        #### Machine Learning Forecasts
        - Forecasts file columns
        - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`
        - Backtests file columns
        - `timestamp`, `channel`, `family`, `item_id`
        - `bt_actuals` – the actual demand for the backtest period
        - `bt_forecast` – the forecasted demand for the backtest period
        ####
        """), unsafe_allow_html=True)
    return
#
# ML Forecasting Panels
#
def parse_s3_json(path):
    """Load and decode a JSON document stored in S3.

    :param path: full "s3://bucket/key" URL of the JSON object.
    :return: the decoded JSON payload (dict).
    """
    url = urlparse(path, allow_fragments=False)
    obj = boto3.resource("s3").Object(
        bucket_name=url.netloc, key=url.path.strip("/"))
    return json.loads(obj.get()["Body"].read())
def panel_ml_launch():
    """Render the "Launch" panel for machine-learning (Amazon Forecast) jobs.

    Provides a form to launch an AWS Step Functions state machine that runs
    Amazon Forecast, a button to poll the job status (loading results into
    ``state`` on success), and a button to stop the running execution.
    Renders nothing until data has been uploaded.
    """
    df = state.report["data"].get("df", None)
    if df is None:
        return
    st.header("Machine Learning Forecasts")
    with st.beta_expander("🚀 Launch", expanded=True):
        st.write("_Optional_ – Launch machine learning forecasts using the [Amazon Forecast](https://aws.amazon.com/forecast/) managed service.")
        with st.form("ml_form"):
            _cols = st.beta_columns(3)
            with _cols[0]:
                horiz = st.number_input("Horizon Length", key="ml_horiz_input",
                    value=1, min_value=1)
            with _cols[1]:
                freq = st.selectbox("Forecast Frequency",
                    valid_launch_freqs(), 0,
                    format_func=lambda s: FREQ_MAP_LONG[s], key="ml_freq_input")
            with _cols[2]:
                st.selectbox("Algorithm", ["AutoML"], 0, key="ml_algo_input")
            ml_form_button = st.form_submit_button("Launch")
        # Launch Amazon Forecast job
        if ml_form_button:
            with st.spinner("🚀 Launching ML forecasting job ..."):
                # persist the run parameters before starting the job
                state.report["afc"]["horiz"] = horiz
                state.report["afc"]["freq"] = freq
                execution_arn, prefix, status_json_s3_path = \
                    run_ml_state_machine()
                state.report["afc"]["execution_arn"] = execution_arn
                state.report["afc"]["status_json_s3_path"] = status_json_s3_path
                state.report["afc"]["prefix"] = prefix
                st.info(dedent(f"""
                Job submitted, the ARN is:
                - {execution_arn}
                ####
                """))
        execution_arn = state.report["afc"].get("execution_arn", None)
        check_job_status_btn = st.button("🔄 Check Job Status")
        if check_job_status_btn and execution_arn is not None:
            with st.spinner("⏳ Checking job status ..."):
                sfn_status, status_dict = refresh_ml_state_machine_status()
                sfn_state = status_dict["PROGRESS"]["state"]
                # NOTE(review): sfn_status_str is assigned but never used —
                # possibly sfn_status was intended here; confirm.
                if sfn_status not in ("RUNNING", "SUCCEEDED", "FAILED",
                        "TIMED_OUT", "ABORTED",):
                    sfn_status_str = "UNKNOWN"
                st.info(textwrap.dedent(f"""
                **Status:** {sfn_status}
                **Stage:** {sfn_state}
                **Execution ARN:** `{execution_arn}`
                **AWS Console:** [view](https://console.aws.amazon.com/states/home#/executions/details/{execution_arn})
                """))
                if sfn_status == "SUCCEEDED":
                    # download the results
                    with st.spinner("⏳ Loading ML forecasts and results..."):
                        df_preds, df_results, df_backtests = download_afc_files()
                        state["report"]["afc"]["df_preds"] = df_preds
                        state["report"]["afc"]["df_results"] = df_results
                        state["report"]["afc"]["df_backtests"] = df_backtests
        _cols = st.beta_columns([2,0.485])
        with _cols[1]:
            ml_stop_button = st.button("🛑 Stop Job")
        if ml_stop_button:
            # abort the step-functions execution for the current job
            sfn_client = boto3.client("stepfunctions")
            resp = sfn_client.stop_execution(executionArn=execution_arn)
            st.write(resp)
    return
def calc_afc_ml_accuracies(metric="smape"):
    """Compute per-timeseries error and accuracy for the ML (AFC) backtests.

    Reads ``state.report["afc"]["df_backtests"]`` and scores each
    (channel, family, item_id) series with the chosen metric.

    Parameters
    ----------
    metric : str
        Either "smape" or "wape".

    Returns
    -------
    pd.DataFrame
        One row per timeseries with the raw metric, a "smape" column
        clipped to [0, 1], and "acc" = 100 * (1 - error).

    Raises
    ------
    NotImplementedError
        If ``metric`` is not a supported metric name.
    """
    if metric == "smape":
        metric_func = calc_smape
    elif metric == "wape":
        metric_func = calc_wape
    else:
        raise NotImplementedError
    df_backtests = state.report["afc"]["df_backtests"]
    # FIX: the previous pipeline used an undefined "| px." operator chain;
    # rewritten as a plain pandas method chain with the same steps.
    # Negative actuals/forecasts are clipped to zero before scoring.
    df_accuracies = \
        df_backtests.groupby(["channel", "family", "item_id"], sort=False) \
            .apply(lambda dd: metric_func(dd["target_value"].clip(0, None),
                dd["demand"].clip(0, None))) \
            .reset_index() \
            .rename({0: metric}, axis=1) \
            .assign(smape=lambda df: df[metric].clip(0, 1)) \
            .assign(acc=lambda df: (1 - df[metric]) * 100)
    return df_accuracies
def panel_ml_forecast_summary():
    """Render the "Forecast Summary" panel for the ML (Amazon Forecast) run.

    Shows an explanation of the backtest-based error calculation and the
    overall accuracy averaged across all timeseries.  Renders nothing until
    the ML job results are present in ``state``.
    """
    df = state.report["data"].get("df", None)
    df_preds = state.report["afc"].get("df_preds", None)
    df_results = state.report["afc"].get("df_results", None)
    df_backtests = state.report["afc"].get("df_backtests", None)
    if df is None or df_results is None or df_backtests is None or \
            df_preds is None:
        return
    with st.beta_expander("🎯 Forecast Summary", expanded=True):
        # per-timeseries accuracies; overall accuracy is their mean
        df_accuracies = calc_afc_ml_accuracies(METRIC)
        ml_acc = df_accuracies["acc"].mean()
        _cols = st.beta_columns([3,1])
        with _cols[0]:
            st.write(dedent(f"""
            The forecast error is calculated as the [symmetric
            mean absolute percentage error
            (SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)
            via sliding window backtesting. Forecast _accuracy_ is calculated as
            `100-SMAPE` and is averaged across all timeseries to give the _overall accuracy_.
            Note: Due to the limitations of the ML forecasting approach,
            backtests were only generated over [the five most recent windows
            before the
            horizon](https://docs.aws.amazon.com/forecast/latest/dg/metrics.html#backtesting).
            """))
        with _cols[1]:
            st.markdown("#### Overall Accuracy")
            st.markdown(
                f"<div style='font-size:36pt;font-weight:bold'>{ml_acc:.0f}%</div>",
                unsafe_allow_html=True)
    return
def panel_ml_top_performers():
    """Render the "Top Performers" panel for the ML (Amazon Forecast) run.

    Same workflow as the statistical panel but driven by the AFC backtests:
    filter by group-by columns, backtest date range, and cumulative demand
    percentage; display the tables; optionally export to S3.  Renders
    nothing until the ML job results are present in ``state``.
    """
    df = state.report["data"].get("df", None)
    df_results = state.report["afc"].get("df_results", None)
    horiz = state.report["afc"].get("horiz", None)
    freq_out = state.report["afc"].get("freq", None)
    if df is None or df_results is None:
        return
    df_backtests = state.report["afc"]["df_backtests"]
    with st.beta_expander("🏆 Top Performers", expanded=True):
        _write(f"""
        Inspect the forecast accuracy of individual channels,
        families, and item IDs (and each subset combination therein) for
        specific groups of items during a given backtest period.
        """)
        st.write("#### Filters")
        # dt_min and dt_max are the time boundaries of the backtesting
        # for amazon forecast, this is relatively short
        dt_min = df_backtests["timestamp"].min()
        dt_max = df_backtests["timestamp"].max()
        _cols = st.beta_columns([2,1,1])
        with _cols[0]:
            groupby_cols = st.multiselect("Group By",
                ["channel", "family", "item_id"], ["channel", "family", "item_id"],
                key="ml_top_perf_groupby")
        with _cols[1]:
            dt_start = st.date_input("Start", value=dt_min, min_value=dt_min,
                max_value=dt_max, key="ml_dt_start")
        with _cols[2]:
            dt_stop = st.date_input("Stop", value=dt_max, min_value=dt_min,
                max_value=dt_max, key="ml_dt_stop")
        cperc_thresh = st.slider("Percentage of total demand",
            step=5, value=80, format="%d%%", key="ml_perc_demand")
        dt_start = dt_start.strftime("%Y-%m-%d")
        dt_stop = dt_stop.strftime("%Y-%m-%d")
        start = time.time()
        with st.spinner("Processing top performers ..."):
            # cached aggregation — see make_ml_df_top
            df_grp, df_grp_summary = \
                make_ml_df_top(df, df_backtests, groupby_cols, dt_start,
                    dt_stop, cperc_thresh, METRIC)
        st.write("#### Group Summary")
        with st.spinner("Loading **Summary** table"):
            display_ag_grid(df_grp_summary, auto_height=True,
                comma_cols=("demand",))
        st.write("#### Groups")
        with st.spinner("Loading **Groups** table ..."):
            display_ag_grid(df_grp, paginate=True, comma_cols=("demand",))
        st.text(f"(completed in {format_timespan(time.time() - start)})")
        if st.button("Export", key="ml_top_perf_export_btn"):
            with st.spinner(":hourglass_flowing_sand: Exporting **Top Performers** ..."):
                start = time.time()
                # write the dataframe to s3
                now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                basename = os.path.basename(state["report"]["data"]["path"])
                s3_afc_export_path = state["report"]["afc"]["s3_afc_export_path"]
                s3_path = f'{s3_afc_export_path}/{basename}_{now_str}_afc-top-performers.csv.gz'
                wr.s3.to_csv(df_grp, s3_path, compression="gzip", index=False)
                # generate presigned s3 url for user to download
                signed_url = create_presigned_url(s3_path)
                st.info(textwrap.dedent(f"""
                Download the top performers file [here]({signed_url})
                `(completed in {format_timespan(time.time() - start)})`
                """))
    return
def panel_ml_visualization():
    """Render the "Visualization" panel for the ML (Amazon Forecast) run.

    Plots historic ("actual"), forecast, and backtest demand for a single
    (channel, family, item_id) timeseries selected via a form.  Renders
    nothing until the ML job results are present in ``state``.
    """
    df = state.report["data"].get("df", None)
    df_ml_results = state.report["afc"].get("df_results", None)
    df_ml_preds = state.report["afc"].get("df_preds", None)
    df_ml_backtests = state.report["afc"].get("df_backtests", None)
    if df is None or df_ml_results is None or df_ml_preds is None:
        return
    freq = state.report["afc"]["freq"]
    horiz = state.report["afc"]["horiz"]
    start = time.time()
    # rank series by total demand so the default selection is the largest one
    df_top = df.groupby(["channel", "family", "item_id"], as_index=False) \
        .agg({"demand": sum}) \
        .sort_values(by="demand", ascending=False)
    # "" entries act as an "any" wildcard in make_mask
    channel_vals = [""] + sorted(df_ml_results["channel"].unique())
    family_vals = [""] + sorted(df_ml_results["family"].unique())
    item_id_vals = [""] + sorted(df_ml_results["item_id"].unique())
    channel_index = channel_vals.index(df_top["channel"].iloc[0])
    family_index = family_vals.index(df_top["family"].iloc[0])
    item_id_index = item_id_vals.index(df_top["item_id"].iloc[0])
    with st.beta_expander("👁️ Visualization", expanded=True):
        with st.form("ml_viz_form"):
            st.markdown("#### Filter By")
            _cols = st.beta_columns(3)
            with _cols[0]:
                channel_choice = st.selectbox("Channel", channel_vals, index=channel_index, key="ml_results_channel")
            with _cols[1]:
                family_choice = st.selectbox("Family", family_vals, index=family_index, key="ml_results_family")
            with _cols[2]:
                item_id_choice = st.selectbox("Item ID", item_id_vals, index=item_id_index, key="ml_results_item")
            viz_form_button = st.form_submit_button("Apply")
        if viz_form_button:
            pass
        results_mask = \
            make_mask(df_ml_results, channel_choice, family_choice, item_id_choice)
        pred_mask = \
            make_mask(df_ml_preds, channel_choice, family_choice, item_id_choice)
        backtest_mask = \
            make_mask(df_ml_backtests, channel_choice, family_choice, item_id_choice)
        df_plot = df_ml_preds[pred_mask]
        _df_backtests = df_ml_backtests[backtest_mask]
        if len(df_plot) > 0:
            # display the line chart: actuals vs. forecast vs. backtest
            y = df_plot.query("type == 'actual'")["demand"]
            y_ts = df_plot.query("type == 'actual'")["timestamp"]
            yp = df_plot.query("type == 'fcast'")["demand"]
            yp_ts = df_plot.query("type == 'fcast'")["timestamp"]
            fig = go.Figure()
            fig.add_trace(go.Scatter(
                x=y_ts, y=y, mode='lines+markers', name="actual",
                fill="tozeroy", line={"width":3}, marker=dict(size=4)
            ))
            fig.add_trace(go.Scatter(
                x=yp_ts, y=np.round(yp, 0), mode='lines+markers', name="forecast",
                fill="tozeroy", marker=dict(size=4)
            ))
            fig.add_trace(go.Scatter(x=_df_backtests["timestamp"],
                y=np.round(_df_backtests.demand, 0), mode="lines",
                name="backtest", line_dash="dot", line_color="black"))
            fig.update_layout(
                margin={"t": 0, "b": 0, "r": 0, "l": 0},
                height=250,
                legend={"orientation": "h", "yanchor": "bottom", "y": 1.0, "xanchor":"left", "x": 0.0}
            )
            fig.update_xaxes(rangeslider_visible=True)
            # default the visible x-range to ~8 horizons ending at the last
            # forecast timestamp, clamped to the start of the actuals
            initial_range = pd.date_range(end=yp_ts.max(), periods=horiz*8, freq=freq)
            initial_range = [max(initial_range[0], y_ts.min()), initial_range[-1]]
            fig["layout"]["xaxis"].update(range=initial_range)
            st.plotly_chart(fig, use_container_width=True)
        plot_duration = time.time() - start
        st.text(f"(completed in {format_timespan(plot_duration)})")
    return
def run_ml_state_machine():
    """Execute the Amazon Forecast state machine.

    Re-shapes the resampled demand data into the Amazon Forecast
    "target time-series" format (timestamp, demand, item_id), uploads it to
    S3, and starts the Step Functions state machine driving the Amazon
    Forecast workflow.

    Returns:
        tuple: ``(execution_arn, prefix, status_json_s3_path)`` — the Step
        Functions execution ARN, the unique resource-name prefix for this
        run, and the S3 path of the job's status JSON file.

    Raises:
        ValueError: if no demand data has been loaded.
        NotImplementedError: if the data frequency is unsupported.
    """
    PD_TIMESTAMP_FMT = "%Y-%m-%d"
    AFC_TIMESTAMP_FMT = "yyyy-MM-dd"  # Amazon Forecast-side format (kept for reference)
    AFC_FORECAST_HORIZON = state.report["afc"]["horiz"]
    AFC_FORECAST_FREQUENCY = state.report["afc"]["freq"]

    df = state.report["data"].get("df", None)
    fn = state.report["data"]["path"]

    # BUGFIX: previously `assert(df is not None)` — asserts are stripped
    # under `python -O`, so validate explicitly.
    if df is None:
        raise ValueError("no demand data loaded; cannot launch the ML job")

    # Map the pandas frequency aliases to the Amazon Forecast ones.
    data_freq = state.report["data"]["freq"]
    if data_freq in ("D",):
        pass
    elif data_freq in ("W", "W-MON",):
        data_freq = "W"
    elif data_freq in ("M", "MS",):
        data_freq = "M"
    else:
        raise NotImplementedError

    # state.df is already resampled to same frequency as the forecast freq.
    state_machine_arn = None

    # generate a unique prefix for the Amazon Forecast resources
    now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    prefix = f"AfaAfc{now_str}"

    # get the state machine arn and s3 paths
    ssm_client = boto3.client("ssm")
    state_machine_arn = \
        ssm_client.get_parameter(Name="AfaAfcStateMachineArn")["Parameter"]["Value"]
    s3_input_path = \
        ssm_client.get_parameter(Name="AfaS3InputPath")["Parameter"]["Value"].rstrip("/")
    s3_output_path = \
        ssm_client.get_parameter(Name="AfaS3OutputPath")["Parameter"]["Value"].rstrip("/")

    # generate amazon forecast compatible data
    with st.spinner("Launching Amazon Forecast job ..."):
        # Collapse (channel, family, item_id) into a single "@@"-delimited
        # item_id, as required by the Amazon Forecast dataset schema.
        df_afc = df \
            | px.reset_index() \
            | px.rename({"index": "timestamp"}, axis=1) \
            | px.assign(item_id=px["channel"] + "@@" + px["family"] + "@@" + px["item_id"]) \
            | px[["timestamp", "demand", "item_id"]] \
            | px.sort_values(by=["item_id", "timestamp"])

        # BUGFIX: use the PD_TIMESTAMP_FMT constant instead of a duplicated
        # "%Y-%m-%d" literal (they were identical, but could silently drift).
        df_afc["timestamp"] = \
            pd.DatetimeIndex(df_afc["timestamp"]).strftime(PD_TIMESTAMP_FMT)

        afc_input_fn = \
            re.sub("(.csv.gz)", ".csv", os.path.basename(fn))
        s3_input_path = f"{s3_input_path}/{afc_input_fn}"

        # upload the input csv to s3
        wr.s3.to_csv(df_afc, s3_input_path, index=False)

        # upload local re-sampled csv file to s3 input path
        client = boto3.client("stepfunctions")
        resp = client.start_execution(
            stateMachineArn=state_machine_arn,
            input=json.dumps({
                "prefix": prefix,
                "data_freq": data_freq,
                "horiz": AFC_FORECAST_HORIZON,
                "freq": AFC_FORECAST_FREQUENCY,
                "s3_path": s3_input_path,
                "s3_export_path": s3_output_path
            })
        )

        status_json_s3_path = \
            os.path.join(s3_output_path, f'{prefix}_status.json')

    return resp["executionArn"], prefix, status_json_s3_path
def refresh_ml_state_machine_status():
    """Poll the running Amazon Forecast job.

    Returns:
        tuple: ``(sfn_status, status_dict)`` — the Step Functions execution
        status string and the parsed contents of the job's status JSON on S3.
    """
    client = boto3.client("stepfunctions")
    execution_arn = state.report["afc"]["execution_arn"]
    describe_resp = client.describe_execution(executionArn=execution_arn)
    status_dict = parse_s3_json(state.report["afc"]["status_json_s3_path"])
    return describe_resp["status"], status_dict
def file_selectbox(label, folder, globs=("*.csv", "*.csv.gz"), **kwargs):
    """Render a selectbox of the files under ``folder`` matching ``globs``.

    The widget displays base file names but returns the full path of the
    selected file. Only local folders are supported.

    Raises:
        NotImplementedError: if ``folder`` is an ``s3://`` URI.
    """
    if folder.startswith("s3://"):
        raise NotImplementedError

    matches = []
    for pattern in globs:
        matches.extend(glob.glob(os.path.join(folder, pattern)))

    return st.selectbox(label, matches,
                        format_func=lambda s: os.path.basename(s), **kwargs)
def nav_radio_format_func(s):
    """Map a navigation radio option key to its display label.

    Returns None for unrecognized keys (mirrors the sidebar radio contract).
    """
    labels = {
        "create_report": "📄 Create Report",
        "load_report": "⬆️ Load Report",
    }
    return labels.get(s)
if __name__ == "__main__":
    # --- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--local-dir", type=str,
        help="/path/to local folder to store input/output files.",
        default=os.path.expanduser("~/SageMaker/"))
    parser.add_argument("--lambdamap-function", type=str,
        help="ARN/name of the lambdamap function",
        default="AfaLambdaMapFunction")
    parser.add_argument("--landing-page-url", type=str,
        help="URL of the AFA landing page", default="#")
    parser.add_argument("--max-lambdas", type=int,
        help="URL of the AFA landing page", default=MAX_LAMBDAS)

    args = parser.parse_args()
    MAX_LAMBDAS = args.max_lambdas

    assert(os.path.exists(os.path.expanduser(args.local_dir)))

    # normalize the landing page URL so it is always https://
    landing_page_url = "https://" + re.sub(r"^(https*://)", "", args.landing_page_url)
    state["landing_page_url"] = landing_page_url

    #st.set_page_config(layout="wide")

    #
    # Sidebar
    #
    st.sidebar.title("Amazon Forecast Accelerator")
    st.sidebar.markdown(textwrap.dedent("""
    - [source code @ github](https://github.com/aws-samples/simple-forecast-solution)
    """))

    clear_report_btn = st.sidebar.button("❌ Clear Report")

    if clear_report_btn:
        state.pop("report")
        gc.collect()

    if "report" not in state:
        state["report"] = {"data": {}, "afa": {}, "afc": {}}

    # populate state global variables from ssm
    ssm_client = boto3.client("ssm")

    if "s3_afc_export_path" not in state["report"]["afc"]:
        state["report"]["afc"]["s3_afc_export_path"] = \
            ssm_client.get_parameter(Name="AfaS3OutputPath")["Parameter"]["Value"].rstrip("/")

    if "s3_bucket" not in state["report"]:
        state["report"]["s3_bucket"] = \
            ssm_client.get_parameter(Name="AfaS3Bucket")["Parameter"]["Value"].strip("/")

    # BUGFIX: these membership checks previously tested state["report"]
    # instead of the nested "afa"/"afc" dicts where the values are actually
    # stored, so every check was always True — in particular the default
    # below unconditionally clobbered the "s3_afc_export_path" fetched from
    # SSM above. Check the nested dicts instead.
    if "s3_afa_export_path" not in state["report"]["afa"]:
        state["report"]["afa"]["s3_afa_export_path"] = \
            f's3://{state["report"]["s3_bucket"]}/afa-exports'

    if "s3_afa_reports_path" not in state["report"]["afa"]:
        state["report"]["afa"]["s3_afa_reports_path"] = \
            f's3://{state["report"]["s3_bucket"]}/afa-reports'

    if "s3_afc_export_path" not in state["report"]["afc"]:
        state["report"]["afc"]["s3_afc_export_path"] = \
            f's3://{state["report"]["s3_bucket"]}/afc-exports'

    if "s3_afc_reports_path" not in state["report"]["afc"]:
        state["report"]["afc"]["s3_afc_reports_path"] = \
            f's3://{state["report"]["s3_bucket"]}/afc-reports'

    #st.write(state["report"])

    #
    # Main page
    #
    panel_create_report(expanded=True)
    panel_load_report(expanded=False)
    panel_data_health()
    panel_launch()
    panel_accuracy()
    panel_top_performers()
    panel_visualization()

    panel_ml_launch()
    panel_ml_forecast_summary()
    panel_ml_top_performers()
    panel_ml_visualization()

    if "df" in state["report"]["data"]:
        st.markdown("## Export")
        panel_downloads()

    def panel_save_report():
        """Render the "Save Report" panel; no-op until a dataset is loaded."""
        if "data" not in state["report"] or "path" not in state["report"]["data"] or \
            "df" not in state["report"]["data"]:
            return

        with st.beta_expander("💾 Save Report", expanded=True):
            _write(f"""
            Save this report for future use, note that the filename must have the `.pkl.gz`
            file extension. You can then re-load the report using the [Load Report](#load-report) form.
            """)
            default_name = f'{state.report["name"]}.report.pkl.gz'
            report_fn = st.text_input("File name", value=default_name,
                help="Please note that the report file name needs to have a `.pkl.gz` file extension.")
            save_btn = st.button("Save")

            if save_btn:
                save_report(report_fn)

    panel_save_report()
| [
"numpy.clip",
"pandas.read_csv",
"gzip.open",
"streamlit.header",
"logging.error",
"numpy.arange",
"plotly.express.box",
"streamlit.form",
"textwrap.dedent",
"streamlit.cache",
"streamlit.stop",
"numpy.round",
"streamlit.sidebar.button",
"streamlit.write",
"gc.collect",
"awswrangler.s3... | [((2848, 2900), 'collections.OrderedDict', 'OrderedDict', ([], {'Daily': '"""D"""', 'Weekly': '"""W-MON"""', 'Monthly': '"""MS"""'}), "(Daily='D', Weekly='W-MON', Monthly='MS')\n", (2859, 2900), False, 'from collections import OrderedDict, deque, namedtuple\n'), ((2916, 2963), 'collections.OrderedDict', 'OrderedDict', ([], {'Daily': '"""D"""', 'Weekly': '"""W"""', 'Monthly': '"""M"""'}), "(Daily='D', Weekly='W', Monthly='M')\n", (2927, 2963), False, 'from collections import OrderedDict, deque, namedtuple\n'), ((32679, 32689), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (32687, 32689), True, 'import streamlit as st\n'), ((35727, 35737), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (35735, 35737), True, 'import streamlit as st\n'), ((2771, 2804), 'os.path.exists', 'os.path.exists', (['ST_DOWNLOADS_PATH'], {}), '(ST_DOWNLOADS_PATH)\n', (2785, 2804), False, 'import os\n'), ((3842, 3910), 'pandas.read_csv', 'pd.read_csv', (['path'], {'dtype': "{'timestamp': str}", 'compression': 'compression'}), "(path, dtype={'timestamp': str}, compression=compression)\n", (3853, 3910), True, 'import pandas as pd\n'), ((3989, 4001), 'numpy.nansum', 'np.nansum', (['y'], {}), '(y)\n', (3998, 4001), True, 'import numpy as np\n'), ((4273, 4306), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['timestamp']"], {}), "(df['timestamp'])\n", (4289, 4306), True, 'import pandas as pd\n'), ((6404, 6415), 'time.time', 'time.time', ([], {}), '()\n', (6413, 6415), False, 'import time\n'), ((8183, 8220), 'st_aggrid.GridOptionsBuilder.from_dataframe', 'GridOptionsBuilder.from_dataframe', (['df'], {}), '(df)\n', (8216, 8220), False, 'from st_aggrid import AgGrid, GridOptionsBuilder, JsCode\n'), ((9664, 9704), 'urllib.parse.urlparse', 'urlparse', (['s3_path'], {'allow_fragments': '(False)'}), '(s3_path, allow_fragments=False)\n', (9672, 9704), False, 'from urllib.parse import urlparse\n'), ((9852, 9870), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (9864, 
9870), False, 'import boto3\n'), ((11120, 11163), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df_backtests['timestamp']"], {}), "(df_backtests['timestamp'])\n", (11136, 11163), True, 'import pandas as pd\n'), ((11555, 11566), 'time.time', 'time.time', ([], {}), '()\n', (11564, 11566), False, 'import time\n'), ((12613, 12631), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (12625, 12631), False, 'import boto3\n'), ((12641, 12655), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12653, 12655), True, 'import pandas as pd\n'), ((13452, 13527), 'os.path.join', 'os.path.join', (['ST_DOWNLOADS_PATH', 'f"""{state.uploaded_file.name}_fcast.csv.gz"""'], {}), "(ST_DOWNLOADS_PATH, f'{state.uploaded_file.name}_fcast.csv.gz')\n", (13464, 13527), False, 'import os\n'), ((13557, 13634), 'os.path.join', 'os.path.join', (['ST_DOWNLOADS_PATH', 'f"""{state.uploaded_file.name}_results.csv.gz"""'], {}), "(ST_DOWNLOADS_PATH, f'{state.uploaded_file.name}_results.csv.gz')\n", (13569, 13634), False, 'import os\n'), ((13652, 13714), 'streamlit.session_state.df_pred.to_csv', 'state.df_pred.to_csv', (['pred_fn'], {'index': '(False)', 'compression': '"""gzip"""'}), "(pred_fn, index=False, compression='gzip')\n", (13672, 13714), True, 'from streamlit import session_state as state\n'), ((13719, 13787), 'streamlit.session_state.df_results.to_csv', 'state.df_results.to_csv', (['results_fn'], {'index': '(False)', 'compression': '"""gzip"""'}), "(results_fn, index=False, compression='gzip')\n", (13742, 13787), True, 'from streamlit import session_state as state\n'), ((14627, 14658), 'streamlit.markdown', 'st.markdown', (['"""## Create Report"""'], {}), "('## Create Report')\n", (14638, 14658), True, 'import streamlit as st\n'), ((19314, 19332), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (19326, 19332), False, 'import boto3\n'), ((19338, 19367), 'streamlit.markdown', 'st.markdown', (['"""## Load Report"""'], {}), "('## Load Report')\n", (19349, 19367), 
True, 'import streamlit as st\n'), ((20909, 20933), 'streamlit.header', 'st.header', (['"""Data Health"""'], {}), "('Data Health')\n", (20918, 20933), True, 'import streamlit as st\n'), ((24633, 24667), 'streamlit.header', 'st.header', (['"""Statistical Forecasts"""'], {}), "('Statistical Forecasts')\n", (24642, 24667), True, 'import streamlit as st\n'), ((35011, 35069), 'numpy.round', 'np.round', (["(100 * df_grp_summary['demand'] / total_demand)", '(1)'], {}), "(100 * df_grp_summary['demand'] / total_demand, 1)\n", (35019, 35069), True, 'import numpy as np\n'), ((37834, 37892), 'numpy.round', 'np.round', (["(100 * df_grp_summary['demand'] / total_demand)", '(1)'], {}), "(100 * df_grp_summary['demand'] / total_demand, 1)\n", (37842, 37892), True, 'import numpy as np\n'), ((42184, 42195), 'time.time', 'time.time', ([], {}), '()\n', (42193, 42195), False, 'import time\n'), ((47225, 47315), 'awswrangler.s3.read_csv', 'wr.s3.read_csv', (['preds_s3_prefix'], {'dtype': "{'channel': str, 'family': str, 'item_id': str}"}), "(preds_s3_prefix, dtype={'channel': str, 'family': str,\n 'item_id': str})\n", (47239, 47315), True, 'import awswrangler as wr\n'), ((47609, 47626), 'pandas.concat', 'pd.concat', (['_preds'], {}), '(_preds)\n', (47618, 47626), True, 'import pandas as pd\n'), ((47686, 47725), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df_preds['timestamp']"], {}), "(df_preds['timestamp'])\n", (47702, 47725), True, 'import pandas as pd\n'), ((48289, 48381), 'awswrangler.s3.read_csv', 'wr.s3.read_csv', (['results_s3_prefix'], {'dtype': "{'channel': str, 'family': str, 'item_id': str}"}), "(results_s3_prefix, dtype={'channel': str, 'family': str,\n 'item_id': str})\n", (48303, 48381), True, 'import awswrangler as wr\n'), ((48529, 48564), 'awswrangler.s3.read_csv', 'wr.s3.read_csv', (['backtests_s3_prefix'], {}), '(backtests_s3_prefix)\n', (48543, 48564), True, 'import awswrangler as wr\n'), ((48714, 48771), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', 
(["df_backtests['backtestwindow_end_time']"], {}), "(df_backtests['backtestwindow_end_time'])\n", (48730, 48771), True, 'import pandas as pd\n'), ((48798, 48835), 'numpy.clip', 'np.clip', (["df_backtests['p10']", '(0)', 'None'], {}), "(df_backtests['p10'], 0, None)\n", (48805, 48835), True, 'import numpy as np\n'), ((59452, 59489), 'urllib.parse.urlparse', 'urlparse', (['path'], {'allow_fragments': '(False)'}), '(path, allow_fragments=False)\n', (59460, 59489), False, 'from urllib.parse import urlparse\n'), ((59568, 59588), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (59582, 59588), False, 'import boto3\n'), ((59851, 59890), 'streamlit.header', 'st.header', (['"""Machine Learning Forecasts"""'], {}), "('Machine Learning Forecasts')\n", (59860, 59890), True, 'import streamlit as st\n'), ((69266, 69277), 'time.time', 'time.time', ([], {}), '()\n', (69275, 69277), False, 'import time\n'), ((73687, 73706), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (73699, 73706), False, 'import boto3\n'), ((75363, 75416), 'os.path.join', 'os.path.join', (['s3_output_path', 'f"""{prefix}_status.json"""'], {}), "(s3_output_path, f'{prefix}_status.json')\n", (75375, 75416), False, 'import os\n'), ((75553, 75582), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (75565, 75582), False, 'import boto3\n'), ((76426, 76451), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (76449, 76451), False, 'import argparse\n'), ((77343, 77390), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Amazon Forecast Accelerator"""'], {}), "('Amazon Forecast Accelerator')\n", (77359, 77390), True, 'import streamlit as st\n'), ((77555, 77590), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""❌ Clear Report"""'], {}), "('❌ Clear Report')\n", (77572, 77590), True, 'import streamlit as st\n'), ((77823, 77842), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (77835, 77842), False, 
'import boto3\n'), ((2579, 2607), 'pathlib.Path', 'pathlib.Path', (['st.__path__[0]'], {}), '(st.__path__[0])\n', (2591, 2607), False, 'import pathlib\n'), ((3941, 3953), 'pandas.isnull', 'pd.isnull', (['y'], {}), '(y)\n', (3950, 3953), True, 'import pandas as pd\n'), ((4512, 4547), 'numpy.ceil', 'np.ceil', (['(groups.ngroups / chunksize)'], {}), '(groups.ngroups / chunksize)\n', (4519, 4547), True, 'import numpy as np\n'), ((4595, 4627), 'toolz.itertoolz.partition_all', 'partition_all', (['chunksize', 'groups'], {}), '(chunksize, groups)\n', (4608, 4627), False, 'from toolz.itertoolz import partition_all\n'), ((5779, 5795), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (5789, 5795), False, 'import time\n'), ((7374, 7406), 'toolz.itertoolz.partition_all', 'partition_all', (['chunksize', 'groups'], {}), '(chunksize, groups)\n', (7387, 7406), False, 'from toolz.itertoolz import partition_all\n'), ((8513, 8715), 'textwrap.dedent', 'textwrap.dedent', (['"""\n function(params) {\n return params.value\n .toString()\n .split( /(?=(?:\\\\d{3})+(?:\\\\.|$))/g ).join( "," )\n }\n """'], {}), '(\n """\n function(params) {\n return params.value\n .toString()\n .split( /(?=(?:\\\\d{3})+(?:\\\\.|$))/g ).join( "," )\n }\n """\n )\n', (8528, 8715), False, 'import textwrap\n'), ((10704, 10765), 'pandas.DataFrame', 'pd.DataFrame', (["{'timestamp': ts, 'demand': ys, 'backtest': yp}"], {}), "({'timestamp': ts, 'demand': ys, 'backtest': yp})\n", (10716, 10765), True, 'import pandas as pd\n'), ((11577, 11633), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Saving Report ..."""'], {}), "(':hourglass_flowing_sand: Saving Report ...')\n", (11587, 11633), True, 'import streamlit as st\n'), ((12001, 12041), 'urllib.parse.urlparse', 'urlparse', (['s3_path'], {'allow_fragments': '(False)'}), '(s3_path, allow_fragments=False)\n', (12009, 12041), False, 'from urllib.parse import urlparse\n'), ((12139, 12157), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), 
"('s3')\n", (12151, 12157), False, 'import boto3\n'), ((13848, 13866), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (13863, 13866), False, 'import textwrap\n'), ((13902, 13920), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (13917, 13920), False, 'import textwrap\n'), ((13952, 13970), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (13967, 13970), False, 'import textwrap\n'), ((14291, 14410), 'pandas.read_csv', 'pd.read_csv', (['path'], {'dtype': "{'timestamp': str, 'channel': str, 'family': str, 'item_id': str}", 'compression': 'compression'}), "(path, dtype={'timestamp': str, 'channel': str, 'family': str,\n 'item_id': str}, compression=compression)\n", (14302, 14410), True, 'import pandas as pd\n'), ((14669, 14732), 'streamlit.beta_expander', 'st.beta_expander', (['"""⬆️ Load + Validate Data"""'], {'expanded': 'expanded'}), "('⬆️ Load + Validate Data', expanded=expanded)\n", (14685, 14732), True, 'import streamlit as st\n'), ((14742, 15103), 'streamlit.write', 'st.write', (['f"""Step 1 – Create a new forecast report by selecting an uploaded\n file containing the demand history for your use-case. You must also specify\n the frequency of the demand (e.g. _Daily_, _Weekly_, or _Monthly_). Demand\n history files are uploaded using the [SageMaker Notebook interface]({state[\'landing_page_url\']})"""'], {}), '(\n f"""Step 1 – Create a new forecast report by selecting an uploaded\n file containing the demand history for your use-case. You must also specify\n the frequency of the demand (e.g. _Daily_, _Weekly_, or _Monthly_). 
Demand\n history files are uploaded using the [SageMaker Notebook interface]({state[\'landing_page_url\']})"""\n )\n', (14750, 15103), True, 'import streamlit as st\n'), ((15191, 15288), 'streamlit.button', 'st.button', (['"""Refresh Files"""'], {'help': '"""Refresh the _File_ selector with recently uploaded files."""'}), "('Refresh Files', help=\n 'Refresh the _File_ selector with recently uploaded files.')\n", (15200, 15288), True, 'import streamlit as st\n'), ((19378, 19430), 'streamlit.beta_expander', 'st.beta_expander', (['"""📂 Load Report"""'], {'expanded': 'expanded'}), "('📂 Load Report', expanded=expanded)\n", (19394, 19430), True, 'import streamlit as st\n'), ((19440, 19695), 'streamlit.write', 'st.write', (['f"""Optional – Alternatively, you can load a previously-generated\n report. Report files must have the `.pkl.gz` file extension and can be uploaded\n using the [SageMaker Notebook interface]({state[\'landing_page_url\']})."""'], {}), '(\n f"""Optional – Alternatively, you can load a previously-generated\n report. Report files must have the `.pkl.gz` file extension and can be uploaded\n using the [SageMaker Notebook interface]({state[\'landing_page_url\']})."""\n )\n', (19448, 19695), True, 'import streamlit as st\n'), ((19711, 19765), 'streamlit.radio', 'st.radio', (['"""Source"""', "['local']"], {'format_func': 'format_func'}), "('Source', ['local'], format_func=format_func)\n", (19719, 19765), True, 'import streamlit as st\n'), ((19783, 19806), 'streamlit.beta_columns', 'st.beta_columns', (['[3, 1]'], {}), '([3, 1])\n', (19798, 19806), True, 'import streamlit as st\n'), ((20944, 20993), 'streamlit.beta_expander', 'st.beta_expander', (['"""❤️ Data Health"""'], {'expanded': '(True)'}), "('❤️ Data Health', expanded=True)\n", (20960, 20993), True, 'import streamlit as st\n'), ((21003, 21284), 'streamlit.write', 'st.write', (['f"""Step 2 – Inspect the characteristics of the dataset\n for irregularities prior to generating any forecasts. 
For example,\n missing channels, families, item IDs; or unusually short/long\n timeseries lengths."""'], {}), '(\n f"""Step 2 – Inspect the characteristics of the dataset\n for irregularities prior to generating any forecasts. For example,\n missing channels, families, item IDs; or unusually short/long\n timeseries lengths."""\n )\n', (21011, 21284), True, 'import streamlit as st\n'), ((24678, 24721), 'streamlit.beta_expander', 'st.beta_expander', (['"""🚀 Launch"""'], {'expanded': '(True)'}), "('🚀 Launch', expanded=True)\n", (24694, 24721), True, 'import streamlit as st\n'), ((24731, 25191), 'streamlit.write', 'st.write', (['f"""Step 3 – Generate forecasts by training and evaluating 75+\n configurations of [statistical forecasting\n models](https://otexts.com/fpp3/) for each timeseries in\n parallel using AWS Lambda. A forecast at the desired _horizon length_ and\n _frequency_ is then generated using the each individual timeseries\' best model.\n This process typically completes at a rate of 500–1,000 timeseries/min.\n """'], {}), '(\n f"""Step 3 – Generate forecasts by training and evaluating 75+\n configurations of [statistical forecasting\n models](https://otexts.com/fpp3/) for each timeseries in\n parallel using AWS Lambda. 
A forecast at the desired _horizon length_ and\n _frequency_ is then generated using the each individual timeseries\' best model.\n This process typically completes at a rate of 500–1,000 timeseries/min.\n """\n )\n', (24739, 25191), True, 'import streamlit as st\n'), ((29792, 29845), 'streamlit.beta_expander', 'st.beta_expander', (['"""🎯 Forecast Summary"""'], {'expanded': '(True)'}), "('🎯 Forecast Summary', expanded=True)\n", (29808, 29845), True, 'import streamlit as st\n'), ((30622, 30683), 'pandas.DataFrame', 'pd.DataFrame', (["{'category': ['short', 'medium', 'continuous']}"], {}), "({'category': ['short', 'medium', 'continuous']})\n", (30634, 30683), True, 'import pandas as pd\n'), ((31090, 31108), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (31105, 31108), True, 'import streamlit as st\n'), ((32902, 32924), 'pandas.Timestamp', 'pd.Timestamp', (['dt_start'], {}), '(dt_start)\n', (32914, 32924), True, 'import pandas as pd\n'), ((32943, 32964), 'pandas.Timestamp', 'pd.Timestamp', (['dt_stop'], {}), '(dt_stop)\n', (32955, 32964), True, 'import pandas as pd\n'), ((35943, 35965), 'pandas.Timestamp', 'pd.Timestamp', (['dt_start'], {}), '(dt_start)\n', (35955, 35965), True, 'import pandas as pd\n'), ((35984, 36005), 'pandas.Timestamp', 'pd.Timestamp', (['dt_stop'], {}), '(dt_stop)\n', (35996, 36005), True, 'import pandas as pd\n'), ((38939, 38990), 'streamlit.beta_expander', 'st.beta_expander', (['"""🏆 Top Performers"""'], {'expanded': '(True)'}), "('🏆 Top Performers', expanded=True)\n", (38955, 38990), True, 'import streamlit as st\n'), ((39439, 39463), 'streamlit.write', 'st.write', (['"""#### Filters"""'], {}), "('#### Filters')\n", (39447, 39463), True, 'import streamlit as st\n'), ((39481, 39507), 'streamlit.beta_columns', 'st.beta_columns', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (39496, 39507), True, 'import streamlit as st\n'), ((39995, 40067), 'streamlit.slider', 'st.slider', (['"""Percentage of total demand"""'], {'step': '(5)', 
'value': '(80)', 'format': '"""%d%%"""'}), "('Percentage of total demand', step=5, value=80, format='%d%%')\n", (40004, 40067), True, 'import streamlit as st\n'), ((40194, 40205), 'time.time', 'time.time', ([], {}), '()\n', (40203, 40205), False, 'import time\n'), ((40412, 40442), 'streamlit.write', 'st.write', (['"""#### Group Summary"""'], {}), "('#### Group Summary')\n", (40420, 40442), True, 'import streamlit as st\n'), ((40609, 40632), 'streamlit.write', 'st.write', (['"""#### Groups"""'], {}), "('#### Groups')\n", (40617, 40632), True, 'import streamlit as st\n'), ((40852, 40871), 'streamlit.button', 'st.button', (['"""Export"""'], {}), "('Export')\n", (40861, 40871), True, 'import streamlit as st\n'), ((42770, 42822), 'streamlit.beta_expander', 'st.beta_expander', (['"""👁️ Visualization"""'], {'expanded': '(True)'}), "('👁️ Visualization', expanded=True)\n", (42786, 42822), True, 'import streamlit as st\n'), ((48874, 48911), 'numpy.clip', 'np.clip', (["df_backtests['p50']", '(0)', 'None'], {}), "(df_backtests['p50'], 0, None)\n", (48881, 48911), True, 'import numpy as np\n'), ((49759, 49813), 'streamlit.beta_expander', 'st.beta_expander', (['"""⬇️ Export Forecasts"""'], {'expanded': '(True)'}), "('⬇️ Export Forecasts', expanded=True)\n", (49775, 49813), True, 'import streamlit as st\n'), ((50351, 50423), 'streamlit.button', 'st.button', (['"""Export Statistical Forecasts"""'], {'key': '"""afa_export_forecast_btn"""'}), "('Export Statistical Forecasts', key='afa_export_forecast_btn')\n", (50360, 50423), True, 'import streamlit as st\n'), ((53661, 53738), 'streamlit.button', 'st.button', (['"""Export Machine Learning Forecasts"""'], {'key': '"""afc_export_forecast_btn"""'}), "('Export Machine Learning Forecasts', key='afc_export_forecast_btn')\n", (53670, 53738), True, 'import streamlit as st\n'), ((57412, 57469), 'streamlit.beta_expander', 'st.beta_expander', (['"""ℹ️ Export File Formats"""'], {'expanded': '(True)'}), "('ℹ️ Export File Formats', 
expanded=True)\n", (57428, 57469), True, 'import streamlit as st\n'), ((59901, 59944), 'streamlit.beta_expander', 'st.beta_expander', (['"""🚀 Launch"""'], {'expanded': '(True)'}), "('🚀 Launch', expanded=True)\n", (59917, 59944), True, 'import streamlit as st\n'), ((59954, 60101), 'streamlit.write', 'st.write', (['"""_Optional_ – Launch machine learning forecasts using the [Amazon Forecast](https://aws.amazon.com/forecast/) managed service."""'], {}), "(\n '_Optional_ – Launch machine learning forecasts using the [Amazon Forecast](https://aws.amazon.com/forecast/) managed service.'\n )\n", (59962, 60101), True, 'import streamlit as st\n'), ((61524, 61555), 'streamlit.button', 'st.button', (['"""🔄 Check Job Status"""'], {}), "('🔄 Check Job Status')\n", (61533, 61555), True, 'import streamlit as st\n'), ((63781, 63818), 'sspipe.px.assign', 'px.assign', ([], {'acc': '((1 - px[metric]) * 100)'}), '(acc=(1 - px[metric]) * 100)\n', (63790, 63818), False, 'from sspipe import p, px\n'), ((64243, 64296), 'streamlit.beta_expander', 'st.beta_expander', (['"""🎯 Forecast Summary"""'], {'expanded': '(True)'}), "('🎯 Forecast Summary', expanded=True)\n", (64259, 64296), True, 'import streamlit as st\n'), ((64415, 64438), 'streamlit.beta_columns', 'st.beta_columns', (['[3, 1]'], {}), '([3, 1])\n', (64430, 64438), True, 'import streamlit as st\n'), ((65775, 65826), 'streamlit.beta_expander', 'st.beta_expander', (['"""🏆 Top Performers"""'], {'expanded': '(True)'}), "('🏆 Top Performers', expanded=True)\n", (65791, 65826), True, 'import streamlit as st\n'), ((66070, 66094), 'streamlit.write', 'st.write', (['"""#### Filters"""'], {}), "('#### Filters')\n", (66078, 66094), True, 'import streamlit as st\n'), ((66338, 66364), 'streamlit.beta_columns', 'st.beta_columns', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (66353, 66364), True, 'import streamlit as st\n'), ((66907, 67005), 'streamlit.slider', 'st.slider', (['"""Percentage of total demand"""'], {'step': '(5)', 'value': '(80)', 'format': 
'"""%d%%"""', 'key': '"""ml_perc_demand"""'}), "('Percentage of total demand', step=5, value=80, format='%d%%',\n key='ml_perc_demand')\n", (66916, 67005), True, 'import streamlit as st\n'), ((67128, 67139), 'time.time', 'time.time', ([], {}), '()\n', (67137, 67139), False, 'import time\n'), ((67382, 67412), 'streamlit.write', 'st.write', (['"""#### Group Summary"""'], {}), "('#### Group Summary')\n", (67390, 67412), True, 'import streamlit as st\n'), ((67579, 67602), 'streamlit.write', 'st.write', (['"""#### Groups"""'], {}), "('#### Groups')\n", (67587, 67602), True, 'import streamlit as st\n'), ((67822, 67871), 'streamlit.button', 'st.button', (['"""Export"""'], {'key': '"""ml_top_perf_export_btn"""'}), "('Export', key='ml_top_perf_export_btn')\n", (67831, 67871), True, 'import streamlit as st\n'), ((69861, 69913), 'streamlit.beta_expander', 'st.beta_expander', (['"""👁️ Visualization"""'], {'expanded': '(True)'}), "('👁️ Visualization', expanded=True)\n", (69877, 69913), True, 'import streamlit as st\n'), ((74101, 74148), 'streamlit.spinner', 'st.spinner', (['"""Launching Amazon Forecast job ..."""'], {}), "('Launching Amazon Forecast job ...')\n", (74111, 74148), True, 'import streamlit as st\n'), ((74766, 74814), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (['df_afc', 's3_input_path'], {'index': '(False)'}), '(df_afc, s3_input_path, index=False)\n', (74778, 74814), True, 'import awswrangler as wr\n'), ((74893, 74922), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (74905, 74922), False, 'import boto3\n'), ((77097, 77131), 'os.path.expanduser', 'os.path.expanduser', (['args.local_dir'], {}), '(args.local_dir)\n', (77115, 77131), False, 'import os\n'), ((77171, 77220), 're.sub', 're.sub', (['"""^(https*://)"""', '""""""', 'args.landing_page_url'], {}), "('^(https*://)', '', args.landing_page_url)\n", (77177, 77220), False, 'import re\n'), ((77415, 77539), 'textwrap.dedent', 'textwrap.dedent', (['"""\n - [source code @ 
github](https://github.com/aws-samples/simple-forecast-solution)\n """'], {}), '(\n """\n - [source code @ github](https://github.com/aws-samples/simple-forecast-solution)\n """\n )\n', (77430, 77539), False, 'import textwrap\n'), ((77625, 77644), 'streamlit.session_state.pop', 'state.pop', (['"""report"""'], {}), "('report')\n", (77634, 77644), True, 'from streamlit import session_state as state\n'), ((77653, 77665), 'gc.collect', 'gc.collect', ([], {}), '()\n', (77663, 77665), False, 'import gc\n'), ((79347, 79371), 'streamlit.markdown', 'st.markdown', (['"""## Export"""'], {}), "('## Export')\n", (79358, 79371), True, 'import streamlit as st\n'), ((4678, 4697), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (4686, 4697), False, 'from joblib import Parallel, delayed\n'), ((4793, 4815), 'pandas.concat', 'pd.concat', (['all_results'], {}), '(all_results)\n', (4802, 4815), True, 'import pandas as pd\n'), ((5187, 5239), 'lambdamap.LambdaFunction', 'LambdaFunction', (['func', 'self._client', 'self._lambda_arn'], {}), '(func, self._client, self._lambda_arn)\n', (5201, 5239), False, 'from lambdamap import LambdaExecutor, LambdaFunction\n'), ((7483, 7502), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (7491, 7502), False, 'from joblib import Parallel, delayed\n'), ((7599, 7621), 'pandas.concat', 'pd.concat', (['all_results'], {}), '(all_results)\n', (7608, 7621), True, 'import pandas as pd\n'), ((10221, 10237), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (10234, 10237), False, 'import logging\n'), ((11426, 11535), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n Warning: unable to save report, no input data was loaded.\n """'], {}), '(\n f"""\n Warning: unable to save report, no input data was loaded.\n """\n )\n', (11441, 11535), False, 'import textwrap\n'), ((11820, 11847), 'gzip.open', 'gzip.open', (['local_path', '"""wb"""'], {}), "(local_path, 'wb')\n", (11829, 11847), False, 'import gzip\n'), 
((15298, 15327), 'streamlit.form', 'st.form', (['"""create_report_form"""'], {}), "('create_report_form')\n", (15305, 15327), True, 'import streamlit as st\n'), ((15355, 15503), 'streamlit.text_input', 'st.text_input', (['"""Report Name (optional)"""'], {'help': '"""You may optionally give this report a name, otherwise one will be automatically generated."""'}), "('Report Name (optional)', help=\n 'You may optionally give this report a name, otherwise one will be automatically generated.'\n )\n", (15368, 15503), True, 'import streamlit as st\n'), ((15531, 15554), 'streamlit.beta_columns', 'st.beta_columns', (['[3, 1]'], {}), '([3, 1])\n', (15546, 15554), True, 'import streamlit as st\n'), ((16140, 16180), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Load & Validate"""'], {}), "('Load & Validate')\n", (16161, 16180), True, 'import streamlit as st\n'), ((16227, 16238), 'time.time', 'time.time', ([], {}), '()\n', (16236, 16238), False, 'import time\n'), ((20155, 20195), 'streamlit.button', 'st.button', (['"""Load"""'], {'key': '"""load_report_btn"""'}), "('Load', key='load_report_btn')\n", (20164, 20195), True, 'import streamlit as st\n'), ((20232, 20246), 'streamlit.write', 'st.write', (['"""##"""'], {}), "('##')\n", (20240, 20246), True, 'import streamlit as st\n'), ((20259, 20317), 'streamlit.button', 'st.button', (['"""Refresh Files"""'], {'key': '"""refresh_report_files_btn"""'}), "('Refresh Files', key='refresh_report_files_btn')\n", (20268, 20317), True, 'import streamlit as st\n'), ((20367, 20378), 'time.time', 'time.time', ([], {}), '()\n', (20376, 20378), False, 'import time\n'), ((21289, 21335), 'streamlit.spinner', 'st.spinner', (['"""Performing data health check ..."""'], {}), "('Performing data health check ...')\n", (21299, 21335), True, 'import streamlit as st\n'), ((21357, 21368), 'time.time', 'time.time', ([], {}), '()\n', (21366, 21368), False, 'import time\n'), ((22882, 22901), 'streamlit.beta_container', 'st.beta_container', ([], 
{}), '()\n', (22899, 22901), True, 'import streamlit as st\n'), ((22923, 22941), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (22938, 22941), True, 'import streamlit as st\n'), ((25196, 25215), 'streamlit.form', 'st.form', (['"""afa_form"""'], {}), "('afa_form')\n", (25203, 25215), True, 'import streamlit as st\n'), ((25869, 25880), 'time.time', 'time.time', ([], {}), '()\n', (25878, 25880), False, 'import time\n'), ((31145, 31175), 'streamlit.markdown', 'st.markdown', (['"""#### Parameters"""'], {}), "('#### Parameters')\n", (31156, 31175), True, 'import streamlit as st\n'), ((31188, 31263), 'streamlit.text', 'st.text', (['f"""Horiz. Length:\t{horiz}\nFrequency:\t{FREQ_MAP_LONG[freq_out]}"""'], {}), '(f"""Horiz. Length:\t{horiz}\nFrequency:\t{FREQ_MAP_LONG[freq_out]}""")\n', (31195, 31263), True, 'import streamlit as st\n'), ((31300, 31334), 'streamlit.markdown', 'st.markdown', (['"""#### Classification"""'], {}), "('#### Classification')\n", (31311, 31334), True, 'import streamlit as st\n'), ((31347, 31482), 'streamlit.text', 'st.text', (['f"""Short:\t\t{df_cln.iloc[0][\'frac\']} %\nMedium:\t\t{df_cln.iloc[1][\'frac\']} %\nContinuous:\t{df_cln.iloc[2][\'frac\']} %"""'], {}), '(\n f"""Short:\t\t{df_cln.iloc[0][\'frac\']} %\nMedium:\t\t{df_cln.iloc[1][\'frac\']} %\nContinuous:\t{df_cln.iloc[2][\'frac\']} %"""\n )\n', (31354, 31482), True, 'import streamlit as st\n'), ((31560, 31591), 'streamlit.markdown', 'st.markdown', (['"""#### Best Models"""'], {}), "('#### Best Models')\n", (31571, 31591), True, 'import streamlit as st\n'), ((32218, 32238), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (32233, 32238), True, 'import streamlit as st\n'), ((32386, 32422), 'streamlit.markdown', 'st.markdown', (['"""#### Overall Accuracy"""'], {}), "('#### Overall Accuracy')\n", (32397, 32422), True, 'import streamlit as st\n'), ((33251, 33269), 'afa.calc_smape', 'calc_smape', (['ys', 'yp'], {}), '(ys, yp)\n', (33261, 33269), False, 
'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((33509, 33531), 'pandas.Timestamp', 'pd.Timestamp', (['dt_start'], {}), '(dt_start)\n', (33521, 33531), True, 'import pandas as pd\n'), ((33567, 33588), 'pandas.Timestamp', 'pd.Timestamp', (['dt_stop'], {}), '(dt_stop)\n', (33579, 33588), True, 'import pandas as pd\n'), ((35091, 35119), 'pandas.DataFrame', 'pd.DataFrame', (['df_grp_summary'], {}), '(df_grp_summary)\n', (35103, 35119), True, 'import pandas as pd\n'), ((35506, 35532), 'numpy.arange', 'np.arange', (['df_grp.shape[0]'], {}), '(df_grp.shape[0])\n', (35515, 35532), True, 'import numpy as np\n'), ((36213, 36231), 'afa.calc_smape', 'calc_smape', (['ys', 'yp'], {}), '(ys, yp)\n', (36223, 36231), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((36436, 36458), 'pandas.Timestamp', 'pd.Timestamp', (['dt_start'], {}), '(dt_start)\n', (36448, 36458), True, 'import pandas as pd\n'), ((36494, 36515), 'pandas.Timestamp', 'pd.Timestamp', (['dt_stop'], {}), '(dt_stop)\n', (36506, 36515), True, 'import pandas as pd\n'), ((37914, 37942), 'pandas.DataFrame', 'pd.DataFrame', (['df_grp_summary'], {}), '(df_grp_summary)\n', (37926, 37942), True, 'import pandas as pd\n'), ((38329, 38355), 'numpy.arange', 'np.arange', (['df_grp.shape[0]'], {}), '(df_grp.shape[0])\n', (38338, 38355), True, 'import numpy as np\n'), ((39622, 39720), 'streamlit.multiselect', 'st.multiselect', (['"""Group By"""', "['channel', 'family', 'item_id']", "['channel', 'family', 'item_id']"], {}), "('Group By', ['channel', 'family', 'item_id'], ['channel',\n 'family', 'item_id'])\n", (39636, 39720), True, 'import streamlit as st\n'), ((39780, 39852), 'streamlit.date_input', 'st.date_input', 
(['"""Start"""'], {'value': 'dt_min', 'min_value': 'dt_min', 'max_value': 'dt_max'}), "('Start', value=dt_min, min_value=dt_min, max_value=dt_max)\n", (39793, 39852), True, 'import streamlit as st\n'), ((39899, 39970), 'streamlit.date_input', 'st.date_input', (['"""Stop"""'], {'value': 'dt_max', 'min_value': 'dt_min', 'max_value': 'dt_max'}), "('Stop', value=dt_max, min_value=dt_min, max_value=dt_max)\n", (39912, 39970), True, 'import streamlit as st\n'), ((40220, 40263), 'streamlit.spinner', 'st.spinner', (['"""Processing top performers ..."""'], {}), "('Processing top performers ...')\n", (40230, 40263), True, 'import streamlit as st\n'), ((40457, 40496), 'streamlit.spinner', 'st.spinner', (['"""Loading **Summary** table"""'], {}), "('Loading **Summary** table')\n", (40467, 40496), True, 'import streamlit as st\n'), ((40646, 40688), 'streamlit.spinner', 'st.spinner', (['"""Loading **Groups** table ..."""'], {}), "('Loading **Groups** table ...')\n", (40656, 40688), True, 'import streamlit as st\n'), ((42968, 42987), 'streamlit.form', 'st.form', (['"""viz_form"""'], {}), "('viz_form')\n", (42975, 42987), True, 'import streamlit as st\n'), ((43001, 43030), 'streamlit.markdown', 'st.markdown', (['"""#### Filter By"""'], {}), "('#### Filter By')\n", (43012, 43030), True, 'import streamlit as st\n'), ((43051, 43069), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (43066, 43069), True, 'import streamlit as st\n'), ((43455, 43485), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Apply"""'], {}), "('Apply')\n", (43476, 43485), True, 'import streamlit as st\n'), ((44119, 44130), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (44128, 44130), True, 'import plotly.graph_objects as go\n'), ((46381, 46427), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (46396, 46427), True, 'import streamlit as st\n'), ((46453, 46464), 'time.time', 'time.time', 
([], {}), '()\n', (46462, 46464), False, 'import time\n'), ((50478, 50489), 'time.time', 'time.time', ([], {}), '()\n', (50487, 50489), False, 'import time\n'), ((51457, 51498), 'streamlit.markdown', 'st.markdown', (['"""#### Statistical Forecasts"""'], {}), "('#### Statistical Forecasts')\n", (51468, 51498), True, 'import streamlit as st\n'), ((51511, 51530), 'streamlit.markdown', 'st.markdown', (['"""####"""'], {}), "('####')\n", (51522, 51530), True, 'import streamlit as st\n'), ((57488, 59335), 'textwrap.dedent', 'dedent', (['"""\n #### Common columns\n\n - `timestamp` – String, date of the demand, in the format `YYYY-mm-dd` (e.g. "2020-12-25")\n - `channel` – String, the originating store or platform of the demand (e.g. Website, Store-22)\n - `family` – String, the category of the item (e.g. Shirts)\n - `item_id` – String, the unique item identifier/SKU code (e.g. SKU29292)\n - `demand` – Numeric, the demand amount of the item, which must be >= 0 (e.g. 413)\n - `type` – String\n - "actual" when `demand` is the historic demand\n - "fcast" when `demand` is the forecasted demand\n\n #### Statistical Forecasts\n\n - Forecasts file columns\n - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`\n\n - Backtests file columns\n\n - `channel`, `family`, `item_id`\n - `bt_actuals` – list of sliding window actuals, each window is the \n length of the forecast horizon.\n - `bt_forecast` – list of sliding window forecasts, each window\n is the length of the forecast horizon and has a 1:1 correspondence\n with the windows in `bt_actuals`.\n - The `timestamp` column is omitted to reduce the file size,\n however, the first and last sliding windows correspond to the\n first and last timestamps of the historic demand, respectively.\n\n #### Machine Learning Forecasts\n\n - Forecasts file columns\n - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`\n\n - Backtests file columns\n - `timestamp`, `channel`, `family`, `item_id`\n - `bt_actuals` – the actual 
demand for the backtest period\n - `bt_forecast` – the forecasted demand for the backtest period\n\n ####\n """'], {}), '(\n """\n #### Common columns\n\n - `timestamp` – String, date of the demand, in the format `YYYY-mm-dd` (e.g. "2020-12-25")\n - `channel` – String, the originating store or platform of the demand (e.g. Website, Store-22)\n - `family` – String, the category of the item (e.g. Shirts)\n - `item_id` – String, the unique item identifier/SKU code (e.g. SKU29292)\n - `demand` – Numeric, the demand amount of the item, which must be >= 0 (e.g. 413)\n - `type` – String\n - "actual" when `demand` is the historic demand\n - "fcast" when `demand` is the forecasted demand\n\n #### Statistical Forecasts\n\n - Forecasts file columns\n - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`\n\n - Backtests file columns\n\n - `channel`, `family`, `item_id`\n - `bt_actuals` – list of sliding window actuals, each window is the \n length of the forecast horizon.\n - `bt_forecast` – list of sliding window forecasts, each window\n is the length of the forecast horizon and has a 1:1 correspondence\n with the windows in `bt_actuals`.\n - The `timestamp` column is omitted to reduce the file size,\n however, the first and last sliding windows correspond to the\n first and last timestamps of the historic demand, respectively.\n\n #### Machine Learning Forecasts\n\n - Forecasts file columns\n - `timestamp`, `channel`, `family`, `item_id`, `demand`, `type`\n\n - Backtests file columns\n - `timestamp`, `channel`, `family`, `item_id`\n - `bt_actuals` – the actual demand for the backtest period\n - `bt_forecast` – the forecasted demand for the backtest period\n\n ####\n """\n )\n', (57494, 59335), False, 'from textwrap import dedent\n'), ((60105, 60123), 'streamlit.form', 'st.form', (['"""ml_form"""'], {}), "('ml_form')\n", (60112, 60123), True, 'import streamlit as st\n'), ((60145, 60163), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (60160, 
60163), True, 'import streamlit as st\n'), ((60682, 60713), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Launch"""'], {}), "('Launch')\n", (60703, 60713), True, 'import streamlit as st\n'), ((62792, 62819), 'streamlit.beta_columns', 'st.beta_columns', (['[2, 0.485]'], {}), '([2, 0.485])\n', (62807, 62819), True, 'import streamlit as st\n'), ((65192, 65228), 'streamlit.markdown', 'st.markdown', (['"""#### Overall Accuracy"""'], {}), "('#### Overall Accuracy')\n", (65203, 65228), True, 'import streamlit as st\n'), ((65241, 65350), 'streamlit.markdown', 'st.markdown', (['f"""<div style=\'font-size:36pt;font-weight:bold\'>{ml_acc:.0f}%</div>"""'], {'unsafe_allow_html': '(True)'}), '(f"<div style=\'font-size:36pt;font-weight:bold\'>{ml_acc:.0f}%</div>"\n , unsafe_allow_html=True)\n', (65252, 65350), True, 'import streamlit as st\n'), ((66414, 66539), 'streamlit.multiselect', 'st.multiselect', (['"""Group By"""', "['channel', 'family', 'item_id']", "['channel', 'family', 'item_id']"], {'key': '"""ml_top_perf_groupby"""'}), "('Group By', ['channel', 'family', 'item_id'], ['channel',\n 'family', 'item_id'], key='ml_top_perf_groupby')\n", (66428, 66539), True, 'import streamlit as st\n'), ((66615, 66710), 'streamlit.date_input', 'st.date_input', (['"""Start"""'], {'value': 'dt_min', 'min_value': 'dt_min', 'max_value': 'dt_max', 'key': '"""ml_dt_start"""'}), "('Start', value=dt_min, min_value=dt_min, max_value=dt_max,\n key='ml_dt_start')\n", (66628, 66710), True, 'import streamlit as st\n'), ((66773, 66867), 'streamlit.date_input', 'st.date_input', (['"""Stop"""'], {'value': 'dt_max', 'min_value': 'dt_min', 'max_value': 'dt_max', 'key': '"""ml_dt_stop"""'}), "('Stop', value=dt_max, min_value=dt_min, max_value=dt_max, key\n ='ml_dt_stop')\n", (66786, 66867), True, 'import streamlit as st\n'), ((67154, 67197), 'streamlit.spinner', 'st.spinner', (['"""Processing top performers ..."""'], {}), "('Processing top performers ...')\n", (67164, 67197), True, 'import 
streamlit as st\n'), ((67427, 67466), 'streamlit.spinner', 'st.spinner', (['"""Loading **Summary** table"""'], {}), "('Loading **Summary** table')\n", (67437, 67466), True, 'import streamlit as st\n'), ((67616, 67658), 'streamlit.spinner', 'st.spinner', (['"""Loading **Groups** table ..."""'], {}), "('Loading **Groups** table ...')\n", (67626, 67658), True, 'import streamlit as st\n'), ((69928, 69950), 'streamlit.form', 'st.form', (['"""ml_viz_form"""'], {}), "('ml_viz_form')\n", (69935, 69950), True, 'import streamlit as st\n'), ((69964, 69993), 'streamlit.markdown', 'st.markdown', (['"""#### Filter By"""'], {}), "('#### Filter By')\n", (69975, 69993), True, 'import streamlit as st\n'), ((70014, 70032), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (70029, 70032), True, 'import streamlit as st\n'), ((70494, 70524), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Apply"""'], {}), "('Apply')\n", (70515, 70524), True, 'import streamlit as st\n'), ((71412, 71423), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (71421, 71423), True, 'import plotly.graph_objects as go\n'), ((72544, 72590), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (72559, 72590), True, 'import streamlit as st\n'), ((72616, 72627), 'time.time', 'time.time', ([], {}), '()\n', (72625, 72627), False, 'import time\n'), ((73543, 73566), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (73564, 73566), False, 'import datetime\n'), ((74426, 74469), 'sspipe.px.sort_values', 'px.sort_values', ([], {'by': "['item_id', 'timestamp']"}), "(by=['item_id', 'timestamp'])\n", (74440, 74469), False, 'from sspipe import p, px\n'), ((74640, 74660), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (74656, 74660), False, 'import os\n'), ((76585, 76619), 'os.path.expanduser', 'os.path.expanduser', (['"""~/SageMaker/"""'], {}), "('~/SageMaker/')\n", (76603, 
76619), False, 'import os\n'), ((79592, 79640), 'streamlit.beta_expander', 'st.beta_expander', (['"""💾 Save Report"""'], {'expanded': '(True)'}), "('💾 Save Report', expanded=True)\n", (79608, 79640), True, 'import streamlit as st\n'), ((79970, 80114), 'streamlit.text_input', 'st.text_input', (['"""File name"""'], {'value': 'default_name', 'help': '"""Please note that the report file name needs to have a `.pkl.gz` file extension."""'}), "('File name', value=default_name, help=\n 'Please note that the report file name needs to have a `.pkl.gz` file extension.'\n )\n", (79983, 80114), True, 'import streamlit as st\n'), ((80144, 80161), 'streamlit.button', 'st.button', (['"""Save"""'], {}), "('Save')\n", (80153, 80161), True, 'import streamlit as st\n'), ((11653, 11676), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11674, 11676), False, 'import datetime\n'), ((12317, 12427), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n The report can be downloaded [here]({signed_url}).\n """'], {}), '(\n f"""\n The report can be downloaded [here]({signed_url}).\n """\n )\n', (12332, 12427), False, 'import textwrap\n'), ((12464, 12480), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (12477, 12480), False, 'import logging\n'), ((15113, 15136), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15134, 15136), False, 'import datetime\n'), ((16688, 16697), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (16695, 16697), True, 'import streamlit as st\n'), ((20397, 20454), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Loading Report ..."""'], {}), "(':hourglass_flowing_sand: Loading Report ...')\n", (20407, 20454), True, 'import streamlit as st\n'), ((21465, 21518), 'afa.make_health_summary', 'make_health_summary', (['df', "state.report['data']['freq']"], {}), "(df, state.report['data']['freq'])\n", (21484, 21518), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, 
make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((22986, 23013), 'streamlit.markdown', 'st.markdown', (['"""#### Summary"""'], {}), "('#### Summary')\n", (22997, 23013), True, 'import streamlit as st\n'), ((23311, 23339), 'streamlit.markdown', 'st.markdown', (['"""#### Timespan"""'], {}), "('#### Timespan')\n", (23322, 23339), True, 'import streamlit as st\n'), ((23356, 23501), 'streamlit.text', 'st.text', (['f"""Frequency:\t{FREQ_MAP_LONG[freq]}\nDuration:\t{duration.n} {duration_str}\nFirst date:\t{first_date}\nLast date:\t{last_date}\n"""'], {}), '(\n f"""Frequency:\t{FREQ_MAP_LONG[freq]}\nDuration:\t{duration.n} {duration_str}\nFirst date:\t{first_date}\nLast date:\t{last_date}\n"""\n )\n', (23363, 23501), True, 'import streamlit as st\n'), ((23699, 23737), 'streamlit.markdown', 'st.markdown', (['"""#### Timeseries Lengths"""'], {}), "('#### Timeseries Lengths')\n", (23710, 23737), True, 'import streamlit as st\n'), ((23761, 23817), 'plotly.express.box', 'pex.box', (['df_health'], {'x': '"""demand_nonnull_count"""', 'height': '(160)'}), "(df_health, x='demand_nonnull_count', height=160)\n", (23768, 23817), True, 'import plotly.express as pex\n'), ((24026, 24072), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (24041, 24072), True, 'import streamlit as st\n'), ((25234, 25253), 'streamlit.beta_container', 'st.beta_container', ([], {}), '()\n', (25251, 25253), True, 'import streamlit as st\n'), ((25279, 25297), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (25294, 25297), True, 'import streamlit as st\n'), ((25793, 25824), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Launch"""'], {}), "('Launch')\n", (25814, 25824), True, 'import streamlit as st\n'), ((26292, 26392), 'afa.run_pipeline', 'run_pipeline', (['df', 'freq_in', 'freq_out'], {'metric': 'METRIC', 'cv_stride': '(2)', 
'backend': '"""futures"""', 'horiz': 'horiz'}), "(df, freq_in, freq_out, metric=METRIC, cv_stride=2, backend=\n 'futures', horiz=horiz)\n", (26304, 26392), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((27568, 27607), 'streamlit.spinner', 'st.spinner', (['"""⏳ Calculating results ..."""'], {}), "('⏳ Calculating results ...')\n", (27578, 27607), True, 'import streamlit as st\n'), ((27775, 27810), 'afa.process_forecasts', 'process_forecasts', (['wait_for', 'METRIC'], {}), '(wait_for, METRIC)\n', (27792, 27810), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((27901, 27940), 'afa.make_demand_classification', 'make_demand_classification', (['df', 'freq_in'], {}), '(df, freq_in)\n', (27927, 27940), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((28393, 28404), 'time.time', 'time.time', ([], {}), '()\n', (28402, 28404), False, 'import time\n'), ((32263, 32289), 'numpy.nanmean', 'np.nanmean', (['df_acc[METRIC]'], {}), '(df_acc[METRIC])\n', (32273, 32289), True, 'import numpy as np\n'), ((33321, 33338), 'afa.calc_wape', 'calc_wape', (['ys', 'yp'], {}), '(ys, yp)\n', (33330, 33338), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((36283, 36300), 'afa.calc_wape', 'calc_wape', (['ys', 'yp'], {}), '(ys, yp)\n', (36292, 36300), False, 'from afa import load_data, resample, run_pipeline, run_cv_select, calc_smape, calc_wape, 
make_demand_classification, process_forecasts, make_perf_summary, make_health_summary, GROUP_COLS, EXP_COLS\n'), ((40890, 40961), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Exporting **Top Performers** ..."""'], {}), "(':hourglass_flowing_sand: Exporting **Top Performers** ...')\n", (40900, 40961), True, 'import streamlit as st\n'), ((40987, 40998), 'time.time', 'time.time', ([], {}), '()\n', (40996, 40998), False, 'import time\n'), ((41146, 41195), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (41162, 41195), False, 'import os\n'), ((41392, 41454), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (['df_grp', 's3_path'], {'compression': '"""gzip"""', 'index': '(False)'}), "(df_grp, s3_path, compression='gzip', index=False)\n", (41404, 41454), True, 'import awswrangler as wr\n'), ((43131, 43189), 'streamlit.selectbox', 'st.selectbox', (['"""Channel"""', 'channel_vals'], {'index': 'channel_index'}), "('Channel', channel_vals, index=channel_index)\n", (43143, 43189), True, 'import streamlit as st\n'), ((43249, 43304), 'streamlit.selectbox', 'st.selectbox', (['"""Family"""', 'family_vals'], {'index': 'family_index'}), "('Family', family_vals, index=family_index)\n", (43261, 43304), True, 'import streamlit as st\n'), ((43365, 43423), 'streamlit.selectbox', 'st.selectbox', (['"""Item ID"""', 'item_id_vals'], {'index': 'item_id_index'}), "('Item ID', item_id_vals, index=item_id_index)\n", (43377, 43423), True, 'import streamlit as st\n'), ((44157, 44249), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'y_ts', 'y': 'y', 'mode': '"""lines"""', 'name': '"""actual"""', 'fill': '"""tozeroy"""', 'line': "{'width': 3}"}), "(x=y_ts, y=y, mode='lines', name='actual', fill='tozeroy', line={\n 'width': 3})\n", (44167, 44249), True, 'import plotly.graph_objects as go\n'), ((44318, 44413), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'yp_ts', 'y': 'yp', 'mode': 
'"""lines"""', 'name': '"""forecast"""', 'fill': '"""tozeroy"""', 'line': "{'width': 3}"}), "(x=yp_ts, y=yp, mode='lines', name='forecast', fill='tozeroy',\n line={'width': 3})\n", (44328, 44413), True, 'import plotly.graph_objects as go\n'), ((44863, 44992), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_backtest.index', 'y': 'df_backtest.yp', 'mode': '"""lines"""', 'name': '"""backtest (mean)"""', 'line_dash': '"""dot"""', 'line_color': '"""black"""'}), "(x=df_backtest.index, y=df_backtest.yp, mode='lines', name=\n 'backtest (mean)', line_dash='dot', line_color='black')\n", (44873, 44992), True, 'import plotly.graph_objects as go\n'), ((50587, 50649), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Exporting Forecasts ..."""'], {}), "(':hourglass_flowing_sand: Exporting Forecasts ...')\n", (50597, 50649), True, 'import streamlit as st\n'), ((51751, 51813), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Exporting Backtests ..."""'], {}), "(':hourglass_flowing_sand: Exporting Backtests ...')\n", (51761, 51813), True, 'import streamlit as st\n'), ((53878, 53934), 'streamlit.info', 'st.info', (['"""Machine learning forecasts are not yet ready."""'], {}), "('Machine learning forecasts are not yet ready.')\n", (53885, 53934), True, 'import streamlit as st\n'), ((54100, 54146), 'streamlit.markdown', 'st.markdown', (['"""#### Machine Learning Forecasts"""'], {}), "('#### Machine Learning Forecasts')\n", (54111, 54146), True, 'import streamlit as st\n'), ((54163, 54182), 'streamlit.markdown', 'st.markdown', (['"""####"""'], {}), "('####')\n", (54174, 54182), True, 'import streamlit as st\n'), ((54713, 54724), 'time.time', 'time.time', ([], {}), '()\n', (54722, 54724), False, 'import time\n'), ((56094, 56105), 'time.time', 'time.time', ([], {}), '()\n', (56103, 56105), False, 'import time\n'), ((60216, 60293), 'streamlit.number_input', 'st.number_input', (['"""Horizon Length"""'], {'key': '"""ml_horiz_input"""', 'value': 
'(1)', 'min_value': '(1)'}), "('Horizon Length', key='ml_horiz_input', value=1, min_value=1)\n", (60231, 60293), True, 'import streamlit as st\n'), ((60590, 60651), 'streamlit.selectbox', 'st.selectbox', (['"""Algorithm"""', "['AutoML']", '(0)'], {'key': '"""ml_algo_input"""'}), "('Algorithm', ['AutoML'], 0, key='ml_algo_input')\n", (60602, 60651), True, 'import streamlit as st\n'), ((60796, 60844), 'streamlit.spinner', 'st.spinner', (['"""🚀 Launching ML forecasting job ..."""'], {}), "('🚀 Launching ML forecasting job ...')\n", (60806, 60844), True, 'import streamlit as st\n'), ((61637, 61676), 'streamlit.spinner', 'st.spinner', (['"""⏳ Checking job status ..."""'], {}), "('⏳ Checking job status ...')\n", (61647, 61676), True, 'import streamlit as st\n'), ((62880, 62903), 'streamlit.button', 'st.button', (['"""🛑 Stop Job"""'], {}), "('🛑 Stop Job')\n", (62889, 62903), True, 'import streamlit as st\n'), ((62965, 62994), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (62977, 62994), False, 'import boto3\n'), ((63088, 63102), 'streamlit.write', 'st.write', (['resp'], {}), '(resp)\n', (63096, 63102), True, 'import streamlit as st\n'), ((63688, 63720), 'sspipe.px.rename', 'px.rename', (['{(0): metric}'], {'axis': '(1)'}), '({(0): metric}, axis=1)\n', (63697, 63720), False, 'from sspipe import p, px\n'), ((64483, 65164), 'textwrap.dedent', 'dedent', (['f"""\n The forecast error is calculated as the [symmetric\n mean absolute percentage error\n (SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)\n via sliding window backtesting. 
Forecast _accuracy_ is calculated as\n `100-SMAPE` and is averaged across all timeseries to give the _overall accuracy_.\n\n Note: Due to the limitations of the ML forecasting approach,\n backtests were only generated over [the five most recent windows\n before the\n horizon](https://docs.aws.amazon.com/forecast/latest/dg/metrics.html#backtesting).\n """'], {}), '(\n f"""\n The forecast error is calculated as the [symmetric\n mean absolute percentage error\n (SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)\n via sliding window backtesting. Forecast _accuracy_ is calculated as\n `100-SMAPE` and is averaged across all timeseries to give the _overall accuracy_.\n\n Note: Due to the limitations of the ML forecasting approach,\n backtests were only generated over [the five most recent windows\n before the\n horizon](https://docs.aws.amazon.com/forecast/latest/dg/metrics.html#backtesting).\n """\n )\n', (64489, 65164), False, 'from textwrap import dedent\n'), ((67890, 67961), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Exporting **Top Performers** ..."""'], {}), "(':hourglass_flowing_sand: Exporting **Top Performers** ...')\n", (67900, 67961), True, 'import streamlit as st\n'), ((67987, 67998), 'time.time', 'time.time', ([], {}), '()\n', (67996, 67998), False, 'import time\n'), ((68146, 68195), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (68162, 68195), False, 'import os\n'), ((68392, 68454), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (['df_grp', 's3_path'], {'compression': '"""gzip"""', 'index': '(False)'}), "(df_grp, s3_path, compression='gzip', index=False)\n", (68404, 68454), True, 'import awswrangler as wr\n'), ((70094, 70183), 'streamlit.selectbox', 'st.selectbox', (['"""Channel"""', 'channel_vals'], {'index': 'channel_index', 'key': '"""ml_results_channel"""'}), "('Channel', channel_vals, index=channel_index, key=\n 
'ml_results_channel')\n", (70106, 70183), True, 'import streamlit as st\n'), ((70239, 70324), 'streamlit.selectbox', 'st.selectbox', (['"""Family"""', 'family_vals'], {'index': 'family_index', 'key': '"""ml_results_family"""'}), "('Family', family_vals, index=family_index, key='ml_results_family'\n )\n", (70251, 70324), True, 'import streamlit as st\n'), ((70381, 70467), 'streamlit.selectbox', 'st.selectbox', (['"""Item ID"""', 'item_id_vals'], {'index': 'item_id_index', 'key': '"""ml_results_item"""'}), "('Item ID', item_id_vals, index=item_id_index, key=\n 'ml_results_item')\n", (70393, 70467), True, 'import streamlit as st\n'), ((74515, 74552), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df_afc['timestamp']"], {}), "(df_afc['timestamp'])\n", (74531, 74552), True, 'import pandas as pd\n'), ((75028, 75213), 'json.dumps', 'json.dumps', (["{'prefix': prefix, 'data_freq': data_freq, 'horiz': AFC_FORECAST_HORIZON,\n 'freq': AFC_FORECAST_FREQUENCY, 's3_path': s3_input_path,\n 's3_export_path': s3_output_path}"], {}), "({'prefix': prefix, 'data_freq': data_freq, 'horiz':\n AFC_FORECAST_HORIZON, 'freq': AFC_FORECAST_FREQUENCY, 's3_path':\n s3_input_path, 's3_export_path': s3_output_path})\n", (75038, 75213), False, 'import json\n'), ((76160, 76179), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (76176, 76179), False, 'import os\n'), ((4698, 4716), 'joblib.delayed', 'delayed', (['_resample'], {}), '(_resample)\n', (4705, 4716), False, 'from joblib import Parallel, delayed\n'), ((7503, 7521), 'joblib.delayed', 'delayed', (['_resample'], {}), '(_resample)\n', (7510, 7521), False, 'from joblib import Parallel, delayed\n'), ((16292, 16680), 'textwrap.dedent', 'textwrap.dedent', (['"""\n **Error**\n\n No files were selected.\n\n 1. Upload your file(s).\n 2. Click the **Refresh Files** button.\n 3. Select the file from the dropdown box.\n 4. Select the **Frequency**.\n 5. 
Click the **Validate** button.\n\n ####\n """'], {}), '(\n """\n **Error**\n\n No files were selected.\n\n 1. Upload your file(s).\n 2. Click the **Refresh Files** button.\n 3. Select the file from the dropdown box.\n 4. Select the **Frequency**.\n 5. Click the **Validate** button.\n\n ####\n """\n )\n', (16307, 16680), False, 'import textwrap\n'), ((16899, 16940), 're.match', 're.match', (['"""^[A-Za-z0-9-_]*$"""', 'report_name'], {}), "('^[A-Za-z0-9-_]*$', report_name)\n", (16907, 16940), False, 'import re\n'), ((16976, 17253), 'textwrap.dedent', 'dedent', (['"""\n The report name may only contain:\n - uppercase letters\n - lowercase letters\n - numbers\n - dashes (\'-\')\n - underscores (\'_\')\n ####\n """'], {}), '(\n """\n The report name may only contain:\n - uppercase letters\n - lowercase letters\n - numbers\n - dashes (\'-\')\n - underscores (\'_\')\n ####\n """\n )\n', (16982, 17253), False, 'from textwrap import dedent\n'), ((17405, 17463), 'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Validating file ..."""'], {}), "(':hourglass_flowing_sand: Validating file ...')\n", (17415, 17463), True, 'import streamlit as st\n'), ((18869, 18922), 'streamlit.error', 'st.error', (['f"""**Validation failed**\n\n{err_bullets}"""'], {}), '(f"""**Validation failed**\n\n{err_bullets}""")\n', (18877, 18922), True, 'import streamlit as st\n'), ((19915, 19943), 'os.path.join', 'os.path.join', (['args.local_dir'], {}), '(args.local_dir)\n', (19927, 19943), False, 'import os\n'), ((20507, 20526), 'gzip.open', 'gzip.open', (['fn', '"""rb"""'], {}), "(fn, 'rb')\n", (20516, 20526), False, 'import gzip\n'), ((22638, 22661), 'pandas.Timestamp', 'pd.Timestamp', (['last_date'], {}), '(last_date)\n', (22650, 22661), True, 'import pandas as pd\n'), ((22710, 22734), 'pandas.Timestamp', 'pd.Timestamp', (['first_date'], {}), '(first_date)\n', (22722, 22734), True, 'import pandas as pd\n'), ((23038, 23271), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n No. 
series:\t{num_series} \n No. channels:\t{num_channels} \n No. families:\t{num_families} \n No. item IDs:\t{num_item_ids}\n """'], {}), '(\n f"""\n No. series:\t{num_series} \n No. channels:\t{num_channels} \n No. families:\t{num_families} \n No. item IDs:\t{num_item_ids}\n """\n )\n', (23053, 23271), False, 'import textwrap\n'), ((25358, 25413), 'streamlit.number_input', 'st.number_input', (['"""Horizon Length"""'], {'value': '(1)', 'min_value': '(1)'}), "('Horizon Length', value=1, min_value=1)\n", (25373, 25413), True, 'import streamlit as st\n'), ((25663, 25726), 'streamlit.selectbox', 'st.selectbox', (['"""Compute Backend"""', "['lambdamap']", '(0)', '_format_func'], {}), "('Compute Backend', ['lambdamap'], 0, _format_func)\n", (25675, 25726), True, 'import streamlit as st\n'), ((46506, 46536), 'humanfriendly.format_timespan', 'format_timespan', (['plot_duration'], {}), '(plot_duration)\n', (46521, 46536), False, 'from humanfriendly import format_timespan\n'), ((50879, 50928), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (50895, 50928), False, 'import os\n'), ((51086, 51164), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (['df_preds', 'afa_forecasts_s3_path'], {'compression': '"""gzip"""', 'index': '(False)'}), "(df_preds, afa_forecasts_s3_path, compression='gzip', index=False)\n", (51098, 51164), True, 'import awswrangler as wr\n'), ((52043, 52092), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (52059, 52092), False, 'import os\n'), ((52899, 52986), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (['df_backtests', 'afa_backtests_s3_path'], {'compression': '"""gzip"""', 'index': '(False)'}), "(df_backtests, afa_backtests_s3_path, compression='gzip', index\n =False)\n", (52911, 52986), True, 'import awswrangler as wr\n'), ((61288, 61428), 'textwrap.dedent', 'dedent', (['f"""\n Job submitted, the ARN is:\n - 
{execution_arn}\n ####\n """'], {}), '(\n f"""\n Job submitted, the ARN is:\n - {execution_arn}\n ####\n """\n )\n', (61294, 61428), False, 'from textwrap import dedent\n'), ((62024, 62334), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n **Status:** {sfn_status} \n **Stage:** {sfn_state} \n **Execution ARN:** `{execution_arn}` \n **AWS Console:** [view](https://console.aws.amazon.com/states/home#/executions/details/{execution_arn})\n """'], {}), '(\n f"""\n **Status:** {sfn_status} \n **Stage:** {sfn_state} \n **Execution ARN:** `{execution_arn}` \n **AWS Console:** [view](https://console.aws.amazon.com/states/home#/executions/details/{execution_arn})\n """\n )\n', (62039, 62334), False, 'import textwrap\n'), ((62429, 62480), 'streamlit.spinner', 'st.spinner', (['"""⏳ Loading ML forecasts and results..."""'], {}), "('⏳ Loading ML forecasts and results...')\n", (62439, 62480), True, 'import streamlit as st\n'), ((63659, 63675), 'sspipe.px.reset_index', 'px.reset_index', ([], {}), '()\n', (63673, 63675), False, 'from sspipe import p, px\n'), ((72669, 72699), 'humanfriendly.format_timespan', 'format_timespan', (['plot_duration'], {}), '(plot_duration)\n', (72684, 72699), False, 'from humanfriendly import format_timespan\n'), ((74277, 74354), 'sspipe.px.assign', 'px.assign', ([], {'item_id': "(px['channel'] + '@@' + px['family'] + '@@' + px['item_id'])"}), "(item_id=px['channel'] + '@@' + px['family'] + '@@' + px['item_id'])\n", (74286, 74354), False, 'from sspipe import p, px\n'), ((76075, 76100), 'os.path.join', 'os.path.join', (['folder', 'pat'], {}), '(folder, pat)\n', (76087, 76100), False, 'import os\n'), ((6495, 6506), 'time.time', 'time.time', ([], {}), '()\n', (6504, 6506), False, 'import time\n'), ((12527, 12538), 'time.time', 'time.time', ([], {}), '()\n', (12536, 12538), False, 'import time\n'), ((16759, 16782), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16780, 16782), False, 'import datetime\n'), ((17637, 17695), 
'streamlit.spinner', 'st.spinner', (['""":hourglass_flowing_sand: Processing file ..."""'], {}), "(':hourglass_flowing_sand: Processing file ...')\n", (17647, 17695), True, 'import streamlit as st\n'), ((17873, 17892), 'os.path.getsize', 'os.path.getsize', (['fn'], {}), '(fn)\n', (17888, 17892), False, 'import os\n'), ((19040, 19086), 'os.path.basename', 'os.path.basename', (["state.report['data']['path']"], {}), "(state.report['data']['path'])\n", (19056, 19086), False, 'import os\n'), ((26532, 26562), 'concurrent.futures.as_completed', 'futures.as_completed', (['wait_for'], {}), '(wait_for)\n', (26552, 26562), False, 'from concurrent import futures\n'), ((26626, 26691), 'streamlit.spinner', 'st.spinner', (['f""":rocket: Launching forecasts via AWS Lambda (λ)..."""'], {}), "(f':rocket: Launching forecasts via AWS Lambda (λ)...')\n", (26636, 26691), True, 'import streamlit as st\n'), ((27165, 27216), 'stqdm.stqdm', 'stqdm', (['groups'], {'total': 'total', 'desc': '"""Overall Progress"""'}), "(groups, total=total, desc='Overall Progress')\n", (27170, 27216), False, 'from stqdm import stqdm\n'), ((28546, 28575), 'humanfriendly.format_timespan', 'format_timespan', (['job_duration'], {}), '(job_duration)\n', (28561, 28575), False, 'from humanfriendly import format_timespan\n'), ((31793, 31839), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values', 'hole': '(0.4)'}), '(labels=labels, values=values, hole=0.4)\n', (31799, 31839), True, 'import plotly.graph_objects as go\n'), ((32554, 32591), 'numpy.clip', 'np.clip', (['(acc_val - acc_naive)', '(0)', 'None'], {}), '(acc_val - acc_naive, 0, None)\n', (32561, 32591), True, 'import numpy as np\n'), ((41070, 41093), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (41091, 41093), False, 'import datetime\n'), ((55082, 55131), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (55098, 55131), False, 'import 
os\n'), ((55299, 55443), 'awswrangler.s3.to_csv', 'wr.s3.to_csv', (["df_preds[['timestamp', 'channel', 'family', 'item_id', 'demand', 'type']]", 'afc_forecasts_path'], {'compression': '"""gzip"""', 'index': '(False)'}), "(df_preds[['timestamp', 'channel', 'family', 'item_id',\n 'demand', 'type']], afc_forecasts_path, compression='gzip', index=False)\n", (55311, 55443), True, 'import awswrangler as wr\n'), ((56379, 56428), 'os.path.basename', 'os.path.basename', (["state['report']['data']['path']"], {}), "(state['report']['data']['path'])\n", (56395, 56428), False, 'import os\n'), ((68070, 68093), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (68091, 68093), False, 'import datetime\n'), ((71679, 71694), 'numpy.round', 'np.round', (['yp', '(0)'], {}), '(yp, 0)\n', (71687, 71694), True, 'import numpy as np\n'), ((71888, 71921), 'numpy.round', 'np.round', (['_df_backtests.demand', '(0)'], {}), '(_df_backtests.demand, 0)\n', (71896, 71921), True, 'import numpy as np\n'), ((74219, 74260), 'sspipe.px.rename', 'px.rename', (["{'index': 'timestamp'}"], {'axis': '(1)'}), "({'index': 'timestamp'}, axis=1)\n", (74228, 74260), False, 'from sspipe import p, px\n'), ((40815, 40826), 'time.time', 'time.time', ([], {}), '()\n', (40824, 40826), False, 'import time\n'), ((50799, 50822), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (50820, 50822), False, 'import datetime\n'), ((51963, 51986), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (51984, 51986), False, 'import datetime\n'), ((63441, 63497), 'sspipe.px.groupby', 'px.groupby', (["['channel', 'family', 'item_id']"], {'sort': '(False)'}), "(['channel', 'family', 'item_id'], sort=False)\n", (63451, 63497), False, 'from sspipe import p, px\n'), ((67785, 67796), 'time.time', 'time.time', ([], {}), '()\n', (67794, 67796), False, 'import time\n'), ((74186, 74202), 'sspipe.px.reset_index', 'px.reset_index', ([], {}), '()\n', (74200, 74202), False, 'from sspipe import p, 
px\n'), ((20582, 20593), 'time.time', 'time.time', ([], {}), '()\n', (20591, 20593), False, 'import time\n'), ((24127, 24138), 'time.time', 'time.time', ([], {}), '()\n', (24136, 24138), False, 'import time\n'), ((26987, 27022), 'numpy.ceil', 'np.ceil', (['(groups.ngroups / chunksize)'], {}), '(groups.ngroups / chunksize)\n', (26994, 27022), True, 'import numpy as np\n'), ((54998, 55021), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (55019, 55021), False, 'import datetime\n'), ((56295, 56318), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (56316, 56318), False, 'import datetime\n'), ((27347, 27377), 'concurrent.futures.as_completed', 'futures.as_completed', (['wait_for'], {}), '(wait_for)\n', (27367, 27377), False, 'from concurrent import futures\n'), ((51714, 51725), 'time.time', 'time.time', ([], {}), '()\n', (51723, 51725), False, 'import time\n'), ((53451, 53462), 'time.time', 'time.time', ([], {}), '()\n', (53460, 53462), False, 'import time\n'), ((41769, 41780), 'time.time', 'time.time', ([], {}), '()\n', (41778, 41780), False, 'import time\n'), ((56046, 56057), 'time.time', 'time.time', ([], {}), '()\n', (56055, 56057), False, 'import time\n'), ((57379, 57390), 'time.time', 'time.time', ([], {}), '()\n', (57388, 57390), False, 'import time\n'), ((68769, 68780), 'time.time', 'time.time', ([], {}), '()\n', (68778, 68780), False, 'import time\n'), ((18724, 18735), 'time.time', 'time.time', ([], {}), '()\n', (18733, 18735), False, 'import time\n'), ((44612, 44634), 'numpy.hstack', 'np.hstack', (["dd['yp_cv']"], {}), "(dd['yp_cv'])\n", (44621, 44634), True, 'import numpy as np\n'), ((44689, 44711), 'numpy.hstack', 'np.hstack', (["dd['ts_cv']"], {}), "(dd['ts_cv'])\n", (44698, 44711), True, 'import numpy as np\n')] |
import os
import numpy as np
import scipy.io
import torch
from einops import repeat
from torch.utils.data import DataLoader, Dataset
from .base import Builder
class NSZongyiBuilder(Builder):
    """Dataset builder for Zongyi Li's Navier-Stokes benchmark.

    Loads the field stored under key ``'u'`` of a MATLAB ``.mat`` file,
    subsamples it spatially, splits the time axis into input/target windows
    and exposes train/val/test dataloaders over the result.
    """

    name = 'ns_zongyi'

    def __init__(self, data_path: str, train_size: int, test_size: int,
                 ssr: int, n_steps: int, append_pos: bool = True, **kwargs):
        """
        Parameters
        ----------
        data_path: path to the ``.mat`` file (environment variables are
            expanded before loading).
        train_size: number of leading samples used for training.
        test_size: number of trailing samples used for testing/validation.
        ssr: spatial subsampling rate applied to both spatial axes.
        n_steps: time steps per window; inputs are steps ``[0, n_steps)``,
            targets are ``[n_steps, 2 * n_steps)``.
        append_pos: if True, append normalized x/y coordinate grids to the
            inputs along the last axis.
        kwargs: forwarded verbatim to every ``DataLoader``
            (e.g. ``batch_size``, ``num_workers``).
        """
        super().__init__()
        self.kwargs = kwargs
        self.data_path = data_path
        data = scipy.io.loadmat(os.path.expandvars(data_path))[
            'u'].astype(np.float32)
        data = torch.from_numpy(data)
        a = data[:, ::ssr, ::ssr, :n_steps]
        u = data[:, ::ssr, ::ssr, n_steps:n_steps*2]
        B, X, Y, T = a.shape
        if append_pos:
            # Note that linspace is inclusive of both ends
            ticks = torch.linspace(0, 1, X)
            grid_x = repeat(ticks, 'x -> b x y 1', b=B, y=Y)
            grid_y = repeat(ticks, 'y -> b x y 1', b=B, x=X)
            # Add positional information to inputs
            a = torch.cat([a, grid_x, grid_y], dim=-1)
        # a.shape == [1200, 64, 64, 12]
        # u.shape == [1200, 64, 64, 10]
        self.train_dataset = NavierStokesDataset(
            a[:train_size], u[:train_size])
        self.test_dataset = NavierStokesDataset(
            a[-test_size:], u[-test_size:])
        # train_dataset.shape == [1000, 64, 64, 10]

    def _make_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Build a DataLoader over *dataset* with the shared kwargs.

        Centralizes the construction that the three public loader methods
        previously duplicated.
        """
        return DataLoader(dataset,
                          shuffle=shuffle,
                          drop_last=False,
                          **self.kwargs)

    def train_dataloader(self) -> DataLoader:
        """Shuffled loader over the training split."""
        return self._make_loader(self.train_dataset, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        """Deterministic loader over the held-out split."""
        return self._make_loader(self.test_dataset, shuffle=False)

    def test_dataloader(self) -> DataLoader:
        """Deterministic loader over the held-out split (same data as val)."""
        return self._make_loader(self.test_dataset, shuffle=False)

    def inference_data(self):
        """Reload the raw field (first 512 samples) for inference scripts."""
        data = scipy.io.loadmat(self.data_path)['u'].astype(np.float32)[:512]
        return {'data': data}
class NavierStokesDataset(Dataset):
    """Paired input/target samples for the Navier-Stokes task.

    Each item is a dict with the input window ``x``, the target window
    ``y`` and the (fixed) target time indices ``times`` (10..19).
    """

    def __init__(self, a, u):
        # a: inputs, u: targets; both indexed along their first axis.
        self.a = a
        self.u = u
        self.times = np.arange(10, 20)

    def __len__(self):
        """Number of samples (size of the first axis of the inputs)."""
        return self.a.shape[0]

    def __getitem__(self, idx):
        """Return the idx-th (input, target, times) sample as a dict."""
        return dict(x=self.a[idx], y=self.u[idx], times=self.times)
| [
"numpy.arange",
"os.path.expandvars",
"einops.repeat",
"torch.from_numpy",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.linspace"
] | [((575, 597), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (591, 597), False, 'import torch\n'), ((1472, 1548), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'shuffle': '(True)', 'drop_last': '(False)'}), '(self.train_dataset, shuffle=True, drop_last=False, **self.kwargs)\n', (1482, 1548), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1717, 1793), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_dataset'], {'shuffle': '(False)', 'drop_last': '(False)'}), '(self.test_dataset, shuffle=False, drop_last=False, **self.kwargs)\n', (1727, 1793), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1963, 2039), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_dataset'], {'shuffle': '(False)', 'drop_last': '(False)'}), '(self.test_dataset, shuffle=False, drop_last=False, **self.kwargs)\n', (1973, 2039), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2412, 2429), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (2421, 2429), True, 'import numpy as np\n'), ((827, 850), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'X'], {}), '(0, 1, X)\n', (841, 850), False, 'import torch\n'), ((872, 911), 'einops.repeat', 'repeat', (['ticks', '"""x -> b x y 1"""'], {'b': 'B', 'y': 'Y'}), "(ticks, 'x -> b x y 1', b=B, y=Y)\n", (878, 911), False, 'from einops import repeat\n'), ((933, 972), 'einops.repeat', 'repeat', (['ticks', '"""y -> b x y 1"""'], {'b': 'B', 'x': 'X'}), "(ticks, 'y -> b x y 1', b=B, x=X)\n", (939, 972), False, 'from einops import repeat\n'), ((1041, 1079), 'torch.cat', 'torch.cat', (['[a, grid_x, grid_y]'], {'dim': '(-1)'}), '([a, grid_x, grid_y], dim=-1)\n', (1050, 1079), False, 'import torch\n'), ((492, 521), 'os.path.expandvars', 'os.path.expandvars', (['data_path'], {}), '(data_path)\n', (510, 521), False, 'import os\n')] |
from math import sqrt
import math
from math import atan2, degrees
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
from scipy import stats
from scipy import spatial
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import watershed
from skimage.feature import peak_local_max
# ref : https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_blob.html
#image = data.hubble_deep_field()[0:500, 0:500]
#image_gray = rgb2gray(image)
# Radius (pixels) used later when collecting each blob's neighbors.
neighbor_search_dist = 100
# Input raster; the `_dhm` suffix suggests a lidar-derived digital height
# model where pixel values are heights — TODO confirm units.
im_path = r'F:\entropy_veg\lidar\las_products\USGS_LPC_TN_27County_blk2_2015_2276581SE_LAS_2017\USGS_LPC_TN_27County_blk2_2015_2276581SE_LAS_2017_dhm.tif'
image_gray = io.imread(im_path)
# Zero out implausible values: > 500 are treated as outliers, < 3 as
# ground/noise, so only mid-height features survive.
image_gray[image_gray > 500] = 0
image_gray[image_gray < 3] = 0
# Crop to the region of interest (rows from 2500, columns 500-2000).
image_gray = image_gray[2500:, 500:2000]
#image_gray = image_gray[500:2000, 4500:6000]
#image_gray = image_gray[3100:3500, 1100:1500]
def distance(p0, p1):
    """Return the Euclidean distance between 2-D points *p0* and *p1*.

    Points are any indexable pairs (x, y).
    """
    # math.hypot avoids intermediate overflow/underflow of the squared
    # terms and is the idiomatic stdlib form of sqrt(dx**2 + dy**2).
    return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
def angle(p1, p2):
    """Return the angle (radians, in (-pi, pi]) of the vector p1 -> p2."""
    return atan2(p2[1] - p1[1], p2[0] - p1[0])
# Preview the thresholded/cropped height image.
io.imshow(image_gray)
io.show()
# blobs
print('Computing laplace of gaussian')
#blobs_log = blob_log(image_gray, max_sigma=35, min_sigma=3, num_sigma=10, threshold=2, overlap=.01)
# Laplacian-of-Gaussian blob detection; rows are (row, col, sigma).
blobs_log = blob_log(image_gray, max_sigma=35, min_sigma=6, num_sigma=10, threshold=2, overlap=.01)
# Compute radii in the 3rd column.
# (for a 2-D LoG blob, radius ~= sigma * sqrt(2))
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
print('Computed')
# Overlay each detected blob as a circle on the image.
fig, ax = plt.subplots(1, 1)
# ax.set_title('Laplacian of Gaussian')
ax.imshow(image_gray)
print('Drawing')
for blob in blobs_log:
    y, x, r = blob
    #c = plt.Circle((x, y), 3, color='red', linewidth=1, fill=False)
    c = plt.Circle((x, y), r, color='red', linewidth=1, fill=False)
    ax.add_patch(c)
ax.set_axis_off()
plt.tight_layout()
plt.show()
# Blob centers; note blobs_log columns are (row, col), i.e. (y, x).
y, x = blobs_log[:,0], blobs_log[:,1]
# Flip y so the density plot uses bottom-up axes.
# NOTE(review): assumes the cropped image is 1500 rows tall — the crop above
# is image_gray[2500:, 500:2000], whose row count is not obviously 1500; confirm.
y = 1500-y
# Define the borders
deltaX = (max(x) - min(x))/10
deltaY = (max(y) - min(y))/10
xmin = min(x) - deltaX
xmax = max(x) + deltaX
ymin = min(y) - deltaY
ymax = max(y) + deltaY
print(xmin, xmax, ymin, ymax)
# Create meshgrid
# 100x100 evaluation grid for the kernel density estimate.
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
# NOTE(review): 10e9 is 1e10; confirm whether 1e9 was intended for scaling.
f = np.reshape(kernel(positions).T, xx.shape)*10e9
# Plot the blob-density estimate with contours and the blob centers on top.
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
cfset = ax.contourf(xx, yy, f, cmap='coolwarm')
ax.imshow(np.rot90(f), cmap='coolwarm', extent=[xmin, xmax, ymin, ymax])
cset = ax.contour(xx, yy, f, colors='k')
ax.clabel(cset, inline=1, fontsize=10)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.title('2D Gaussian Kernel density estimation')
for blob in blobs_log:
    ya, xa, r = blob
    #c = plt.Circle((x, y), 3, color='red', linewidth=1, fill=False)
    c = plt.Circle((xa, 1500-ya), 5, color='red', linewidth=1, fill=True)
    ax.add_patch(c)
plt.show()
# Neighborhood analysis: KD-tree over blob centers in (row, col) order.
pt_coords = blobs_log[:,0:2]
tree = spatial.cKDTree(pt_coords,
                leafsize=16,
                compact_nodes=True,
                copy_data=False,
                balanced_tree=True)
print('Finding neighbors')
# NOTE(review): the tree stores points as (row, col) = (y, x) but the query
# point below is [x, y] — the axes look swapped; confirm intended behavior.
neighbor_list = [tree.query_ball_point([x,y], neighbor_search_dist) for y,x in pt_coords]
# Drop each point from its own neighbor list.
for i,l in enumerate(neighbor_list):
    if i in l:
        l.remove(i)
distances_list = []
angles_list = []
print('Computing angles and distances')
# For every blob, record the distance and bearing to each of its neighbors.
for (y,x),group in zip(pt_coords,neighbor_list):
    distance_group = []
    angles_group = []
    for neighbor in group:
        nx = pt_coords[neighbor][1]
        ny = pt_coords[neighbor][0]
        d = distance([x,y],[nx,ny])
        a = angle([x,y],[nx,ny])
        distance_group.append(d)
        angles_group.append(a)
    distances_list.append(distance_group)
    angles_list.append(angles_group)
# Final per-blob summary: index -> neighbor ids, distances, angles.
pt_data = {i:{'neighbors':neis, 'distances':dists, 'angles':angs}
            for i,(neis,dists,angs) in
            enumerate(zip(neighbor_list,distances_list,angles_list))}
print('Done')
| [
"skimage.feature.blob_log",
"scipy.stats.gaussian_kde",
"matplotlib.pyplot.Circle",
"scipy.spatial.cKDTree",
"skimage.io.show",
"math.sqrt",
"skimage.io.imread",
"matplotlib.pyplot.figure",
"numpy.vstack",
"matplotlib.pyplot.tight_layout",
"skimage.io.imshow",
"math.atan2",
"matplotlib.pyplo... | [((792, 810), 'skimage.io.imread', 'io.imread', (['im_path'], {}), '(im_path)\n', (801, 810), False, 'from skimage import io\n'), ((1242, 1263), 'skimage.io.imshow', 'io.imshow', (['image_gray'], {}), '(image_gray)\n', (1251, 1263), False, 'from skimage import io\n'), ((1264, 1273), 'skimage.io.show', 'io.show', ([], {}), '()\n', (1271, 1273), False, 'from skimage import io\n'), ((1437, 1529), 'skimage.feature.blob_log', 'blob_log', (['image_gray'], {'max_sigma': '(35)', 'min_sigma': '(6)', 'num_sigma': '(10)', 'threshold': '(2)', 'overlap': '(0.01)'}), '(image_gray, max_sigma=35, min_sigma=6, num_sigma=10, threshold=2,\n overlap=0.01)\n', (1445, 1529), False, 'from skimage.feature import blob_dog, blob_log, blob_doh\n'), ((1633, 1651), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1645, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1949, 1967), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1968, 1978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1976, 1978), True, 'import matplotlib.pyplot as plt\n'), ((2359, 2376), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (2368, 2376), True, 'import numpy as np\n'), ((2386, 2412), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (2404, 2412), False, 'from scipy import stats\n'), ((2471, 2497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2481, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2849), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Gaussian Kernel density estimation"""'], {}), "('2D Gaussian Kernel density estimation')\n", (2808, 2849), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3066, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3206), 
'scipy.spatial.cKDTree', 'spatial.cKDTree', (['pt_coords'], {'leafsize': '(16)', 'compact_nodes': '(True)', 'copy_data': '(False)', 'balanced_tree': '(True)'}), '(pt_coords, leafsize=16, compact_nodes=True, copy_data=False,\n balanced_tree=True)\n', (3121, 3206), False, 'from scipy import spatial\n'), ((1045, 1099), 'math.sqrt', 'math.sqrt', (['((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)'], {}), '((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)\n', (1054, 1099), False, 'import math\n'), ((1221, 1240), 'math.atan2', 'atan2', (['yDiff', 'xDiff'], {}), '(yDiff, xDiff)\n', (1226, 1240), False, 'from math import atan2, degrees\n'), ((1596, 1603), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1600, 1603), False, 'from math import sqrt\n'), ((1850, 1909), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x, y)', 'r'], {'color': '"""red"""', 'linewidth': '(1)', 'fill': '(False)'}), "((x, y), r, color='red', linewidth=1, fill=False)\n", (1860, 1909), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2629), 'numpy.rot90', 'np.rot90', (['f'], {}), '(f)\n', (2626, 2629), True, 'import numpy as np\n'), ((2971, 3038), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(xa, 1500 - ya)', '(5)'], {'color': '"""red"""', 'linewidth': '(1)', 'fill': '(True)'}), "((xa, 1500 - ya), 5, color='red', linewidth=1, fill=True)\n", (2981, 3038), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import progressbar
from terminaltables import AsciiTable
from scratch_ml.utils import bar_widget, batch_iterator
class NeuralNetwork():
    """Neural Network base model.

    Maintains an ordered stack of layers and drives mini-batch training,
    evaluation and prediction using the supplied optimizer and loss.
    """

    def __init__(self, optimizer, loss, validation_data=None):
        self.optimizer = optimizer
        self.layers = []
        # Per-epoch loss history for the training and validation sets.
        self.errors = {"training": [], "validation": []}
        self.loss_function = loss()
        self.progressbar = progressbar.ProgressBar(widgets=bar_widget)
        self.val_set = None
        if validation_data:
            val_x, val_y = validation_data
            self.val_set = {"x": val_x, "y": val_y}

    def set_trainable(self, trainable):
        """Freeze or unfreeze the weights of every layer in the network."""
        for current_layer in self.layers:
            current_layer.trainable = trainable

    def add(self, layer):
        """Append a layer, wiring its input shape to the previous layer."""
        if self.layers:
            # Not the first layer: its input shape is the previous
            # layer's output shape.
            layer.set_input_shape(shape=self.layers[-1].output_shape())
        if hasattr(layer, 'initialize'):
            # Layers with weights need the optimizer to initialize them.
            layer.initialize(optimizer=self.optimizer)
        self.layers.append(layer)

    def test_on_batch(self, x, y):
        """Evaluate loss and accuracy on one batch without weight updates."""
        y_pred = self._forward_pass(x, training=False)
        return (np.mean(self.loss_function.loss(y, y_pred)),
                self.loss_function.accuracy(y, y_pred))

    def train_on_batch(self, x, y):
        """One forward/backward pass with a weight update on a single batch."""
        y_pred = self._forward_pass(x)
        loss = np.mean(self.loss_function.loss(y, y_pred))
        acc = self.loss_function.accuracy(y, y_pred)
        self._backward_pass(loss_grad=self.loss_function.gradient(y, y_pred))
        return loss, acc

    def fit(self, x, y, n_epochs, batch_size):
        """Train for n_epochs; return (training, validation) loss history."""
        for _ in self.progressbar(range(n_epochs)):
            epoch_losses = [self.train_on_batch(x_batch, y_batch)[0]
                            for x_batch, y_batch in
                            batch_iterator(x, y, batch_size=batch_size)]
            self.errors["training"].append(np.mean(epoch_losses))
            if self.val_set is not None:
                val_loss, _ = self.test_on_batch(
                    self.val_set["x"], self.val_set["y"])
                self.errors["validation"].append(val_loss)
        return self.errors["training"], self.errors["validation"]

    def _forward_pass(self, x, training=True):
        """Propagate x through every layer and return the final output."""
        output = x
        for current_layer in self.layers:
            output = current_layer.forward_pass(output, training)
        return output

    def _backward_pass(self, loss_grad):
        """Propagate the loss gradient backwards through the layers."""
        for current_layer in reversed(self.layers):
            loss_grad = current_layer.backward_pass(loss_grad)

    def summary(self, name="Model Summary"):
        """Print a table of layer types, parameter counts and output shapes."""
        print(AsciiTable([[name]]).table)
        print("Input Shape: %s" % str(self.layers[0].input_shape))
        rows = [["Layer Type", "Parameters", "Output Shape"]]
        tot_params = 0
        for current_layer in self.layers:
            params = current_layer.parameters()
            rows.append([current_layer.layer_name(), str(params),
                         str(current_layer.output_shape())])
            tot_params += params
        print(AsciiTable(rows).table)
        print("Total Parameters: %d\n" % tot_params)

    def predict(self, x):
        """Return network predictions for x (inference mode)."""
        return self._forward_pass(x, training=False)
| [
"scratch_ml.utils.batch_iterator",
"numpy.mean",
"terminaltables.AsciiTable",
"progressbar.ProgressBar"
] | [((437, 480), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'widgets': 'bar_widget'}), '(widgets=bar_widget)\n', (460, 480), False, 'import progressbar\n'), ((2148, 2191), 'scratch_ml.utils.batch_iterator', 'batch_iterator', (['x', 'y'], {'batch_size': 'batch_size'}), '(x, y, batch_size=batch_size)\n', (2162, 2191), False, 'from scratch_ml.utils import bar_widget, batch_iterator\n'), ((2341, 2361), 'numpy.mean', 'np.mean', (['batch_error'], {}), '(batch_error)\n', (2348, 2361), True, 'import numpy as np\n'), ((3043, 3063), 'terminaltables.AsciiTable', 'AsciiTable', (['[[name]]'], {}), '([[name]])\n', (3053, 3063), False, 'from terminaltables import AsciiTable\n'), ((3581, 3603), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (3591, 3603), False, 'from terminaltables import AsciiTable\n')] |
import numpy as np
import pytest
from scipy.constants import c, h, k
#
# get Stull's c_1 and c_2 from fundamental constants
#
# c=2.99792458e+08 #m/s -- speed of light in vacuum
# h=6.62606876e-34 #J s -- Planck's constant
# k=1.3806503e-23 # J/K -- Boltzman's constant
# Planck-law constants derived from the fundamental constants above
# (Stull's c_1 and c_2), plus the Stefan-Boltzmann constant.
c1 = 2. * h * c**2.
c2 = h * c / k
sigma = 2. * np.pi**5. * k**4. / (15 * h**3. * c**2.)


def Elambda(wavel, Temp):
    """
    Calculate the blackbody radiant exitance (Stull 2.13).

    Parameters
    ----------
    wavel: float or array
        wavelength (meters)
    Temp: float
        temperature (K)

    Returns
    -------
    Elambda: float or arr
        monochromatic radiant exitance (W/m^2/m)
    """
    # Exitance is pi times the isotropic blackbody radiance.
    return c1 * np.pi / (wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1))


def calc_radiance(wavel, Temp):
    """
    Calculate the blackbody radiance.

    Parameters
    ----------
    wavel: float or array
        wavelength (meters)
    Temp: float
        temperature (K)

    Returns
    -------
    Llambda: float or arr
        monochromatic radiance (W/m^2/m/sr)
    """
    denominator = wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1)
    return c1 / denominator


def planck_invert(wavel, Lstar):
    """
    Calculate the brightness temperature by inverting the Planck function.

    Parameters
    ----------
    wavel: float
        wavelength (meters)
    Lstar: float or array
        Blackbody radiance (W/m^2/m/sr)

    Returns
    -------
    Tbright: float or arr
        brightness temperature (K)
    """
    return c2 / (wavel * np.log(c1 / (wavel**5. * Lstar) + 1.))
def test_planck_wavelen():
    """
    test planck function for several wavelengths
    and Temps
    """
    # Temperatures in K; wavelengths converted from microns to meters.
    the_temps = [200., 250., 350.]
    the_wavelens = np.array([8., 10., 12.]) * 1.e-6
    # The 1.e-6 factor converts radiance to W/m^2/micron/sr.
    # Outer loop over temperature, inner over wavelength (same order as
    # the reference answers below).
    out = [calc_radiance(a_wavelen, a_temp) * 1.e-6
           for a_temp in the_temps
           for a_wavelen in the_wavelens]
    answer = [0.4521, 0.8954, 1.1955, 2.7324, 3.7835, 3.9883,
              21.4495, 19.8525, 16.0931]
    np.testing.assert_array_almost_equal(out, answer, decimal=4)
    return None
def test_planck_inverse():
    """
    test planck inverse for several round trips
    and Temps
    """
    # Temperatures in K; wavelengths converted from microns to meters.
    the_temps = [200., 250., 350.]
    the_wavelens = np.array([8., 10., 12.]) * 1.e-6
    # Forward pass: (wavelength, radiance) pairs for every combination,
    # temperature outer, wavelength inner.
    out = [(a_wavelen, calc_radiance(a_wavelen, a_temp))
           for a_temp in the_temps
           for a_wavelen in the_wavelens]
    # Inverse pass: brightness temperature should recover the inputs.
    brights = [planck_invert(wavelen, bbr) for wavelen, bbr in out]
    answer = [200.0, 200.0, 200.0, 250.0, 250.0, 250.0, 350.0, 350.0, 350.0]
    np.testing.assert_array_almost_equal(brights, answer, decimal=10)
    return None
if __name__ == "__main__":
    # Run this module's pytest test functions when executed as a script.
    #
    # the variable __file__ contains the name of this file
    # so the result of the following line will be the same as if
    # you typed:
    #
    # pytest a301/radiation.py -q
    #
    # in a terminal (the -q means 'suppress most of output')
    #
    print('testing {}'.format(__file__))
    pytest.main([__file__, '-q'])
| [
"numpy.testing.assert_array_almost_equal",
"numpy.log",
"pytest.main",
"numpy.exp",
"numpy.array"
] | [((2271, 2331), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['out', 'answer'], {'decimal': '(4)'}), '(out, answer, decimal=4)\n', (2307, 2331), True, 'import numpy as np\n'), ((3028, 3093), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['brights', 'answer'], {'decimal': '(10)'}), '(brights, answer, decimal=10)\n', (3064, 3093), True, 'import numpy as np\n'), ((3445, 3474), 'pytest.main', 'pytest.main', (["[__file__, '-q']"], {}), "([__file__, '-q'])\n", (3456, 3474), False, 'import pytest\n'), ((1874, 1901), 'numpy.array', 'np.array', (['[8.0, 10.0, 12.0]'], {}), '([8.0, 10.0, 12.0])\n', (1882, 1901), True, 'import numpy as np\n'), ((2565, 2592), 'numpy.array', 'np.array', (['[8.0, 10.0, 12.0]'], {}), '([8.0, 10.0, 12.0])\n', (2573, 2592), True, 'import numpy as np\n'), ((1599, 1640), 'numpy.log', 'np.log', (['(c1 / (wavel ** 5.0 * Lstar) + 1.0)'], {}), '(c1 / (wavel ** 5.0 * Lstar) + 1.0)\n', (1605, 1640), True, 'import numpy as np\n'), ((754, 781), 'numpy.exp', 'np.exp', (['(c2 / (wavel * Temp))'], {}), '(c2 / (wavel * Temp))\n', (760, 781), True, 'import numpy as np\n'), ((1175, 1202), 'numpy.exp', 'np.exp', (['(c2 / (wavel * Temp))'], {}), '(c2 / (wavel * Temp))\n', (1181, 1202), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def linearRegCostFunction(X, y, theta, _lambda):
    """Regularized linear-regression cost and gradient.

    Parameters
    ----------
    X: (m, n) design matrix (first column is the bias feature).
    y: (m, 1) target values.
    theta: parameter vector with n entries (any shape; reshaped to (n, 1)).
    _lambda: regularization strength.

    Returns
    -------
    (cost, grad): scalar cost and flattened gradient of length n.
    """
    n_features = np.shape(X)[1]
    theta = theta.reshape(n_features, 1)
    n_samples = np.shape(X)[0]
    # The bias parameter theta[0] is excluded from regularization.
    reg_theta = theta[1:]
    residual = np.dot(X, theta) - y
    cost = (np.dot(residual.T, residual) / (2 * n_samples)
            + _lambda / (2 * n_samples) * np.dot(reg_theta.T, reg_theta))
    unreg_grad = np.dot(X.T, np.dot(X, theta) - y) / n_samples
    grad = unreg_grad + theta * _lambda / n_samples
    # Restore the unregularized bias gradient.
    grad[0] = unreg_grad[0]
    return cost.flatten()[0], grad.flatten()
| [
"numpy.dot",
"numpy.shape"
] | [((170, 181), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (178, 181), True, 'import numpy as np\n'), ((224, 240), 'numpy.dot', 'np.dot', (['X', 'theta'], {}), '(X, theta)\n', (230, 240), True, 'import numpy as np\n'), ((143, 154), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (151, 154), True, 'import numpy as np\n'), ((253, 275), 'numpy.dot', 'np.dot', (['delta.T', 'delta'], {}), '(delta.T, delta)\n', (259, 275), True, 'import numpy as np\n'), ((308, 338), 'numpy.dot', 'np.dot', (['theta_tmp.T', 'theta_tmp'], {}), '(theta_tmp.T, theta_tmp)\n', (314, 338), True, 'import numpy as np\n'), ((370, 386), 'numpy.dot', 'np.dot', (['X', 'theta'], {}), '(X, theta)\n', (376, 386), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
import torch
from models import VideoTools
import os.path
from utils import initialImage
class LoadedModel:
    def __init__(self, name, device, upscale_factor):
        """Load a super-resolution checkpoint and inspect its architecture.

        Parameters
        ----------
        name: path to a torch checkpoint containing at least a 'model'
            entry and optionally a 'parameters' entry (dict or namespace).
        device: torch device the model is moved to.
        upscale_factor: super-resolution factor used to interpret the
            first layer's channel count.
        """
        super().__init__()
        # Display name = checkpoint file name without directory/extension.
        self.name = os.path.splitext(os.path.basename(name))[0]
        self.device = device
        self.upscale_factor = upscale_factor
        print(self.name+':')
        checkpoint = torch.load(name)
        self.parameters = checkpoint.get('parameters', dict())
        if not isinstance(self.parameters, dict):
            self.parameters = vars(self.parameters) # namespace object
        self.model = checkpoint['model']
        self.model.to(device)
        self.model.train(False)
        print(self.model)
        #find first module
        # Walk down the first-child chain to the innermost leading module;
        # its in_channels determines the network's expected input layout.
        first_module = self.model
        while True:
            it = first_module.children()
            try:
                o = next(it)
            except StopIteration:
                break
            first_module = o
        print('The first module in the network is:', first_module)
        # NOTE(review): assumes the innermost first module is a conv-like
        # layer exposing `in_channels` — confirm for all checkpoints.
        self.input_channels = first_module.in_channels
        # Unshaded mode is detected either from the channel count
        # (5 + 6 * upscale^2) or from an explicit 'unshaded' parameter.
        if self.input_channels == 5 + 6 * (self.upscale_factor**2) or \
            self.parameters.get('unshaded', False):
            self.unshaded = True
            print("Network runs in unshaded mode")
        else:
            self.unshaded = False
            # Channels left after removing the 3 * upscale^2 RGB channels
            # of the warped previous high-res frame.
            self.input_single_channels = self.input_channels - 3 * (self.upscale_factor**2)
            if self.input_single_channels >= 7:
                self.has_normal = True
                self.input_single_channels -= 3
            else:
                self.has_normal = False
            if self.input_single_channels >= 5:
                self.has_depth = True
                self.input_single_channels -= 1
            else:
                self.has_depth = False
            print('Number of input channels:', self.input_channels,
                  ', has_normal:', self.has_normal,
                  ', has_depth:', self.has_depth)
        # Read mode for initial image
        # Default depends on mode: "input" for unshaded, "zero" otherwise.
        if not "initialImage" in self.parameters:
            if self.unshaded:
                self.initial_image_mode = "input"
            else:
                self.initial_image_mode = "zero"
        else:
            self.initial_image_mode = self.parameters['initialImage']
        print('initial image mode:', self.initial_image_mode)
        # Read other settings
        self.inverse_ao = self.parameters.get('aoInverted', False)
def inference(self, current_low, prev_high):
"""
Performs the superresolution.
current_low: low-resolution input from the renderer, 10 channels (RGB, mask, normal, depth, flow), GPU. Format: (B,C,H,W)
prev_high: RGB-image of the previous inference result
"""
with torch.no_grad():
current_low_cpu = current_low.cpu().numpy()[0]
# compute flow
flow_inpaint = np.stack((
cv.inpaint(current_low_cpu[8,:,:], np.uint8(current_low_cpu[3,:,:]==0), 3, cv.INPAINT_NS),
cv.inpaint(current_low_cpu[9,:,:], np.uint8(current_low_cpu[3,:,:]==0), 3, cv.INPAINT_NS)), axis=0).astype(np.float32)
flow = torch.unsqueeze(torch.from_numpy(flow_inpaint), dim=0).to(self.device)
#input
if self.unshaded:
input = torch.cat((current_low[:,3:4,:,:]*2-1, current_low[:,4:8,:,:]), dim=1)
if prev_high is None:
previous_warped = initialImage(input, 6,
self.initial_image_mode,
self.inverse_ao,
self.upscale_factor).to(self.device)
else:
previous_warped = VideoTools.warp_upscale(
prev_high.to(self.device),
flow,
self.upscale_factor,
special_mask = True)
else:
if self.has_normal and self.has_depth:
input = torch.clamp(current_low[:,0:8,:,:], 0, 1)
elif self.has_normal: #no depth
input = current_low[:,0:7,:,:]
elif self.has_depth: #no normal
input = torch.cat((current_low[:,0:4,:,:], current_low[:,7:8,:,:]), dim=1)
else: #only color+mask
input = current_low[:,0:4,:,:]
if prev_high is None:
#prev_high = np.zeros(
# (3, input.shape[2]*self.upscale_factor, input.shape[3]*self.upscale_factor),
# dtype=current_low.dtype)
prev_high = initialImage(input, 3, self.initial_image_mode, self.upscale_factor)
previous_warped = VideoTools.warp_upscale(
prev_high.to(self.device),
flow,
self.upscale_factor,
special_mask = False)
previous_warped_flattened = VideoTools.flatten_high(previous_warped, self.upscale_factor)
# run the network
single_input = torch.cat((input, previous_warped_flattened), dim=1)
prediction, _ = self.model(single_input)
return prediction | [
"numpy.uint8",
"torch.load",
"utils.initialImage",
"torch.from_numpy",
"torch.cat",
"torch.no_grad",
"models.VideoTools.flatten_high",
"torch.clamp"
] | [((415, 431), 'torch.load', 'torch.load', (['name'], {}), '(name)\n', (425, 431), False, 'import torch\n'), ((2823, 2838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2836, 2838), False, 'import torch\n'), ((5078, 5139), 'models.VideoTools.flatten_high', 'VideoTools.flatten_high', (['previous_warped', 'self.upscale_factor'], {}), '(previous_warped, self.upscale_factor)\n', (5101, 5139), False, 'from models import VideoTools\n'), ((5197, 5249), 'torch.cat', 'torch.cat', (['(input, previous_warped_flattened)'], {'dim': '(1)'}), '((input, previous_warped_flattened), dim=1)\n', (5206, 5249), False, 'import torch\n'), ((3369, 3454), 'torch.cat', 'torch.cat', (['(current_low[:, 3:4, :, :] * 2 - 1, current_low[:, 4:8, :, :])'], {'dim': '(1)'}), '((current_low[:, 3:4, :, :] * 2 - 1, current_low[:, 4:8, :, :]), dim=1\n )\n', (3378, 3454), False, 'import torch\n'), ((4114, 4158), 'torch.clamp', 'torch.clamp', (['current_low[:, 0:8, :, :]', '(0)', '(1)'], {}), '(current_low[:, 0:8, :, :], 0, 1)\n', (4125, 4158), False, 'import torch\n'), ((4753, 4821), 'utils.initialImage', 'initialImage', (['input', '(3)', 'self.initial_image_mode', 'self.upscale_factor'], {}), '(input, 3, self.initial_image_mode, self.upscale_factor)\n', (4765, 4821), False, 'from utils import initialImage\n'), ((3241, 3271), 'torch.from_numpy', 'torch.from_numpy', (['flow_inpaint'], {}), '(flow_inpaint)\n', (3257, 3271), False, 'import torch\n'), ((3516, 3606), 'utils.initialImage', 'initialImage', (['input', '(6)', 'self.initial_image_mode', 'self.inverse_ao', 'self.upscale_factor'], {}), '(input, 6, self.initial_image_mode, self.inverse_ao, self.\n upscale_factor)\n', (3528, 3606), False, 'from utils import initialImage\n'), ((4331, 4403), 'torch.cat', 'torch.cat', (['(current_low[:, 0:4, :, :], current_low[:, 7:8, :, :])'], {'dim': '(1)'}), '((current_low[:, 0:4, :, :], current_low[:, 7:8, :, :]), dim=1)\n', (4340, 4403), False, 'import torch\n'), ((3015, 3054), 'numpy.uint8', 'np.uint8', 
(['(current_low_cpu[3, :, :] == 0)'], {}), '(current_low_cpu[3, :, :] == 0)\n', (3023, 3054), True, 'import numpy as np\n'), ((3122, 3161), 'numpy.uint8', 'np.uint8', (['(current_low_cpu[3, :, :] == 0)'], {}), '(current_low_cpu[3, :, :] == 0)\n', (3130, 3161), True, 'import numpy as np\n')] |
from __future__ import division
import sys
import unittest
import numpy as np
import numpy.testing
import npinterval
class TestInterval(unittest.TestCase):
    """Exercises npinterval.interval()."""

    def test_single(self):
        """Test the interval warns if only 1 sample is included"""
        # assertWarns does not exist before python 3.2, so skip silently there.
        if sys.version_info[0] + 0.1 * sys.version_info[1] < 3.2:
            return
        samples = np.array([-5, -3, -2, -2, 100])
        with self.assertWarns(RuntimeWarning):
            npinterval.interval(samples, 1 / 5)

    def test_intervals(self):
        """Test the interval function correctly computes valid intervals"""
        samples = np.array([-5, -3, -2, -2, 100])
        cases = [
            (2 / 5, (-2, -2, 2, 4)),
            (3 / 5, (-3, -2, 1, 4)),
            (4 / 5, (-5, -2, 0, 4)),
        ]
        for fraction, expected in cases:
            self.assertEqual(npinterval.interval(samples, fraction), expected)

    def test_full(self):
        """Test the interval function correctly finds the full interval"""
        samples = np.array([-5, -3, -2, -2, 100])
        self.assertEqual(npinterval.interval(samples, 1), (-5, 100, 0, 5))

    def test_invalid(self):
        """Test the interval function catches invalid intervals"""
        samples = np.array([-5, -3, -2, -2, 100])
        for bad_fraction in (1.01, 0):
            with self.assertRaises(ValueError):
                npinterval.interval(samples, bad_fraction)
class TestHalfSampleMode(unittest.TestCase):
    """Exercises npinterval.half_sample_mode()."""

    def _check(self, values, expected):
        # Helper: run half_sample_mode on values and compare to expected.
        self.assertEqual(npinterval.half_sample_mode(np.array(values)), expected)

    def test_left(self):
        """Test edge case where mode is left-most values"""
        self._check([-1.1, -1, 0, 1, 2, 100], -1.05)

    def test_right(self):
        """Test edge case where mode is right-most values"""
        self._check([-100, -2, -1, 0, 1, 1.1], +1.05)

    def test_central(self):
        """Test edge case where mode is near middle"""
        self._check([-100, -2, 0, 0, 1, 1.1], 0)

    def test_edges(self):
        """Test edge cases"""
        self._check([0, 1], 0.5)
        self._check([0, 1, 1], 1)
if __name__ == '__main__':
    # Discover and run every TestCase in this module when executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.array",
"npinterval.interval",
"npinterval.half_sample_mode"
] | [((2416, 2431), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2429, 2431), False, 'import unittest\n'), ((408, 439), 'numpy.array', 'np.array', (['[-5, -3, -2, -2, 100]'], {}), '([-5, -3, -2, -2, 100])\n', (416, 439), True, 'import numpy as np\n'), ((646, 677), 'numpy.array', 'np.array', (['[-5, -3, -2, -2, 100]'], {}), '([-5, -3, -2, -2, 100])\n', (654, 677), True, 'import numpy as np\n'), ((1076, 1107), 'numpy.array', 'np.array', (['[-5, -3, -2, -2, 100]'], {}), '([-5, -3, -2, -2, 100])\n', (1084, 1107), True, 'import numpy as np\n'), ((1310, 1341), 'numpy.array', 'np.array', (['[-5, -3, -2, -2, 100]'], {}), '([-5, -3, -2, -2, 100])\n', (1318, 1341), True, 'import numpy as np\n'), ((1653, 1687), 'numpy.array', 'np.array', (['[-1.1, -1, 0, 1, 2, 100]'], {}), '([-1.1, -1, 0, 1, 2, 100])\n', (1661, 1687), True, 'import numpy as np\n'), ((1852, 1887), 'numpy.array', 'np.array', (['[-100, -2, -1, 0, 1, 1.1]'], {}), '([-100, -2, -1, 0, 1, 1.1])\n', (1860, 1887), True, 'import numpy as np\n'), ((2048, 2082), 'numpy.array', 'np.array', (['[-100, -2, 0, 0, 1, 1.1]'], {}), '([-100, -2, 0, 0, 1, 1.1])\n', (2056, 2082), True, 'import numpy as np\n'), ((2212, 2228), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2220, 2228), True, 'import numpy as np\n'), ((2303, 2322), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (2311, 2322), True, 'import numpy as np\n'), ((499, 528), 'npinterval.interval', 'npinterval.interval', (['x', '(1 / 5)'], {}), '(x, 1 / 5)\n', (518, 528), False, 'import npinterval\n'), ((716, 745), 'npinterval.interval', 'npinterval.interval', (['x', '(2 / 5)'], {}), '(x, 2 / 5)\n', (735, 745), False, 'import npinterval\n'), ((811, 840), 'npinterval.interval', 'npinterval.interval', (['x', '(3 / 5)'], {}), '(x, 3 / 5)\n', (830, 840), False, 'import npinterval\n'), ((906, 935), 'npinterval.interval', 'npinterval.interval', (['x', '(4 / 5)'], {}), '(x, 4 / 5)\n', (925, 935), False, 'import npinterval\n'), ((1146, 1171), 
'npinterval.interval', 'npinterval.interval', (['x', '(1)'], {}), '(x, 1)\n', (1165, 1171), False, 'import npinterval\n'), ((1398, 1426), 'npinterval.interval', 'npinterval.interval', (['x', '(1.01)'], {}), '(x, 1.01)\n', (1417, 1426), False, 'import npinterval\n'), ((1483, 1508), 'npinterval.interval', 'npinterval.interval', (['x', '(0)'], {}), '(x, 0)\n', (1502, 1508), False, 'import npinterval\n'), ((1713, 1743), 'npinterval.half_sample_mode', 'npinterval.half_sample_mode', (['x'], {}), '(x)\n', (1740, 1743), False, 'import npinterval\n'), ((1913, 1943), 'npinterval.half_sample_mode', 'npinterval.half_sample_mode', (['x'], {}), '(x)\n', (1940, 1943), False, 'import npinterval\n'), ((2108, 2138), 'npinterval.half_sample_mode', 'npinterval.half_sample_mode', (['x'], {}), '(x)\n', (2135, 2138), False, 'import npinterval\n'), ((2254, 2284), 'npinterval.half_sample_mode', 'npinterval.half_sample_mode', (['x'], {}), '(x)\n', (2281, 2284), False, 'import npinterval\n'), ((2348, 2378), 'npinterval.half_sample_mode', 'npinterval.half_sample_mode', (['x'], {}), '(x)\n', (2375, 2378), False, 'import npinterval\n')] |
import numpy as np
from tf_rl.env.continuous_gridworld.env import GridWorld
# Intermediate way-point rewards on the path toward the goal cell.
dense_goals = [(13.0, 8.0), (18.0, 11.0), (20.0, 15.0), (22.0, 19.0)]

env = GridWorld(max_episode_len=500, num_rooms=1, action_limit_max=1.0, silent_mode=True,
				start_position=(8.0, 8.0), goal_position=(22.0, 22.0), goal_reward=+100.0,
				dense_goals=dense_goals, dense_reward=+5,
				grid_len=30, plot_path="./images")

# Roll out 3 x 1000 uniformly random steps, recording every visited state
# (episodes are restarted in place whenever the env signals done).
state = env.reset()
visited = []
for _ in range(3 * 1000):
	action = env.action_space.sample()
	visited.append(state)
	state, reward, done, info = env.step(action)
	if done:
		state = env.reset()

traj = np.array(visited)
env.vis_exploration(traj=traj, file_name="exploration.png")
env.vis_trajectory(traj=traj, file_name="traj.png")
"tf_rl.env.continuous_gridworld.env.GridWorld",
"numpy.array"
] | [((153, 401), 'tf_rl.env.continuous_gridworld.env.GridWorld', 'GridWorld', ([], {'max_episode_len': '(500)', 'num_rooms': '(1)', 'action_limit_max': '(1.0)', 'silent_mode': '(True)', 'start_position': '(8.0, 8.0)', 'goal_position': '(22.0, 22.0)', 'goal_reward': '(+100.0)', 'dense_goals': 'dense_goals', 'dense_reward': '(+5)', 'grid_len': '(30)', 'plot_path': '"""./images"""'}), "(max_episode_len=500, num_rooms=1, action_limit_max=1.0,\n silent_mode=True, start_position=(8.0, 8.0), goal_position=(22.0, 22.0),\n goal_reward=+100.0, dense_goals=dense_goals, dense_reward=+5, grid_len=\n 30, plot_path='./images')\n", (162, 401), False, 'from tf_rl.env.continuous_gridworld.env import GridWorld\n'), ((699, 713), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (707, 713), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.tcquery as esttc
import emission.core.wrapper.localdate as ecwl
# Module for pretty-printing outputs (e.g. head) to help users
# understand what is going on
# However, this means that this module can only be used in an ipython notebook
import IPython.display as disp
import emission.core.get_database as edb
def get_time_query(year, month):
    """Build a TimeComponentQuery over trip start dates.

    (None, None) means "no filter" and returns None; a year alone queries the
    whole year; year + month narrows to that month. A month without a year is
    rejected by the assert.
    """
    if year is None and month is None:
        return None
    assert year is not None
    if month is None:
        spec = {"year": year}
    else:
        spec = {"year": year, "month": month}
    query_ld = ecwl.LocalDate(spec)
    return esttc.TimeComponentQuery("data.start_local_dt", query_ld, query_ld)
def get_participant_uuids(program):
    """
    Get the list of participant UUIDs for the specified program.
    Note that the "program" parameter is currently a NOP but will be enabled
    once we have other programs start
    """
    query = {"install_group": "participant"}
    projection = {"user_id": 1, "_id": 0}
    matches = list(edb.get_profile_db().find(query, projection))
    uuids = [entry["user_id"] for entry in matches]
    disp.display(uuids)
    return uuids
def load_all_confirmed_trips(tq):
    """Load every analysis/confirmed_trip entry matching time query tq into a dataframe."""
    agg_ts = esta.TimeSeries.get_aggregate_time_series()
    confirmed_df = agg_ts.get_data_df("analysis/confirmed_trip", tq)
    print("Loaded all confirmed trips of length %s" % len(confirmed_df))
    disp.display(confirmed_df.head())
    return confirmed_df
def load_all_participant_trips(program, tq):
    """Load confirmed trips for tq, restricted to the participants of `program`."""
    participants = get_participant_uuids(program)
    everything = load_all_confirmed_trips(tq)
    participant_ct_df = everything[everything.user_id.isin(participants)]
    print("After filtering, found %s participant trips " % len(participant_ct_df))
    disp.display(participant_ct_df.head())
    return participant_ct_df
def filter_labeled_trips(mixed_trip_df):
    """Keep only trips whose user_input dict is non-empty (i.e. labeled trips)."""
    has_label = mixed_trip_df.user_input != {}
    labeled_ct = mixed_trip_df[has_label]
    print("After filtering, found %s labeled trips" % len(labeled_ct))
    disp.display(labeled_ct.head())
    return labeled_ct
def expand_userinputs(labeled_ct):
    """Expand the user_input dict column into separate dataframe columns.

    Returns a widened copy of labeled_ct with one new column per label key.
    Asserts that the expansion preserves the row count and adds exactly
    three columns.

    Bug fix: both assert messages used a single %s with a two-element tuple,
    so a failing assert raised TypeError ("not all arguments converted")
    instead of the intended AssertionError message.
    """
    label_only = pd.DataFrame(labeled_ct.user_input.to_list(), index=labeled_ct.index)
    disp.display(label_only.head())
    expanded_ct = pd.concat([labeled_ct, label_only], axis=1)
    assert len(expanded_ct) == len(labeled_ct), \
        ("Mismatch after expanding labels, expanded_ct.rows = %s != labeled_ct.rows = %s" %
            (len(expanded_ct), len(labeled_ct)))
    print("After expanding, columns went from %s -> %s" %
        (len(labeled_ct.columns), len(expanded_ct.columns)))
    assert len(expanded_ct.columns) == len(labeled_ct.columns) + 3, \
        ("Mismatch after expanding labels, expanded_ct.columns = %s != labeled_ct.columns = %s" %
            (len(expanded_ct.columns), len(labeled_ct.columns)))
    disp.display(expanded_ct.head())
    return expanded_ct
def get_quality_text(participant_ct_df, expanded_ct):
    """Summarize labeled-trip coverage vs. all participant trips as a display string."""
    n_labeled = len(expanded_ct)
    n_labeled_users = len(expanded_ct.user_id.unique())
    n_all = len(participant_ct_df)
    n_all_users = len(participant_ct_df.user_id.unique())
    pct = (n_labeled * 100) / n_all
    quality_text = "Based on %s confirmed trips from %d users\nof %s total trips from %d users (%.2f%%)" % (
        n_labeled, n_labeled_users, n_all, n_all_users, pct)
    print(quality_text)
    return quality_text
def get_file_suffix(year, month, program):
    """Build a filename suffix like "_2020_05_program" from the non-None parts.

    Bug fix: the original wrote `suffix = suffix + "_%02d" % month if month is
    not None else ""` — the ternary binds the entire right-hand side, so a
    None month (or program) silently reset the suffix accumulated so far,
    discarding the year part.
    """
    suffix = ""
    if year is not None:
        suffix += "_%04d" % year
    if month is not None:
        suffix += "_%02d" % month
    if program is not None:
        suffix += "_%s" % program
    print(suffix)
    return suffix
def get_quality_text_ebike(all_confirmed_df, ebike_ct_df):
    """Summarize eBike-trip coverage vs. all confirmed trips as a display string."""
    stats = (len(ebike_ct_df),
             len(ebike_ct_df.user_id.unique()),
             len(all_confirmed_df),
             len(all_confirmed_df.user_id.unique()),
             (len(ebike_ct_df) * 100) / len(all_confirmed_df))
    quality_text = ("Based on %s eBike trips from %d users\n"
                    "of %s confirmed trips (all modes) from %d users (%.2f%%)" % stats)
    print(quality_text)
    return quality_text
def data_quality_check(expanded_ct):
    """Clean label inconsistencies in place and return the dataframe.

    1. Drop rows where mode_confirm is pilot_ebike and replaced_mode is
       pilot_ebike or same_mode (self-replacement makes no sense).
    2. Resolve remaining 'same_mode' replaced_mode entries to the trip's
       mode_confirm, for the energy-impact calculation.
    """
    is_ebike = expanded_ct['mode_confirm'] == 'pilot_ebike'
    self_replaced = expanded_ct['replaced_mode'].isin(['pilot_ebike', 'same_mode'])
    expanded_ct.drop(expanded_ct[is_ebike & self_replaced].index, inplace=True)
    same = expanded_ct['replaced_mode'] == 'same_mode'
    expanded_ct['replaced_mode'] = np.where(same, expanded_ct['mode_confirm'], expanded_ct['replaced_mode'])
    return expanded_ct
def unit_conversions(df):
    """Add a distance_miles column derived from the meter-valued distance column."""
    df['distance_miles'] = df["distance"].mul(0.00062)  # meters to miles
def energy_intensity(df, df1, distance, col1, col2):
    """ Inputs:
    df = trip dataframe
    df1 = energy-factor lookup table (one row per mode)
    distance = distance column name (kept for interface parity; unused here)
    col1 = Replaced_mode
    col2 = Mode_confirm
    Maps each mode in df[col1] / df[col2] to its energy-intensity factor,
    CO2 factor and fixed per-trip kWH, adding ei_/CO2_/ei_trip_ columns
    for both mode columns. Returns df.
    """
    df1 = df1.copy()
    for col in (col1, col2):
        df1[col] = df1['mode']
        ei_map = dict(zip(df1[col], df1['energy_intensity_factor']))
        co2_map = dict(zip(df1[col], df1['CO2_factor']))
        trip_map = dict(zip(df1[col], df1['(kWH)/trip']))
        df['ei_' + col] = df[col].map(ei_map)
        df['CO2_' + col] = df[col].map(co2_map)
        df['ei_trip_' + col] = df[col].map(trip_map)
    return df
def energy_impact_kWH(df, distance, col1, col2):
    """ Inputs:
    df = dataframe with data
    distance = distance column (miles)
    col1 = Replaced_mode
    col2 = Mode_confirm
    Adds a per-mode energy column <col>_EI(kWH) selected by the mode's fuel
    type, plus their rounded difference as Energy_Impact(kWH). Returns df.
    """
    BTU_TO_KWH = 0.000293071  # 1 BTU = 0.000293071 kWH

    def _energy(col, fuel_col):
        # Gasoline and diesel share the same BTU-based formula; electric
        # adds the fixed per-trip kWH overhead. Unknown fuels get 0.
        fossil = df[distance] * df['ei_' + col] * BTU_TO_KWH
        electric = df[distance] * df['ei_' + col] + df['ei_trip_' + col]
        conditions = [df[fuel_col] == 'gasoline',
                      df[fuel_col] == 'diesel',
                      df[fuel_col] == 'electric']
        return np.select(conditions, [fossil, fossil, electric])

    df[col1 + '_EI(kWH)'] = _energy(col1, 'Replaced_mode_fuel')
    df[col2 + '_EI(kWH)'] = _energy(col2, 'Mode_confirm_fuel')
    df['Energy_Impact(kWH)'] = round(df[col1 + '_EI(kWH)'] - df[col2 + '_EI(kWH)'], 3)
    return df
def CO2_impact_lb(df,distance,col1,col2):
    """ Inputs:
    df = dataframe with data
    distance = distance in miles
    col1 = Replaced_mode
    col2= Mode_confirm

    Adds per-mode CO2 columns <col>_lb_CO2 selected by fuel type and their
    rounded difference as CO2_Impact(lb). Returns df.
    """
    # Select the emission formula by the fuel type of each mode column.
    conditions_col1 = [(df['Replaced_mode_fuel'] =='gasoline'),
                   (df['Replaced_mode_fuel'] == 'diesel'),
                   (df['Replaced_mode_fuel'] == 'electric')]
    conditions_col2 = [(df['Mode_confirm_fuel'] =='gasoline'),
                   (df['Mode_confirm_fuel'] == 'diesel'),
                   (df['Mode_confirm_fuel'] == 'electric')]
    # Fossil fuels: energy-intensity * distance scaled by 1e-6, times the
    # mode's CO2 factor. NOTE(review): the 1e-6 and 1e-3 scale factors are
    # presumably unit conversions (BTU->MMBTU, Wh->kWh) — confirm against
    # the energy-factor table's units.
    gasoline_col1 = (df[distance]*df['ei_'+col1]*0.000001)* df['CO2_Replaced_mode']
    diesel_col1 = (df[distance]*df['ei_'+col1]*0.000001)* df['CO2_Replaced_mode']
    # Electric: total trip kWH (incl. fixed per-trip overhead) * CO2 factor.
    electric_col1 = (((df[distance]*df['ei_'+col1])+df['ei_trip_'+col1])*0.001)*df['CO2_'+col1]
    gasoline_col2 = (df[distance]*df['ei_'+col2]*0.000001)* df['CO2_Mode_confirm']
    diesel_col2 = (df[distance]*df['ei_'+col2]*0.000001)* df['CO2_Mode_confirm']
    electric_col2 = (((df[distance]*df['ei_'+col2])+df['ei_trip_'+col2])*0.001)*df['CO2_'+col2]
    values_col1 = [gasoline_col1,diesel_col1,electric_col1]
    values_col2 = [gasoline_col2,diesel_col2,electric_col2]
    # np.select pairs each fuel condition with its formula; unknown fuels -> 0.
    df[col1+'_lb_CO2'] = np.select(conditions_col1, values_col1)
    df[col2+'_lb_CO2'] = np.select(conditions_col2, values_col2)
    df['CO2_Impact(lb)'] = round((df[col1+'_lb_CO2'] - df[col2+'_lb_CO2']),3)
    return df
| [
"IPython.display.display",
"numpy.select",
"emission.core.get_database.get_profile_db",
"numpy.where",
"emission.core.wrapper.localdate.LocalDate",
"emission.storage.timeseries.abstract_timeseries.TimeSeries.get_aggregate_time_series",
"emission.storage.timeseries.tcquery.TimeComponentQuery",
"pandas.... | [((786, 853), 'emission.storage.timeseries.tcquery.TimeComponentQuery', 'esttc.TimeComponentQuery', (['"""data.start_local_dt"""', 'query_ld', 'query_ld'], {}), "('data.start_local_dt', query_ld, query_ld)\n", (810, 853), True, 'import emission.storage.timeseries.tcquery as esttc\n'), ((1308, 1342), 'IPython.display.display', 'disp.display', (['participant_uuid_str'], {}), '(participant_uuid_str)\n', (1320, 1342), True, 'import IPython.display as disp\n'), ((1420, 1463), 'emission.storage.timeseries.abstract_timeseries.TimeSeries.get_aggregate_time_series', 'esta.TimeSeries.get_aggregate_time_series', ([], {}), '()\n', (1461, 1463), True, 'import emission.storage.timeseries.abstract_timeseries as esta\n'), ((2419, 2462), 'pandas.concat', 'pd.concat', (['[labeled_ct, label_only]'], {'axis': '(1)'}), '([labeled_ct, label_only], axis=1)\n', (2428, 2462), True, 'import pandas as pd\n'), ((4780, 4897), 'numpy.where', 'np.where', (["(expanded_ct['replaced_mode'] == 'same_mode')", "expanded_ct['mode_confirm']", "expanded_ct['replaced_mode']"], {}), "(expanded_ct['replaced_mode'] == 'same_mode', expanded_ct[\n 'mode_confirm'], expanded_ct['replaced_mode'])\n", (4788, 4897), True, 'import numpy as np\n'), ((7197, 7236), 'numpy.select', 'np.select', (['conditions_col1', 'values_col1'], {}), '(conditions_col1, values_col1)\n', (7206, 7236), True, 'import numpy as np\n'), ((7263, 7302), 'numpy.select', 'np.select', (['conditions_col2', 'values_col2'], {}), '(conditions_col2, values_col2)\n', (7272, 7302), True, 'import numpy as np\n'), ((8670, 8709), 'numpy.select', 'np.select', (['conditions_col1', 'values_col1'], {}), '(conditions_col1, values_col1)\n', (8679, 8709), True, 'import numpy as np\n'), ((8735, 8774), 'numpy.select', 'np.select', (['conditions_col2', 'values_col2'], {}), '(conditions_col2, values_col2)\n', (8744, 8774), True, 'import numpy as np\n'), ((616, 646), 'emission.core.wrapper.localdate.LocalDate', 'ecwl.LocalDate', (["{'year': year}"], {}), 
"({'year': year})\n", (630, 646), True, 'import emission.core.wrapper.localdate as ecwl\n'), ((730, 776), 'emission.core.wrapper.localdate.LocalDate', 'ecwl.LocalDate', (["{'year': year, 'month': month}"], {}), "({'year': year, 'month': month})\n", (744, 776), True, 'import emission.core.wrapper.localdate as ecwl\n'), ((1145, 1165), 'emission.core.get_database.get_profile_db', 'edb.get_profile_db', ([], {}), '()\n', (1163, 1165), True, 'import emission.core.get_database as edb\n')] |
import os
import numpy as np
import random
def identify(root):
    """Return True when the path refers to a genuine-face ("live" / 真人) sample."""
    return 'live' in root or '真人' in root
def findsamename(root):
    """Pair DOLP and S0 images under `root` that share a base file name.

    Strips the optional '.png_s0' / '.png_dolp' infixes before intersecting
    the two directory listings, then rebuilds the real file names.
    Returns four lists of (path, label) pairs:
    (dolp_real, dolp_fake, s0_real, s0_fake), label 1 = live face.
    """
    dolpfiles = os.listdir(os.path.join(root,'DOLP'))
    s0files = os.listdir(os.path.join(root,'S0'))
    # Detect the '.png_s0' naming scheme (checked on the first entry only)
    # and strip the infix so names are comparable across modalities.
    s0flag = False
    if '.png_s0' in s0files[0]:
        s0flag=True
        _s0files = []
        for s0file in s0files:
            temp = s0file.split('.png_s0')
            _s0files.append(temp[0]+temp[1])
        s0files = _s0files
    # Same detection for the '.png_dolp' scheme.
    dolpflag = False
    if '.png_dolp' in dolpfiles[0]:
        dolpflag = True
        _dolpfiles = []
        for dolpfile in dolpfiles:
            temp = dolpfile.split('.png_dolp')
            _dolpfiles.append(temp[0]+temp[1])
        dolpfiles = _dolpfiles
    # Keep only base names present in both modalities.
    dolpset,s0set = set(dolpfiles),set(s0files)
    subfiles = list(dolpset&s0set)
    _s0files = []
    for s0file in subfiles:
        if s0flag:
            # Re-insert the '.png_s0' infix before the extension.
            temp = s0file.split('.')
            name = os.path.join(root,'S0',temp[0]+'.png_s0.'+temp[1])
            _s0files.append((name,int(identify(name))))
        else:
            name = os.path.join(root,'S0',s0file)
            _s0files.append((name,int(identify(name))))
    s0files = np.array(_s0files)
    _dolpfiles = []
    for dolpfile in subfiles:
        if dolpflag:
            # Re-insert the '.png_dolp' infix before the extension.
            temp = dolpfile.split('.')
            name = os.path.join(root,'DOLP',temp[0]+'.png_dolp.'+temp[1])
            _dolpfiles.append((name,int(identify(name))))
        else:
            name = os.path.join(root,'DOLP',dolpfile)
            _dolpfiles.append((name,int(identify(name))))
    dolpfiles = np.array(_dolpfiles)
    # Split by label; np.array stored the int labels as strings, hence '0'/'1'.
    dolpreal,dolpfake,s0real,s0fake = [],[],[],[]
    ind_ = np.where(dolpfiles[:,1]=='0')[0]
    ind = np.where(dolpfiles[:,1]=='1')[0]
    dolpreal = list(dolpfiles[ind])
    dolpfake = list(dolpfiles[ind_])
    s0real = list(s0files[ind])
    s0fake = list(s0files[ind_])
    return dolpreal,dolpfake,s0real,s0fake
def searchalldata(root,flag=False):
    """Recursively collect (path, label) pairs for DOLP and S0 images.

    flag=True restricts the walk to the three fixed dataset roots instead of
    listing `root`. Returns (dolp_real, dolp_fake, s0_real, s0_fake);
    label 1 = live face (decided by identify() on the directory path).
    """
    dolpreal,dolpfake,s0real,s0fake = [],[],[],[]
    if flag:
        # Fixed entry points of the three sub-datasets.
        items = []
        items.append(os.path.join('dataset1_live','HUT','Only_Face'))
        items.append(os.path.join('dataset2_all'))
        items.append(os.path.join('dataset3_attack','HUT','Only_Face'))
    else:
        items = os.listdir(root)
    for item in items:
        # if item.__len__() <=1 or item == 'Deg' or '.txt' in item or 'S2' in item or 'S3' in item:
        #     continue
        if '.' in item:
            # Leaf file: classify by the modality and label of its directory.
            if identify(root):
                if 'DOLP' in root:
                    dolpreal.append((os.path.join(root,item),1))
                elif 'S0' in root:
                    s0real.append((os.path.join(root,item),1))
            else:
                if 'DOLP' in root:
                    dolpfake.append((os.path.join(root,item),0))
                elif 'S0' in root:
                    s0fake.append((os.path.join(root,item),0))
        else:
            if 'dataset3_attack' in item or 'HUT' in item:
                # These roots hold paired DOLP/S0 folders -> match by name.
                tmp1,tmp2,tmp3,tmp4 = findsamename(os.path.join(root,item))
                dolpreal = dolpreal + tmp1
                dolpfake = dolpfake + tmp2
                s0real = s0real + tmp3
                s0fake = s0fake + tmp4
            else:
                # Otherwise recurse one directory level down.
                tmp1,tmp2,tmp3,tmp4 = searchalldata(os.path.join(root,item))
                # tmp1,tmp2 = searchalldata(os.path.join(root,item))
                dolpreal = dolpreal + tmp1
                dolpfake = dolpfake + tmp2
                s0real = s0real + tmp3
                s0fake = s0fake + tmp4
    return dolpreal,dolpfake,s0real,s0fake
def shuffle_split_data(data, testratio=0.2):
    """Shuffle `data` in place and split it into (train, test) by testratio."""
    random.shuffle(data)
    n_test = int(len(data) * testratio)
    return data[n_test:], data[:n_test]
def gendatalist(root,testratio=0.2,mode='gen'):
    """Build train/test file lists for the DOLP and S0 modalities.

    mode='gen': mixes real and fake samples into both splits.
    other modes: splits contain only real samples (e.g. for visualization).
    Returns (dolp_train, dolp_test, s0_train, s0_test) arrays of (path, label).
    """
    dolpreal,dolpfake,s0real,s0fake = searchalldata(root,True)
    indreal = np.arange(0,len(dolpreal))
    dolpreal,dolpfake,s0real,s0fake = np.array(dolpreal),np.array(dolpfake),np.array(s0real),np.array(s0fake)
    # Real DOLP/S0 pairs share indices, so one shuffled index list keeps the
    # two modalities aligned across the split.
    indrealtrain,indrealtest = shuffle_split_data(indreal,testratio)
    if mode == 'gen':
        indfake = np.arange(0,len(dolpfake))
        indfaketrain,indfaketest = shuffle_split_data(indfake,testratio)
        dolptraindir,dolptestdir = np.vstack((dolpreal[indrealtrain],dolpfake[indfaketrain])),np.vstack((dolpreal[indrealtest],dolpfake[indfaketest]))
        s0traindir,s0testdir = np.vstack((s0real[indrealtrain],s0fake[indfaketrain])),np.vstack((s0real[indrealtest],s0fake[indfaketest]))
    else:
        dolptraindir,dolptestdir = dolpreal[indrealtrain],dolpreal[indrealtest]
        s0traindir,s0testdir = s0real[indrealtrain],s0real[indrealtest]
    return dolptraindir,dolptestdir,s0traindir,s0testdir
def save_train_test_to_txt(traindir, testdir, txtsavepath, filenameprefix, vis='gen'):
    """Write (path, label) pairs as "path,label" lines.

    vis != 'vis': writes <prefix>train.txt and <prefix>test.txt separately.
    vis == 'vis': writes both splits into a single <prefix>vis.txt.
    """
    # with open(os.path.join(txtsavepath,filenameprefix+'all.txt'),'w') as f:
    #     all_ = traindir + testdir
    #     for line in all_:
    #         f.write(line[0]+','+str(line[1])+'\n')
    def _dump(pairs, suffix, filemode):
        # One "path,label" line per pair.
        with open(os.path.join(txtsavepath, filenameprefix + suffix), filemode) as f:
            for path, label in pairs:
                f.write(path + ',' + str(label) + '\n')
    if vis != 'vis':
        _dump(traindir, 'train.txt', 'w')
        _dump(testdir, 'test.txt', 'w')
    else:
        _dump(traindir, 'vis.txt', 'w')
        _dump(testdir, 'vis.txt', 'a+')
def change(root):
    """One-off fixup: rename S0 files under dataset2_all to match DOLP names.

    Only touches '真人' (live) subfolders; pairs the two listings by the
    numeric '(n)' counter embedded in the file names.
    """
    root = os.path.join(root,'dataset2_all')
    types = os.listdir(root)
    for typ in types:
        if typ == '1' or typ == '2':
            continue;
        else:
            x1 = os.listdir(os.path.join(root,typ,'DOLP'))
            x2 = os.listdir(os.path.join(root,typ,'S0'))
            if '真人' in typ:
                # Sort both listings by the number inside parentheses.
                x1.sort(key= lambda x:int(x[x.find('(')+1:x.find(')')]))
                x2.sort(key= lambda x:int(x[x.find('(')+1:x.find(')')]))
                # Walk backwards and rename each mismatched S0 file to the
                # corresponding DOLP name.
                for i in range(len(x1)-1,-1,-1):
                    if x1[i] == x2[i]:
                        continue
                    os.rename(os.path.join(root,typ,'S0',x2[i]),os.path.join(root,typ,'S0',x1[i]))
                    pass
            pass
if __name__ == '__main__':
    # Root of the polarization dataset relative to the working directory.
    datadir = os.path.join(os.path.abspath('.'),'datasets','multi_modal_Polarization')
    # 'gen' = write train/test lists, 'vis' = one combined list, 'change' = rename fixup.
    mode = 'gen'
    if mode == 'gen' or mode == 'vis':
        dolptraindir,dolptestdir,s0traindir,s0testdir = gendatalist(datadir,0.3,mode)
        if mode == 'gen':
            save_train_test_to_txt(dolptraindir,dolptestdir,datadir,'DOLP_',mode)
            save_train_test_to_txt(s0traindir,s0testdir,datadir,'S0_',mode)
        elif mode == 'vis':
            save_train_test_to_txt(dolptraindir,dolptestdir,datadir,'gen_DOLP_',mode)
            save_train_test_to_txt(s0traindir,s0testdir,datadir,'gen_S0_',mode)
    elif mode == 'change':
        change(datadir)
    pass
"os.listdir",
"random.shuffle",
"numpy.where",
"os.path.join",
"numpy.array",
"numpy.vstack",
"os.path.abspath"
] | [((1244, 1262), 'numpy.array', 'np.array', (['_s0files'], {}), '(_s0files)\n', (1252, 1262), True, 'import numpy as np\n'), ((1652, 1672), 'numpy.array', 'np.array', (['_dolpfiles'], {}), '(_dolpfiles)\n', (1660, 1672), True, 'import numpy as np\n'), ((3700, 3720), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (3714, 3720), False, 'import random\n'), ((5933, 5967), 'os.path.join', 'os.path.join', (['root', '"""dataset2_all"""'], {}), "(root, 'dataset2_all')\n", (5945, 5967), False, 'import os\n'), ((5979, 5995), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (5989, 5995), False, 'import os\n'), ((192, 218), 'os.path.join', 'os.path.join', (['root', '"""DOLP"""'], {}), "(root, 'DOLP')\n", (204, 218), False, 'import os\n'), ((244, 268), 'os.path.join', 'os.path.join', (['root', '"""S0"""'], {}), "(root, 'S0')\n", (256, 268), False, 'import os\n'), ((1734, 1766), 'numpy.where', 'np.where', (["(dolpfiles[:, 1] == '0')"], {}), "(dolpfiles[:, 1] == '0')\n", (1742, 1766), True, 'import numpy as np\n'), ((1777, 1809), 'numpy.where', 'np.where', (["(dolpfiles[:, 1] == '1')"], {}), "(dolpfiles[:, 1] == '1')\n", (1785, 1809), True, 'import numpy as np\n'), ((2334, 2350), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2344, 2350), False, 'import os\n'), ((4052, 4070), 'numpy.array', 'np.array', (['dolpreal'], {}), '(dolpreal)\n', (4060, 4070), True, 'import numpy as np\n'), ((4071, 4089), 'numpy.array', 'np.array', (['dolpfake'], {}), '(dolpfake)\n', (4079, 4089), True, 'import numpy as np\n'), ((4090, 4106), 'numpy.array', 'np.array', (['s0real'], {}), '(s0real)\n', (4098, 4106), True, 'import numpy as np\n'), ((4107, 4123), 'numpy.array', 'np.array', (['s0fake'], {}), '(s0fake)\n', (4115, 4123), True, 'import numpy as np\n'), ((6686, 6706), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (6701, 6706), False, 'import os\n'), ((1003, 1059), 'os.path.join', 'os.path.join', (['root', '"""S0"""', "(temp[0] + '.png_s0.' 
+ temp[1])"], {}), "(root, 'S0', temp[0] + '.png_s0.' + temp[1])\n", (1015, 1059), False, 'import os\n'), ((1143, 1175), 'os.path.join', 'os.path.join', (['root', '"""S0"""', 's0file'], {}), "(root, 'S0', s0file)\n", (1155, 1175), False, 'import os\n'), ((1397, 1457), 'os.path.join', 'os.path.join', (['root', '"""DOLP"""', "(temp[0] + '.png_dolp.' + temp[1])"], {}), "(root, 'DOLP', temp[0] + '.png_dolp.' + temp[1])\n", (1409, 1457), False, 'import os\n'), ((1543, 1579), 'os.path.join', 'os.path.join', (['root', '"""DOLP"""', 'dolpfile'], {}), "(root, 'DOLP', dolpfile)\n", (1555, 1579), False, 'import os\n'), ((2136, 2185), 'os.path.join', 'os.path.join', (['"""dataset1_live"""', '"""HUT"""', '"""Only_Face"""'], {}), "('dataset1_live', 'HUT', 'Only_Face')\n", (2148, 2185), False, 'import os\n'), ((2206, 2234), 'os.path.join', 'os.path.join', (['"""dataset2_all"""'], {}), "('dataset2_all')\n", (2218, 2234), False, 'import os\n'), ((2257, 2308), 'os.path.join', 'os.path.join', (['"""dataset3_attack"""', '"""HUT"""', '"""Only_Face"""'], {}), "('dataset3_attack', 'HUT', 'Only_Face')\n", (2269, 2308), False, 'import os\n'), ((4373, 4432), 'numpy.vstack', 'np.vstack', (['(dolpreal[indrealtrain], dolpfake[indfaketrain])'], {}), '((dolpreal[indrealtrain], dolpfake[indfaketrain]))\n', (4382, 4432), True, 'import numpy as np\n'), ((4432, 4489), 'numpy.vstack', 'np.vstack', (['(dolpreal[indrealtest], dolpfake[indfaketest])'], {}), '((dolpreal[indrealtest], dolpfake[indfaketest]))\n', (4441, 4489), True, 'import numpy as np\n'), ((4520, 4575), 'numpy.vstack', 'np.vstack', (['(s0real[indrealtrain], s0fake[indfaketrain])'], {}), '((s0real[indrealtrain], s0fake[indfaketrain]))\n', (4529, 4575), True, 'import numpy as np\n'), ((4575, 4628), 'numpy.vstack', 'np.vstack', (['(s0real[indrealtest], s0fake[indfaketest])'], {}), '((s0real[indrealtest], s0fake[indfaketest]))\n', (4584, 4628), True, 'import numpy as np\n'), ((5215, 5270), 'os.path.join', 'os.path.join', (['txtsavepath', 
"(filenameprefix + 'train.txt')"], {}), "(txtsavepath, filenameprefix + 'train.txt')\n", (5227, 5270), False, 'import os\n'), ((5395, 5449), 'os.path.join', 'os.path.join', (['txtsavepath', "(filenameprefix + 'test.txt')"], {}), "(txtsavepath, filenameprefix + 'test.txt')\n", (5407, 5449), False, 'import os\n'), ((5574, 5627), 'os.path.join', 'os.path.join', (['txtsavepath', "(filenameprefix + 'vis.txt')"], {}), "(txtsavepath, filenameprefix + 'vis.txt')\n", (5586, 5627), False, 'import os\n'), ((5752, 5805), 'os.path.join', 'os.path.join', (['txtsavepath', "(filenameprefix + 'vis.txt')"], {}), "(txtsavepath, filenameprefix + 'vis.txt')\n", (5764, 5805), False, 'import os\n'), ((6120, 6151), 'os.path.join', 'os.path.join', (['root', 'typ', '"""DOLP"""'], {}), "(root, typ, 'DOLP')\n", (6132, 6151), False, 'import os\n'), ((6193, 6222), 'os.path.join', 'os.path.join', (['root', 'typ', '"""S0"""'], {}), "(root, typ, 'S0')\n", (6205, 6222), False, 'import os\n'), ((3090, 3114), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, item)\n', (3102, 3114), False, 'import os\n'), ((3349, 3373), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, item)\n', (3361, 3373), False, 'import os\n'), ((6531, 6567), 'os.path.join', 'os.path.join', (['root', 'typ', '"""S0"""', 'x2[i]'], {}), "(root, typ, 'S0', x2[i])\n", (6543, 6567), False, 'import os\n'), ((6565, 6601), 'os.path.join', 'os.path.join', (['root', 'typ', '"""S0"""', 'x1[i]'], {}), "(root, typ, 'S0', x1[i])\n", (6577, 6601), False, 'import os\n'), ((2624, 2648), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, item)\n', (2636, 2648), False, 'import os\n'), ((2840, 2864), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, item)\n', (2852, 2864), False, 'import os\n'), ((2722, 2746), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, item)\n', (2734, 2746), False, 'import os\n'), ((2938, 2962), 'os.path.join', 'os.path.join', (['root', 'item'], {}), '(root, 
item)\n', (2950, 2962), False, 'import os\n')] |
#
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import numpy as np
import joblib
import json
import argparse
import dmm_input
#from dmm_model import inference_by_sample, loss, p_filter, sampleVariationalDist
from dmm_model import inference, loss, p_filter, sampleVariationalDist
from dmm_model import construct_placeholder, computeEmission, computeVariationalDist
import hyopt as hy
from attractor import field,potential,make_griddata_discrete,compute_discrete_transition_mat
#FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
#tf.app.flags.DEFINE_boolean('use_fp16', False,"""Train the model using fp16.""")
class dotdict(dict):
    """Dictionary subclass exposing keys as attributes (``d.key``).

    Missing attributes resolve to ``None`` (the ``dict.get`` contract)
    instead of raising ``AttributeError``.
    """

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails; mirror dict.get
        # so absent keys yield None rather than raising.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
class NumPyArangeEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays.

    NumPy integer/float scalars become Python ``int``/``float`` and
    ``ndarray`` becomes a nested list; anything else defers to the base
    encoder (which raises ``TypeError`` for unknown types).
    """

    def default(self, obj):
        # np.integer / np.floating are the abstract bases of all widths
        # (int16/int32/int64, float16/float32/float64, ...), generalizing
        # the original explicit int32/int64/float32/float64 checks.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)
def get_default_config():
    """Return the default configuration dictionary for training/inference.

    Fixes relative to the original:
    - removed trailing commas that accidentally stored 1-tuples such as
      ``(True,)`` for the three ``potential_*`` flags,
    - removed duplicate assignments ("state_type" and "sampling_type" were
      each assigned several times; only the final value was effective).
    """
    config = {}
    # data and network
    config["dim"] = 2
    # training
    config["epoch"] = 10
    config["patience"] = 5
    config["batch_size"] = 100
    config["alpha"] = 1.0
    config["learning_rate"] = 1.0e-2
    config["curriculum_alpha"] = False
    config["epoch_interval_save"] = 10  # 100
    config["epoch_interval_print"] = 10  # 100
    config["sampling_tau"] = 10  # 0.1
    config["normal_max_var"] = 5.0  # 1.0
    config["normal_min_var"] = 1.0e-5
    config["zero_dynamics_var"] = 1.0
    config["pfilter_sample_size"] = 10
    config["pfilter_proposal_sample_size"] = 1000
    config["pfilter_save_sample_num"] = 100
    # dataset
    config["train_test_ratio"] = [0.8, 0.2]
    config["data_train_npy"] = None
    config["mask_train_npy"] = None
    config["data_test_npy"] = None
    config["mask_test_npy"] = None
    # save/load model
    config["save_model_path"] = None
    config["load_model"] = None
    config["save_result_train"] = None
    config["save_result_test"] = None
    config["save_result_filter"] = None
    # model structure (final effective values of the original duplicates)
    config["state_type"] = "normal"       # alternative: "discrete"
    config["sampling_type"] = "normal"
    config["emission_type"] = "normal"
    config["dynamics_type"] = "distribution"
    config["pfilter_type"] = "trained_dynamics"
    config["time_major"] = True
    config["steps_npy"] = None
    config["steps_test_npy"] = None
    config["potential_enabled"] = True
    config["potential_grad_transition_enabled"] = True
    config["potential_nn_enabled"] = False
    # generate json
    # fp = open("config.json", "w")
    # json.dump(config, fp, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
    return config
def construct_feed(idx, data, placeholders, alpha, is_train=False):
    """Build a feed_dict mapping each known placeholder to its value.

    idx selects the mini-batch rows of `data`; noise placeholders are
    filled with freshly drawn samples (Gumbel noise for discrete state,
    standard normal otherwise).
    """
    hy_param = hy.get_hyperparameter()
    dim = hy_param["dim"]
    dim_emit = hy_param["dim_emit"]  # read for parity with the hyperparameter schema
    n_steps = hy_param["n_steps"]
    num_potential_points = 100
    n_samples = len(idx)

    dropout = 0.0
    if is_train:
        # default training dropout is 0.5 unless the hyperparameters say otherwise
        dropout = hy_param.get("dropout_rate", 0.5)

    feed = {}
    for name, ph in placeholders.items():
        if name == "x":
            feed[ph] = data.x[idx, :, :]
        elif name == "m":
            feed[ph] = data.m[idx, :, :]
        elif name == "s":
            feed[ph] = data.s[idx]
        elif name == "alpha":
            feed[ph] = alpha
        elif name == "vd_eps":
            if hy_param["state_type"] == "discrete":
                # Gumbel noise: -log(-log(U)), U ~ Uniform(0,1), clipped away from {0,1}
                u = np.random.uniform(1.0e-10, 1.0 - 1.0e-10, (n_samples, n_steps, dim))
                feed[ph] = -np.log(-np.log(u))
            else:
                feed[ph] = np.random.standard_normal((n_samples, n_steps, dim))
        elif name == "tr_eps":
            feed[ph] = np.random.standard_normal((n_samples, n_steps, dim))
        elif name == "potential_points":
            feed[ph] = np.random.standard_normal((num_potential_points, dim))
        elif name == "dropout_rate":
            feed[ph] = dropout
        elif name == "is_train":
            feed[ph] = is_train
    return feed
def print_variables():
    """Print the names of all trainable variables, grouped by model scope."""
    groups = (
        ("emission", "emission_var"),
        ("variational dist.", "variational_dist_var"),
        ("transition", "transition_var"),
        ("potential", "potential_var"),
    )
    for label, scope in groups:
        print("## %s variables" % label)
        for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope):
            print(var.name)
    return
def compute_alpha(config, i):
    """Annealing schedule for the loss weight alpha.

    Without curriculum learning the configured alpha is returned as-is.
    With curriculum learning, alpha is 0 for the first 10% of epochs,
    rises exponentially (time constant 100) until 90%, then saturates
    at the configured maximum.
    """
    alpha_max = config["alpha"]
    if not config["curriculum_alpha"]:
        return alpha_max
    begin_tau = config["epoch"] * 0.1
    end_tau = config["epoch"] * 0.9
    tau = 100.0
    if i < begin_tau:
        return 0.0
    if i < end_tau:
        return alpha_max * (1.0 - np.exp(-(i - begin_tau) / tau))
    return alpha_max
class EarlyStopping:
    """Track validation cost across epochs and signal when to stop.

    Stopping fires once the validation cost has increased (relative to
    the previous epoch) ``config["patience"]`` times in a row; a patience
    of 0 or less disables stopping entirely.
    """

    def __init__(self, config, **kwargs):
        self.prev_validation_cost = None  # cost observed at the previous call
        self.validation_count = 0         # consecutive increases so far
        self.config = config

    def evaluate_validation(self, validation_cost, info):
        """Record one epoch's validation cost; return True when training should stop."""
        patience = self.config["patience"]
        increased = (self.prev_validation_cost is not None
                     and self.prev_validation_cost < validation_cost)
        if increased:
            self.validation_count += 1
            if 0 < patience <= self.validation_count:
                self.print_info(info)
                print("[stop] by validation")
                return True
        else:
            # any non-increase resets the streak
            self.validation_count = 0
        self.prev_validation_cost = validation_cost
        return False

    def print_info(self, info):
        """Print a human-readable epoch summary plus a machine-parsable [LOG] line."""
        epoch = info["epoch"]
        tc, vc = info["training_cost"], info["validation_cost"]
        te, ve = info["training_error"], info["validation_error"]
        tac = info["training_all_costs"]
        vac = info["validation_all_costs"]
        alpha = info["alpha"]
        save_path = info["save_path"]
        if save_path is None:
            print("epoch %d, training cost %g (error=%g), validation cost %g (error=%g) (count=%d) "
                  % (epoch, tc, te, vc, ve, self.validation_count))
        else:
            print("epoch %d, training cost %g (error=%g), validation cost %g (error=%g) (count=%d) ([SAVE] %s) "
                  % (epoch, tc, te, vc, ve, self.validation_count, save_path))
        # this line was identical in both branches of the original, so it is printed once here
        print("[LOG] %d, %g,%g,%g,%g, %g, %g,%g,%g, %g,%g,%g" % (epoch,
            tc, vc, te, ve, alpha,
            tac[0], tac[1], tac[2],
            vac[0], vac[1], vac[2]))
def compute_cost(sess, placeholders, data, data_idx, output_cost, batch_size, alpha, is_train):
    """Evaluate the model's cost/error over `data` in mini-batches.

    Returns a dict with "cost" (summed over batches), "error" (averaged
    over batches) and "all_costs" (length-3 array, summed over batches).
    """
    total_cost = 0.0
    total_error = 0.0
    total_all_costs = np.zeros((3,), np.float32)
    n_batch = int(np.ceil(data.num * 1.0 / batch_size))
    for b in range(n_batch):
        batch_idx = data_idx[b * batch_size:(b + 1) * batch_size]
        feed = construct_feed(batch_idx, data, placeholders, alpha, is_train=is_train)
        total_cost += np.array(sess.run(output_cost["cost"], feed_dict=feed))
        total_all_costs += np.array(sess.run(output_cost["all_costs"], feed_dict=feed))
        # note: error is averaged across batches while cost is summed
        total_error += np.array(sess.run(output_cost["error"], feed_dict=feed)) / n_batch
    return {
        "cost": total_cost,
        "error": total_error,
        "all_costs": total_all_costs,
    }
def compute_cost_train_valid(sess, placeholders, train_data, valid_data,
                             train_idx, valid_idx, output_cost, batch_size, alpha):
    """Compute cost info for both data splits, prefixing keys by split name.

    Returns a single dict with keys like "training_cost", "validation_error", etc.
    """
    splits = (
        ("training_", train_data, train_idx, True),
        ("validation_", valid_data, valid_idx, False),
    )
    merged = {}
    for prefix, data, idx, is_train in splits:
        info = compute_cost(sess, placeholders, data, idx,
                            output_cost, batch_size, alpha, is_train=is_train)
        for key, val in info.items():
            merged[prefix + key] = val
    return merged
def compute_result(sess,placeholders,data,data_idx,outputs,batch_size,alpha):
    """Run the graph over all of `data` and gather selected outputs.

    Batch results are concatenated along axis 0 into whole-dataset arrays:
    "z_s" is a single array; the "*_params" entries are lists of arrays
    that are concatenated element-wise. Other keys in `outputs` are
    evaluated but discarded. Shapes of the collected results are printed
    for debugging before returning the results dict.
    """
    results={}
    n_batch=int(np.ceil(data.num*1.0/batch_size))
    for j in range(n_batch):
        idx=data_idx[j*batch_size:(j+1)*batch_size]
        feed_dict=construct_feed(idx,data,placeholders,alpha)
        for k,v in outputs.items():
            if v is not None:
                res=sess.run(v,feed_dict=feed_dict)
                if k in ["z_s"]:
                    # single array: append this batch along the sample axis
                    if k in results:
                        results[k]=np.concatenate([results[k],res],axis=0)
                    else:
                        results[k]=res
                elif k in ["obs_params","obs_pred_params", "z_params","z_pred_params"]:
                    # list of arrays (e.g. mean/variance): concatenate element-wise
                    if k in results:
                        for i in range(len(res)):
                            results[k][i]=np.concatenate([results[k][i],res[i]],axis=0)
                    else:
                        results[k]=res
    # debug print of collected shapes
    for k,v in results.items():
        if k in ["z_s"]:
            print(k,v.shape)
        elif k in ["obs_params","obs_pred", "z_params","z_pred"]:
            # NOTE(review): this list says "obs_pred"/"z_pred" while the
            # accumulation above uses "obs_pred_params"/"z_pred_params", so
            # those two entries are never printed — confirm which names were intended.
            if len(v)==1:
                print(k,v[0].shape)
            else:
                print(k,v[0].shape,v[1].shape)
    return results
def get_dim(config, hy_param, data):
    """Resolve the latent and emission dimensions; record both in hy_param.

    When config["dim"] is None the latent dimension defaults to the data's
    emission dimension and is written back into config.
    Returns (latent_dim, emission_dim).
    """
    emit_dim = data.dim
    latent_dim = config["dim"]
    if latent_dim is None:
        latent_dim = emit_dim
        config["dim"] = latent_dim
    hy_param["dim"] = latent_dim
    hy_param["dim_emit"] = emit_dim
    return latent_dim, emit_dim
def train(sess,config):
    """Train the deep Markov model and optionally save model/results.

    Builds the inference graph and loss, optimizes with Adam, applies
    early stopping on the validation cost, and periodically checkpoints.
    Side effects: writes checkpoints under config["save_model_path"], the
    final model to config["save_model"], hyperparameters via `hy`, and the
    training-set results (joblib) to config["save_result_train"].

    Fixes relative to the original:
    - removed a dead statement ``e = (train_data.x - results["obs_params"][0])**2``
      whose value was never used (and which could raise KeyError),
    - removed dead locals (validation_count / prev_validation_cost were
      superseded by the EarlyStopping object),
    - guarded the "save_model" / "save_result_train" lookups with .get()
      so a missing or None entry skips saving instead of raising.
    """
    hy_param=hy.get_hyperparameter()
    train_data,valid_data = dmm_input.load_data(config,with_shuffle=True,with_train_test=True)
    batch_size,n_batch=get_batch_size(config,hy_param,train_data)
    dim,dim_emit=get_dim(config,hy_param,train_data)
    n_steps=train_data.n_steps
    hy_param["n_steps"]=n_steps
    print("train_data_size:",train_data.num)
    print("batch_size :",batch_size)
    print("n_steps :",n_steps)
    print("dim_emit :",dim_emit)
    placeholders=construct_placeholder(config)
    control_params={
        "config":config,
        "placeholders":placeholders,
    }
    # inference graph and loss
    outputs=inference(n_steps,control_params=control_params)
    output_cost=loss(outputs,placeholders["alpha"],control_params=control_params)
    # ensure update ops (e.g. batch-norm statistics) run with each train step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = tf.train.AdamOptimizer(config["learning_rate"]).minimize(output_cost["cost"])
    print_variables()
    saver = tf.train.Saver()
    # initialize
    init = tf.global_variables_initializer()
    sess.run(init)
    train_idx=list(range(train_data.num))
    valid_idx=list(range(valid_data.num))
    ## training loop with early stopping
    alpha=None
    early_stopping=EarlyStopping(config)
    print("[LOG] epoch, cost,cost(valid.),error,error(valid.),alpha,cost(recons.),cost(temporal),cost(potential),cost(recons.,valid.),cost(temporal,valid),cost(potential,valid)")
    for i in range(config["epoch"]):
        np.random.shuffle(train_idx)
        alpha=compute_alpha(config,i)
        training_info=compute_cost_train_valid(sess,placeholders,
            train_data,valid_data,train_idx,valid_idx,
            output_cost,batch_size,alpha)
        # periodic checkpoint
        save_path=None
        if i%config["epoch_interval_save"] == 0:
            save_path = saver.save(sess, config["save_model_path"]+"/model.%05d.ckpt"%(i))
        training_info["epoch"]=i
        training_info["alpha"]=alpha
        training_info["save_path"]=save_path
        if i%config["epoch_interval_print"] == 0:
            early_stopping.print_info(training_info)
        # NOTE(review): `if i%100` is truthy for every epoch NOT divisible by
        # 100, i.e. early stopping is skipped only on multiples of 100 —
        # confirm whether `i % 100 == 0` was intended.
        if i%100:
            if early_stopping.evaluate_validation(training_info["validation_cost"],training_info):
                break
        # parameter update over shuffled mini-batches
        n_batch=int(np.ceil(train_data.num*1.0/batch_size))
        for j in range(n_batch):
            idx=train_idx[j*batch_size:(j+1)*batch_size]
            feed_dict=construct_feed(idx,train_data,placeholders,alpha,is_train=True)
            train_step.run(feed_dict=feed_dict)
    # final evaluation
    training_info=compute_cost_train_valid(sess,placeholders,
        train_data,valid_data,train_idx,valid_idx,
        output_cost,batch_size,alpha)
    print("[RESULT] training cost %g, validation cost %g, training error %g, validation error %g"%(
        training_info["training_cost"],
        training_info["validation_cost"],
        training_info["training_error"],
        training_info["validation_error"]))
    hy_param["evaluation"]=training_info
    # save final model + hyperparameters
    if config.get("save_model"):
        save_path = saver.save(sess, config["save_model"])
        print("[SAVE] %s"%(save_path))
    hy.save_hyperparameter()
    ## save reconstruction results for the training set
    if config.get("save_result_train"):
        results=compute_result(sess,placeholders,train_data,train_idx,outputs,batch_size,alpha)
        results["config"]=config
        print("[SAVE] result : ",config["save_result_train"])
        base_path = os.path.dirname(config["save_result_train"])
        os.makedirs(base_path,exist_ok=True)
        joblib.dump(results,config["save_result_train"])
#
def infer(sess,config):
    """Restore a trained model and evaluate it on the test split.

    Loads the test data, rebuilds the inference graph, restores weights
    from config["load_model"], prints the test cost, and (unless
    config["save_result_test"] is "") dumps the computed results with
    joblib to config["save_result_test"].
    """
    hy_param=hy.get_hyperparameter()
    _,test_data = dmm_input.load_data(config,with_shuffle=False,with_train_test=False,
            test_flag=True)
    batch_size,n_batch=get_batch_size(config,hy_param,test_data)
    dim,dim_emit=get_dim(config,hy_param,test_data)
    n_steps=test_data.n_steps
    hy_param["n_steps"]=n_steps
    print("test_data_size:",test_data.num)
    print("batch_size :",batch_size)
    print("n_steps :",n_steps)
    print("dim_emit :",dim_emit)
    alpha=config["alpha"]
    print("alpha :",alpha)
    placeholders=construct_placeholder(config)
    control_params={
        "config":config,
        "placeholders":placeholders,
        }
    # inference
    # NOTE(review): control_params is passed positionally here but as a
    # keyword in train() — confirm inference()'s second positional
    # parameter is control_params.
    outputs=inference(n_steps,control_params)
    # cost
    output_cost=loss(outputs,placeholders["alpha"],control_params=control_params)
    # restore trained weights
    saver = tf.train.Saver()
    print("[LOAD]",config["load_model"])
    saver.restore(sess,config["load_model"])
    test_idx=list(range(test_data.num))
    # evaluate on the full test set (no dropout)
    test_info=compute_cost(sess,placeholders,
        test_data,test_idx,
        output_cost,batch_size,alpha,is_train=False)
    print("cost: %g"%(test_info["cost"]))
    ## save results
    if config["save_result_test"]!="":
        results=compute_result(sess,placeholders,test_data,test_idx,outputs,batch_size,alpha)
        results["config"]=config
        print("[SAVE] result : ",config["save_result_test"])
        base_path = os.path.dirname(config["save_result_test"])
        os.makedirs(base_path,exist_ok=True)
        joblib.dump(results,config["save_result_test"])
def filter_discrete_forward(sess,config):
    """Discrete-state filtering/Viterbi decoding over the test data.

    Evaluates the emission parameters on a discrete state grid, scores the
    test observations under each state's Gaussian, combines them with the
    variational distribution and a smoothed transition matrix, and runs a
    Viterbi-style decoding. Results are dumped with joblib to
    config["save_result_filter"] (unless it is "").

    Fixes relative to the original:
    - replaced a bare ``except:`` (which also swallowed KeyboardInterrupt/
      SystemExit) with ``except Exception``,
    - removed a no-op triple-quoted string statement inside the innermost
      Viterbi loop (dead code executed on every iteration).
    """
    hy_param=hy.get_hyperparameter()
    _,test_data = dmm_input.load_data(config,with_shuffle=False,with_train_test=False,test_flag=True)
    batch_size,n_batch=get_batch_size(config,hy_param,test_data)
    dim,dim_emit=get_dim(config,hy_param,test_data)
    n_steps=1
    hy_param["n_steps"]=n_steps
    z_holder=tf.placeholder(tf.float32,shape=(None,dim))
    # one grid point per discrete state
    z0=make_griddata_discrete(dim)
    batch_size=z0.shape[0]
    control_params={
        "dropout_rate":0.0,
        "config":config,
    }
    # emission p(x|z) on the state grid, and variational q(z|x) on the data
    params=computeEmission(z_holder,n_steps,
        init_params_flag=True,control_params=control_params)
    # NOTE(review): the 100 here and the (20,100,dim) reshape below look
    # like hard-coded dataset dimensions — confirm against the data.
    x_holder=tf.placeholder(tf.float32,shape=(None,100,dim_emit))
    qz=computeVariationalDist(x_holder,n_steps,init_params_flag=True,control_params=control_params)
    # load trained weights (best effort: proceed with fresh parameters on failure)
    try:
        saver = tf.train.Saver()
        print("[LOAD] ",config["load_model"])
        saver.restore(sess,config["load_model"])
    except Exception:
        print("[SKIP] Load parameters")
    #
    feed_dict={z_holder:z0}
    x_params=sess.run(params,feed_dict=feed_dict)
    x=test_data.x
    feed_dict={x_holder:x}
    out_qz=sess.run(qz,feed_dict=feed_dict)
    print(out_qz[0].shape)
    print(len(out_qz))
    # x_params: per discrete state d, Gaussian mean x_params[0][d] and
    # variance x_params[1][d] over the emission dimensions
    num_d=x_params[0].shape[0]
    dist_x=[]
    for d in range(num_d):
        m=x_params[0][d,0,:]
        print("##,",d,",".join(map(str,m)))
    for d in range(num_d):
        cov=x_params[1][d,0,:]
        print("##,",d,",".join(map(str,cov)))
    for d in range(num_d):
        m=x_params[0][d,0,:]
        cov=x_params[1][d,0,:]
        # Gaussian log-likelihood of each observation under state d,
        # averaged over emission dimensions
        diff_x=-(x-m)**2/(2*cov)
        prob=-1.0/2.0*np.log(2*np.pi*cov)+diff_x
        prob=np.mean(prob,axis=2)
        dist_x.append(prob)
    dist_x=np.array(dist_x)
    dist_x=np.transpose(dist_x,[1,2,0])
    # dist_x: data_num x n_steps x dim (log-likelihood per state)
    dist_x_max=np.zeros_like(dist_x)
    for i in range(dist_x.shape[0]):
        for j in range(dist_x.shape[1]):
            k=np.argmax(dist_x[i,j,:])
            dist_x_max[i,j,k]=1
    ## joint p(x,z) = p(x|z)*q(z)
    dist_qz=out_qz[0].reshape((20,100,dim))
    dist_pxz=dist_qz*np.exp(dist_x)
    ##
    tr_mat=compute_discrete_transition_mat(sess,config)
    print(tr_mat)
    # smooth the transition matrix toward the identity (sticky states)
    beta=5.0e-2
    tr_mat=beta*tr_mat+(1.0-beta)*np.identity(dim)
    print(tr_mat)
    ## Viterbi decoding per sequence d
    prob_viterbi=np.zeros_like(dist_x)
    prob_viterbi[:,:,:]=-np.inf
    path_viterbi=np.zeros_like(dist_x)
    index_viterbi=np.zeros_like(dist_x,dtype=np.int32)
    for d in range(dist_x.shape[0]):
        # NOTE(review): the first step stores dist_pxz directly while later
        # steps accumulate log(dist_pxz) — confirm the intended domain mix.
        prob_viterbi[d,0,:]=dist_pxz[d,0,:]
        index_viterbi[d,0,:]=np.argmax(dist_pxz[d,0,:])
        step=dist_x.shape[1]-1
        for t in range(step):
            for i in range(dim):
                for j in range(dim):
                    p=0
                    p+=prob_viterbi[d,t,i]
                    p+=np.log(dist_pxz[d,t+1,j])
                    p+=np.log(tr_mat[i,j])
                    if prob_viterbi[d,t+1,j]<p:
                        prob_viterbi[d,t+1,j]=p
                        index_viterbi[d,t+1,j]=i
        ## backtrack the best path
        i=np.argmax(prob_viterbi[d,step,:])
        path_viterbi[d,step,i]=1.0
        for t in range(step):
            j=index_viterbi[d,step-t-1,i]
            path_viterbi[d,step-t-1,j]=1.0
            i=j
    ## save results
    if config["save_result_filter"]!="":
        results={}
        results["dist_max"]=dist_x_max
        results["dist_qz"]=dist_qz
        results["dist_pxz"]=dist_pxz
        results["dist_px"]=dist_x
        results["dist_viterbi"]=path_viterbi
        results["tr_mat"]=tr_mat
        print("[SAVE] result : ",config["save_result_filter"])
        joblib.dump(results,config["save_result_filter"])
def get_batch_size(config, hy_param, data):
    """Return (batch_size, n_batch) so every sample in `data` is covered.

    When the dataset is smaller than the configured batch size, the batch
    shrinks to the dataset size; otherwise the batch count is rounded up
    to include the final partial batch.
    """
    batch_size = config["batch_size"]
    n_batch, remainder = divmod(data.num, batch_size)
    if n_batch == 0:
        return data.num, 1
    if remainder != 0:
        n_batch += 1
    return batch_size, n_batch
def filtering(sess,config):
    """Run the trained particle filter over the test sequences step by step.

    Restores the model, then for every time step feeds the current
    observations plus the previous particles through p_filter, collecting
    sampled latent states (zs), a random subset of predicted means (mus)
    and their residuals (errors). Results are dumped with joblib to
    config["save_result_filter"] (unless it is "").
    """
    hy_param=hy.get_hyperparameter()
    _,test_data = dmm_input.load_data(config,with_shuffle=False,with_train_test=False,test_flag=True)
    n_steps=test_data.n_steps
    hy_param["n_steps"]=n_steps
    dim,dim_emit=get_dim(config,hy_param,test_data)
    batch_size,n_batch=get_batch_size(config,hy_param,test_data)
    print("data_size",test_data.num,
        "batch_size",batch_size,
        ", n_step",test_data.n_steps,
        ", dim_emit",test_data.dim)
    x_holder=tf.placeholder(tf.float32,shape=(None,dim_emit))
    # NOTE(review): m_holder is created but never fed or passed to p_filter — likely dead.
    m_holder=tf.placeholder(tf.float32,shape=(None))
    z_holder=tf.placeholder(tf.float32,shape=(None,dim))
    sample_size=config["pfilter_sample_size"]
    proposal_sample_size=config["pfilter_proposal_sample_size"]
    save_sample_num=config["pfilter_save_sample_num"]
    # initial particles drawn from a standard normal
    #z0=np.zeros((batch_size*sample_size,dim),dtype=np.float32)
    z0=np.random.normal(0,1.0,size=(batch_size*sample_size,dim))
    control_params={
        "config":config,
        "dropout_rate":0.0,
    }
    # inference
    #outputs=p_filter(x_holder,z_holder,None,dim,dim_emit,sample_size,batch_size,control_params=control_params)
    outputs=p_filter(x_holder,z_holder,None,sample_size,proposal_sample_size,batch_size,control_params=control_params)
    # loading model
    print_variables()
    saver = tf.train.Saver()
    print("[LOAD]",config["load_model"])
    saver.restore(sess,config["load_model"])
    # warm-up step on the first observations to obtain initial particles
    feed_dict={x_holder:test_data.x[0:batch_size,0,:],z_holder:z0}
    result=sess.run(outputs,feed_dict=feed_dict)
    z=np.reshape(result["sampled_z"],[-1,dim])
    zs=np.zeros((sample_size,test_data.num,n_steps,dim),dtype=np.float32)
    # pick a random subset of the proposal*particle samples to save
    # (max: proposal_sample_size*sample_size)
    sample_idx=list(range(proposal_sample_size*sample_size))
    np.random.shuffle(sample_idx)
    sample_idx=sample_idx[:save_sample_num]
    mus=np.zeros((save_sample_num,test_data.num,n_steps,dim_emit),dtype=np.float32)
    errors=np.zeros((save_sample_num,test_data.num,n_steps,dim_emit),dtype=np.float32)
    for j in range(n_batch):
        idx=j*batch_size
        print(j,"/",n_batch)
        for step in range(n_steps):
            if idx+batch_size>test_data.num: # for last (partial) batch, zero-pad
                # NOTE(review): padded x uses `dim`, but x_holder expects
                # dim_emit columns — breaks if dim != dim_emit; confirm.
                x=np.zeros((batch_size,dim),dtype=np.float32)
                bs=batch_size-(idx+batch_size-test_data.num)
                x[:bs,:]=test_data.x[idx:idx+batch_size,step,:]
            else:
                x=test_data.x[idx:idx+batch_size,step,:]
                bs=batch_size
            feed_dict={x_holder:x,z_holder:z}
            result=sess.run(outputs,feed_dict=feed_dict)
            z=result["sampled_z"]
            mu=result["sampled_pred_params"][0]
            # keep only the non-padded rows of this batch
            zs[:,idx:idx+batch_size,step,:]=z[:,:bs,:]
            mus[:,idx:idx+batch_size,step,:]=mu[sample_idx,:bs,:]
            errors[:,idx:idx+batch_size,step,:]=mu[sample_idx,:bs,:]-x[:bs,:]
            # flatten particles for the next step's feed
            z=np.reshape(z,[-1,dim])
            print("*", end="")
        print("")
    ## save results
    if config["save_result_filter"]!="":
        results={}
        results["z"]=zs
        results["mu"]=mus
        results["error"]=errors
        print("[SAVE] result : ",config["save_result_filter"])
        joblib.dump(results,config["save_result_filter"])
if __name__ == '__main__':
    # Command-line entry point: parse arguments, assemble the config, and
    # run the requested mode(s) (comma-separated), each in a fresh
    # TensorFlow graph and session.
    #
    # Fix relative to the original: both config files were opened without
    # ever being closed (leaked handles; the saved config could stay
    # unflushed) — now handled with `with` blocks.
    parser = argparse.ArgumentParser()
    parser.add_argument('mode', type=str,
            help='train/infer')
    parser.add_argument('--config', type=str,
            default=None,
            nargs='?',
            help='config json file')
    parser.add_argument('--no-config',
            action='store_true',
            help='use default setting')
    parser.add_argument('--save-config',
            default=None,
            nargs='?',
            help='save config json file')
    parser.add_argument('--model', type=str,
            default=None,
            help='model')
    parser.add_argument('--hyperparam', type=str,
            default=None,
            nargs='?',
            help='hyperparameter json file')
    parser.add_argument('--cpu',
            action='store_true',
            help='cpu mode (calcuration only with cpu)')
    parser.add_argument('--gpu', type=str,
            default=None,
            help='constraint gpus (default: all) (e.g. --gpu 0,2)')
    args=parser.parse_args()
    # config: defaults, optionally overridden by a JSON file
    config=get_default_config()
    if args.config is None:
        if not args.no_config:
            parser.print_help()
            #quit()
    else:
        with open(args.config, 'r') as fp:
            config.update(json.load(fp))
    # merge hyperparameters both ways so config and hy stay in sync
    hy.initialize_hyperparameter(args.hyperparam)
    config.update(hy.get_hyperparameter())
    hy.get_hyperparameter().update(config)
    # gpu/cpu visibility
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ""
    elif args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # run each requested mode in its own graph/session
    mode_list=args.mode.split(",")
    for mode in mode_list:
        with tf.Graph().as_default():
            with tf.Session() as sess:
                if mode=="train":
                    train(sess,config)
                elif mode=="infer" or mode=="test":
                    if args.model is not None:
                        config["load_model"]=args.model
                    infer(sess,config)
                elif mode=="filter":
                    if args.model is not None:
                        config["load_model"]=args.model
                    filtering(sess,config)
                elif mode=="filter2":
                    filter_discrete_forward(sess,config)
                elif mode=="field":
                    field(sess,config)
                elif mode=="potential":
                    potential(sess,config)
    if args.save_config is not None:
        print("[SAVE] config: ",args.save_config)
        with open(args.save_config, "w") as fp:
            json.dump(config, fp, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '),cls=NumPyArangeEncoder)
| [
"hyopt.save_hyperparameter",
"hyopt.initialize_hyperparameter",
"hyopt.get_hyperparameter",
"numpy.random.standard_normal",
"json.JSONEncoder.default",
"numpy.log",
"attractor.compute_discrete_transition_mat",
"numpy.array",
"tensorflow.control_dependencies",
"dmm_model.loss",
"numpy.mean",
"t... | [((3109, 3132), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (3130, 3132), True, 'import hyopt as hy\n'), ((4388, 4461), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""emission_var"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='emission_var')\n", (4405, 4461), True, 'import tensorflow as tf\n'), ((4549, 4635), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""variational_dist_var"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n 'variational_dist_var')\n", (4566, 4635), True, 'import tensorflow as tf\n'), ((4711, 4786), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""transition_var"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='transition_var')\n", (4728, 4786), True, 'import tensorflow as tf\n'), ((4867, 4941), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""potential_var"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='potential_var')\n", (4884, 4941), True, 'import tensorflow as tf\n'), ((7490, 7516), 'numpy.zeros', 'np.zeros', (['(3,)', 'np.float32'], {}), '((3,), np.float32)\n', (7498, 7516), True, 'import numpy as np\n'), ((9772, 9795), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (9793, 9795), True, 'import hyopt as hy\n'), ((9821, 9889), 'dmm_input.load_data', 'dmm_input.load_data', (['config'], {'with_shuffle': '(True)', 'with_train_test': '(True)'}), '(config, with_shuffle=True, with_train_test=True)\n', (9840, 9889), False, 'import dmm_input\n'), ((10226, 10255), 'dmm_model.construct_placeholder', 'construct_placeholder', (['config'], {}), '(config)\n', (10247, 10255), False, 'from dmm_model import construct_placeholder, computeEmission, computeVariationalDist\n'), ((10422, 10471), 'dmm_model.inference', 'inference', (['n_steps'], {'control_params': 'control_params'}), 
'(n_steps, control_params=control_params)\n', (10431, 10471), False, 'from dmm_model import inference, loss, p_filter, sampleVariationalDist\n'), ((10492, 10559), 'dmm_model.loss', 'loss', (['outputs', "placeholders['alpha']"], {'control_params': 'control_params'}), "(outputs, placeholders['alpha'], control_params=control_params)\n", (10496, 10559), False, 'from dmm_model import inference, loss, p_filter, sampleVariationalDist\n'), ((10586, 10628), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (10603, 10628), True, 'import tensorflow as tf\n'), ((10793, 10809), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10807, 10809), True, 'import tensorflow as tf\n'), ((10832, 10865), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10863, 10865), True, 'import tensorflow as tf\n'), ((12834, 12858), 'hyopt.save_hyperparameter', 'hy.save_hyperparameter', ([], {}), '()\n', (12856, 12858), True, 'import hyopt as hy\n'), ((13329, 13352), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (13350, 13352), True, 'import hyopt as hy\n'), ((13368, 13458), 'dmm_input.load_data', 'dmm_input.load_data', (['config'], {'with_shuffle': '(False)', 'with_train_test': '(False)', 'test_flag': '(True)'}), '(config, with_shuffle=False, with_train_test=False,\n test_flag=True)\n', (13387, 13458), False, 'import dmm_input\n'), ((13844, 13873), 'dmm_model.construct_placeholder', 'construct_placeholder', (['config'], {}), '(config)\n', (13865, 13873), False, 'from dmm_model import construct_placeholder, computeEmission, computeVariationalDist\n'), ((13971, 14005), 'dmm_model.inference', 'inference', (['n_steps', 'control_params'], {}), '(n_steps, control_params)\n', (13980, 14005), False, 'from dmm_model import inference, loss, p_filter, sampleVariationalDist\n'), ((14026, 14093), 'dmm_model.loss', 'loss', (['outputs', "placeholders['alpha']"], 
{'control_params': 'control_params'}), "(outputs, placeholders['alpha'], control_params=control_params)\n", (14030, 14093), False, 'from dmm_model import inference, loss, p_filter, sampleVariationalDist\n'), ((14115, 14131), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (14129, 14131), True, 'import tensorflow as tf\n'), ((14843, 14866), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (14864, 14866), True, 'import hyopt as hy\n'), ((14882, 14972), 'dmm_input.load_data', 'dmm_input.load_data', (['config'], {'with_shuffle': '(False)', 'with_train_test': '(False)', 'test_flag': '(True)'}), '(config, with_shuffle=False, with_train_test=False,\n test_flag=True)\n', (14901, 14972), False, 'import dmm_input\n'), ((15129, 15174), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, dim)'}), '(tf.float32, shape=(None, dim))\n', (15143, 15174), True, 'import tensorflow as tf\n'), ((15177, 15204), 'attractor.make_griddata_discrete', 'make_griddata_discrete', (['dim'], {}), '(dim)\n', (15199, 15204), False, 'from attractor import field, potential, make_griddata_discrete, compute_discrete_transition_mat\n'), ((15313, 15406), 'dmm_model.computeEmission', 'computeEmission', (['z_holder', 'n_steps'], {'init_params_flag': '(True)', 'control_params': 'control_params'}), '(z_holder, n_steps, init_params_flag=True, control_params=\n control_params)\n', (15328, 15406), False, 'from dmm_model import construct_placeholder, computeEmission, computeVariationalDist\n'), ((15414, 15469), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 100, dim_emit)'}), '(tf.float32, shape=(None, 100, dim_emit))\n', (15428, 15469), True, 'import tensorflow as tf\n'), ((15471, 15570), 'dmm_model.computeVariationalDist', 'computeVariationalDist', (['x_holder', 'n_steps'], {'init_params_flag': '(True)', 'control_params': 'control_params'}), '(x_holder, n_steps, init_params_flag=True,\n 
control_params=control_params)\n', (15493, 15570), False, 'from dmm_model import construct_placeholder, computeEmission, computeVariationalDist\n'), ((16429, 16445), 'numpy.array', 'np.array', (['dist_x'], {}), '(dist_x)\n', (16437, 16445), True, 'import numpy as np\n'), ((16454, 16485), 'numpy.transpose', 'np.transpose', (['dist_x', '[1, 2, 0]'], {}), '(dist_x, [1, 2, 0])\n', (16466, 16485), True, 'import numpy as np\n'), ((16529, 16550), 'numpy.zeros_like', 'np.zeros_like', (['dist_x'], {}), '(dist_x)\n', (16542, 16550), True, 'import numpy as np\n'), ((16790, 16835), 'attractor.compute_discrete_transition_mat', 'compute_discrete_transition_mat', (['sess', 'config'], {}), '(sess, config)\n', (16821, 16835), False, 'from attractor import field, potential, make_griddata_discrete, compute_discrete_transition_mat\n'), ((16952, 16973), 'numpy.zeros_like', 'np.zeros_like', (['dist_x'], {}), '(dist_x)\n', (16965, 16973), True, 'import numpy as np\n'), ((17017, 17038), 'numpy.zeros_like', 'np.zeros_like', (['dist_x'], {}), '(dist_x)\n', (17030, 17038), True, 'import numpy as np\n'), ((17054, 17091), 'numpy.zeros_like', 'np.zeros_like', (['dist_x'], {'dtype': 'np.int32'}), '(dist_x, dtype=np.int32)\n', (17067, 17091), True, 'import numpy as np\n'), ((18524, 18547), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (18545, 18547), True, 'import hyopt as hy\n'), ((18563, 18653), 'dmm_input.load_data', 'dmm_input.load_data', (['config'], {'with_shuffle': '(False)', 'with_train_test': '(False)', 'test_flag': '(True)'}), '(config, with_shuffle=False, with_train_test=False,\n test_flag=True)\n', (18582, 18653), False, 'import dmm_input\n'), ((18948, 18998), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, dim_emit)'}), '(tf.float32, shape=(None, dim_emit))\n', (18962, 18998), True, 'import tensorflow as tf\n'), ((19007, 19045), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, 
shape=None)\n', (19021, 19045), True, 'import tensorflow as tf\n'), ((19057, 19102), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, dim)'}), '(tf.float32, shape=(None, dim))\n', (19071, 19102), True, 'import tensorflow as tf\n'), ((19322, 19384), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1.0)'], {'size': '(batch_size * sample_size, dim)'}), '(0, 1.0, size=(batch_size * sample_size, dim))\n', (19338, 19384), True, 'import numpy as np\n'), ((19574, 19690), 'dmm_model.p_filter', 'p_filter', (['x_holder', 'z_holder', 'None', 'sample_size', 'proposal_sample_size', 'batch_size'], {'control_params': 'control_params'}), '(x_holder, z_holder, None, sample_size, proposal_sample_size,\n batch_size, control_params=control_params)\n', (19582, 19690), False, 'from dmm_model import inference, loss, p_filter, sampleVariationalDist\n'), ((19725, 19741), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (19739, 19741), True, 'import tensorflow as tf\n'), ((19938, 19980), 'numpy.reshape', 'np.reshape', (["result['sampled_z']", '[-1, dim]'], {}), "(result['sampled_z'], [-1, dim])\n", (19948, 19980), True, 'import numpy as np\n'), ((19983, 20053), 'numpy.zeros', 'np.zeros', (['(sample_size, test_data.num, n_steps, dim)'], {'dtype': 'np.float32'}), '((sample_size, test_data.num, n_steps, dim), dtype=np.float32)\n', (19991, 20053), True, 'import numpy as np\n'), ((20152, 20181), 'numpy.random.shuffle', 'np.random.shuffle', (['sample_idx'], {}), '(sample_idx)\n', (20169, 20181), True, 'import numpy as np\n'), ((20228, 20307), 'numpy.zeros', 'np.zeros', (['(save_sample_num, test_data.num, n_steps, dim_emit)'], {'dtype': 'np.float32'}), '((save_sample_num, test_data.num, n_steps, dim_emit), dtype=np.float32)\n', (20236, 20307), True, 'import numpy as np\n'), ((20312, 20391), 'numpy.zeros', 'np.zeros', (['(save_sample_num, test_data.num, n_steps, dim_emit)'], {'dtype': 'np.float32'}), '((save_sample_num, test_data.num, n_steps, 
dim_emit), dtype=np.float32)\n', (20320, 20391), True, 'import numpy as np\n'), ((21419, 21444), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21442, 21444), False, 'import argparse\n'), ((22462, 22507), 'hyopt.initialize_hyperparameter', 'hy.initialize_hyperparameter', (['args.hyperparam'], {}), '(args.hyperparam)\n', (22490, 22507), True, 'import hyopt as hy\n'), ((1344, 1379), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1368, 1379), False, 'import json\n'), ((7553, 7589), 'numpy.ceil', 'np.ceil', (['(data.num * 1.0 / batch_size)'], {}), '(data.num * 1.0 / batch_size)\n', (7560, 7589), True, 'import numpy as np\n'), ((8676, 8712), 'numpy.ceil', 'np.ceil', (['(data.num * 1.0 / batch_size)'], {}), '(data.num * 1.0 / batch_size)\n', (8683, 8712), True, 'import numpy as np\n'), ((10635, 10670), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (10658, 10670), True, 'import tensorflow as tf\n'), ((11280, 11308), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (11297, 11308), True, 'import numpy as np\n'), ((13100, 13144), 'os.path.dirname', 'os.path.dirname', (["config['save_result_train']"], {}), "(config['save_result_train'])\n", (13115, 13144), False, 'import os\n'), ((13147, 13184), 'os.makedirs', 'os.makedirs', (['base_path'], {'exist_ok': '(True)'}), '(base_path, exist_ok=True)\n', (13158, 13184), False, 'import os\n'), ((13186, 13235), 'joblib.dump', 'joblib.dump', (['results', "config['save_result_train']"], {}), "(results, config['save_result_train'])\n", (13197, 13235), False, 'import joblib\n'), ((14655, 14698), 'os.path.dirname', 'os.path.dirname', (["config['save_result_test']"], {}), "(config['save_result_test'])\n", (14670, 14698), False, 'import os\n'), ((14701, 14738), 'os.makedirs', 'os.makedirs', (['base_path'], {'exist_ok': '(True)'}), '(base_path, exist_ok=True)\n', (14712, 14738), 
False, 'import os\n'), ((14740, 14788), 'joblib.dump', 'joblib.dump', (['results', "config['save_result_test']"], {}), "(results, config['save_result_test'])\n", (14751, 14788), False, 'import joblib\n'), ((15588, 15604), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (15602, 15604), True, 'import tensorflow as tf\n'), ((16378, 16399), 'numpy.mean', 'np.mean', (['prob'], {'axis': '(2)'}), '(prob, axis=2)\n', (16385, 16399), True, 'import numpy as np\n'), ((16763, 16777), 'numpy.exp', 'np.exp', (['dist_x'], {}), '(dist_x)\n', (16769, 16777), True, 'import numpy as np\n'), ((17186, 17214), 'numpy.argmax', 'np.argmax', (['dist_pxz[d, 0, :]'], {}), '(dist_pxz[d, 0, :])\n', (17195, 17214), True, 'import numpy as np\n'), ((17657, 17692), 'numpy.argmax', 'np.argmax', (['prob_viterbi[d, step, :]'], {}), '(prob_viterbi[d, step, :])\n', (17666, 17692), True, 'import numpy as np\n'), ((18198, 18248), 'joblib.dump', 'joblib.dump', (['results', "config['save_result_filter']"], {}), "(results, config['save_result_filter'])\n", (18209, 18248), False, 'import joblib\n'), ((21330, 21380), 'joblib.dump', 'joblib.dump', (['results', "config['save_result_filter']"], {}), "(results, config['save_result_filter'])\n", (21341, 21380), False, 'import joblib\n'), ((22523, 22546), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (22544, 22546), True, 'import hyopt as hy\n'), ((23512, 23631), 'json.dump', 'json.dump', (['config', 'fp'], {'ensure_ascii': '(False)', 'indent': '(4)', 'sort_keys': '(True)', 'separators': "(',', ': ')", 'cls': 'NumPyArangeEncoder'}), "(config, fp, ensure_ascii=False, indent=4, sort_keys=True,\n separators=(',', ': '), cls=NumPyArangeEncoder)\n", (23521, 23631), False, 'import json\n'), ((11975, 12017), 'numpy.ceil', 'np.ceil', (['(train_data.num * 1.0 / batch_size)'], {}), '(train_data.num * 1.0 / batch_size)\n', (11982, 12017), True, 'import numpy as np\n'), ((16625, 16651), 'numpy.argmax', 'np.argmax', (['dist_x[i, j, :]'], 
{}), '(dist_x[i, j, :])\n', (16634, 16651), True, 'import numpy as np\n'), ((16894, 16910), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (16905, 16910), True, 'import numpy as np\n'), ((21082, 21106), 'numpy.reshape', 'np.reshape', (['z', '[-1, dim]'], {}), '(z, [-1, dim])\n', (21092, 21106), True, 'import numpy as np\n'), ((22412, 22425), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (22421, 22425), False, 'import json\n'), ((22549, 22572), 'hyopt.get_hyperparameter', 'hy.get_hyperparameter', ([], {}), '()\n', (22570, 22572), True, 'import hyopt as hy\n'), ((10687, 10734), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (["config['learning_rate']"], {}), "(config['learning_rate'])\n", (10709, 10734), True, 'import tensorflow as tf\n'), ((16304, 16327), 'numpy.log', 'np.log', (['(2 * np.pi * cov)'], {}), '(2 * np.pi * cov)\n', (16310, 16327), True, 'import numpy as np\n'), ((20539, 20584), 'numpy.zeros', 'np.zeros', (['(batch_size, dim)'], {'dtype': 'np.float32'}), '((batch_size, dim), dtype=np.float32)\n', (20547, 20584), True, 'import numpy as np\n'), ((22889, 22901), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (22899, 22901), True, 'import tensorflow as tf\n'), ((17356, 17385), 'numpy.log', 'np.log', (['dist_pxz[d, t + 1, j]'], {}), '(dist_pxz[d, t + 1, j])\n', (17362, 17385), True, 'import numpy as np\n'), ((17424, 17444), 'numpy.log', 'np.log', (['tr_mat[i, j]'], {}), '(tr_mat[i, j])\n', (17430, 17444), True, 'import numpy as np\n'), ((22856, 22866), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (22864, 22866), True, 'import tensorflow as tf\n'), ((5224, 5254), 'numpy.exp', 'np.exp', (['(-(i - begin_tau) / tau)'], {}), '(-(i - begin_tau) / tau)\n', (5230, 5254), True, 'import numpy as np\n'), ((8989, 9030), 'numpy.concatenate', 'np.concatenate', (['[results[k], res]'], {'axis': '(0)'}), '([results[k], res], axis=0)\n', (9003, 9030), True, 'import numpy as np\n'), ((3724, 3789), 'numpy.random.uniform', 
'np.random.uniform', (['(1e-10)', '(1.0 - 1e-10)', '(batch_size, n_steps, dim)'], {}), '(1e-10, 1.0 - 1e-10, (batch_size, n_steps, dim))\n', (3741, 3789), True, 'import numpy as np\n'), ((3835, 3888), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(batch_size, n_steps, dim)'], {}), '((batch_size, n_steps, dim))\n', (3860, 3888), True, 'import numpy as np\n'), ((3982, 4035), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(batch_size, n_steps, dim)'], {}), '((batch_size, n_steps, dim))\n', (4007, 4035), True, 'import numpy as np\n'), ((9212, 9259), 'numpy.concatenate', 'np.concatenate', (['[results[k][i], res[i]]'], {'axis': '(0)'}), '([results[k][i], res[i]], axis=0)\n', (9226, 9259), True, 'import numpy as np\n'), ((4096, 4150), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(num_potential_points, dim)'], {}), '((num_potential_points, dim))\n', (4121, 4150), True, 'import numpy as np\n'), ((23322, 23341), 'attractor.field', 'field', (['sess', 'config'], {}), '(sess, config)\n', (23327, 23341), False, 'from attractor import field, potential, make_griddata_discrete, compute_discrete_transition_mat\n'), ((23374, 23397), 'attractor.potential', 'potential', (['sess', 'config'], {}), '(sess, config)\n', (23383, 23397), False, 'from attractor import field, potential, make_griddata_discrete, compute_discrete_transition_mat\n'), ((3805, 3816), 'numpy.log', 'np.log', (['eps'], {}), '(eps)\n', (3811, 3816), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 19:29:16 2020
@author: Robert
"""
#%% import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import collections
#%% set path
workingDir = "D:\\working space\\appliedEconometric_Project\\rawData"
balSheet = "Balance sheet variables\\FS_Combas.csv"
iS = "Income statement variables\\FS_Comins.csv"
cF = "Cash-flow statement variables\\FS_Comscfi.csv"
#%% read balance sheet
balSheet_rawDf = pd.read_csv(os.path.join(workingDir, balSheet),
converters = {"Stkcd":str})
balSheet_rawDf['Accper'] = pd.to_datetime(balSheet_rawDf['Accper'],
format = "%Y-%m-%d")
# rename columns
bS_colName = ["SID","Accper","TypeRep","Monetary_Cap","Tradable_Fin_Assets",
"Tot_Cur_Assets", "Tot_Assets","ST_Borrowing","Tradable_Fin_Liab",
"Notes_Payable","Taxes_Sur_Payable","Non_Cur_Liab_DueIn1Y",
"Tot_Cur_Liab","LT_Liab","Min_Int","Tot_SE"]
balSheet_rawDf.columns = bS_colName
# drop redundant cols
balSheet_rawDf.drop(columns=['TypeRep'], inplace = True)
# sort by date
balSheet_rawDf.sort_values(by = ['Accper','SID'], inplace = True, ascending = [True, True])
# reset index
balSheet_rawDf.reset_index(inplace = True)
balSheet_rawDf.drop(columns=['index'], inplace = True)
#%% read income statement
incomeStatement_rawDf = pd.read_csv(os.path.join(workingDir, iS),
converters = {"Stkcd":str})
incomeStatement_rawDf['Accper'] = pd.to_datetime(incomeStatement_rawDf['Accper'],
format = "%Y-%m-%d")
# rename colns
iS_colName = ["SID", "Accper", "TypeRep", "Net_Profit", "Min_Int_Inc"]
incomeStatement_rawDf.columns = iS_colName
# drop and re-orgnize
incomeStatement_rawDf.sort_values(by = ['Accper', 'SID'], inplace = True, ascending = [True, True])
incomeStatement_rawDf.reset_index(inplace = True)
incomeStatement_rawDf.drop(columns = ["index","TypeRep"], inplace = True)
#%% read cash flow statement
cashFlowStatement_rawDf = pd.read_csv(os.path.join(workingDir, cF),
converters = {"Stkcd":str})
cashFlowStatement_rawDf['Accper'] = pd.to_datetime(cashFlowStatement_rawDf['Accper'],
format = "%Y-%m-%d")
# rename colns
cF_colName = ["SID","Accper","TypeRep","Depr_FA_COGA_DPBA", "Amort_Intang_Assets",
"Cash_At_End", "Cash_At_Begin","Cash_Equ_End", "Cash_Equ_Begin"]
cashFlowStatement_rawDf.columns = cF_colName
# drop and re-orgnize
cashFlowStatement_rawDf.sort_values(by = ['Accper', 'SID'], inplace = True, ascending = [True, True])
cashFlowStatement_rawDf.reset_index(inplace = True)
cashFlowStatement_rawDf.drop(columns = ["index", "TypeRep"], inplace = True)
#%% clean data - balance sheet
# note: special obs: XXXX-01-01 and XXXX-12-31
# XXXX-01-01 is a minor review of the previous term, tend to drop XXXX-01-01 because
# we don't know when we can get the adjustment
# current asset's absence may result from a consequence of different industry,
# see 600030(CITI securities) vs. 002415(HKVI, a manufacturer) for example.
# give up cleaning, no good solution
# set Minority interest to be 0 if it is nan
balSheet_rawDf["Min_Int"] = balSheet_rawDf["Min_Int"].fillna(0)
# compute new variable
balSheet_rawDf["Tot_SE_After_Min_Int"] = balSheet_rawDf["Tot_SE"] - \
balSheet_rawDf["Min_Int"]
#%% clean data - income statement
# 4 obs with net profit = 0(their Min-Int-Inc are also 0)
# 1 obs with unknown net profit, but non-zero Min-Int-Inc
# if Min-Int-Inc is null, assume to be 0
# further WIN.D: no obs of operating cost, but has record of net profit
incomeStatement_rawDf.at[8668, "Net_Profit"] = 4664929.85
incomeStatement_rawDf.fillna(0, inplace = True)
# compute Net_Profit_After_Min_Int_Inc
incomeStatement_rawDf["Net_Profit_After_Min_Int_Inc"] = incomeStatement_rawDf["Net_Profit"] -\
incomeStatement_rawDf["Min_Int_Inc"]
# checked: Net_Profit_After_Min_Int_Inc no null value
#%% clean data - cash flow statement
# this is a quarter data
# fill cash equvalence with 0
# cashFlowStatement_rawDf[["Cash_Equ_Begin", "Cash_Equ_End"]].fillna(0, inplace = True)
# fill Amortization of intangible assets with ffill
# cashFlowStatement_rawDf["SID_copy"] = cashFlowStatement_rawDf["SID"]
# cashFlowStatement_rawDf["Amort_Intang_Assets"] = \
# cashFlowStatement_rawDf.groupby(["SID"])["Amort_Intang_Assets"].apply(lambda x: x.fillna(method = 'ffill')) #FIXME: should be groupby first
# fill nan in cash equ with 0
# cashFlowStatement_rawDf[["Cash_Equ_End", "Cash_Equ_Begin"]] = \
# cashFlowStatement_rawDf[["Cash_Equ_End", "Cash_Equ_Begin"]].fillna(0)
# use nansum to build new variable
# compute new variable
cashFlowStatement_rawDf['Cash_Cash_Equ_Begin'] = cashFlowStatement_rawDf[["Cash_At_Begin","Cash_Equ_Begin"]].sum(axis = 1)
cashFlowStatement_rawDf['Cash_Cash_Equ_End'] = cashFlowStatement_rawDf[["Cash_At_End","Cash_Equ_Begin"]].sum(axis = 1)
# checked: no null for new variables
#%% merge - step 1. drop date of "XXXX-01-01"
# clean balance sheet
bS_colName_left = ["SID","Accper","Monetary_Cap","Tradable_Fin_Assets",
"Tot_Cur_Assets", "Tot_Assets","ST_Borrowing","Tradable_Fin_Liab",
"Notes_Payable","Taxes_Sur_Payable","Non_Cur_Liab_DueIn1Y",
"Tot_Cur_Liab","LT_Liab","Min_Int","Tot_SE_After_Min_Int"]
balSheet_dfForMerge = pd.DataFrame(balSheet_rawDf[np.logical_or(balSheet_rawDf["Accper"].dt.month!=1,
balSheet_rawDf["Accper"].dt.day!=1)][bS_colName_left])
# clean income statement sheet
iS_colName_left = ["SID", "Accper", "Net_Profit_After_Min_Int_Inc"]
incomeStatement_dfForMerge = pd.DataFrame(incomeStatement_rawDf[np.logical_or(
incomeStatement_rawDf["Accper"].dt.month!=1,
incomeStatement_rawDf["Accper"].dt.day!=1)][iS_colName_left])
# clean cash-flow statement sheet
cF_colName_left = ["SID","Accper","Depr_FA_COGA_DPBA", "Amort_Intang_Assets",
"Cash_Cash_Equ_Begin", "Cash_Cash_Equ_End"]
cashFlowStatement_dfForMerge = pd.DataFrame(cashFlowStatement_rawDf[np.logical_or(
cashFlowStatement_rawDf["Accper"].dt.month!=1,
cashFlowStatement_rawDf["Accper"].dt.day!=1
)][cF_colName_left])
#%% merge - step 2. merge tables by SID and Accper
# 3211 companies in all
# merge with style 'outer'
bal_iS = pd.merge(balSheet_dfForMerge, incomeStatement_dfForMerge,
how = "outer", on = ["SID","Accper"])
FA_fullyOuterMerge = pd.merge(bal_iS, cashFlowStatement_dfForMerge,
how = "outer", on = ["SID", "Accper"])
#%% process asOf_Date
# after proceesing financial reports, the next step is to mark financial report
# with the announcement date, so that we can clearly know when the data is avaiable
# for computation
annDate_filePath = "Announcement date of financial reports\\IAR_Rept.csv"
annDate_rawDf = pd.read_csv(os.path.join(workingDir, annDate_filePath),
converters = {"Stkcd":str})
annDate_rawDf["Accper"] = pd.to_datetime(annDate_rawDf["Accper"],
format = "%Y-%m-%d")
annDate_rawDf["Annodt"] = pd.to_datetime(annDate_rawDf["Annodt"],
format = "%Y-%m-%d")
# rename colns
initName_annDate_rawDf = annDate_rawDf.columns.values
initName_annDate_rawDf[0] = "SID"
annDate_rawDf.columns = initName_annDate_rawDf
# drop stockname coln
annDate_rawDf.drop(columns = ["Stknme"], inplace = True)
# sort by date
annDate_rawDf.sort_values(by = ['Accper','SID'], inplace = True, ascending = [True, True])
# reset index
annDate_rawDf.reset_index(inplace = True)
annDate_rawDf.drop(columns=['index'], inplace = True)
#%% merge Annodt with data
# sort FA before merge
FA_fullyOuterMerge.sort_values(by = ['Accper','SID'], inplace = True, ascending = [True, True])
# outer merge
FA_with_asOf_Date = pd.merge(FA_fullyOuterMerge, annDate_rawDf,
how = "outer", on = ["SID", "Accper"])
#%% check data with no announcement date and fill raw data
data_NoAnnDate = FA_with_asOf_Date[pd.isnull(FA_with_asOf_Date["Annodt"])]
col_despTime = [["SID","Accper","Reptyp", "Annodt"]]
# decide to leave it empty
#%% write into csv, and then move to cleaning the stock trading data
# Note: the data including fundamental data and stock trading data, they're
# all factors(float), after processing factors, we will then move to the processing
# of Filters(Bool) & Classifiers(str or int)
FA_with_asOf_Date.to_csv(os.path.join(workingDir, "FundamentalData_AsOfDate.csv")) | [
"pandas.isnull",
"pandas.merge",
"os.path.join",
"numpy.logical_or",
"pandas.to_datetime"
] | [((616, 675), 'pandas.to_datetime', 'pd.to_datetime', (["balSheet_rawDf['Accper']"], {'format': '"""%Y-%m-%d"""'}), "(balSheet_rawDf['Accper'], format='%Y-%m-%d')\n", (630, 675), True, 'import pandas as pd\n'), ((1560, 1626), 'pandas.to_datetime', 'pd.to_datetime', (["incomeStatement_rawDf['Accper']"], {'format': '"""%Y-%m-%d"""'}), "(incomeStatement_rawDf['Accper'], format='%Y-%m-%d')\n", (1574, 1626), True, 'import pandas as pd\n'), ((2257, 2325), 'pandas.to_datetime', 'pd.to_datetime', (["cashFlowStatement_rawDf['Accper']"], {'format': '"""%Y-%m-%d"""'}), "(cashFlowStatement_rawDf['Accper'], format='%Y-%m-%d')\n", (2271, 2325), True, 'import pandas as pd\n'), ((6531, 6628), 'pandas.merge', 'pd.merge', (['balSheet_dfForMerge', 'incomeStatement_dfForMerge'], {'how': '"""outer"""', 'on': "['SID', 'Accper']"}), "(balSheet_dfForMerge, incomeStatement_dfForMerge, how='outer', on=[\n 'SID', 'Accper'])\n", (6539, 6628), True, 'import pandas as pd\n'), ((6667, 6752), 'pandas.merge', 'pd.merge', (['bal_iS', 'cashFlowStatement_dfForMerge'], {'how': '"""outer"""', 'on': "['SID', 'Accper']"}), "(bal_iS, cashFlowStatement_dfForMerge, how='outer', on=['SID',\n 'Accper'])\n", (6675, 6752), True, 'import pandas as pd\n'), ((7217, 7275), 'pandas.to_datetime', 'pd.to_datetime', (["annDate_rawDf['Accper']"], {'format': '"""%Y-%m-%d"""'}), "(annDate_rawDf['Accper'], format='%Y-%m-%d')\n", (7231, 7275), True, 'import pandas as pd\n'), ((7345, 7403), 'pandas.to_datetime', 'pd.to_datetime', (["annDate_rawDf['Annodt']"], {'format': '"""%Y-%m-%d"""'}), "(annDate_rawDf['Annodt'], format='%Y-%m-%d')\n", (7359, 7403), True, 'import pandas as pd\n'), ((8078, 8156), 'pandas.merge', 'pd.merge', (['FA_fullyOuterMerge', 'annDate_rawDf'], {'how': '"""outer"""', 'on': "['SID', 'Accper']"}), "(FA_fullyOuterMerge, annDate_rawDf, how='outer', on=['SID', 'Accper'])\n", (8086, 8156), True, 'import pandas as pd\n'), ((495, 529), 'os.path.join', 'os.path.join', (['workingDir', 'balSheet'], {}), 
'(workingDir, balSheet)\n', (507, 529), False, 'import os\n'), ((1431, 1459), 'os.path.join', 'os.path.join', (['workingDir', 'iS'], {}), '(workingDir, iS)\n', (1443, 1459), False, 'import os\n'), ((2125, 2153), 'os.path.join', 'os.path.join', (['workingDir', 'cF'], {}), '(workingDir, cF)\n', (2137, 2153), False, 'import os\n'), ((7091, 7133), 'os.path.join', 'os.path.join', (['workingDir', 'annDate_filePath'], {}), '(workingDir, annDate_filePath)\n', (7103, 7133), False, 'import os\n'), ((8285, 8323), 'pandas.isnull', 'pd.isnull', (["FA_with_asOf_Date['Annodt']"], {}), "(FA_with_asOf_Date['Annodt'])\n", (8294, 8323), True, 'import pandas as pd\n'), ((8706, 8762), 'os.path.join', 'os.path.join', (['workingDir', '"""FundamentalData_AsOfDate.csv"""'], {}), "(workingDir, 'FundamentalData_AsOfDate.csv')\n", (8718, 8762), False, 'import os\n'), ((5573, 5669), 'numpy.logical_or', 'np.logical_or', (["(balSheet_rawDf['Accper'].dt.month != 1)", "(balSheet_rawDf['Accper'].dt.day != 1)"], {}), "(balSheet_rawDf['Accper'].dt.month != 1, balSheet_rawDf[\n 'Accper'].dt.day != 1)\n", (5586, 5669), True, 'import numpy as np\n'), ((5909, 6019), 'numpy.logical_or', 'np.logical_or', (["(incomeStatement_rawDf['Accper'].dt.month != 1)", "(incomeStatement_rawDf['Accper'].dt.day != 1)"], {}), "(incomeStatement_rawDf['Accper'].dt.month != 1, \n incomeStatement_rawDf['Accper'].dt.day != 1)\n", (5922, 6019), True, 'import numpy as np\n'), ((6279, 6393), 'numpy.logical_or', 'np.logical_or', (["(cashFlowStatement_rawDf['Accper'].dt.month != 1)", "(cashFlowStatement_rawDf['Accper'].dt.day != 1)"], {}), "(cashFlowStatement_rawDf['Accper'].dt.month != 1, \n cashFlowStatement_rawDf['Accper'].dt.day != 1)\n", (6292, 6393), True, 'import numpy as np\n')] |
import os
import numpy as np
import unittest
from yggdrasil import units
from yggdrasil.tests import assert_equal
from yggdrasil.communication import AsciiTableComm
from yggdrasil.communication.tests import test_AsciiFileComm as parent
from yggdrasil.metaschema.properties.ScalarMetaschemaProperties import (
data2dtype)
def test_AsciiTableComm_nofmt():
r"""Test read of asciitable without format."""
test_file = os.path.join(os.getcwd(), 'temp_file.txt')
rows = [('one', 1, 1.0), ('two', 2, 2.0), ('three', 3, 3.0)]
lines = [('%5s\t%d\t%f\n' % r) for r in rows]
contents = (''.join(lines)).encode("utf-8")
with open(test_file, 'wb') as fd:
fd.write(contents)
inst = AsciiTableComm.AsciiTableComm('test', test_file, direction='recv')
inst.open()
for ans in rows:
flag, x = inst.recv_dict()
assert(flag)
irow = [e for e in ans]
irow[0] = irow[0].encode("utf-8")
idict = {'f%d' % i: irow[i] for i in range(len(irow))}
# irow = tuple(irow)
assert_equal(x, idict)
flag, x = inst.recv()
assert(not flag)
inst.close()
os.remove(test_file)
class TestAsciiTableComm(parent.TestAsciiFileComm):
r"""Test for AsciiTableComm communication class."""
comm = 'AsciiTableComm'
@unittest.skipIf(True, 'Table comm')
def test_send_recv_comment(self):
r"""Disabled: Test send/recv with commented message."""
pass # pragma: no cover
def map_sent2recv(self, obj):
r"""Convert a sent object into a received one."""
if not self.instance.is_eof(obj):
field_units = self.testing_options.get('field_units', None)
if field_units:
if isinstance(obj, dict):
return {k: units.add_units(v, u, dtype=data2dtype(v))
for (k, v), u in zip(obj.items(), field_units)}
elif isinstance(obj, (list, tuple)):
return [units.add_units(x, u, dtype=data2dtype(x))
for x, u in zip(obj, field_units)]
return obj
class TestAsciiTableComm_AsArray(TestAsciiTableComm):
r"""Test for AsciiTableComm communication class."""
testing_option_kws = {'array_columns': True}
class TestAsciiTableComm_single(TestAsciiTableComm):
r"""Test for AsciiTableComm communication class with field names sent."""
def get_options(self):
r"""Get testing options."""
nele = 5
dtype = np.dtype(dict(formats=['float'], names=['f0']))
arr1 = np.zeros((nele, ), dtype)
arr2 = np.ones((nele, ), dtype)
out = {'kwargs': {'as_array': True, 'field_names': ['f0']},
'contents': (
b'# f0\n# %g\n'
+ nele * b'0\n' + nele * b'1\n'),
'send': [[arr1['f0']], [arr2['f0']]],
'recv': [[np.hstack([arr1, arr2])['f0']]],
'recv_partial': [[[arr1['f0']]], [[arr2['f0']]]],
'dict': {'f0': arr1['f0']},
'objects': [[arr1['f0']], [arr2['f0']]]}
out['msg'] = out['send'][0]
out['msg_array'] = arr1
return out
def test_send_dict_default(self):
r"""Test automated conversion of dictionary to pandas data frame."""
self.do_send_recv(msg_send=self.testing_options['dict'],
msg_recv=self.testing_options['msg'])
| [
"yggdrasil.tests.assert_equal",
"yggdrasil.communication.AsciiTableComm.AsciiTableComm",
"numpy.ones",
"yggdrasil.metaschema.properties.ScalarMetaschemaProperties.data2dtype",
"numpy.hstack",
"unittest.skipIf",
"os.getcwd",
"numpy.zeros",
"os.remove"
] | [((709, 775), 'yggdrasil.communication.AsciiTableComm.AsciiTableComm', 'AsciiTableComm.AsciiTableComm', (['"""test"""', 'test_file'], {'direction': '"""recv"""'}), "('test', test_file, direction='recv')\n", (738, 775), False, 'from yggdrasil.communication import AsciiTableComm\n'), ((1134, 1154), 'os.remove', 'os.remove', (['test_file'], {}), '(test_file)\n', (1143, 1154), False, 'import os\n'), ((1304, 1339), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""Table comm"""'], {}), "(True, 'Table comm')\n", (1319, 1339), False, 'import unittest\n'), ((440, 451), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (449, 451), False, 'import os\n'), ((1043, 1065), 'yggdrasil.tests.assert_equal', 'assert_equal', (['x', 'idict'], {}), '(x, idict)\n', (1055, 1065), False, 'from yggdrasil.tests import assert_equal\n'), ((2567, 2591), 'numpy.zeros', 'np.zeros', (['(nele,)', 'dtype'], {}), '((nele,), dtype)\n', (2575, 2591), True, 'import numpy as np\n'), ((2608, 2631), 'numpy.ones', 'np.ones', (['(nele,)', 'dtype'], {}), '((nele,), dtype)\n', (2615, 2631), True, 'import numpy as np\n'), ((2896, 2919), 'numpy.hstack', 'np.hstack', (['[arr1, arr2]'], {}), '([arr1, arr2])\n', (2905, 2919), True, 'import numpy as np\n'), ((1811, 1824), 'yggdrasil.metaschema.properties.ScalarMetaschemaProperties.data2dtype', 'data2dtype', (['v'], {}), '(v)\n', (1821, 1824), False, 'from yggdrasil.metaschema.properties.ScalarMetaschemaProperties import data2dtype\n'), ((2011, 2024), 'yggdrasil.metaschema.properties.ScalarMetaschemaProperties.data2dtype', 'data2dtype', (['x'], {}), '(x)\n', (2021, 2024), False, 'from yggdrasil.metaschema.properties.ScalarMetaschemaProperties import data2dtype\n')] |
from pyE17.utils import imsave
from matplotlib import pyplot as plt
import numpy as np
from numpy.fft import fft2, ifftshift, fftshift, ifft2, fft, ifft
from scipy.ndimage.filters import gaussian_filter1d
from .plot import imsave
def taperarray(shape, edge):
xx, yy = np.mgrid[0:shape[0], 0:shape[1]]
xx1 = np.flipud(xx)
xx2 = np.minimum(xx, xx1)
yy1 = np.fliplr(yy)
yy2 = np.minimum(yy, yy1)
rr = np.minimum(xx2, yy2).astype(np.float)
rr[rr <= edge] /= edge
rr[rr > edge] = 1
rr *= np.pi / 2
rr = np.sin(rr)
# print xx2
# fig, ax = plt.subplots()
# imax = ax.imshow(rr)
# plt.colorbar(imax)
# plt.show()
return rr
def tapercircle(shape, edge, edgeloc=8 / 20.0, loc=(0, 0)):
x0, y0 = loc
xx, yy = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
rr = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2)
rr -= (shape[0] * edgeloc - edge)
one = rr < 0
taper = np.logical_and(rr >= 0, rr <= edge)
zero = rr > edge
# rr[taper]
rr[zero] = 0
rr[taper] = np.abs(rr[taper] - np.max(rr[taper])) / np.max(rr[taper])
rr[one] = 1
return rr
def frc(im1, im2, annulus_width=1, edgetaper=5, edgeloc=8 / 20.0, loc=(0, 0), smooth=None, working_mask=None, x=None,
y=None, rmax=None, taper=None):
"""
r = radial_data(data,annulus_width,working_mask,x,y)
A function to reduce an image to a radial cross-section.
:INPUT:
data - whatever data you are radially averaging. Data is
binned into a series of annuli of width 'annulus_width'
pixels.
annulus_width - width of each annulus. Default is 1.
working_mask - array of same size as 'data', with zeros at
whichever 'data' points you don't want included
in the radial data computations.
x,y - coordinate system in which the data exists (used to set
the center of the data). By default, these are set to
integer meshgrids
rmax -- maximum radial value over which to compute statistics
:OUTPUT:
r - a data structure containing the following
statistics, computed across each annulus:
.r - the radial coordinate used (outer edge of annulus)
.mean - mean of the data in the annulus
.sum - the sum of all enclosed values at the given radius
.std - standard deviation of the data in the annulus
.median - median value in the annulus
.max - maximum value in the annulus
.min - minimum value in the annulus
.numel - number of elements in the annulus
:EXAMPLE:
::
import numpy as np
import pylab as py
import radial_data as rad
# Create coordinate grid
npix = 50.
x = np.arange(npix) - npix/2.
xx, yy = np.meshgrid(x, x)
r = np.sqrt(xx**2 + yy**2)
fake_psf = np.exp(-(r/5.)**2)
noise = 0.1 * np.random.normal(0, 1, r.size).reshape(r.shape)
simulation = fake_psf + noise
rad_stats = rad.radial_data(simulation, x=xx, y=yy)
py.figure()
py.plot(rad_stats.r, rad_stats.mean / rad_stats.std)
py.xlabel('Radial coordinate')
py.ylabel('Signal to Noise')
"""
# 2012-02-25 20:40 IJMC: Empty bins now have numel=0, not nan.
# 2012-02-04 17:41 IJMC: Added "SUM" flag
# 2010-11-19 16:36 IJC: Updated documentation for Sphinx
# 2010-03-10 19:22 IJC: Ported to python from Matlab
# 2005/12/19 Added 'working_region' option (IJC)
# 2005/12/15 Switched order of outputs (IJC)
# 2005/12/12 IJC: Removed decifact, changed name, wrote comments.
# 2005/11/04 by <NAME> at the Jet Propulsion Laboratory
import numpy as ny
class radialDat:
"""Empty object container.
"""
def __init__(self):
self.num = None
self.denom = None
self.T1bit = None
self.Thalfbit = None
# ---------------------
# Set up input parameters
# ---------------------
if working_mask == None:
working_mask = ny.ones(im1.shape, bool)
npix, npiy = im1.shape
if taper is not None:
taper0 = taper
else:
taper0 = tapercircle(im1.shape, edgetaper, edgeloc, loc)
f, a = plt.subplots()
a.imshow(imsave(im1 * taper0))
plt.title('tapered')
plt.show()
# taper0 = 1
F1 = fftshift(fft2(ifftshift(im1 * taper0)))
F2 = fftshift(fft2(ifftshift(im2 * taper0)))
F1F2_star = F1 * F2.conj()
if x == None or y == None:
x1 = ny.arange(-npix / 2., npix / 2.)
y1 = ny.arange(-npiy / 2., npiy / 2.)
x, y = ny.meshgrid(y1, x1)
r = abs(x + 1j * y)
if rmax == None:
rmax = r[working_mask].max()
# ---------------------
# Prepare the data container
# ---------------------
dr = ny.abs([x[0, 0] - x[0, 1]]) * annulus_width
radial = ny.arange(rmax / dr) * dr + dr / 2.
nrad = len(radial)
radialdata = radialDat()
radialdata.num = ny.zeros(nrad)
radialdata.denom = ny.zeros(nrad)
radialdata.T1bit = ny.zeros(nrad)
radialdata.Thalfbit = ny.zeros(nrad)
radialdata.r = radial / (npix / 2)
# ---------------------
# Loop through the bins
# ---------------------
for irad in range(nrad): # = 1:numel(radial)
minrad = irad * dr
maxrad = minrad + dr
thisindex = (r >= minrad) * (r < maxrad) * working_mask
# import pylab as py
# pdb.set_trace()
if not thisindex.ravel().any():
radialdata.num[irad] = ny.nan
radialdata.denom[irad] = ny.nan
radialdata.T1bit[irad] = ny.nan
radialdata.Thalfbit[irad] = ny.nan
else:
sqrt_n = np.sqrt(thisindex.astype(np.int).sum())
radialdata.num[irad] = np.real(F1F2_star[thisindex].sum())
radialdata.denom[irad] = ny.sqrt((ny.abs(F1[thisindex]) ** 2).sum() * (ny.abs(F2[thisindex]) ** 2).sum())
radialdata.T1bit[irad] = (0.5 + 2.4142 / sqrt_n) / (1.5 + 1.4142 / sqrt_n)
radialdata.Thalfbit[irad] = (0.2071 + 1.9102 / sqrt_n) / (1.2071 + 0.9102 / sqrt_n)
# ---------------------
# Return with data
# ---------------------
radialdata.frc = ny.nan_to_num(radialdata.num / radialdata.denom)
radialdata.frc[radialdata.frc < 0] = 0
if smooth is not None:
radialdata.frc = gaussian_filter1d(radialdata.frc, smooth)
take = radialdata.r <= 1.1
radialdata.r = radialdata.r[take]
radialdata.frc = radialdata.frc[take]
radialdata.T1bit = radialdata.T1bit[take]
radialdata.Thalfbit = radialdata.Thalfbit[take]
return radialdata | [
"numpy.sqrt",
"numpy.sin",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.subplots",
"numpy.meshgrid",
"numpy.abs",
"numpy.ones",
"numpy.flipud",
"numpy.fliplr",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.fft.ifftshift",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy... | [((316, 329), 'numpy.flipud', 'np.flipud', (['xx'], {}), '(xx)\n', (325, 329), True, 'import numpy as np\n'), ((340, 359), 'numpy.minimum', 'np.minimum', (['xx', 'xx1'], {}), '(xx, xx1)\n', (350, 359), True, 'import numpy as np\n'), ((370, 383), 'numpy.fliplr', 'np.fliplr', (['yy'], {}), '(yy)\n', (379, 383), True, 'import numpy as np\n'), ((394, 413), 'numpy.minimum', 'np.minimum', (['yy', 'yy1'], {}), '(yy, yy1)\n', (404, 413), True, 'import numpy as np\n'), ((540, 550), 'numpy.sin', 'np.sin', (['rr'], {}), '(rr)\n', (546, 550), True, 'import numpy as np\n'), ((889, 929), 'numpy.sqrt', 'np.sqrt', (['((xx - x0) ** 2 + (yy - y0) ** 2)'], {}), '((xx - x0) ** 2 + (yy - y0) ** 2)\n', (896, 929), True, 'import numpy as np\n'), ((997, 1032), 'numpy.logical_and', 'np.logical_and', (['(rr >= 0)', '(rr <= edge)'], {}), '(rr >= 0, rr <= edge)\n', (1011, 1032), True, 'import numpy as np\n'), ((4430, 4444), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4442, 4444), True, 'from matplotlib import pyplot as plt\n'), ((4484, 4504), 'matplotlib.pyplot.title', 'plt.title', (['"""tapered"""'], {}), "('tapered')\n", (4493, 4504), True, 'from matplotlib import pyplot as plt\n'), ((4509, 4519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4517, 4519), True, 'from matplotlib import pyplot as plt\n'), ((5177, 5191), 'numpy.zeros', 'ny.zeros', (['nrad'], {}), '(nrad)\n', (5185, 5191), True, 'import numpy as ny\n'), ((5215, 5229), 'numpy.zeros', 'ny.zeros', (['nrad'], {}), '(nrad)\n', (5223, 5229), True, 'import numpy as ny\n'), ((5253, 5267), 'numpy.zeros', 'ny.zeros', (['nrad'], {}), '(nrad)\n', (5261, 5267), True, 'import numpy as ny\n'), ((5294, 5308), 'numpy.zeros', 'ny.zeros', (['nrad'], {}), '(nrad)\n', (5302, 5308), True, 'import numpy as ny\n'), ((6423, 6471), 'numpy.nan_to_num', 'ny.nan_to_num', (['(radialdata.num / radialdata.denom)'], {}), '(radialdata.num / radialdata.denom)\n', (6436, 6471), True, 'import numpy as ny\n'), ((800, 
838), 'numpy.arange', 'np.arange', (['(-shape[0] / 2)', '(shape[0] / 2)'], {}), '(-shape[0] / 2, shape[0] / 2)\n', (809, 838), True, 'import numpy as np\n'), ((840, 878), 'numpy.arange', 'np.arange', (['(-shape[1] / 2)', '(shape[1] / 2)'], {}), '(-shape[1] / 2, shape[1] / 2)\n', (849, 878), True, 'import numpy as np\n'), ((1147, 1164), 'numpy.max', 'np.max', (['rr[taper]'], {}), '(rr[taper])\n', (1153, 1164), True, 'import numpy as np\n'), ((4242, 4266), 'numpy.ones', 'ny.ones', (['im1.shape', 'bool'], {}), '(im1.shape, bool)\n', (4249, 4266), True, 'import numpy as ny\n'), ((4458, 4478), 'pyE17.utils.imsave', 'imsave', (['(im1 * taper0)'], {}), '(im1 * taper0)\n', (4464, 4478), False, 'from pyE17.utils import imsave\n'), ((4714, 4748), 'numpy.arange', 'ny.arange', (['(-npix / 2.0)', '(npix / 2.0)'], {}), '(-npix / 2.0, npix / 2.0)\n', (4723, 4748), True, 'import numpy as ny\n'), ((4760, 4794), 'numpy.arange', 'ny.arange', (['(-npiy / 2.0)', '(npiy / 2.0)'], {}), '(-npiy / 2.0, npiy / 2.0)\n', (4769, 4794), True, 'import numpy as ny\n'), ((4808, 4827), 'numpy.meshgrid', 'ny.meshgrid', (['y1', 'x1'], {}), '(y1, x1)\n', (4819, 4827), True, 'import numpy as ny\n'), ((5011, 5038), 'numpy.abs', 'ny.abs', (['[x[0, 0] - x[0, 1]]'], {}), '([x[0, 0] - x[0, 1]])\n', (5017, 5038), True, 'import numpy as ny\n'), ((6567, 6608), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['radialdata.frc', 'smooth'], {}), '(radialdata.frc, smooth)\n', (6584, 6608), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((423, 443), 'numpy.minimum', 'np.minimum', (['xx2', 'yy2'], {}), '(xx2, yy2)\n', (433, 443), True, 'import numpy as np\n'), ((4563, 4586), 'numpy.fft.ifftshift', 'ifftshift', (['(im1 * taper0)'], {}), '(im1 * taper0)\n', (4572, 4586), False, 'from numpy.fft import fft2, ifftshift, fftshift, ifft2, fft, ifft\n'), ((4612, 4635), 'numpy.fft.ifftshift', 'ifftshift', (['(im2 * taper0)'], {}), '(im2 * taper0)\n', (4621, 4635), False, 'from numpy.fft 
import fft2, ifftshift, fftshift, ifft2, fft, ifft\n'), ((5068, 5088), 'numpy.arange', 'ny.arange', (['(rmax / dr)'], {}), '(rmax / dr)\n', (5077, 5088), True, 'import numpy as ny\n'), ((1126, 1143), 'numpy.max', 'np.max', (['rr[taper]'], {}), '(rr[taper])\n', (1132, 1143), True, 'import numpy as np\n'), ((6067, 6088), 'numpy.abs', 'ny.abs', (['F1[thisindex]'], {}), '(F1[thisindex])\n', (6073, 6088), True, 'import numpy as ny\n'), ((6104, 6125), 'numpy.abs', 'ny.abs', (['F2[thisindex]'], {}), '(F2[thisindex])\n', (6110, 6125), True, 'import numpy as ny\n')] |
#!/usr/bin/env python3
import gym
import ptan
import argparse
import time
import random
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
GAMMA = 0.99
LEARNING_RATE = 0.001
ENTROPY_BETA = 0.01
BATCH_SIZE = 8
REWARD_STEPS = 10
class PGN(nn.Module):
def __init__(self, input_size, n_actions):
super(PGN, self).__init__()
self.net = nn.Sequential(
nn.Linear(input_size, 128),
nn.ReLU(),
nn.Linear(128, n_actions)
)
def forward(self, x):
return self.net(x)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--baseline", default=False, action='store_true', help="Enable mean baseline")
# Common arguments
parser.add_argument('--learning-rate', type=float, default=7e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=5,
help='seed of the experiment')
parser.add_argument('--episode-length', type=int, default=200,
help='the maximum length of each episode')
parser.add_argument('--total-timesteps', type=int, default=50000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=bool, default=True,
help='whether to set `torch.backends.cudnn.deterministic=True`')
# Algorithm specific arguments
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--vf-coef', type=float, default=0.25,
help="value function's coefficient the loss function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="policy entropy's coefficient the loss function")
args = parser.parse_args()
env = gym.make("CartPole-v0")
experiment_name = "".join(
[time.strftime('%Y.%m.%d.%H.%M.%z')] +
[ f"__{getattr(args, arg)}" for arg in vars(args)]
)
writer = SummaryWriter(f"runs/{experiment_name}")
if not args.seed:
args.seed = int(time.time())
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
env.seed(args.seed)
net = PGN(env.observation_space.shape[0], env.action_space.n)
print(net)
agent = ptan.agent.PolicyAgent(net, preprocessor=ptan.agent.float32_preprocessor,
apply_softmax=True)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=REWARD_STEPS)
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
total_rewards = []
step_rewards = []
step_idx = 0
done_episodes = 0
reward_sum = 0.0
batch_states, batch_actions, batch_scales = [], [], []
for step_idx, exp in enumerate(exp_source):
if step_idx > args.total_timesteps:
break
reward_sum += exp.reward
baseline = reward_sum / (step_idx + 1)
writer.add_scalar("baseline", baseline, step_idx)
batch_states.append(exp.state)
batch_actions.append(int(exp.action))
if args.baseline:
batch_scales.append(exp.reward - baseline)
else:
batch_scales.append(exp.reward)
# handle new rewards
new_rewards = exp_source.pop_total_rewards()
if new_rewards:
done_episodes += 1
reward = new_rewards[0]
total_rewards.append(reward)
mean_rewards = float(np.mean(total_rewards[-100:]))
writer.add_scalar("reward", reward, step_idx)
writer.add_scalar("reward_100", mean_rewards, step_idx)
writer.add_scalar("episodes", done_episodes, step_idx)
if len(batch_states) < BATCH_SIZE:
continue
states_v = torch.FloatTensor(batch_states)
batch_actions_t = torch.LongTensor(batch_actions)
batch_scale_v = torch.FloatTensor(batch_scales)
optimizer.zero_grad()
logits_v = net(states_v)
log_prob_v = F.log_softmax(logits_v, dim=1)
log_prob_actions_v = batch_scale_v * log_prob_v[range(BATCH_SIZE), batch_actions_t]
loss_policy_v = -log_prob_actions_v.mean()
loss_policy_v.backward(retain_graph=True)
grads = np.concatenate([p.grad.data.numpy().flatten()
for p in net.parameters()
if p.grad is not None])
prob_v = F.softmax(logits_v, dim=1)
entropy_v = -(prob_v * log_prob_v).sum(dim=1).mean()
entropy_loss_v = -ENTROPY_BETA * entropy_v
entropy_loss_v.backward()
optimizer.step()
loss_v = loss_policy_v + entropy_loss_v
# calc KL-div
new_logits_v = net(states_v)
new_prob_v = F.softmax(new_logits_v, dim=1)
kl_div_v = -((new_prob_v / prob_v).log() * prob_v).sum(dim=1).mean()
writer.add_scalar("kl", kl_div_v.item(), step_idx)
writer.add_scalar("baseline", baseline, step_idx)
writer.add_scalar("entropy", entropy_v.item(), step_idx)
writer.add_scalar("batch_scales", np.mean(batch_scales), step_idx)
writer.add_scalar("loss_entropy", entropy_loss_v.item(), step_idx)
writer.add_scalar("loss_policy", loss_policy_v.item(), step_idx)
writer.add_scalar("loss_total", loss_v.item(), step_idx)
writer.add_scalar("grad_l2", np.sqrt(np.mean(np.square(grads))), step_idx)
writer.add_scalar("grad_max", np.max(np.abs(grads)), step_idx)
writer.add_scalar("grad_var", np.var(grads), step_idx)
batch_states.clear()
batch_actions.clear()
batch_scales.clear()
writer.close()
| [
"torch.nn.ReLU",
"ptan.agent.PolicyAgent",
"torch.LongTensor",
"torch.nn.functional.softmax",
"gym.make",
"numpy.mean",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.abs",
"numpy.square",
"torch.nn.functional.log_softmax",
"time.time",
"torch.manual_s... | [((681, 706), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (704, 706), False, 'import argparse\n'), ((2133, 2156), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (2141, 2156), False, 'import gym\n'), ((2322, 2362), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['f"""runs/{experiment_name}"""'], {}), "(f'runs/{experiment_name}')\n", (2335, 2362), False, 'from tensorboardX import SummaryWriter\n'), ((2426, 2448), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2437, 2448), False, 'import random\n'), ((2453, 2478), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2467, 2478), True, 'import numpy as np\n'), ((2483, 2511), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2500, 2511), False, 'import torch\n'), ((2676, 2773), 'ptan.agent.PolicyAgent', 'ptan.agent.PolicyAgent', (['net'], {'preprocessor': 'ptan.agent.float32_preprocessor', 'apply_softmax': '(True)'}), '(net, preprocessor=ptan.agent.float32_preprocessor,\n apply_softmax=True)\n', (2698, 2773), False, 'import ptan\n'), ((2822, 2918), 'ptan.experience.ExperienceSourceFirstLast', 'ptan.experience.ExperienceSourceFirstLast', (['env', 'agent'], {'gamma': 'GAMMA', 'steps_count': 'REWARD_STEPS'}), '(env, agent, gamma=GAMMA,\n steps_count=REWARD_STEPS)\n', (2863, 2918), False, 'import ptan\n'), ((4175, 4206), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_states'], {}), '(batch_states)\n', (4192, 4206), False, 'import torch\n'), ((4233, 4264), 'torch.LongTensor', 'torch.LongTensor', (['batch_actions'], {}), '(batch_actions)\n', (4249, 4264), False, 'import torch\n'), ((4289, 4320), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_scales'], {}), '(batch_scales)\n', (4306, 4320), False, 'import torch\n'), ((4406, 4436), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits_v'], {'dim': '(1)'}), '(logits_v, dim=1)\n', (4419, 4436), True, 'import 
torch.nn.functional as F\n'), ((4825, 4851), 'torch.nn.functional.softmax', 'F.softmax', (['logits_v'], {'dim': '(1)'}), '(logits_v, dim=1)\n', (4834, 4851), True, 'import torch.nn.functional as F\n'), ((5153, 5183), 'torch.nn.functional.softmax', 'F.softmax', (['new_logits_v'], {'dim': '(1)'}), '(new_logits_v, dim=1)\n', (5162, 5183), True, 'import torch.nn.functional as F\n'), ((486, 512), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(128)'], {}), '(input_size, 128)\n', (495, 512), True, 'import torch.nn as nn\n'), ((526, 535), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (533, 535), True, 'import torch.nn as nn\n'), ((549, 574), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'n_actions'], {}), '(128, n_actions)\n', (558, 574), True, 'import torch.nn as nn\n'), ((2409, 2420), 'time.time', 'time.time', ([], {}), '()\n', (2418, 2420), False, 'import time\n'), ((5486, 5507), 'numpy.mean', 'np.mean', (['batch_scales'], {}), '(batch_scales)\n', (5493, 5507), True, 'import numpy as np\n'), ((5925, 5938), 'numpy.var', 'np.var', (['grads'], {}), '(grads)\n', (5931, 5938), True, 'import numpy as np\n'), ((2201, 2235), 'time.strftime', 'time.strftime', (['"""%Y.%m.%d.%H.%M.%z"""'], {}), "('%Y.%m.%d.%H.%M.%z')\n", (2214, 2235), False, 'import time\n'), ((3866, 3895), 'numpy.mean', 'np.mean', (['total_rewards[-100:]'], {}), '(total_rewards[-100:])\n', (3873, 3895), True, 'import numpy as np\n'), ((5861, 5874), 'numpy.abs', 'np.abs', (['grads'], {}), '(grads)\n', (5867, 5874), True, 'import numpy as np\n'), ((5786, 5802), 'numpy.square', 'np.square', (['grads'], {}), '(grads)\n', (5795, 5802), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import cartopy.crs as ccrs
from .afi_base import AFI_basePlotter
from cosmic.util import load_cmap_data
class AFI_meanPlotter(AFI_basePlotter):
def gen_axes(self):
if self.domain == 'china':
gs = gridspec.GridSpec(len(self.runids) + 1, 3, figure=self.fig,
height_ratios=[1.] * len(self.runids) + [0.2])
elif self.domain in ['asia', 'europe']:
gs = gridspec.GridSpec(len(self.runids) + 1, 3, figure=self.fig,
height_ratios=[1.] * len(self.runids) + [0.3])
fig_axes = []
cb_axes = []
for i in range(len(self.runids)):
ax_row = []
for j in range(3):
ax_row.append(plt.subplot(gs[i, j], projection=ccrs.PlateCarree()))
fig_axes.append(ax_row)
for j in range(3):
cb_axes.append(plt.subplot(gs[-1, j]))
return np.array(fig_axes), np.array(cb_axes)
def add_titles_colourbars(self):
for j, mode in enumerate(self.MODES):
title_ax = self.fig_axes[0, j]
title_ax.set_title(self.TITLE_MODE_MAP[mode])
im = self.image_grid[-1][j]
cax = self.cb_axes[j]
cax.axis('off')
if mode == 'amount':
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb1.pkl')
cbar_kwargs['norm'] = norm
units = 'mm day$^{-1}$'
elif mode == 'freq':
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb2.pkl')
cbar_kwargs['norm'] = norm
units = '%'
elif mode == 'intensity':
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb3.pkl')
cbar_kwargs['norm'] = norm
units = 'mm hr$^{-1}$'
cbar_kwargs['extend'] = 'max'
plt.colorbar(im, ax=cax, label=f'{mode} precip. ({units})',
**cbar_kwargs, spacing='uniform',
orientation='horizontal', fraction=0.9)
def plot_ax(self, ax, cube, runid, mode):
lon_min, lon_max = cube.coord('longitude').points[[0, -1]]
lat_min, lat_max = cube.coord('latitude').points[[0, -1]]
extent = (lon_min, lon_max, lat_min, lat_max)
if mode == 'amount':
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb1.pkl')
data = cube.data.mean(axis=0) * 24 # mm/hr -> mm/day
kwargs = {'vmin': 1e-3, 'vmax': 12, 'cmap': cmap}
elif mode == 'freq':
data = cube.data.mean(axis=0) * 100
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb2.pkl')
kwargs = {'vmin': 0, 'cmap': cmap}
kwargs['vmin'] = 3
kwargs['vmax'] = 40
elif mode == 'intensity':
cmap, norm, bounds, cbar_kwargs = load_cmap_data('cmap_data/li2018_fig2_cb3.pkl')
data = cube.data.mean(axis=0)
kwargs = {'vmin': 1e-2, 'vmax': 4, 'cmap': cmap}
kwargs['norm'] = norm
if runid == 'cmorph':
Lat, Lon = np.meshgrid(cube.coord('latitude').points, cube.coord('longitude').points, indexing='ij')
data = np.ma.masked_array(data, Lat > 59)
im = ax.imshow(data, origin='lower', extent=extent, **kwargs)
return im
| [
"matplotlib.pyplot.colorbar",
"cartopy.crs.PlateCarree",
"numpy.array",
"cosmic.util.load_cmap_data",
"numpy.ma.masked_array",
"matplotlib.pyplot.subplot"
] | [((1015, 1033), 'numpy.array', 'np.array', (['fig_axes'], {}), '(fig_axes)\n', (1023, 1033), True, 'import numpy as np\n'), ((1035, 1052), 'numpy.array', 'np.array', (['cb_axes'], {}), '(cb_axes)\n', (1043, 1052), True, 'import numpy as np\n'), ((2030, 2167), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'cax', 'label': 'f"""{mode} precip. ({units})"""', 'spacing': '"""uniform"""', 'orientation': '"""horizontal"""', 'fraction': '(0.9)'}), "(im, ax=cax, label=f'{mode} precip. ({units})', **cbar_kwargs,\n spacing='uniform', orientation='horizontal', fraction=0.9)\n", (2042, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2571), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb1.pkl"""'], {}), "('cmap_data/li2018_fig2_cb1.pkl')\n", (2538, 2571), False, 'from cosmic.util import load_cmap_data\n'), ((3405, 3439), 'numpy.ma.masked_array', 'np.ma.masked_array', (['data', '(Lat > 59)'], {}), '(data, Lat > 59)\n', (3423, 3439), True, 'import numpy as np\n'), ((976, 998), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[-1, j]'], {}), '(gs[-1, j])\n', (987, 998), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1471), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb1.pkl"""'], {}), "('cmap_data/li2018_fig2_cb1.pkl')\n", (1438, 1471), False, 'from cosmic.util import load_cmap_data\n'), ((2823, 2870), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb2.pkl"""'], {}), "('cmap_data/li2018_fig2_cb2.pkl')\n", (2837, 2870), False, 'from cosmic.util import load_cmap_data\n'), ((1638, 1685), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb2.pkl"""'], {}), "('cmap_data/li2018_fig2_cb2.pkl')\n", (1652, 1685), False, 'from cosmic.util import load_cmap_data\n'), ((3061, 3108), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb3.pkl"""'], {}), "('cmap_data/li2018_fig2_cb3.pkl')\n", (3075, 3108), False, 'from 
cosmic.util import load_cmap_data\n'), ((1845, 1892), 'cosmic.util.load_cmap_data', 'load_cmap_data', (['"""cmap_data/li2018_fig2_cb3.pkl"""'], {}), "('cmap_data/li2018_fig2_cb3.pkl')\n", (1859, 1892), False, 'from cosmic.util import load_cmap_data\n'), ((865, 883), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (881, 883), True, 'import cartopy.crs as ccrs\n')] |
import argparse
import numpy as np
from art.utils import random_sphere
from utils.config import label2nb_dict, set_gpu
from utils.data import load_data
from utils.model import load_model
from utils.plot import make_adv_img, make_confusion_matrix
from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='chestx')
parser.add_argument('--model', type=str, default='inceptionv3')
parser.add_argument('--norm', type=str, default='l2')
parser.add_argument('--eps', type=float, default=0.04)
parser.add_argument('--gpu', type=str, default='0')
args = parser.parse_args()
set_gpu(args.gpu)
X_train, X_test, y_train, y_test, mean_l2_train, mean_inf_train = load_data(
dataset=args.dataset, normalize=True, norm=True)
model = load_model(
dataset=args.dataset,
nb_class=y_train.shape[1],
model_type=args.model,
mode='inference'
)
# # Generate adversarial examples
classifier, norm, eps = set_art(
model, args.norm, args.eps, mean_l2_train, mean_inf_train)
h, w, c = X_train.shape[1], X_train.shape[2], X_train.shape[3]
noise = random_sphere(nb_points=1,
nb_dims=(h * w * c),
radius=eps,
norm=norm)
noise = noise.reshape(h, w, c).astype('float32')
base_f = 'random_{}_{}_eps{:.3f}'.format(
args.model, args.norm, args.eps)
save_f_noise = 'result/{}/noise/{}'.format(args.dataset, base_f)
np.save(save_f_noise, noise)
# # Evaluate the ART classifier on adversarial examples
preds_train = np.argmax(classifier.predict(X_train), axis=1)
preds_test = np.argmax(classifier.predict(X_test), axis=1)
X_train_adv = X_train + noise
X_test_adv = X_test + noise
preds_train_adv = np.argmax(classifier.predict(X_train_adv), axis=1)
preds_test_adv = np.argmax(classifier.predict(X_test_adv), axis=1)
rf_train = get_fooling_rate(preds=preds_train, preds_adv=preds_train_adv)
rf_test = get_fooling_rate(preds=preds_test, preds_adv=preds_test_adv)
rs_train_list, rs_test_list = [], []
label_list = label2nb_dict[args.dataset].keys()
for target in label_list:
rs_train_list.append(get_targeted_success_rate(
preds_train_adv, label2nb_dict[args.dataset][target]))
rs_test_list.append(get_targeted_success_rate(
preds_test_adv, label2nb_dict[args.dataset][target]))
save_f_train = 'result/{}/conf_mat/train_{}.png'.format(
args.dataset, base_f)
save_f_test = 'result/{}/conf_mat/test_{}.png'.format(
args.dataset, base_f)
title_train = 'Rf:{:.3f}\nRs '.format(rf_train)
title_test = 'Rf:{:.3f}\nRs '.format(rf_test)
for i, target in enumerate(label_list):
title_train = title_train + \
'{}:{:.3f} '.format(target, rs_train_list[i])
title_test = title_test + \
'{}:{:.3f} '.format(target, rs_train_list[i])
make_confusion_matrix(
y_row=preds_train,
y_col=preds_train_adv,
save_file_name=save_f_train,
dataset=args.dataset,
ylabel='Pred clean',
xlabel='Pred adv',
title=title_train
)
make_confusion_matrix(
y_row=preds_test,
y_col=preds_test_adv,
save_file_name=save_f_test,
dataset=args.dataset,
ylabel='Pred clean',
xlabel='Pred adv',
title=title_test
)
# # Show the adversarial examples
save_f_img = 'result/{}/imshow/{}.png'.format(args.dataset, base_f)
make_adv_img(
clean_img=X_test[0],
noise=noise,
adv_img=X_test_adv[0],
save_file_name=save_f_img
)
| [
"utils.plot.make_adv_img",
"art.utils.random_sphere",
"argparse.ArgumentParser",
"utils.config.set_gpu",
"utils.utils.set_art",
"utils.utils.get_targeted_success_rate",
"utils.model.load_model",
"utils.data.load_data",
"utils.plot.make_confusion_matrix",
"numpy.save",
"utils.utils.get_fooling_ra... | [((335, 360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (358, 360), False, 'import argparse\n'), ((675, 692), 'utils.config.set_gpu', 'set_gpu', (['args.gpu'], {}), '(args.gpu)\n', (682, 692), False, 'from utils.config import label2nb_dict, set_gpu\n'), ((760, 818), 'utils.data.load_data', 'load_data', ([], {'dataset': 'args.dataset', 'normalize': '(True)', 'norm': '(True)'}), '(dataset=args.dataset, normalize=True, norm=True)\n', (769, 818), False, 'from utils.data import load_data\n'), ((833, 938), 'utils.model.load_model', 'load_model', ([], {'dataset': 'args.dataset', 'nb_class': 'y_train.shape[1]', 'model_type': 'args.model', 'mode': '"""inference"""'}), "(dataset=args.dataset, nb_class=y_train.shape[1], model_type=args\n .model, mode='inference')\n", (843, 938), False, 'from utils.model import load_model\n'), ((1012, 1078), 'utils.utils.set_art', 'set_art', (['model', 'args.norm', 'args.eps', 'mean_l2_train', 'mean_inf_train'], {}), '(model, args.norm, args.eps, mean_l2_train, mean_inf_train)\n', (1019, 1078), False, 'from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art\n'), ((1156, 1224), 'art.utils.random_sphere', 'random_sphere', ([], {'nb_points': '(1)', 'nb_dims': '(h * w * c)', 'radius': 'eps', 'norm': 'norm'}), '(nb_points=1, nb_dims=h * w * c, radius=eps, norm=norm)\n', (1169, 1224), False, 'from art.utils import random_sphere\n'), ((1487, 1515), 'numpy.save', 'np.save', (['save_f_noise', 'noise'], {}), '(save_f_noise, noise)\n', (1494, 1515), True, 'import numpy as np\n'), ((1902, 1964), 'utils.utils.get_fooling_rate', 'get_fooling_rate', ([], {'preds': 'preds_train', 'preds_adv': 'preds_train_adv'}), '(preds=preds_train, preds_adv=preds_train_adv)\n', (1918, 1964), False, 'from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art\n'), ((1975, 2035), 'utils.utils.get_fooling_rate', 'get_fooling_rate', ([], {'preds': 'preds_test', 'preds_adv': 
'preds_test_adv'}), '(preds=preds_test, preds_adv=preds_test_adv)\n', (1991, 2035), False, 'from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art\n'), ((2854, 3035), 'utils.plot.make_confusion_matrix', 'make_confusion_matrix', ([], {'y_row': 'preds_train', 'y_col': 'preds_train_adv', 'save_file_name': 'save_f_train', 'dataset': 'args.dataset', 'ylabel': '"""Pred clean"""', 'xlabel': '"""Pred adv"""', 'title': 'title_train'}), "(y_row=preds_train, y_col=preds_train_adv,\n save_file_name=save_f_train, dataset=args.dataset, ylabel='Pred clean',\n xlabel='Pred adv', title=title_train)\n", (2875, 3035), False, 'from utils.plot import make_adv_img, make_confusion_matrix\n'), ((3058, 3235), 'utils.plot.make_confusion_matrix', 'make_confusion_matrix', ([], {'y_row': 'preds_test', 'y_col': 'preds_test_adv', 'save_file_name': 'save_f_test', 'dataset': 'args.dataset', 'ylabel': '"""Pred clean"""', 'xlabel': '"""Pred adv"""', 'title': 'title_test'}), "(y_row=preds_test, y_col=preds_test_adv,\n save_file_name=save_f_test, dataset=args.dataset, ylabel='Pred clean',\n xlabel='Pred adv', title=title_test)\n", (3079, 3235), False, 'from utils.plot import make_adv_img, make_confusion_matrix\n'), ((3362, 3462), 'utils.plot.make_adv_img', 'make_adv_img', ([], {'clean_img': 'X_test[0]', 'noise': 'noise', 'adv_img': 'X_test_adv[0]', 'save_file_name': 'save_f_img'}), '(clean_img=X_test[0], noise=noise, adv_img=X_test_adv[0],\n save_file_name=save_f_img)\n', (3374, 3462), False, 'from utils.plot import make_adv_img, make_confusion_matrix\n'), ((2173, 2252), 'utils.utils.get_targeted_success_rate', 'get_targeted_success_rate', (['preds_train_adv', 'label2nb_dict[args.dataset][target]'], {}), '(preds_train_adv, label2nb_dict[args.dataset][target])\n', (2198, 2252), False, 'from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art\n'), ((2287, 2365), 'utils.utils.get_targeted_success_rate', 'get_targeted_success_rate', (['preds_test_adv', 
'label2nb_dict[args.dataset][target]'], {}), '(preds_test_adv, label2nb_dict[args.dataset][target])\n', (2312, 2365), False, 'from utils.utils import get_fooling_rate, get_targeted_success_rate, set_art\n')] |
import numpy as np
import csv, os
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
import global_vars as Global
from sklearn.manifold import TSNE
from datasets.NIH_Chest import NIHChestBinaryTrainSplit
import seaborn as sns
import argparse
import os
import models as Models
from easydict import EasyDict
import _pickle
from datasets.NIH_Chest import NIHChest
from datasets.PADChest import PADChestBinaryTrainSplit, PADChestBinaryValSplit, PADChestBinaryTestSplit, PADChestSV
import umap
All_OD1 = [
'UniformNoise',
#'NormalNoise',
'MNIST',
'FashionMNIST',
'NotMNIST',
#'CIFAR100',
'CIFAR10',
'STL10',
'TinyImagenet',
'MURAHAND',
#'MURAWRIST',
#'MURAELBOW',
'MURAFINGER',
#'MURAFOREARM',
#'MURAHUMERUS',
#'MURASHOULDER',
]
ALL_OD2_NIH = [
"PADChestAP",
"PADChestL",
"PADChestAPHorizontal",
"PADChestPED"
]
ALL_OD2_PAD = ["PADChestAP",
"PADChestPA",
"PADChestAPHorizontal",
"PADChestPED"
]
d3_tags_NIH = ['Cardiomegaly', 'Pneumothorax', 'Nodule', 'Mass']
d3_tags_PAD = ['cardiomegaly', 'pneumothorax', 'nodule', 'mass']
def proc_data(args, model, D1, d2s, tags):
Out_X = []
Out_Y = []
Cat2Y = {}
for y, D2 in enumerate(d2s):
Cat2Y[tags[y]] = y + 1
loader = DataLoader(D2, shuffle=True, batch_size=args.points_per_d2)
for i, (X, _) in enumerate(loader):
x = X.numpy()
Out_X.append(x)
Out_Y.append(np.ones(x.shape[0]) * (y + 1))
break
Out_X = np.concatenate(Out_X, axis=0)
Out_Y = np.concatenate(Out_Y, axis=0)
N_out = Out_X.shape[0]
print(N_out)
N_in = max(int(N_out * 0.2), args.points_per_d2)
In_X = []
for i in range(N_in):
In_X.append(D1[i][0].numpy())
In_Y = np.zeros(N_in)
print(N_in, len(In_X), len(In_Y))
Cat2Y["In-Data"] = 0
ALL_X = np.concatenate((In_X, Out_X))
ALL_Y = np.concatenate((In_Y, Out_Y))
new_dataset = TensorDataset(torch.tensor(ALL_X))
loader = DataLoader(new_dataset, batch_size=64)
ALL_EMBS = []
for i, (X,) in enumerate(loader):
x = model.encode(X.cuda()).data.cpu().numpy()
ALL_EMBS.append(x)
ALL_EMBS = np.concatenate(ALL_EMBS, axis=0)
return ALL_EMBS, ALL_Y, Cat2Y
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, help="unique path for symlink to dataset")
parser.add_argument('--seed', type=int, default=42, help='Random seed. (default 42)')
parser.add_argument('--exp', '--experiment_id', type=str, default='test', help='The Experiment ID. (default test)')
parser.add_argument('--embedding_function', type=str, default="VAE")
parser.add_argument('--dataset', type=str, default="nihcc")
parser.add_argument('--encoder_loss', type=str, default='bce')
#parser.add_argument('--model_path', type=str, default="model_ref/Generic_VAE.HClass/NIHCC.dataset/BCE.max.512.d.12.nH.1024/model.best.pth")
parser.add_argument('--umap', action="store_true")
parser.add_argument('--points_per_d2', type=int, default=1024)
parser.add_argument('--lr', type=float, default=0.1, help="learning rate in tsne mode, min_dist in umap mode")
parser.add_argument('--perplexity', type=float, default=100.0, help="perplexity in tsne mode, n_neighbor in umap mode")
parser.add_argument('--n_iter', type=int, default=1000, help="n iter in tsne mode, n epoch in umap mode")
parser.add_argument('--load', action="store_true")
parser.add_argument('--plot_percent', default=0.5, type=float)
args = parser.parse_args()
args.experiment_id = args.exp
exp_data = []
workspace_path = os.path.abspath('workspace')
exp_list = args.experiment_id.split(',')
exp_paths = []
for exp_id in exp_list:
experiments_path = os.path.join(workspace_path, 'experiments', exp_id)
if not os.path.exists(experiments_path):
os.makedirs(experiments_path)
# Make the experiment subfolders.
for folder_name in exp_data:
if not os.path.exists(os.path.join(experiments_path, folder_name)):
os.makedirs(os.path.join(experiments_path, folder_name))
exp_paths.append(experiments_path)
if len(exp_list) == 1:
args.experiment_path = exp_paths[0]
else:
print('Operating in multi experiment mode.', 'red')
args.experiment_path = exp_paths
#####################################################################################################
if not args.load or not os.path.exists(os.path.join(args.experiment_path, "all_embs_UC3_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset))):
assert args.dataset in ['NIHCC', 'PADChest']
if args.dataset.lower() == 'nihcc':
D164 = NIHChestBinaryTrainSplit(root_path=os.path.join(args.root_path, 'NIHCC'), downsample=64)
elif args.dataset.lower() == "padchest":
D164 = PADChestBinaryTrainSplit(root_path=os.path.join(args.root_path, "PADChest"), binary=True, downsample=64)
D1 = D164.get_D1_train()
emb = args.embedding_function.lower()
assert emb in ["vae", "ae", "ali"]
dummy_args = EasyDict()
dummy_args.exp = "foo"
dummy_args.experiment_path = args.experiment_path
if args.encoder_loss.lower() == "bce":
tag = "BCE"
else:
tag = "MSE"
if emb == "vae":
model = Global.dataset_reference_vaes[args.dataset][0]()
home_path = Models.get_ref_model_path(dummy_args, model.__class__.__name__, D164.name,
suffix_str=tag + "." + model.netid)
model_path = os.path.join(home_path, 'model.best.pth')
elif emb == "ae":
model = Global.dataset_reference_autoencoders[args.dataset][0]()
home_path = Models.get_ref_model_path(dummy_args, model.__class__.__name__, D164.name,
suffix_str=tag + "." + model.netid)
model_path = os.path.join(home_path, 'model.best.pth')
else:
model = Global.dataset_reference_ALI[args.dataset][0]()
home_path = Models.get_ref_model_path(dummy_args, model.__class__.__name__, D164.name,
suffix_str=tag + "." + model.netid)
model_path = os.path.join(home_path, 'model.best.pth')
model.load_state_dict(torch.load(model_path))
model = model.to("cuda")
d2s = []
for y, d2 in enumerate(All_OD1):
dataset = Global.all_datasets[d2]
if 'dataset_path' in dataset.__dict__:
print(os.path.join(args.root_path, dataset.dataset_path))
D2 = dataset(root_path=os.path.join(args.root_path, dataset.dataset_path)).get_D2_test(D164)
else:
D2 = dataset().get_D2_test(D164)
d2s.append(D2)
ALL_EMBS, ALL_Y, Cat2Y = proc_data(args, model, D1, d2s, All_OD1)
with open(os.path.join(args.experiment_path, "cat2y_UC1_ppd_%d_d1_%s.pkl"% (args.points_per_d2, args.dataset)), "wb") as fp:
_pickle.dump(Cat2Y, fp)
np.save(os.path.join(args.experiment_path, "all_y_UC1_ppd_%d_d1_%s.npy" % (args.points_per_d2, args.dataset)), ALL_Y)
np.save(os.path.join(args.experiment_path, "all_embs_UC1_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset)), ALL_EMBS)
#######################################################################################
d2s = []
OD2 = ALL_OD2_NIH if args.dataset=="NIHCC" else ALL_OD2_PAD
for y, d2 in enumerate(OD2):
dataset = Global.all_datasets[d2]
if 'dataset_path' in dataset.__dict__:
print(os.path.join(args.root_path, dataset.dataset_path))
D2 = dataset(root_path=os.path.join(args.root_path, dataset.dataset_path)).get_D2_test(D164)
else:
D2 = dataset().get_D2_test(D164)
d2s.append(D2)
ALL_EMBS, ALL_Y, Cat2Y = proc_data(args, model, D1, d2s, OD2)
with open(os.path.join(args.experiment_path, "cat2y_UC2_ppd_%d_d1_%s.pkl"% (args.points_per_d2, args.dataset)), "wb") as fp:
_pickle.dump(Cat2Y, fp)
np.save(os.path.join(args.experiment_path, "all_y_UC2_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset)), ALL_Y)
np.save(os.path.join(args.experiment_path, "all_embs_UC2_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset)), ALL_EMBS)
#########################################################################################
d2s = []
d3_tags = d3_tags_NIH if args.dataset == "NIHCC" else d3_tags_PAD
for d2 in d3_tags:
if args.dataset == "NIHCC":
D2 = NIHChest(root_path=os.path.join(args.root_path, 'NIHCC'), binary=True, test_length=5000,
keep_in_classes=[d2, ]).get_D2_test(D164)
elif args.dataset == "PADChest":
D2 = PADChestSV(root_path=os.path.join(args.root_path, 'PADChest'), binary=True, test_length=5000,
keep_in_classes=[d2, ], downsample=64).get_D2_test(D164)
d2s.append(D2)
ALL_EMBS, ALL_Y, Cat2Y = proc_data(args, model, D1, d2s, d3_tags)
with open(os.path.join(args.experiment_path, "cat2y_UC3_ppd_%d_d1_%s.pkl"% (args.points_per_d2, args.dataset)), "wb") as fp:
_pickle.dump(Cat2Y, fp)
np.save(os.path.join(args.experiment_path, "all_y_UC3_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset)), ALL_Y)
np.save(os.path.join(args.experiment_path, "all_embs_UC3_ppd_%d_d1_%s.npy"% (args.points_per_d2, args.dataset)), ALL_EMBS)
else:
pass
for i in range(3):
uc_tag = i+1
ALL_EMBS = np.load(os.path.join(args.experiment_path, "all_embs_UC%i_ppd_%d_d1_%s.npy"%(uc_tag, args.points_per_d2, args.dataset)))
with open(os.path.join(args.experiment_path, "cat2y_UC%i_ppd_%d_d1_%s.pkl"%(uc_tag, args.points_per_d2, args.dataset)), "rb") as fp:
Cat2Y = _pickle.load(fp)
ALL_Y = np.load(os.path.join(args.experiment_path, "all_y_UC%i_ppd_%d_d1_%s.npy"%(uc_tag, args.points_per_d2, args.dataset)))
N=ALL_EMBS.shape[0]
ALL_EMBS = ALL_EMBS.reshape(N, -1)
N_plot = int(args.plot_percent * ALL_EMBS.shape[0])
rand_inds = np.arange(ALL_EMBS.shape[0])
np.random.shuffle(rand_inds)
rand_inds = rand_inds[:N_plot]
ALL_Y = ALL_Y[rand_inds]
if args.umap:
tsne = umap.UMAP(n_neighbors=int(args.perplexity), min_dist=args.lr, n_components=2, metric="euclidean", n_epochs=args.n_iter)
else:
tsne = TSNE(perplexity=args.perplexity, learning_rate=args.lr, n_iter= args.n_iter)
palette = sns.color_palette("bright", 10)
from matplotlib.colors import ListedColormap
my_cmap = ListedColormap(palette.as_hex())
X_embedded = tsne.fit_transform(ALL_EMBS)
X_embedded = X_embedded[rand_inds]
np.save(
os.path.join(args.experiment_path, "embedded_UC%i_ppd_%d_d1_%s.npy" % (uc_tag, args.points_per_d2, args.dataset)),
X_embedded)
np.save(
os.path.join(args.experiment_path,
"selectedY_UC%i_ppd_%d_d1_%s.npy" % (uc_tag, args.points_per_d2, args.dataset)),
ALL_Y)
fig, ax = plt.subplots()
for k, cla in Cat2Y.items():
target_inds = np.nonzero(ALL_Y == cla)
ax.scatter(X_embedded[target_inds, 0].squeeze(), X_embedded[target_inds, 1].squeeze(), c=palette.as_hex()[cla], label=k, s=3.0)
#ax.scatter(X_embedded[:, 0], X_embedded[:, 1], c=ALL_Y, cmap=my_cmap)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend([k for k in Cat2Y.keys()])
#plt.show()
if args.umap:
plt.savefig(os.path.join(args.experiment_path, "UC_%i_umap.png" % uc_tag), dpi=200)
else:
plt.savefig(os.path.join(args.experiment_path, "UC_%i_tsne.png"%uc_tag), dpi=200)
#sns.scatterplot(X_embedded[:, 0], X_embedded[:, 1], hue=ALL_Y, legend='full', palette=palette)
print("done")
| [
"_pickle.dump",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"seaborn.color_palette",
"_pickle.load",
"sklearn.manifold.TSNE",
"easydict.EasyDict",
"numpy.concatenate",
"numpy.ones",
"matplotlib.use",
"numpy.nonzero",
"models.get_ref_model_path",
"os.makedirs",
"torch.loa... | [((65, 86), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (79, 86), False, 'import matplotlib\n'), ((1764, 1793), 'numpy.concatenate', 'np.concatenate', (['Out_X'], {'axis': '(0)'}), '(Out_X, axis=0)\n', (1778, 1793), True, 'import numpy as np\n'), ((1806, 1835), 'numpy.concatenate', 'np.concatenate', (['Out_Y'], {'axis': '(0)'}), '(Out_Y, axis=0)\n', (1820, 1835), True, 'import numpy as np\n'), ((2022, 2036), 'numpy.zeros', 'np.zeros', (['N_in'], {}), '(N_in)\n', (2030, 2036), True, 'import numpy as np\n'), ((2112, 2141), 'numpy.concatenate', 'np.concatenate', (['(In_X, Out_X)'], {}), '((In_X, Out_X))\n', (2126, 2141), True, 'import numpy as np\n'), ((2154, 2183), 'numpy.concatenate', 'np.concatenate', (['(In_Y, Out_Y)'], {}), '((In_Y, Out_Y))\n', (2168, 2183), True, 'import numpy as np\n'), ((2251, 2289), 'torch.utils.data.DataLoader', 'DataLoader', (['new_dataset'], {'batch_size': '(64)'}), '(new_dataset, batch_size=64)\n', (2261, 2289), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2442, 2474), 'numpy.concatenate', 'np.concatenate', (['ALL_EMBS'], {'axis': '(0)'}), '(ALL_EMBS, axis=0)\n', (2456, 2474), True, 'import numpy as np\n'), ((2551, 2576), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2574, 2576), False, 'import argparse\n'), ((3926, 3954), 'os.path.abspath', 'os.path.abspath', (['"""workspace"""'], {}), "('workspace')\n", (3941, 3954), False, 'import os\n'), ((1519, 1578), 'torch.utils.data.DataLoader', 'DataLoader', (['D2'], {'shuffle': '(True)', 'batch_size': 'args.points_per_d2'}), '(D2, shuffle=True, batch_size=args.points_per_d2)\n', (1529, 1578), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2217, 2236), 'torch.tensor', 'torch.tensor', (['ALL_X'], {}), '(ALL_X)\n', (2229, 2236), False, 'import torch\n'), ((4075, 4126), 'os.path.join', 'os.path.join', (['workspace_path', '"""experiments"""', 'exp_id'], {}), "(workspace_path, 
'experiments', exp_id)\n", (4087, 4126), False, 'import os\n'), ((5456, 5466), 'easydict.EasyDict', 'EasyDict', ([], {}), '()\n', (5464, 5466), False, 'from easydict import EasyDict\n'), ((10704, 10732), 'numpy.arange', 'np.arange', (['ALL_EMBS.shape[0]'], {}), '(ALL_EMBS.shape[0])\n', (10713, 10732), True, 'import numpy as np\n'), ((10741, 10769), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_inds'], {}), '(rand_inds)\n', (10758, 10769), True, 'import numpy as np\n'), ((11131, 11162), 'seaborn.color_palette', 'sns.color_palette', (['"""bright"""', '(10)'], {}), "('bright', 10)\n", (11148, 11162), True, 'import seaborn as sns\n'), ((11739, 11753), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11751, 11753), True, 'import matplotlib.pyplot as plt\n'), ((4142, 4174), 'os.path.exists', 'os.path.exists', (['experiments_path'], {}), '(experiments_path)\n', (4156, 4174), False, 'import os\n'), ((4188, 4217), 'os.makedirs', 'os.makedirs', (['experiments_path'], {}), '(experiments_path)\n', (4199, 4217), False, 'import os\n'), ((5783, 5897), 'models.get_ref_model_path', 'Models.get_ref_model_path', (['dummy_args', 'model.__class__.__name__', 'D164.name'], {'suffix_str': "(tag + '.' + model.netid)"}), "(dummy_args, model.__class__.__name__, D164.name,\n suffix_str=tag + '.' 
+ model.netid)\n", (5808, 5897), True, 'import models as Models\n'), ((5969, 6010), 'os.path.join', 'os.path.join', (['home_path', '"""model.best.pth"""'], {}), "(home_path, 'model.best.pth')\n", (5981, 6010), False, 'import os\n'), ((6732, 6754), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (6742, 6754), False, 'import torch\n'), ((7445, 7468), '_pickle.dump', '_pickle.dump', (['Cat2Y', 'fp'], {}), '(Cat2Y, fp)\n', (7457, 7468), False, 'import _pickle\n'), ((7485, 7591), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_y_UC1_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_y_UC1_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (7497, 7591), False, 'import os\n'), ((7611, 7720), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_embs_UC1_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_embs_UC1_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (7623, 7720), False, 'import os\n'), ((8537, 8560), '_pickle.dump', '_pickle.dump', (['Cat2Y', 'fp'], {}), '(Cat2Y, fp)\n', (8549, 8560), False, 'import _pickle\n'), ((8577, 8683), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_y_UC2_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_y_UC2_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (8589, 8683), False, 'import os\n'), ((8702, 8811), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_embs_UC2_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_embs_UC2_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (8714, 8811), False, 'import os\n'), ((9752, 9775), '_pickle.dump', '_pickle.dump', (['Cat2Y', 'fp'], {}), '(Cat2Y, fp)\n', (9764, 9775), False, 'import _pickle\n'), ((9792, 9898), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_y_UC3_ppd_%d_d1_%s.npy' 
% (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_y_UC3_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (9804, 9898), False, 'import os\n'), ((9917, 10026), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_embs_UC3_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_embs_UC3_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (9929, 10026), False, 'import os\n'), ((10128, 10246), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_embs_UC%i_ppd_%d_d1_%s.npy' % (uc_tag, args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_embs_UC%i_ppd_%d_d1_%s.npy' % (\n uc_tag, args.points_per_d2, args.dataset))\n", (10140, 10246), False, 'import os\n'), ((10402, 10418), '_pickle.load', '_pickle.load', (['fp'], {}), '(fp)\n', (10414, 10418), False, 'import _pickle\n'), ((10443, 10557), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_y_UC%i_ppd_%d_d1_%s.npy' % (uc_tag, args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_y_UC%i_ppd_%d_d1_%s.npy' % (uc_tag,\n args.points_per_d2, args.dataset))\n", (10455, 10557), False, 'import os\n'), ((11036, 11111), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': 'args.perplexity', 'learning_rate': 'args.lr', 'n_iter': 'args.n_iter'}), '(perplexity=args.perplexity, learning_rate=args.lr, n_iter=args.n_iter)\n', (11040, 11111), False, 'from sklearn.manifold import TSNE\n'), ((11392, 11510), 'os.path.join', 'os.path.join', (['args.experiment_path', "('embedded_UC%i_ppd_%d_d1_%s.npy' % (uc_tag, args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'embedded_UC%i_ppd_%d_d1_%s.npy' % (\n uc_tag, args.points_per_d2, args.dataset))\n", (11404, 11510), False, 'import os\n'), ((11560, 11679), 'os.path.join', 'os.path.join', (['args.experiment_path', "('selectedY_UC%i_ppd_%d_d1_%s.npy' % (uc_tag, args.points_per_d2, args.dataset)\n )"], {}), "(args.experiment_path, 
'selectedY_UC%i_ppd_%d_d1_%s.npy' % (\n uc_tag, args.points_per_d2, args.dataset))\n", (11572, 11679), False, 'import os\n'), ((11817, 11841), 'numpy.nonzero', 'np.nonzero', (['(ALL_Y == cla)'], {}), '(ALL_Y == cla)\n', (11827, 11841), True, 'import numpy as np\n'), ((4827, 4936), 'os.path.join', 'os.path.join', (['args.experiment_path', "('all_embs_UC3_ppd_%d_d1_%s.npy' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'all_embs_UC3_ppd_%d_d1_%s.npy' % (args.\n points_per_d2, args.dataset))\n", (4839, 4936), False, 'import os\n'), ((6139, 6253), 'models.get_ref_model_path', 'Models.get_ref_model_path', (['dummy_args', 'model.__class__.__name__', 'D164.name'], {'suffix_str': "(tag + '.' + model.netid)"}), "(dummy_args, model.__class__.__name__, D164.name,\n suffix_str=tag + '.' + model.netid)\n", (6164, 6253), True, 'import models as Models\n'), ((6325, 6366), 'os.path.join', 'os.path.join', (['home_path', '"""model.best.pth"""'], {}), "(home_path, 'model.best.pth')\n", (6337, 6366), False, 'import os\n'), ((6473, 6587), 'models.get_ref_model_path', 'Models.get_ref_model_path', (['dummy_args', 'model.__class__.__name__', 'D164.name'], {'suffix_str': "(tag + '.' + model.netid)"}), "(dummy_args, model.__class__.__name__, D164.name,\n suffix_str=tag + '.' 
+ model.netid)\n", (6498, 6587), True, 'import models as Models\n'), ((6659, 6700), 'os.path.join', 'os.path.join', (['home_path', '"""model.best.pth"""'], {}), "(home_path, 'model.best.pth')\n", (6671, 6700), False, 'import os\n'), ((7318, 7424), 'os.path.join', 'os.path.join', (['args.experiment_path', "('cat2y_UC1_ppd_%d_d1_%s.pkl' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'cat2y_UC1_ppd_%d_d1_%s.pkl' % (args.\n points_per_d2, args.dataset))\n", (7330, 7424), False, 'import os\n'), ((8410, 8516), 'os.path.join', 'os.path.join', (['args.experiment_path', "('cat2y_UC2_ppd_%d_d1_%s.pkl' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'cat2y_UC2_ppd_%d_d1_%s.pkl' % (args.\n points_per_d2, args.dataset))\n", (8422, 8516), False, 'import os\n'), ((9625, 9731), 'os.path.join', 'os.path.join', (['args.experiment_path', "('cat2y_UC3_ppd_%d_d1_%s.pkl' % (args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'cat2y_UC3_ppd_%d_d1_%s.pkl' % (args.\n points_per_d2, args.dataset))\n", (9637, 9731), False, 'import os\n'), ((10259, 10373), 'os.path.join', 'os.path.join', (['args.experiment_path', "('cat2y_UC%i_ppd_%d_d1_%s.pkl' % (uc_tag, args.points_per_d2, args.dataset))"], {}), "(args.experiment_path, 'cat2y_UC%i_ppd_%d_d1_%s.pkl' % (uc_tag,\n args.points_per_d2, args.dataset))\n", (10271, 10373), False, 'import os\n'), ((12396, 12457), 'os.path.join', 'os.path.join', (['args.experiment_path', "('UC_%i_umap.png' % uc_tag)"], {}), "(args.experiment_path, 'UC_%i_umap.png' % uc_tag)\n", (12408, 12457), False, 'import os\n'), ((12506, 12567), 'os.path.join', 'os.path.join', (['args.experiment_path', "('UC_%i_tsne.png' % uc_tag)"], {}), "(args.experiment_path, 'UC_%i_tsne.png' % uc_tag)\n", (12518, 12567), False, 'import os\n'), ((1702, 1721), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (1709, 1721), True, 'import numpy as np\n'), ((4332, 4375), 'os.path.join', 'os.path.join', (['experiments_path', 
'folder_name'], {}), '(experiments_path, folder_name)\n', (4344, 4375), False, 'import os\n'), ((4406, 4449), 'os.path.join', 'os.path.join', (['experiments_path', 'folder_name'], {}), '(experiments_path, folder_name)\n', (4418, 4449), False, 'import os\n'), ((5084, 5121), 'os.path.join', 'os.path.join', (['args.root_path', '"""NIHCC"""'], {}), "(args.root_path, 'NIHCC')\n", (5096, 5121), False, 'import os\n'), ((6968, 7018), 'os.path.join', 'os.path.join', (['args.root_path', 'dataset.dataset_path'], {}), '(args.root_path, dataset.dataset_path)\n', (6980, 7018), False, 'import os\n'), ((8064, 8114), 'os.path.join', 'os.path.join', (['args.root_path', 'dataset.dataset_path'], {}), '(args.root_path, dataset.dataset_path)\n', (8076, 8114), False, 'import os\n'), ((5241, 5281), 'os.path.join', 'os.path.join', (['args.root_path', '"""PADChest"""'], {}), "(args.root_path, 'PADChest')\n", (5253, 5281), False, 'import os\n'), ((7059, 7109), 'os.path.join', 'os.path.join', (['args.root_path', 'dataset.dataset_path'], {}), '(args.root_path, dataset.dataset_path)\n', (7071, 7109), False, 'import os\n'), ((8155, 8205), 'os.path.join', 'os.path.join', (['args.root_path', 'dataset.dataset_path'], {}), '(args.root_path, dataset.dataset_path)\n', (8167, 8205), False, 'import os\n'), ((9114, 9151), 'os.path.join', 'os.path.join', (['args.root_path', '"""NIHCC"""'], {}), "(args.root_path, 'NIHCC')\n", (9126, 9151), False, 'import os\n'), ((9343, 9383), 'os.path.join', 'os.path.join', (['args.root_path', '"""PADChest"""'], {}), "(args.root_path, 'PADChest')\n", (9355, 9383), False, 'import os\n')] |
from __future__ import print_function, division
import pandas as pd
import numpy as np
from isochrones.query import Query
from .data import TGASPATH
TGAS = None
class TGASQuery(Query):
"""Special subclass for a query based on TGAS DR1.
`row` is a row of the Gaia DR1 table.
"""
def __init__(self, row, radius=5):
self.row = row
Query.__init__(self, row.ra, row.dec, row.pmra, row.pmdec,
epoch=row.ref_epoch, radius=radius)
@classmethod
def from_id(cls, i, **kwargs):
global TGAS
if TGAS is None:
TGAS = pd.read_hdf(TGASPATH, 'df')
if i < len(TGAS):
ind = i
else:
ind = np.where(TGAS.source_id==i)[0][0]
new = cls(TGAS.iloc[ind], **kwargs)
return new | [
"numpy.where",
"pandas.read_hdf",
"isochrones.query.Query.__init__"
] | [((368, 467), 'isochrones.query.Query.__init__', 'Query.__init__', (['self', 'row.ra', 'row.dec', 'row.pmra', 'row.pmdec'], {'epoch': 'row.ref_epoch', 'radius': 'radius'}), '(self, row.ra, row.dec, row.pmra, row.pmdec, epoch=row.\n ref_epoch, radius=radius)\n', (382, 467), False, 'from isochrones.query import Query\n'), ((605, 632), 'pandas.read_hdf', 'pd.read_hdf', (['TGASPATH', '"""df"""'], {}), "(TGASPATH, 'df')\n", (616, 632), True, 'import pandas as pd\n'), ((723, 752), 'numpy.where', 'np.where', (['(TGAS.source_id == i)'], {}), '(TGAS.source_id == i)\n', (731, 752), True, 'import numpy as np\n')] |
import numpy as np
import json
from django.views.decorators.http import require_http_methods
from django.shortcuts import render
def solve_linear_equation(arr1, arr2, arr3):
try:
arr1 = np.loads(arr1.encode())
arr2 = np.loads(arr2.encode())
arr3 = np.loads(arr3.encode())
except Exception as err:
result = "参数有误"
unknown_data = np.array([arr1, arr2])
const_data = np.array([arr3[0], arr3[1]])
result = np.linalg.solve(unknown_data, const_data)
return result
# 解二元一次线性方程 arr1为第一个方程的未知项系数,arr2为第二个方程未知项系数,第三个为两个方程的常数项
# 如: 1.x+y=1 对应arr1=[1,1] 2.3x+5y=7 对应arr2 = [3,5] 3.两者对应常数项为arr3 = [1,7]
# loads主要用于导入本地数据
def solve_matrix_dot(arr1, arr2):
try:
np_data1 = np.array([arr1])
np_data2 = np.array([arr2])
except Exception as err:
print(err)
result = np.dot(arr1, arr2)
return result
# 解矩阵的点乘,由于是用json传,所以理论上只要是[]的格式都可以,不论阶数
@require_http_methods(["GET"])
def index(request):
if request.method == "GET":
return render(request, "index.html")
@require_http_methods(["POST", "GET"])
def xy(request):
try:
data = json.loads(request.body.decode())
arr1 = data.get("arr1")
arr2 = data.get("arr2")
arr3 = data.get("arr3")
except Exception as err:
string = "请使用JSON传输数据"
return render(request, "xy.html", context={"result": string})
result = solve_linear_equation(arr1, arr2, arr3)
string = "[x,y]={}".format(result)
return render(request, "xy.html", context={"result": string})
@require_http_methods(["POST", "GET"])
def matrix(request):
try:
data = json.loads(request.body.decode())
matrix1 = data.get("matrix1")
matrix2 = data.get("matrix2")
except Exception as err:
string = "请使用JSON传数据"
return render(request, "matrix.html", context={"result": string})
result = solve_matrix_dot(matrix1, matrix2)
string = "result_matrix={}".format(result)
return render(request, "matrix.html", context={"result": string})
| [
"django.shortcuts.render",
"numpy.linalg.solve",
"django.views.decorators.http.require_http_methods",
"numpy.array",
"numpy.dot"
] | [((927, 956), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['GET']"], {}), "(['GET'])\n", (947, 956), False, 'from django.views.decorators.http import require_http_methods\n'), ((1057, 1094), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['POST', 'GET']"], {}), "(['POST', 'GET'])\n", (1077, 1094), False, 'from django.views.decorators.http import require_http_methods\n'), ((1557, 1594), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['POST', 'GET']"], {}), "(['POST', 'GET'])\n", (1577, 1594), False, 'from django.views.decorators.http import require_http_methods\n'), ((374, 396), 'numpy.array', 'np.array', (['[arr1, arr2]'], {}), '([arr1, arr2])\n', (382, 396), True, 'import numpy as np\n'), ((414, 442), 'numpy.array', 'np.array', (['[arr3[0], arr3[1]]'], {}), '([arr3[0], arr3[1]])\n', (422, 442), True, 'import numpy as np\n'), ((456, 497), 'numpy.linalg.solve', 'np.linalg.solve', (['unknown_data', 'const_data'], {}), '(unknown_data, const_data)\n', (471, 497), True, 'import numpy as np\n'), ((845, 863), 'numpy.dot', 'np.dot', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (851, 863), True, 'import numpy as np\n'), ((1499, 1553), 'django.shortcuts.render', 'render', (['request', '"""xy.html"""'], {'context': "{'result': string}"}), "(request, 'xy.html', context={'result': string})\n", (1505, 1553), False, 'from django.shortcuts import render\n'), ((1989, 2047), 'django.shortcuts.render', 'render', (['request', '"""matrix.html"""'], {'context': "{'result': string}"}), "(request, 'matrix.html', context={'result': string})\n", (1995, 2047), False, 'from django.shortcuts import render\n'), ((731, 747), 'numpy.array', 'np.array', (['[arr1]'], {}), '([arr1])\n', (739, 747), True, 'import numpy as np\n'), ((767, 783), 'numpy.array', 'np.array', (['[arr2]'], {}), '([arr2])\n', (775, 783), True, 'import numpy as np\n'), ((1024, 1053), 'django.shortcuts.render', 'render', 
(['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (1030, 1053), False, 'from django.shortcuts import render\n'), ((1341, 1395), 'django.shortcuts.render', 'render', (['request', '"""xy.html"""'], {'context': "{'result': string}"}), "(request, 'xy.html', context={'result': string})\n", (1347, 1395), False, 'from django.shortcuts import render\n'), ((1824, 1882), 'django.shortcuts.render', 'render', (['request', '"""matrix.html"""'], {'context': "{'result': string}"}), "(request, 'matrix.html', context={'result': string})\n", (1830, 1882), False, 'from django.shortcuts import render\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/1
@Author : AnNing
功能:
1、计算G0、Gt、DNI
2、补全缺失的整点时次数据的Itol、Ib、Id
优化
1、修改为矩阵运算
2、优化assignE
3、DEBUG函数assignTime,原来的函数直接在hour加8可能超过24
"""
import os
import h5py
import numpy as np
from dateutil.relativedelta import relativedelta
from lib.lib_constant import FULL_VALUE, ER_TXT, EP_TXT
from lib.lib_read_ssi import FY4ASSI
from lib.lib_database import add_result_data, exist_result_data
G0_Correct = 0.75 # 使用G0订正Itol的系数
def cos(x):
return np.cos(np.radians(x))
def sin(x):
return np.sin(np.radians(x))
def isleap(y):
y = int(y)
return (y % 4 == 0 and y % 100 != 0) or y % 400 == 0
def calDoy(y, m, d):
y = int(y)
m = int(m)
d = int(d)
Doy = 0
a = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if isleap(y):
a[1] = 29
for x in a[0:m - 1]:
Doy += x
return Doy + d
def calDelta(Doy):
# print "360/365*(284 + Doy) is %f" % (360.0/365*(284 + Doy))
return 23.45 * sin(360.0 / 365 * (284 + Doy))
def calOmega(hr, min, lon, E):
TT = hr + min / 60.0 + 4 * (lon - 120) / 60.0 + E / 60.0
return (TT - 12) * 15
def calCosThetaz(lat, Delta, Omega):
return cos(lat) * cos(Delta) * cos(Omega) + sin(lat) * sin(Delta)
def calG0(Doy, CosThetaz):
return 1366.1 * (1 + 0.033 * cos(360.0 / 365 * Doy)) * CosThetaz
def calRb(lat, Beta, Delta, Omega, CosThetaz):
return (cos(lat - Beta) * cos(Delta) * cos(Omega) + sin(lat - Beta) * sin(Delta)) / CosThetaz
def calGt(Ib, Id, Ai, Rb, Beta, Itol):
return (Ib + Id * Ai) * Rb + Id * (1 - Ai) * (1 + cos(Beta)) / 2.0 + Itol * 0.2 * (1 - cos(Beta)) / 2.0
def assignTime(date):
"""
DEBUG函数assignTime,原来的函数直接在hour加8可能超过24
:param date:
:return:
"""
date += relativedelta(hours=8) # 修改时间为北京时
datestrf = date.strftime('%Y-%m-%d-%H-%M-%S')
y, m, d, h, mm, s = datestrf.split('-')
return [int(i) for i in (y, m, d, h, mm)]
def assignE(y, m, d):
"""
assignE
:param y:
:param m:
:param d:
:return:
"""
y = int(y)
m = int(m)
d = int(d)
if isleap(y):
e_file = ER_TXT
else:
e_file = EP_TXT
e_data = np.loadtxt(e_file)
md = int('{:02d}{:02d}'.format(m, d))
index = np.where(e_data == md)
row = index[0]
if row.size != 0:
return e_data[row, 1]
else:
raise ValueError('没有找到E值: {}'.format((y, m, d)))
def _write_out_file(out_file, result):
valid_count = 0
for key in result:
if result[key] is None:
continue
else:
valid_count += 1
if valid_count == 0:
print('没有足够的有效数据,不生成结果文件')
return
out_dir = os.path.dirname(out_file)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
try:
compression = 'gzip'
compression_opts = 5
shuffle = True
with h5py.File(out_file, 'w') as hdf5:
for dataset in result.keys():
data = result[dataset]
data[np.isnan(data)] = FULL_VALUE
hdf5.create_dataset(dataset,
dtype=np.float32, data=result[dataset], compression=compression,
compression_opts=compression_opts,
shuffle=shuffle)
print('>>> 成功生成HDF文件{}'.format(out_file))
except Exception as why:
print(why)
print('HDF写入数据错误')
os.remove(out_file)
def itcal(in_file, out_file, resultid=None, planid=None, datatime=None, resolution_type=None):
# 如果原来的整点数据不存在,直接使用G0进行补充
# 如果原来的整点数据存在,使用G0进行校正
area_type = 'Full_DISK'
if os.path.isfile(out_file):
print('数据已经存在: {}'.format(out_file))
if not exist_result_data(resultid=resultid, datatime=datatime,
resolution_type=resolution_type,
area_type=area_type):
add_result_data(resultid=resultid, planid=planid, address=out_file, datatime=datatime,
resolution_type=resolution_type, area_type=area_type, element=None)
return
print('<<< itcal: {}'.format(in_file))
beta = 35.0 # 常量
try:
datas = FY4ASSI(in_file)
except Exception as why:
print(why)
print('初始化FY4A SSI读取类错误')
return
date_time = FY4ASSI.get_date_time_orbit(in_file)
y, m, d, hr, minus = assignTime(date_time)
e = assignE(y, m, d)
doy = calDoy(y, m, d)
delta = calDelta(doy)
print(delta)
try:
lons = FY4ASSI.get_longitude_4km()
lats = FY4ASSI.get_latitude_4km()
except Exception as why:
print(why)
print('读取lons和lats错误')
return
DQF = np.ones_like(lons, dtype=np.int8) # 标识数据集
Omega = calOmega(hr, minus, lons, e)
cos_the_taz = calCosThetaz(lats, delta, Omega)
G0 = calG0(doy, cos_the_taz)
print('G0')
print(np.nanmin(G0), np.nanmax(G0))
print((G0 > 0).sum())
index_invalid_g0 = np.logical_or(G0 >= 1500, G0 <= 0) # ########################## G0的无效值赋值为nan
G0[index_invalid_g0] = np.nan
if np.isnan(G0).all():
print('Warning::::::::没有有效的G0数据,不生产数据')
return
# G0无效
DQF[index_invalid_g0] = 0
# 校正总直散数据
if os.path.isfile(in_file):
try:
Itol = datas.get_ssi()
Ib = datas.get_dirssi()
Id = datas.get_difssi()
except Exception as why:
print(why)
print('读取ssi,dirssi和difssi错误')
return
# 将G0 >= 1400, G0 <= 0的值置为nan
Itol[index_invalid_g0] = np.nan
Ib[index_invalid_g0] = np.nan
Id[index_invalid_g0] = np.nan
# Itol 有效
index_valid_itol = np.logical_and(np.isfinite(Itol), Itol < G0)
DQF[index_valid_itol] = 1
# 校正G0有效,但是Itol无效的数据
index_invalid_itol = np.logical_or(Itol > G0, np.logical_and(np.isnan(Itol), np.isfinite(G0)))
Itol[index_invalid_itol] = G0_Correct * G0[index_invalid_itol]
Ib[index_invalid_itol] = 0.3 * Itol[index_invalid_itol]
Id[index_invalid_itol] = 0.7 * Itol[index_invalid_itol]
DQF[index_invalid_itol] = 2
else:
Itol = G0_Correct * G0
Ib = 0.3 * Itol
Id = 0.7 * Itol
# 计算Gt和DNI
Rb = calRb(lats, beta, delta, Omega, cos_the_taz)
Ai = Ib / G0
Gt = calGt(Ib, Id, Ai, Rb, beta, Itol)
DNI = Ib / cos_the_taz
# 校正Gt
index_invalid_gt = np.logical_and(Gt < 0, np.isfinite(G0))
Gt[index_invalid_gt] = 0.75 * G0[index_invalid_gt]
DQF[index_invalid_gt] = 3
Gt[lats < 0] = np.nan # 20191121 AnNing 根据用户需求,Gt的数据只生产北半球
# 输出数据
result = {'G0': G0, 'Gt': Gt, 'DNI': DNI, 'SSI': Itol, 'DirSSI': Ib, 'DifSSI': Id, 'DQF': DQF}
try:
_write_out_file(out_file, result)
if os.path.isfile(out_file) and not exist_result_data(resultid=resultid, datatime=datatime,
resolution_type=resolution_type,
area_type=area_type):
add_result_data(resultid=resultid, planid=planid, address=out_file, datatime=datatime,
resolution_type=resolution_type, area_type=area_type, element=None)
except Exception as why:
print(why)
print('输出结果文件错误')
return
# if __name__ == '__main__':
# in_dir = '/home/gfssi/GFData/Source/FY4A+AGRI/SSI_4KM/Orbit/20190630/'
# out_dir = '/home/gfssi/GFData/Result/FY4A+AGRI/SSI_4KM/Orbit/20190630/'
# filenames = os.listdir(in_dir)
# filenames.sort()
#
# for file_name in filenames:
# if file_name[-2:] != 'NC':
# continue
# else:
# in_file = os.path.join(in_dir, file_name)
# out_file = os.path.join(out_dir, file_name)
# itcal(in_file, out_file)
# in_dir = '/home/gfssi/GFData/Result/FY4A+AGRI/SSI_4KM/Orbit/20190630'
# out_dir = '/home/gfssi/GFData/Result/FY4A+AGRI/SSI_4KMCorrect/Orbit/20190630'
# file_name = 'FY4A-_AGRI--_N_DISK_1047E_L2-_SSI-_MULT_NOM_20190630000000_20190630001459_4000M_V0001.NC'
# in_file = os.path.join(in_dir, file_name)
# out_file = os.path.join(out_dir, file_name)
# itcal(in_file, out_file)
# if __name__ == '__main__':
#
# y, m, d, hr, minus = assignTime(date_time)
# e = assignE(y, m, d)
# doy = calDoy(y, m, d)
# delta = calDelta(doy)
# Omega = calOmega(hr, minus, lons, e)
# cos_the_taz = calCosThetaz(lats, delta, Omega)
# G0 = calG0(doy, cos_the_taz)
| [
"numpy.radians",
"dateutil.relativedelta.relativedelta",
"numpy.isfinite",
"numpy.nanmin",
"os.remove",
"numpy.where",
"os.path.isdir",
"numpy.nanmax",
"lib.lib_database.add_result_data",
"lib.lib_database.exist_result_data",
"lib.lib_read_ssi.FY4ASSI.get_latitude_4km",
"os.path.isfile",
"os... | [((1790, 1812), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'hours': '(8)'}), '(hours=8)\n', (1803, 1812), False, 'from dateutil.relativedelta import relativedelta\n'), ((2206, 2224), 'numpy.loadtxt', 'np.loadtxt', (['e_file'], {}), '(e_file)\n', (2216, 2224), True, 'import numpy as np\n'), ((2280, 2302), 'numpy.where', 'np.where', (['(e_data == md)'], {}), '(e_data == md)\n', (2288, 2302), True, 'import numpy as np\n'), ((2711, 2736), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (2726, 2736), False, 'import os\n'), ((3682, 3706), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (3696, 3706), False, 'import os\n'), ((4377, 4413), 'lib.lib_read_ssi.FY4ASSI.get_date_time_orbit', 'FY4ASSI.get_date_time_orbit', (['in_file'], {}), '(in_file)\n', (4404, 4413), False, 'from lib.lib_read_ssi import FY4ASSI\n'), ((4754, 4787), 'numpy.ones_like', 'np.ones_like', (['lons'], {'dtype': 'np.int8'}), '(lons, dtype=np.int8)\n', (4766, 4787), True, 'import numpy as np\n'), ((5028, 5062), 'numpy.logical_or', 'np.logical_or', (['(G0 >= 1500)', '(G0 <= 0)'], {}), '(G0 >= 1500, G0 <= 0)\n', (5041, 5062), True, 'import numpy as np\n'), ((5294, 5317), 'os.path.isfile', 'os.path.isfile', (['in_file'], {}), '(in_file)\n', (5308, 5317), False, 'import os\n'), ((520, 533), 'numpy.radians', 'np.radians', (['x'], {}), '(x)\n', (530, 533), True, 'import numpy as np\n'), ((567, 580), 'numpy.radians', 'np.radians', (['x'], {}), '(x)\n', (577, 580), True, 'import numpy as np\n'), ((2748, 2770), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (2761, 2770), False, 'import os\n'), ((2780, 2800), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2791, 2800), False, 'import os\n'), ((4247, 4263), 'lib.lib_read_ssi.FY4ASSI', 'FY4ASSI', (['in_file'], {}), '(in_file)\n', (4254, 4263), False, 'from lib.lib_read_ssi import FY4ASSI\n'), ((4579, 4606), 
'lib.lib_read_ssi.FY4ASSI.get_longitude_4km', 'FY4ASSI.get_longitude_4km', ([], {}), '()\n', (4604, 4606), False, 'from lib.lib_read_ssi import FY4ASSI\n'), ((4622, 4648), 'lib.lib_read_ssi.FY4ASSI.get_latitude_4km', 'FY4ASSI.get_latitude_4km', ([], {}), '()\n', (4646, 4648), False, 'from lib.lib_read_ssi import FY4ASSI\n'), ((4949, 4962), 'numpy.nanmin', 'np.nanmin', (['G0'], {}), '(G0)\n', (4958, 4962), True, 'import numpy as np\n'), ((4964, 4977), 'numpy.nanmax', 'np.nanmax', (['G0'], {}), '(G0)\n', (4973, 4977), True, 'import numpy as np\n'), ((6510, 6525), 'numpy.isfinite', 'np.isfinite', (['G0'], {}), '(G0)\n', (6521, 6525), True, 'import numpy as np\n'), ((2905, 2929), 'h5py.File', 'h5py.File', (['out_file', '"""w"""'], {}), "(out_file, 'w')\n", (2914, 2929), False, 'import h5py\n'), ((3473, 3492), 'os.remove', 'os.remove', (['out_file'], {}), '(out_file)\n', (3482, 3492), False, 'import os\n'), ((3768, 3882), 'lib.lib_database.exist_result_data', 'exist_result_data', ([], {'resultid': 'resultid', 'datatime': 'datatime', 'resolution_type': 'resolution_type', 'area_type': 'area_type'}), '(resultid=resultid, datatime=datatime, resolution_type=\n resolution_type, area_type=area_type)\n', (3785, 3882), False, 'from lib.lib_database import add_result_data, exist_result_data\n'), ((3957, 4119), 'lib.lib_database.add_result_data', 'add_result_data', ([], {'resultid': 'resultid', 'planid': 'planid', 'address': 'out_file', 'datatime': 'datatime', 'resolution_type': 'resolution_type', 'area_type': 'area_type', 'element': 'None'}), '(resultid=resultid, planid=planid, address=out_file,\n datatime=datatime, resolution_type=resolution_type, area_type=area_type,\n element=None)\n', (3972, 4119), False, 'from lib.lib_database import add_result_data, exist_result_data\n'), ((5147, 5159), 'numpy.isnan', 'np.isnan', (['G0'], {}), '(G0)\n', (5155, 5159), True, 'import numpy as np\n'), ((5774, 5791), 'numpy.isfinite', 'np.isfinite', (['Itol'], {}), '(Itol)\n', (5785, 5791), 
True, 'import numpy as np\n'), ((6850, 6874), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (6864, 6874), False, 'import os\n'), ((7130, 7292), 'lib.lib_database.add_result_data', 'add_result_data', ([], {'resultid': 'resultid', 'planid': 'planid', 'address': 'out_file', 'datatime': 'datatime', 'resolution_type': 'resolution_type', 'area_type': 'area_type', 'element': 'None'}), '(resultid=resultid, planid=planid, address=out_file,\n datatime=datatime, resolution_type=resolution_type, area_type=area_type,\n element=None)\n', (7145, 7292), False, 'from lib.lib_database import add_result_data, exist_result_data\n'), ((5937, 5951), 'numpy.isnan', 'np.isnan', (['Itol'], {}), '(Itol)\n', (5945, 5951), True, 'import numpy as np\n'), ((5953, 5968), 'numpy.isfinite', 'np.isfinite', (['G0'], {}), '(G0)\n', (5964, 5968), True, 'import numpy as np\n'), ((6883, 6997), 'lib.lib_database.exist_result_data', 'exist_result_data', ([], {'resultid': 'resultid', 'datatime': 'datatime', 'resolution_type': 'resolution_type', 'area_type': 'area_type'}), '(resultid=resultid, datatime=datatime, resolution_type=\n resolution_type, area_type=area_type)\n', (6900, 6997), False, 'from lib.lib_database import add_result_data, exist_result_data\n'), ((3041, 3055), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (3049, 3055), True, 'import numpy as np\n')] |
import numpy as np
from torch.utils.data import Dataset
from pathlib import Path
from pytorch_lightning.callbacks import Callback
from ..models.base import SegmentationModel
from ..image_process.convert import cv_to_pil, to_4dim, tensor_to_cv, normalize255
class GenerateSegmentationImageCallback(Callback):
    """Lightning callback that saves segmentation overlay images during training.

    Every ``per_epoch`` epochs the wrapped ``model`` is run on sample(s) from
    ``dataset``; the predicted index image is blended over the input frame and
    written as a PNG into ``output_dir``.
    """

    def __init__(self, model: SegmentationModel, output_dir: str, per_epoch: int, dataset: Dataset, alpha=0.6,
                 apply_all_images=False):
        # alpha: blend weight for the overlay; apply_all_images: dump every
        # dataset sample instead of a single random one.
        self._model: SegmentationModel = model
        self._output_dir = output_dir
        self._per_epoch = per_epoch
        self._dataset = dataset
        self._alpha = alpha
        self._apply_all_images = apply_all_images
        # Create the output directory up front so the save calls cannot fail
        # on a missing path later.
        if not Path(self._output_dir).exists():
            Path(self._output_dir).mkdir(parents=True)

    def on_epoch_end(self, trainer, _):
        epoch = trainer.current_epoch
        # Fire only on every self._per_epoch-th epoch (epochs counted 1-based).
        if (epoch + 1) % self._per_epoch != 0:
            return
        if self._apply_all_images:
            for i, (img_tensor, label) in enumerate(self._dataset):
                origin_image = normalize255(tensor_to_cv(img_tensor))
                # NOTE(review): this branch unpacks (prob, pred_label) but the
                # single-sample branch below unpacks (pred_label, prob) from the
                # same call -- one of the two orders must be wrong; confirm
                # against SegmentationModel.predict_index_image.
                prob, pred_label = self._model.predict_index_image(to_4dim(img_tensor))
                index_image = tensor_to_cv(pred_label[0]).astype('uint8')
                # NOTE(review): self._alpha is not passed here but is passed in
                # the branch below -- confirm whether the default blend weight
                # is intended for the all-images case.
                mixed_img = self._model.generate_mixed_segment_image(origin_image, index_image)
                cv_to_pil(mixed_img).save(f"{self._output_dir}/data{i}_image{epoch + 1}.png")
            return
        # Default path: visualise one randomly chosen sample.
        data_len = len(self._dataset)
        random_image_index = np.random.randint(0, data_len)
        img_tensor, _ = self._dataset[random_image_index]
        origin_image = normalize255(tensor_to_cv(img_tensor))
        pred_label, prob = self._model.predict_index_image(to_4dim(img_tensor))
        index_image = tensor_to_cv(pred_label[0])
        mixed_img = self._model.generate_mixed_segment_image(origin_image, index_image, self._alpha)
        cv_to_pil(mixed_img).save(f"{self._output_dir}/result_image{epoch + 1}.png")
| [
"numpy.random.randint",
"pathlib.Path"
] | [((1554, 1584), 'numpy.random.randint', 'np.random.randint', (['(0)', 'data_len'], {}), '(0, data_len)\n', (1571, 1584), True, 'import numpy as np\n'), ((710, 732), 'pathlib.Path', 'Path', (['self._output_dir'], {}), '(self._output_dir)\n', (714, 732), False, 'from pathlib import Path\n'), ((755, 777), 'pathlib.Path', 'Path', (['self._output_dir'], {}), '(self._output_dir)\n', (759, 777), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 11:24:01 2021
@author: ja17375
"""
import pygmt
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4 as nc
def plot_forte_gmt():
    """Plot SKS/SKKS splitting measurements over the TX2008 flow / S40RTS model.

    Loads the TX2008 mantle-flow model (1-degree grid at 150 km depth), draws
    the S40RTS shear-velocity anomaly at 2800 km as the background, then
    overlays measured SKS/SKKS splitting pairs read from the Sheba results
    file.  The figure is saved as an EPS for the thesis chapter.

    BUG FIX: ``fig.basemap`` previously referenced the undefined name
    ``africa_me`` (a typo for ``africa_med``), raising a NameError on every
    call.
    """
    tx2008 = np.loadtxt('/Users/ja17375/SWSTomo/ForteModels/Flow_Models/TX2008/forteV2_1deg_150km.txt')
    shp = (181, 361)  # 1-degree global grid: 181 latitudes x 361 longitudes
    dg = 15           # decimation interval (degrees) for flow vectors
    lat = tx2008[:,1].reshape(shp)
    lon = tx2008[:,2].reshape(shp)
    Ur = tx2008[:,3].reshape(shp)
    Utheta = tx2008[:,4].reshape(shp)*-1 # theta is colat so invert
    Uphi = tx2008[:,5].reshape(shp)
    # Mask selecting grid nodes on a 15-degree graticule (vector decimation).
    hzdeg = ((lat % dg == 0) & (lon % dg == 0))
    # Cast Ur (radial velocity) into an xarray DataArray for pyGMT.
    U_grid = xr.DataArray(data=np.flipud(Ur),
                         coords=[('latitude', np.linspace(-90,90,181),
                                  {'units': 'degrees_north'}),
                                 ('longitude', np.linspace(-180,180,361),
                                  {'units': 'degrees_east'})],
                         )
    fig = pygmt.Figure()
    # Candidate plot regions [west, east, south, north]; africa_med is used.
    africa_med = [-25,80,-5,60]
    easia = [60,150,10,70]
    epac = [-170, -80, 10, 65]
    proj = "M15c"    # Mercator projection
    gproj = "Ks12c"  # alternative global (Eckert IV) projection
    # FIX: was ``region=africa_me`` (undefined name -> NameError).
    fig.basemap(region=africa_med, projection=proj, frame="afg",)
    # Flow model TX2008 (alternative background, currently disabled):
    # pygmt.makecpt(cmap='roma', series=[-1.5, 1.5], reverse=True)
    # fig.grdimage(grid=U_grid)
    # fig.colorbar(frame=['a0.5', 'x+l"Vertical Velocity (cm/yr)"' ])
    # S40RTS background
    fig.grdimage(grid='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS_2800km.grd',
                 cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
    fig.colorbar(frame=['a0.5', 'x+l"dVs (%)"' ], cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
    fig.coast(shorelines=True)
    # Flow vectors (currently disabled):
    # flow_ang = np.rad2deg(np.arctan2(np.ravel(Utheta[hzdeg]), np.ravel(Uphi[hzdeg])))
    # flow_len = np.sqrt(np.ravel(Utheta[hzdeg])**2 + np.ravel(Uphi[hzdeg])**2)
    # flow_data = np.zeros((325, 4))
    # flow_data[:,0] = lon[hzdeg]
    # flow_data[:,1] = lat[hzdeg]
    # flow_data[:,2] = flow_ang
    # flow_data[:,3] = flow_len *0.5
    # fig.plot(data=flow_data, style = 'v0.2c+e', color='black', pen='1p')
    # flow_data[:,2] = flow_data[:,2] + 180
    # fig.plot(data=flow_data, style = 'v0c', color='black', pen='1p')
    fig.plot(x=130, y=20, direction = [[0], [1]], style = 'v0c', color='black', pen='1p')
    data = pd.read_csv('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs', delim_whitespace=True)
    for i, row in data.iterrows():
        # Join each SKS/SKKS piercing-point pair with a line.
        fig.plot(x=[row['SKS_PP_LON'], row['SKKS_PP_LON']],
                 y=[row['SKS_PP_LAT'], row['SKKS_PP_LAT']],
                 pen="1p,black")
        if (row['Q_SKS'] >= 0.5):
            # Plot split SKS - black circle
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='black', pen='black')
            # Splitting vector: fast direction, length scaled by delay time,
            # drawn both ways so it reads as a bar through the point.
            vec = np.array([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS']*0.5],
                            [row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS']+180, row['TLAG_SKS']*0.5]])
            fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
        elif (row['Q_SKS'] <= -0.5):
            # Null SKS measurement - open circle.
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='white', pen='black')
        else:
            print('Bad Q for SKS')
        if (row['Q_SKKS'] >= 0.5):
            # Plot split SKKS - black diamond
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='black', pen='black')
            vec = np.array([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS']*0.5],
                            [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS']+180, row['TLAG_SKKS']*0.5]])
            fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
        elif (row['Q_SKKS'] <= -0.5):
            # Null SKKS measurement - open diamond.
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='white', pen='black')
    fig.savefig('/Users/ja17375/Documents/Thesis-enclosing/Thesis/chapters/chapter02/Figs/Africa_Med_SKS_SKKS_onS40RTS.eps',
                crop=True, show=True)
    # fig.show(method='external')
def plot_flament(dpath='/Users/ja17375/SWSTomo/FlamentModel',extent='epac'):
    """Plot mantle-flow vectors from the Flament model with SKS/SKKS results.

    Reads the x/y/z velocity-component grids at 2677 km depth, converts them
    to cm/yr, draws the radial velocity as the background grid with horizontal
    flow vectors every 15 degrees, then overlays the measured SKS/SKKS
    splitting pairs.

    NOTE(review): the ``extent`` argument is currently unused -- the plotted
    region is hard-coded to ``africa_med``; confirm whether it should select
    between regions.
    """
    nc_vx = nc.Dataset(f'{dpath}/C3-vx-000Ma-2677km.grd')
    nc_vy = nc.Dataset(f'{dpath}/C3-vy-000Ma-2677km.grd')
    nc_vz = nc.Dataset(f'{dpath}/C3-vz-000Ma-2677km.grd')
    vel_conv = 4.9e-4 # converts velocity to cm/year (from N. Flament - see model README.txt)
    Utheta = nc_vx['z'][:] * vel_conv *-1 #theta is colat so invert
    Uphi = nc_vy['z'][:] * vel_conv # longitudinal velocity
    Ur = nc_vz['z'][:] * vel_conv # radial velocity
    lon, lat = np.meshgrid(nc_vx['lon'][:], nc_vx['lat'][:])
    dg = 15
    # Mask selecting grid nodes on a 15-degree graticule (vector decimation).
    hzdeg = ((lat % dg == 0) & (lon % dg == 0))
    # Wrap the radial velocity in an xarray DataArray so pyGMT can grid it.
    U_grid = xr.DataArray(data=np.flipud(Ur),
                         coords=[('latitude', np.linspace(-90,90,181),
                                  {'units': 'degrees_north'}),
                                 ('longitude', np.linspace(-180,180,361),
                                  {'units': 'degrees_east'})],
                         )
    fig = pygmt.Figure()
    africa_med = [25,70,-5,50]  # plot region [west, east, south, north]
    fig.basemap(region=africa_med, projection="Ks12c", frame="afg",)
    fig.grdimage(grid=U_grid)
    fig.coast(shorelines=True)
    # Horizontal flow azimuth (degrees) and magnitude at the decimated nodes.
    flow_ang = np.rad2deg(np.arctan2(np.ravel(Utheta[hzdeg]), np.ravel(Uphi[hzdeg])))
    flow_len = np.sqrt(np.ravel(Utheta[hzdeg])**2 + np.ravel(Uphi[hzdeg])**2)
    # NOTE(review): 325 rows assumes the 15-degree decimation of this grid
    # always yields 325 nodes -- confirm if the grid ever changes.
    flow_data = np.zeros((325, 4))
    flow_data[:,0] = lon[hzdeg]
    flow_data[:,1] = lat[hzdeg]
    flow_data[:,2] = flow_ang
    flow_data[:,3] = flow_len *0.1  # scale vector length for plotting
    # Draw each vector both ways so it reads as a bar with one arrowhead.
    fig.plot(data=flow_data, style = 'v0.2c+e', color='black', pen='1p')
    flow_data[:,2] = flow_data[:,2] + 180
    fig.plot(data=flow_data, style = 'v0c', color='black', pen='1p')
    data = pd.read_csv('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs', delim_whitespace=True)
    for i, row in data.iterrows():
        # Join each SKS/SKKS piercing-point pair with a line.
        fig.plot(x=[row['SKS_PP_LON'], row['SKKS_PP_LON']],
                 y=[row['SKS_PP_LAT'], row['SKKS_PP_LAT']],
                 pen="1p,black")
        if (row['Q_SKS'] >= 0.5):
            #Plot split SKS - black circle
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='black', pen='black')
            # Splitting vector: fast direction, length scaled by delay time.
            vec = np.array([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS']*0.25],
                            [row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS']+180, row['TLAG_SKS']*0.25]])
            fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
        elif (row['Q_SKS'] <= -0.5):
            # Null SKS measurement - open circle.
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='white', pen='black')
        else:
            print('Bad Q for SKS')
        if (row['Q_SKKS'] >= 0.5):
            #Plot split SKKS - black circle
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='black', pen='black')
            vec = np.array([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS']*0.25],
                            [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS']+180, row['TLAG_SKKS']*0.25]])
            fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
        elif (row['Q_SKKS'] <= -0.5):
            # Null SKKS measurement - open diamond.
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='white', pen='black')
    fig.show(method='external')
if __name__ == '__main__':
    # Entry point: only the TX2008/S40RTS figure is produced by default;
    # call plot_flament() manually for the Flament-model figure.
    plot_forte_gmt()
"pandas.read_csv",
"numpy.flipud",
"netCDF4.Dataset",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"pygmt.Figure",
"numpy.ravel",
"numpy.meshgrid",
"numpy.loadtxt"
] | [((239, 339), 'numpy.loadtxt', 'np.loadtxt', (['"""/Users/ja17375/SWSTomo/ForteModels/Flow_Models/TX2008/forteV2_1deg_150km.txt"""'], {}), "(\n '/Users/ja17375/SWSTomo/ForteModels/Flow_Models/TX2008/forteV2_1deg_150km.txt'\n )\n", (249, 339), True, 'import numpy as np\n'), ((1045, 1059), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (1057, 1059), False, 'import pygmt\n'), ((2384, 2490), 'pandas.read_csv', 'pd.read_csv', (['"""~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs"""'], {'delim_whitespace': '(True)'}), "('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs',\n delim_whitespace=True)\n", (2395, 2490), True, 'import pandas as pd\n'), ((4461, 4506), 'netCDF4.Dataset', 'nc.Dataset', (['f"""{dpath}/C3-vx-000Ma-2677km.grd"""'], {}), "(f'{dpath}/C3-vx-000Ma-2677km.grd')\n", (4471, 4506), True, 'import netCDF4 as nc\n'), ((4519, 4564), 'netCDF4.Dataset', 'nc.Dataset', (['f"""{dpath}/C3-vy-000Ma-2677km.grd"""'], {}), "(f'{dpath}/C3-vy-000Ma-2677km.grd')\n", (4529, 4564), True, 'import netCDF4 as nc\n'), ((4577, 4622), 'netCDF4.Dataset', 'nc.Dataset', (['f"""{dpath}/C3-vz-000Ma-2677km.grd"""'], {}), "(f'{dpath}/C3-vz-000Ma-2677km.grd')\n", (4587, 4622), True, 'import netCDF4 as nc\n'), ((4910, 4955), 'numpy.meshgrid', 'np.meshgrid', (["nc_vx['lon'][:]", "nc_vx['lat'][:]"], {}), "(nc_vx['lon'][:], nc_vx['lat'][:])\n", (4921, 4955), True, 'import numpy as np\n'), ((5390, 5404), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (5402, 5404), False, 'import pygmt\n'), ((5746, 5764), 'numpy.zeros', 'np.zeros', (['(325, 4)'], {}), '((325, 4))\n', (5754, 5764), True, 'import numpy as np\n'), ((6090, 6196), 'pandas.read_csv', 'pd.read_csv', (['"""~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs"""'], {'delim_whitespace': '(True)'}), "('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs',\n delim_whitespace=True)\n", (6101, 6196), True, 'import pandas as pd\n'), ((704, 717), 'numpy.flipud', 'np.flipud', 
(['Ur'], {}), '(Ur)\n', (713, 717), True, 'import numpy as np\n'), ((2925, 3111), 'numpy.array', 'np.array', (["[[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS'] * \n 0.5], [row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'] + 180, row\n ['TLAG_SKS'] * 0.5]]"], {}), "([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row[\n 'TLAG_SKS'] * 0.5], [row['SKS_PP_LON'], row['SKS_PP_LAT'], row[\n 'FAST_SKS'] + 180, row['TLAG_SKS'] * 0.5]])\n", (2933, 3111), True, 'import numpy as np\n'), ((3709, 3903), 'numpy.array', 'np.array', (["[[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS'\n ] * 0.5], [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'] + \n 180, row['TLAG_SKKS'] * 0.5]]"], {}), "([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row[\n 'TLAG_SKKS'] * 0.5], [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row[\n 'FAST_SKKS'] + 180, row['TLAG_SKKS'] * 0.5]])\n", (3717, 3903), True, 'import numpy as np\n'), ((5049, 5062), 'numpy.flipud', 'np.flipud', (['Ur'], {}), '(Ur)\n', (5058, 5062), True, 'import numpy as np\n'), ((5603, 5626), 'numpy.ravel', 'np.ravel', (['Utheta[hzdeg]'], {}), '(Utheta[hzdeg])\n', (5611, 5626), True, 'import numpy as np\n'), ((5628, 5649), 'numpy.ravel', 'np.ravel', (['Uphi[hzdeg]'], {}), '(Uphi[hzdeg])\n', (5636, 5649), True, 'import numpy as np\n'), ((6631, 6819), 'numpy.array', 'np.array', (["[[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS'] * \n 0.25], [row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'] + 180, \n row['TLAG_SKS'] * 0.25]]"], {}), "([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row[\n 'TLAG_SKS'] * 0.25], [row['SKS_PP_LON'], row['SKS_PP_LAT'], row[\n 'FAST_SKS'] + 180, row['TLAG_SKS'] * 0.25]])\n", (6639, 6819), True, 'import numpy as np\n'), ((7417, 7613), 'numpy.array', 'np.array', (["[[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS'\n ] * 0.25], [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], 
row['FAST_SKKS'] + \n 180, row['TLAG_SKKS'] * 0.25]]"], {}), "([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row[\n 'TLAG_SKKS'] * 0.25], [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row[\n 'FAST_SKKS'] + 180, row['TLAG_SKKS'] * 0.25]])\n", (7425, 7613), True, 'import numpy as np\n'), ((5675, 5698), 'numpy.ravel', 'np.ravel', (['Utheta[hzdeg]'], {}), '(Utheta[hzdeg])\n', (5683, 5698), True, 'import numpy as np\n'), ((5704, 5725), 'numpy.ravel', 'np.ravel', (['Uphi[hzdeg]'], {}), '(Uphi[hzdeg])\n', (5712, 5725), True, 'import numpy as np\n'), ((766, 791), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', '(181)'], {}), '(-90, 90, 181)\n', (777, 791), True, 'import numpy as np\n'), ((912, 939), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', '(361)'], {}), '(-180, 180, 361)\n', (923, 939), True, 'import numpy as np\n'), ((5111, 5136), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', '(181)'], {}), '(-90, 90, 181)\n', (5122, 5136), True, 'import numpy as np\n'), ((5257, 5284), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', '(361)'], {}), '(-180, 180, 361)\n', (5268, 5284), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import numpy as np
# Demo: render a 100x100 heat map with a normalised random bump in the centre.
canvas = np.zeros((100, 100))
bump = np.abs(np.random.randn(40, 40))
# Scale the bump into [0, 1] and drop it into the middle 40x40 window.
canvas[20:60, 20:60] = bump / np.max(bump)
plt.imshow(canvas, cmap=cm.seismic)
plt.colorbar()
plt.show()
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.zeros",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((125, 145), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (133, 145), True, 'import numpy as np\n'), ((246, 279), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'cmap': 'cm.seismic'}), '(data, cmap=cm.seismic)\n', (256, 279), True, 'import matplotlib.pyplot as plt\n'), ((280, 294), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (292, 294), True, 'import matplotlib.pyplot as plt\n'), ((295, 305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (303, 305), True, 'import matplotlib.pyplot as plt\n'), ((161, 184), 'numpy.random.randn', 'np.random.randn', (['(40)', '(40)'], {}), '(40, 40)\n', (176, 184), True, 'import numpy as np\n'), ((203, 216), 'numpy.max', 'np.max', (['adder'], {}), '(adder)\n', (209, 216), True, 'import numpy as np\n')] |
from torch.utils.data import Dataset
from PIL import Image
import os
import torch
import numpy as np
# from scipy.io import loadmat
from torchvision import transforms
class PennAction(Dataset):
    '''
    Penn Action dataset of short frame clips with 2-D keypoint labels.

    Each sample is a dict with keys:
        image    : stacked frame tensors, shape (num_per_pair, H, W, C)
        label    : stacked keypoint (x, y) tensors, one per frame
        rotation : per-frame rotation info -- filled in by the transform
                   (NOTE(review): without a transform this stays None and
                   torch.stack(rotation) below will fail; confirm a transform
                   that sets 'rotation' is always supplied).
    '''

    def __init__(self, root_path, transform=None, num_per_pair=2, frame_interval=2):
        # num_per_pair: frames per sample; frame_interval: stride between them.
        self._clip_len = int(num_per_pair)
        self._interval = int(frame_interval)
        self.transform = transform
        self.label_path = os.path.join(os.path.abspath(root_path), 'labels')
        self.video_path = os.path.join(os.path.abspath(root_path), 'frames')
        # NOTE(review): videos are unfiltered while labels keep only *.npz --
        # any stray entry under frames/ would misalign the two sorted lists.
        self.videos = sorted(os.listdir(self.video_path))
        self.labels = sorted([f for f in os.listdir(self.label_path) if f.endswith('.npz')])
        # self.resize = resize
        self.num = len(os.listdir(self.video_path))

    def __getitem__(self, index):
        # Pair the index-th video directory with the index-th label file.
        selected_videos = self.videos[index]
        selected_labels = self.labels[index]
        selected_label_path = os.path.join(os.path.abspath(self.label_path), selected_labels)
        selected_video_path = os.path.join(os.path.abspath(self.video_path), selected_videos)
        frame_list = sorted([f for f in os.listdir(selected_video_path) if f.endswith('.png')])
        # Random clip start such that all clip frames stay inside the video.
        id = np.random.randint(0, len(frame_list) - self._interval * self._clip_len + 1)
        image = []
        label = []
        rotation = []
        for i in range(self._clip_len):
            tmp = self._get_one_frame(selected_label_path, selected_video_path, frame_list, id + i * self._interval)
            image.append(tmp['image'])
            label.append(tmp['label'])
            rotation.append(tmp['rotation'])
        image = torch.stack(image)
        label = torch.stack(label)
        rotation = torch.stack(rotation)
        # we will save rotation degree as label here
        sample = {'image':image,'label':label,'rotation': rotation}
        return sample

    def _get_one_frame(self, label_path, frame_path, frame_list, id):
        # Load one frame and its keypoints, then apply the optional transform.
        points = np.load(label_path)
        x_points = points['x']
        y_points = points['y']
        #vis = points['visibility'][id].astype(np.bool)
        # print(vis)
        # bounding box:
        # bbox = points['bbox']
        i_path = os.path.join(frame_path, frame_list[id])
        image = Image.open(i_path)
        image = torch.as_tensor(np.asarray(image))
        labelx = x_points[id] # [vis]
        labely = y_points[id] # [vis]
        # left = np.int(bbox[id][0])
        # top = np.int(bbox[id][1])
        # right = np.int(np.ceil(bbox[id][2]))
        # bottom = np.int(np.ceil(bbox[id][3]))
        # print(labelx.shape)
        # (n_keypoints, 2) tensor of (x, y) coordinates.
        label = torch.as_tensor([labelx, labely]).T
        # print(label.shape)
        # image = image[top:bottom, left: right]
        # label = label - [left, top]
        tmp = {'image': image, 'label': label, 'rotation':None}
        # image = image.resize(self.resize, Image.ANTIALIAS)
        if self.transform:
            tmp = self.transform(tmp)
        return tmp

    def __len__(self):
        return self.num
if __name__ == '__main__':
    # Smoke test: show three frames each from a few random samples.
    import matplotlib.pyplot as plt
    from utils.transformer import *
    # BUG FIX: the plotting loop below reads t['image'][j] for j in 0..2, but
    # the dataset was built with the default num_per_pair=2, so j == 2 raised
    # IndexError.  Request 3 frames per sample to match the 4x3 subplot grid.
    test = PennAction(root_path='/home/chen/Video_Disentanled_Representation/Datasets/Penn_Action',
                      transform=transforms.Compose([Rescale((192,192)),Rotate()]),
                      num_per_pair=3)
    plt.figure(figsize=(20,20))
    for i, k in enumerate(np.random.randint(2200, size=4)):
        t = test[k]
        for j in range(3):
            plt.subplot(4, 3, i*3 + j +1)
            plt.imshow(t['image'][j])
            #plt.scatter(tt2[:, 0], tt2[:, 1], c='r', marker='+')
            # forget the labels now
    plt.show()
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"os.listdir",
"torch.as_tensor",
"torch.stack",
"os.path.join",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"os.path.abspath",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((3546, 3574), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (3556, 3574), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3921, 3923), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1842), 'torch.stack', 'torch.stack', (['image'], {}), '(image)\n', (1835, 1842), False, 'import torch\n'), ((1859, 1877), 'torch.stack', 'torch.stack', (['label'], {}), '(label)\n', (1870, 1877), False, 'import torch\n'), ((1897, 1918), 'torch.stack', 'torch.stack', (['rotation'], {}), '(rotation)\n', (1908, 1918), False, 'import torch\n'), ((2150, 2169), 'numpy.load', 'np.load', (['label_path'], {}), '(label_path)\n', (2157, 2169), True, 'import numpy as np\n'), ((2384, 2424), 'os.path.join', 'os.path.join', (['frame_path', 'frame_list[id]'], {}), '(frame_path, frame_list[id])\n', (2396, 2424), False, 'import os\n'), ((2441, 2459), 'PIL.Image.open', 'Image.open', (['i_path'], {}), '(i_path)\n', (2451, 2459), False, 'from PIL import Image\n'), ((3600, 3631), 'numpy.random.randint', 'np.random.randint', (['(2200)'], {'size': '(4)'}), '(2200, size=4)\n', (3617, 3631), True, 'import numpy as np\n'), ((618, 644), 'os.path.abspath', 'os.path.abspath', (['root_path'], {}), '(root_path)\n', (633, 644), False, 'import os\n'), ((695, 721), 'os.path.abspath', 'os.path.abspath', (['root_path'], {}), '(root_path)\n', (710, 721), False, 'import os\n'), ((763, 790), 'os.listdir', 'os.listdir', (['self.video_path'], {}), '(self.video_path)\n', (773, 790), False, 'import os\n'), ((940, 967), 'os.listdir', 'os.listdir', (['self.video_path'], {}), '(self.video_path)\n', (950, 967), False, 'import os\n'), ((1137, 1169), 'os.path.abspath', 'os.path.abspath', (['self.label_path'], {}), '(self.label_path)\n', (1152, 1169), False, 'import os\n'), ((1231, 1263), 'os.path.abspath', 'os.path.abspath', (['self.video_path'], {}), '(self.video_path)\n', (1246, 1263), False, 'import 
os\n'), ((2492, 2509), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2502, 2509), True, 'import numpy as np\n'), ((2802, 2835), 'torch.as_tensor', 'torch.as_tensor', (['[labelx, labely]'], {}), '([labelx, labely])\n', (2817, 2835), False, 'import torch\n'), ((3758, 3790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', '(i * 3 + j + 1)'], {}), '(4, 3, i * 3 + j + 1)\n', (3769, 3790), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3815), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tt1'], {}), '(tt1)\n', (3810, 3815), True, 'import matplotlib.pyplot as plt\n'), ((833, 860), 'os.listdir', 'os.listdir', (['self.label_path'], {}), '(self.label_path)\n', (843, 860), False, 'import os\n'), ((1322, 1353), 'os.listdir', 'os.listdir', (['selected_video_path'], {}), '(selected_video_path)\n', (1332, 1353), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import time
import warnings
from itertools import cycle, islice
from sklearn.cluster import MiniBatchKMeans
from sklearn.mixture import GaussianMixture
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import kneighbors_graph
from sklearn import cluster, datasets, mixture
import matplotlib.pyplot as plt
import numpy as np
import torch
from torchmm.hmm_packed import kpp_rand
from torchmm.hmm_packed import kmeans_rand
from torchmm.hmm import HiddenMarkovModel
from torchmm.base import DiagNormalModel
"""
Created on Mon Jun 1 21:12:06 2020
@author: robert.sheline
"""
print(__doc__)

# Seed every RNG from the wall clock: each run explores a different random
# initialisation, while numpy and torch stay mutually consistent.
seed = round(time.time())
np.random.seed(seed)
torch.manual_seed(seed)
class torchmm_transform(TransformerMixin, BaseEstimator):
    """Sklearn-style wrapper around torchmm's HiddenMarkovModel.

    Each observation is treated as its own length-1 sequence; ``fit`` trains
    a k-state HMM with diagonal-normal emissions and prints the resulting
    log-likelihood, ``predict`` returns the decoded state per observation.
    """

    def __init__(self, k, rand_fun=None):
        # rand_fun: optional initialisation strategy passed through to
        # HiddenMarkovModel.fit (e.g. kpp_rand, kmeans_rand).
        self.n_states = k
        self.rand_fun = rand_fun

    @staticmethod
    def _as_sequences(X):
        # Wrap every observation in its own length-1 sequence tensor.
        return torch.tensor([[obs] for obs in X]).float()

    def fit(self, X, restarts=15):
        """Train the HMM on X and return self (sklearn convention)."""
        obs = self._as_sequences(X)
        k = self.n_states
        # Uniform start/transition probabilities via softmax over zeros.
        start_probs = torch.zeros(k).softmax(0)
        trans_probs = torch.zeros((k, k)).softmax(1)
        emissions = [DiagNormalModel(torch.zeros(2), torch.ones(2))
                     for _ in range(k)]
        self.hmm = HiddenMarkovModel(emissions, T0=start_probs, T=trans_probs)
        self.hmm.fit(obs, max_steps=500, epsilon=1e-3, restarts=restarts,
                     rand_fun=self.rand_fun)
        # Report the (log-likelihood + parameter prior) of the fitted model.
        ll = self.hmm.log_prob(obs) + self.hmm.log_parameters_prob()
        print("torchmm", self.rand_fun)
        print(ll)
        return self

    def predict(self, X):
        """Return the Viterbi-decoded state index for each observation."""
        decoded = self.hmm.decode(self._as_sequences(X))
        return torch.stack(decoded[0]).squeeze()
if __name__ == "__main__":
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3,
'min_samples': 20,
'xi': 0.05,
'min_cluster_size': 0.1}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2,
'min_samples': 20, 'xi': 0.25}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2,
'min_samples': 5, 'xi': 0.035, 'min_cluster_size': .2}),
(aniso, {'eps': .15, 'n_neighbors': 2,
'min_samples': 20, 'xi': 0.1, 'min_cluster_size': .2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
print()
print("NEW DATA")
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
optics = cluster.OPTICS(min_samples=params['min_samples'],
xi=params['xi'],
min_cluster_size=params['min_cluster_size'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='diag')
torchmm_model_0 = torchmm_transform(params['n_clusters'],
rand_fun=None)
torchmm_model_1 = torchmm_transform(params['n_clusters'],
rand_fun=kpp_rand)
torchmm_model_2 = torchmm_transform(params['n_clusters'],
rand_fun=kmeans_rand)
clustering_algorithms = (
('MiniBatchKMeans', two_means),
# ('AffinityPropagation', affinity_propagation),
# ('MeanShift', ms),
# ('SpectralClustering', spectral),
# ('Ward', ward),
# ('AgglomerativeClustering', average_linkage),
# ('DBSCAN', dbscan),
# ('OPTICS', optics),
# ('Birch', birch),
('GaussianMixture', gmm),
('Torchmm - Random Init', torchmm_model_0),
('Torchmm - K++ Init', torchmm_model_1),
('Torchmm - Kmeans Init', torchmm_model_2)
)
# clustering_algorithms = (('MiniBatchKMeans', two_means),
# ('Torchmm', torchmm_model))
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# print('y_pred: ', type(y_pred), ' .. ', y_pred)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
# plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
# transform=plt.gca().transAxes, size=15,
# horizontalalignment='right')
plot_num += 1
plt.show()
| [
"torchmm.hmm.HiddenMarkovModel",
"sklearn.cluster.SpectralClustering",
"numpy.random.rand",
"sklearn.neighbors.kneighbors_graph",
"sklearn.datasets.make_circles",
"sklearn.cluster.MeanShift",
"sklearn.cluster.DBSCAN",
"sklearn.cluster.AgglomerativeClustering",
"sklearn.datasets.make_blobs",
"numpy... | [((750, 770), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (764, 770), True, 'import numpy as np\n'), ((772, 795), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (789, 795), False, 'import torch\n'), ((736, 747), 'time.time', 'time.time', ([], {}), '()\n', (745, 747), False, 'import time\n'), ((2147, 2213), 'sklearn.datasets.make_circles', 'datasets.make_circles', ([], {'n_samples': 'n_samples', 'factor': '(0.5)', 'noise': '(0.05)'}), '(n_samples=n_samples, factor=0.5, noise=0.05)\n', (2168, 2213), False, 'from sklearn import cluster, datasets, mixture\n'), ((2274, 2326), 'sklearn.datasets.make_moons', 'datasets.make_moons', ([], {'n_samples': 'n_samples', 'noise': '(0.05)'}), '(n_samples=n_samples, noise=0.05)\n', (2293, 2326), False, 'from sklearn import cluster, datasets, mixture\n'), ((2339, 2395), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'random_state': '(8)'}), '(n_samples=n_samples, random_state=8)\n', (2358, 2395), False, 'from sklearn import cluster, datasets, mixture\n'), ((2527, 2594), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'random_state': 'random_state'}), '(n_samples=n_samples, random_state=random_state)\n', (2546, 2594), False, 'from sklearn import cluster, datasets, mixture\n'), ((2659, 2684), 'numpy.dot', 'np.dot', (['X', 'transformation'], {}), '(X, transformation)\n', (2665, 2684), True, 'import numpy as np\n'), ((2762, 2862), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'cluster_std': '[1.0, 2.5, 0.5]', 'random_state': 'random_state'}), '(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5],\n random_state=random_state)\n', (2781, 2862), False, 'from sklearn import cluster, datasets, mixture\n'), ((3007, 3044), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9 * 2 + 3, 12.5)'}), '(figsize=(9 * 2 + 3, 12.5))\n', (3017, 3044), True, 'import 
matplotlib.pyplot as plt\n'), ((3050, 3147), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.001)', 'top': '(0.96)', 'wspace': '(0.05)', 'hspace': '(0.01)'}), '(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=\n 0.05, hspace=0.01)\n', (3069, 3147), True, 'import matplotlib.pyplot as plt\n'), ((9355, 9365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9363, 9365), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1456), 'torchmm.hmm.HiddenMarkovModel', 'HiddenMarkovModel', (['states'], {'T0': 'T0', 'T': 'T'}), '(states, T0=T0, T=T)\n', (1436, 1456), False, 'from torchmm.hmm import HiddenMarkovModel\n'), ((2416, 2444), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(2)'], {}), '(n_samples, 2)\n', (2430, 2444), True, 'import numpy as np\n'), ((4552, 4610), 'sklearn.cluster.estimate_bandwidth', 'cluster.estimate_bandwidth', (['X'], {'quantile': "params['quantile']"}), "(X, quantile=params['quantile'])\n", (4578, 4610), False, 'from sklearn import cluster, datasets, mixture\n'), ((4688, 4762), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['X'], {'n_neighbors': "params['n_neighbors']", 'include_self': '(False)'}), "(X, n_neighbors=params['n_neighbors'], include_self=False)\n", (4704, 4762), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((4976, 5032), 'sklearn.cluster.MeanShift', 'cluster.MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (4993, 5032), False, 'from sklearn import cluster, datasets, mixture\n'), ((5054, 5110), 'sklearn.cluster.MiniBatchKMeans', 'cluster.MiniBatchKMeans', ([], {'n_clusters': "params['n_clusters']"}), "(n_clusters=params['n_clusters'])\n", (5077, 5110), False, 'from sklearn import cluster, datasets, mixture\n'), ((5127, 5239), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'n_clusters': "params['n_clusters']", 
'linkage': '"""ward"""', 'connectivity': 'connectivity'}), "(n_clusters=params['n_clusters'], linkage=\n 'ward', connectivity=connectivity)\n", (5158, 5239), False, 'from sklearn import cluster, datasets, mixture\n'), ((5282, 5399), 'sklearn.cluster.SpectralClustering', 'cluster.SpectralClustering', ([], {'n_clusters': "params['n_clusters']", 'eigen_solver': '"""arpack"""', 'affinity': '"""nearest_neighbors"""'}), "(n_clusters=params['n_clusters'], eigen_solver=\n 'arpack', affinity='nearest_neighbors')\n", (5308, 5399), False, 'from sklearn import cluster, datasets, mixture\n'), ((5440, 5473), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': "params['eps']"}), "(eps=params['eps'])\n", (5454, 5473), False, 'from sklearn import cluster, datasets, mixture\n'), ((5492, 5607), 'sklearn.cluster.OPTICS', 'cluster.OPTICS', ([], {'min_samples': "params['min_samples']", 'xi': "params['xi']", 'min_cluster_size': "params['min_cluster_size']"}), "(min_samples=params['min_samples'], xi=params['xi'],\n min_cluster_size=params['min_cluster_size'])\n", (5506, 5607), False, 'from sklearn import cluster, datasets, mixture\n'), ((5702, 5794), 'sklearn.cluster.AffinityPropagation', 'cluster.AffinityPropagation', ([], {'damping': "params['damping']", 'preference': "params['preference']"}), "(damping=params['damping'], preference=params[\n 'preference'])\n", (5729, 5794), False, 'from sklearn import cluster, datasets, mixture\n'), ((5831, 5967), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'linkage': '"""average"""', 'affinity': '"""cityblock"""', 'n_clusters': "params['n_clusters']", 'connectivity': 'connectivity'}), "(linkage='average', affinity='cityblock',\n n_clusters=params['n_clusters'], connectivity=connectivity)\n", (5862, 5967), False, 'from sklearn import cluster, datasets, mixture\n'), ((6008, 6054), 'sklearn.cluster.Birch', 'cluster.Birch', ([], {'n_clusters': "params['n_clusters']"}), "(n_clusters=params['n_clusters'])\n", 
(6021, 6054), False, 'from sklearn import cluster, datasets, mixture\n'), ((6070, 6157), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': "params['n_clusters']", 'covariance_type': '"""diag"""'}), "(n_components=params['n_clusters'], covariance_type=\n 'diag')\n", (6093, 6157), False, 'from sklearn import cluster, datasets, mixture\n'), ((1279, 1293), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (1290, 1293), False, 'import torch\n'), ((1320, 1333), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1330, 1333), False, 'import torch\n'), ((7382, 7393), 'time.time', 'time.time', ([], {}), '()\n', (7391, 7393), False, 'import time\n'), ((8131, 8142), 'time.time', 'time.time', ([], {}), '()\n', (8140, 8142), False, 'import time\n'), ((8927, 8957), 'numpy.append', 'np.append', (['colors', "['#000000']"], {}), "(colors, ['#000000'])\n", (8936, 8957), True, 'import numpy as np\n'), ((8971, 9028), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(10)', 'color': 'colors[y_pred]'}), '(X[:, 0], X[:, 1], s=10, color=colors[y_pred])\n', (8982, 9028), True, 'import matplotlib.pyplot as plt\n'), ((9044, 9063), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (9052, 9063), True, 'import matplotlib.pyplot as plt\n'), ((9077, 9096), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (9085, 9096), True, 'import matplotlib.pyplot as plt\n'), ((9110, 9124), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (9120, 9124), True, 'import matplotlib.pyplot as plt\n'), ((9138, 9152), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (9148, 9152), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1080), 'torch.tensor', 'torch.tensor', (['[[_x] for _x in X]'], {}), '([[_x] for _x in X])\n', (1060, 1080), False, 'import torch\n'), ((1105, 1126), 'torch.zeros', 'torch.zeros', (['n_states'], {}), '(n_states)\n', (1116, 1126), 
False, 'import torch\n'), ((1151, 1184), 'torch.zeros', 'torch.zeros', (['(n_states, n_states)'], {}), '((n_states, n_states))\n', (1162, 1184), False, 'import torch\n'), ((1361, 1395), 'torchmm.base.DiagNormalModel', 'DiagNormalModel', (['means', 'precisions'], {}), '(means, precisions)\n', (1376, 1395), False, 'from torchmm.base import DiagNormalModel\n'), ((1773, 1805), 'torch.tensor', 'torch.tensor', (['[[_x] for _x in X]'], {}), '([[_x] for _x in X])\n', (1785, 1805), False, 'import torch\n'), ((4450, 4466), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4464, 4466), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7472, 7497), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7495, 7497), False, 'import warnings\n'), ((7516, 7739), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': "('the number of connected components of the ' +\n 'connectivity matrix is [0-9]{1,2}' +\n ' > 1. Completing it to avoid stopping the tree early.')", 'category': 'UserWarning'}), "('ignore', message=\n 'the number of connected components of the ' +\n 'connectivity matrix is [0-9]{1,2}' +\n ' > 1. 
Completing it to avoid stopping the tree early.', category=\n UserWarning)\n", (7539, 7739), False, 'import warnings\n'), ((7845, 8000), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': "('Graph is not fully connected, spectral embedding' +\n ' may not work as expected.')", 'category': 'UserWarning'}), "('ignore', message=\n 'Graph is not fully connected, spectral embedding' +\n ' may not work as expected.', category=UserWarning)\n", (7868, 8000), False, 'import warnings\n'), ((8507, 8531), 'matplotlib.pyplot.title', 'plt.title', (['name'], {'size': '(18)'}), '(name, size=18)\n', (8516, 8531), True, 'import matplotlib.pyplot as plt\n'), ((8577, 8687), 'itertools.cycle', 'cycle', (["['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00']"], {}), "(['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00'])\n", (8582, 8687), False, 'from itertools import cycle, islice\n')] |
import numpy as np
import gym
from gym.spaces import Box
import pdb
class AtariPreprocessing(gym.Wrapper):
    r"""Atari 2600 preprocessings.
    This class follows the guidelines in
    Machado et al. (2018), "Revisiting the Arcade Learning Environment:
    Evaluation Protocols and Open Problems for General Agents".
    Specifically:
    * NoopReset: obtain initial state by taking random number of no-ops on reset.
    * FireReset: take action on reset for environments that are fixed until firing.
    * Frame skipping: 4 by default
    * Max-pooling: most recent two observations
    * Termination signal when a life is lost: turned off by default. Not recommended by Machado et al. (2018).
    * Resize to a square image: 84x84 by default
    * Grayscale observation: optional
    Args:
        env (Env): environment
        noop_max (int): max number of no-ops
        frame_skip (int): the frequency at which the agent experiences the game.
        screen_size (int): resize Atari frame
        terminal_on_life_loss (bool): if True, then step() returns done=True whenever a
            life is lost.
        grayscale_obs (bool): if True, then gray scale observation is returned, otherwise, RGB observation
            is returned.
    """
    def __init__(self, env, noop_max=30, frame_skip=4, screen_size=84, terminal_on_life_loss=False, grayscale_obs=True):
        """Wrap ``env`` with the Machado et al. (2018) Atari preprocessing."""
        super().__init__(env)
        assert frame_skip > 0
        assert screen_size > 0
        self.noop_max = noop_max
        # The noop-reset scheme below assumes action 0 is NOOP.
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
        self.frame_skip = frame_skip
        self.screen_size = screen_size
        self.terminal_on_life_loss = terminal_on_life_loss
        self.grayscale_obs = grayscale_obs
        # buffer of most recent two observations for max pooling
        if grayscale_obs:
            # Grayscale drops the channel axis: buffers are (height, width).
            self.obs_buffer = [np.empty(env.observation_space.shape[:2], dtype=np.uint8),
                               np.empty(env.observation_space.shape[:2], dtype=np.uint8)]
        else:
            self.obs_buffer = [np.empty(env.observation_space.shape, dtype=np.uint8),
                               np.empty(env.observation_space.shape, dtype=np.uint8)]
        self.ale = env.unwrapped.ale
        self.lives = 0
        self.game_over = False
        if grayscale_obs:
            self.observation_space = Box(low=0, high=255, shape=(screen_size, screen_size), dtype=np.uint8)
        else:
            self.observation_space = Box(low=0, high=255, shape=(screen_size, screen_size, 3), dtype=np.uint8)
    def step(self, action):
        """Repeat ``action`` for ``frame_skip`` frames.

        Returns the max-pooled (last two raw frames) and resized observation,
        the reward summed over the skipped frames, the done flag and the
        info dict of the final inner step.
        """
        R = 0.0
        for t in range(self.frame_skip):
            _, reward, done, info = self.env.step(action)
            R += reward
            self.game_over = done
            if self.terminal_on_life_loss:
                new_lives = self.ale.lives()
                done = done or new_lives < self.lives
                self.lives = new_lives
            if done:
                break
            # Only the two most recent frames feed the max pooling, so the
            # screen is grabbed on the last two skip iterations only.
            if t == self.frame_skip - 2:
                if self.grayscale_obs:
                    self.ale.getScreenGrayscale(self.obs_buffer[0])
                else:
                    self.ale.getScreenRGB2(self.obs_buffer[0])
            elif t == self.frame_skip - 1:
                if self.grayscale_obs:
                    self.ale.getScreenGrayscale(self.obs_buffer[1])
                else:
                    self.ale.getScreenRGB2(self.obs_buffer[1])
        return self._get_obs(), R, done, info
    def reset(self, **kwargs):
        """Reset the env with a random number of no-ops (and FIRE if needed)."""
        # NoopReset
        self.env.reset(**kwargs)
        noops = self.env.unwrapped.np_random.randint(1, self.noop_max + 1) if self.noop_max > 0 else 0
        for _ in range(noops):
            _, _, done, _ = self.env.step(0)
            if done:
                self.env.reset(**kwargs)
        # FireReset
        action_meanings = self.env.unwrapped.get_action_meanings()
        if action_meanings[1] == 'FIRE' and len(action_meanings) >= 3:
            self.env.step(1)
            self.env.step(2)
        self.lives = self.ale.lives()
        if self.grayscale_obs:
            self.ale.getScreenGrayscale(self.obs_buffer[0])
        else:
            self.ale.getScreenRGB2(self.obs_buffer[0])
        # Zero the second buffer so max pooling reduces to the first frame.
        self.obs_buffer[1].fill(0)
        return self._get_obs()
    def _get_obs(self):
        """Max-pool the two buffered frames and resize to screen_size."""
        import cv2
        if self.frame_skip > 1: # more efficient in-place pooling
            np.maximum(self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0])
        obs = cv2.resize(self.obs_buffer[0], (self.screen_size, self.screen_size), interpolation=cv2.INTER_AREA)
        obs = np.asarray(obs, dtype=np.uint8)
        return obs
from collections import deque
import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class LazyFrames(object):
    r"""Memory-efficient holder for a list of observation frames.

    Frames are stored by reference (optionally lz4-compressed) and only
    materialized into a stacked numpy array when ``__array__`` runs,
    i.e. just before a forward pass.
    """

    def __init__(self, frames, lz4_compress=False):
        if lz4_compress:
            from lz4.block import compress
            self.shape = frames[0].shape
            self.dtype = frames[0].dtype
            frames = [compress(frame) for frame in frames]
        self._frames = frames
        self.lz4_compress = lz4_compress

    def __array__(self, dtype=None):
        if self.lz4_compress:
            from lz4.block import decompress
            source = [
                np.frombuffer(decompress(raw), dtype=self.dtype).reshape(self.shape)
                for raw in self._frames
            ]
        else:
            source = self._frames
        stacked = np.stack(source, axis=0)
        return stacked if dtype is None else stacked.astype(dtype)

    def __len__(self):
        # Length of the stacked array == number of stored frames.
        return len(self.__array__())

    def __getitem__(self, i):
        full = self.__array__()
        return full[i]
class FrameStack(ObservationWrapper):
    r"""Observation wrapper returning the last ``num_stack`` observations.

    The most recent ``num_stack`` frames are kept in a rolling deque and
    exposed as a :class:`LazyFrames` object (stacked along a new leading
    axis), so memory is shared until the array is actually needed.
    The wrapped observation space must be of ``Box`` type.

    Example::
        >>> import gym
        >>> env = gym.make('PongNoFrameskip-v0')
        >>> env = FrameStack(env, 4)
        >>> env.observation_space
        Box(4, 210, 160, 3)

    Args:
        env (Env): environment object
        num_stack (int): number of stacks
    """

    def __init__(self, env, num_stack, lz4_compress=False):
        super(FrameStack, self).__init__(env)
        self.num_stack = num_stack
        self.lz4_compress = lz4_compress
        self.frames = deque(maxlen=num_stack)
        space = self.observation_space
        stacked_low = np.repeat(space.low[np.newaxis, ...], num_stack, axis=0)
        stacked_high = np.repeat(space.high[np.newaxis, ...], num_stack, axis=0)
        self.observation_space = Box(low=stacked_low, high=stacked_high, dtype=space.dtype)

    def _get_observation(self):
        frame_count = len(self.frames)
        assert frame_count == self.num_stack, (frame_count, self.num_stack)
        return LazyFrames(list(self.frames), self.lz4_compress)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_observation(), reward, done, info

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        # Fill the whole deque with the initial frame.
        for _ in range(self.num_stack):
            self.frames.append(obs)
        return self._get_observation()
| [
"lz4.block.compress",
"collections.deque",
"numpy.repeat",
"numpy.asarray",
"gym.spaces.Box",
"numpy.stack",
"numpy.empty",
"lz4.block.decompress",
"numpy.maximum",
"cv2.resize"
] | [((4521, 4623), 'cv2.resize', 'cv2.resize', (['self.obs_buffer[0]', '(self.screen_size, self.screen_size)'], {'interpolation': 'cv2.INTER_AREA'}), '(self.obs_buffer[0], (self.screen_size, self.screen_size),\n interpolation=cv2.INTER_AREA)\n', (4531, 4623), False, 'import cv2\n'), ((4634, 4665), 'numpy.asarray', 'np.asarray', (['obs'], {'dtype': 'np.uint8'}), '(obs, dtype=np.uint8)\n', (4644, 4665), True, 'import numpy as np\n'), ((5745, 5769), 'numpy.stack', 'np.stack', (['frames'], {'axis': '(0)'}), '(frames, axis=0)\n', (5753, 5769), True, 'import numpy as np\n'), ((7152, 7175), 'collections.deque', 'deque', ([], {'maxlen': 'num_stack'}), '(maxlen=num_stack)\n', (7157, 7175), False, 'from collections import deque\n'), ((7191, 7264), 'numpy.repeat', 'np.repeat', (['self.observation_space.low[np.newaxis, ...]', 'num_stack'], {'axis': '(0)'}), '(self.observation_space.low[np.newaxis, ...], num_stack, axis=0)\n', (7200, 7264), True, 'import numpy as np\n'), ((7280, 7354), 'numpy.repeat', 'np.repeat', (['self.observation_space.high[np.newaxis, ...]', 'num_stack'], {'axis': '(0)'}), '(self.observation_space.high[np.newaxis, ...], num_stack, axis=0)\n', (7289, 7354), True, 'import numpy as np\n'), ((7388, 7447), 'gym.spaces.Box', 'Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'self.observation_space.dtype'}), '(low=low, high=high, dtype=self.observation_space.dtype)\n', (7391, 7447), False, 'from gym.spaces import Box\n'), ((2362, 2432), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(screen_size, screen_size)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(screen_size, screen_size), dtype=np.uint8)\n', (2365, 2432), False, 'from gym.spaces import Box\n'), ((2484, 2557), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(screen_size, screen_size, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(screen_size, screen_size, 3), dtype=np.uint8)\n', (2487, 2557), False, 'from gym.spaces import Box\n'), ((4432, 
4506), 'numpy.maximum', 'np.maximum', (['self.obs_buffer[0]', 'self.obs_buffer[1]'], {'out': 'self.obs_buffer[0]'}), '(self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0])\n', (4442, 4506), True, 'import numpy as np\n'), ((1871, 1928), 'numpy.empty', 'np.empty', (['env.observation_space.shape[:2]'], {'dtype': 'np.uint8'}), '(env.observation_space.shape[:2], dtype=np.uint8)\n', (1879, 1928), True, 'import numpy as np\n'), ((1961, 2018), 'numpy.empty', 'np.empty', (['env.observation_space.shape[:2]'], {'dtype': 'np.uint8'}), '(env.observation_space.shape[:2], dtype=np.uint8)\n', (1969, 2018), True, 'import numpy as np\n'), ((2065, 2118), 'numpy.empty', 'np.empty', (['env.observation_space.shape'], {'dtype': 'np.uint8'}), '(env.observation_space.shape, dtype=np.uint8)\n', (2073, 2118), True, 'import numpy as np\n'), ((2151, 2204), 'numpy.empty', 'np.empty', (['env.observation_space.shape'], {'dtype': 'np.uint8'}), '(env.observation_space.shape, dtype=np.uint8)\n', (2159, 2204), True, 'import numpy as np\n'), ((5342, 5357), 'lz4.block.compress', 'compress', (['frame'], {}), '(frame)\n', (5350, 5357), False, 'from lz4.block import compress\n'), ((5599, 5616), 'lz4.block.decompress', 'decompress', (['frame'], {}), '(frame)\n', (5609, 5616), False, 'from lz4.block import decompress\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 09:34:05 2020
@author: didi
"""
import collections
import numpy as np
import tensorflow as tf
import gym
import random
import copy
import os
import sys
from peal.utils.epsilon_decay import linearly_decaying_epsilon
from peal.replay_buffers.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from peal.agents.default_config import DEFAULT_CONFIG as config
# Named container for the three outputs of KLNetwork.call():
# per-action Q-values, target-policy probabilities, and
# behavior-policy (imitation) probabilities.
KLNetworktype = collections.namedtuple(
    'cross_entropy_network', ['q_values', 'target_policy_probs', 'behavior_policy_probs'])
class KLNetwork(tf.keras.Model):
    """Network with a shared trunk and three heads.

    The Q-value head and the target-policy (softmax) head share the
    ``affine_layers`` trunk; the imitation head has its own independent
    stack so behavior-policy estimation does not interfere with value
    learning.  ``call`` returns a ``KLNetworktype`` namedtuple of
    ``(q_values, target_policy_probs, behavior_policy_probs)``.
    """

    def __init__(self, num_actions,
                 hiddens=[64, 64], activation='relu',
                 name='kl_network'):
        """Build the layers.

        Args:
            num_actions: size of the discrete action space (output width).
            hiddens: hidden-layer widths for both the shared trunk and the
                imitation stack.
            activation: activation for all hidden layers.
            name: Keras model name.
        """
        super().__init__(name=name)
        # Shared trunk feeding the Q-value and target-policy heads.
        self.affine_layers = [tf.keras.layers.Dense(hidden, activation)
                              for hidden in hiddens]
        self.action_head = tf.keras.layers.Dense(num_actions, activation='softmax')
        self.q_head = tf.keras.layers.Dense(num_actions, activation=None)
        # Independent stack for the imitation (behavior-policy) head; the
        # final softmax layer is appended so call() can run one loop.
        self.imitation_layers = [tf.keras.layers.Dense(hidden, activation)
                                 for hidden in hiddens]
        self.imitation = tf.keras.layers.Dense(num_actions, activation='softmax')
        self.imitation_layers.append(self.imitation)

    def call(self, state):
        """Forward pass; returns (q_values, target_probs, behavior_probs)."""
        features = tf.cast(state, tf.float32)
        # Fix: the original used copy.deepcopy(features), which is useless
        # for immutable tensors (the loop below only rebinds `features`)
        # and can fail on symbolic tensors in graph mode.
        i = features
        for dense in self.affine_layers:
            features = dense(features)
        action_probs = self.action_head(features)
        q = self.q_head(features)
        for dense in self.imitation_layers:
            i = dense(i)
        return KLNetworktype(q, action_probs, i)

    def get_q_values(self, q_tables, actions):
        """Gather the Q-value of the chosen action for each batch row.

        Args:
            q_tables: [batch, num_actions] tensor of per-action values.
            actions: [batch] int64 tensor of action indices.
        """
        indices = tf.stack([tf.range(actions.shape[0], dtype=tf.int64),
                           actions], axis=-1)
        q_values = tf.gather_nd(q_tables, indices=indices)
        return q_values
class KLAgent():
    """Q-learning agent with a KL constraint between the learned target
    policy and an imitated behavior policy (see KLNetwork).

    Supports online learning (interacting with a gym env while filling a
    replay buffer) and offline learning (training from a pre-filled,
    persisted buffer).  Progress is periodically checkpointed and
    evaluated on a fresh copy of the environment.
    """
    def __init__(self, name='LunarLander-v2',
                 num_actions=4,
                 network=KLNetwork,
                 config=config):
        """Create env, online/target networks, optimizer and replay buffer.

        Args:
            name: gym environment id.
            num_actions: size of the discrete action space.
            network: network class to instantiate (online and target copy).
            config: hyperparameter dict (defaults to module-level config).
        """
        self.name = name
        self.env = gym.make(name)
        self.env.spec.max_episode_steps = config['max_episode_steps']
        self.num_actions = num_actions
        self.network = network
        self.config = config
        self.lamda = config['constraint_hyperparm']
        self.gamma = config['gamma']
        # model & target model
        self.model = self.network(num_actions, config['hiddens'], config['activation'])
        self.target_model = self.network(num_actions, config['hiddens'], config['activation'])
        # optimizer
        lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(config['lr'], decay_steps=config['decay_steps'], decay_rate=1)
        self.optimizer = tf.keras.optimizers.Adam(lr_schedule, epsilon=0.00015)
        # loss
        # self.loss = tf.keras.losses.Huber(delta=1.0, reduction=tf.keras.losses.Reduction.NONE)
        # replay buffer
        if config['prioritized_replay']:
            self.replay_buffer = PrioritizedReplayBuffer(size=config['buffer_size'],
                                                    alpha=config['prioritized_replay_alpha'],
                                                    beta=config['prioritized_replay_beta'],
                                                    online=config['online'],
                                                    persistent_directory=config['persistent_directory'],
                                                    episode_counts_to_save=config['episode_counts_to_save'],
                                                    sample_steps_to_refresh=config['sample_steps_to_refresh'])
        else:
            self.replay_buffer = ReplayBuffer(size=config['buffer_size'],
                                              online=config['online'],
                                              persistent_directory=config['persistent_directory'],
                                              episode_counts_to_save=config['episode_counts_to_save'],
                                              sample_steps_to_refresh=config['sample_steps_to_refresh'])
        # training_steps
        self.training_steps = 0
        # evaluation scores
        self.eval_episode_rewards = []
        self.eval_episode_steps = []
    def learn(self):
        """Dispatch to online or offline training based on config['online']."""
        self._learn_online() if self.config['online'] else self._learn_offline()
    def _learn_online(self):
        """Interact with the env, fill the replay buffer, and train.

        Trains every update_period steps once min_replay_history is
        reached, soft-updates the target network, checkpoints and
        evaluates periodically, and stops early when the mean eval
        reward exceeds the configured target.
        """
        config = self.config
        state = self.env.reset()
        episode_id = 0
        for step_id in range(config['max_training_steps']):
            action = self._select_action(state)
            next_state, reward, done, info = self.env.step(action)
            self.replay_buffer.add(state, action, reward, next_state, done, episode_id)
            if len(self.replay_buffer) > config['min_replay_history']:
                if self.training_steps % config['update_period'] == 0:
                    meanq = self._train()
                if self.training_steps % config['target_update_period'] == 0:
                    if len(self.model.weights) != len(self.target_model.weights): # training not started yet
                        # Build target-model variables with a dummy forward pass.
                        self.target_model(state[None])
                    self._update_target_weights()
            state = next_state
            if done:
                state = self.env.reset()
                episode_id += 1
            self.training_steps += 1
            if self.training_steps % config['training_steps_to_checkpoint'] == 0:
                path = config['checkpoint_path'] + 'kl_{}.ckpt'.format(self.training_steps)
                self.save(path)
                print('saving model weights at {}'.format(path))
            if self.training_steps % config['training_steps_to_eval'] == 0:
                self._eval(5)
                # reset env
                # state = self.env.reset()
                # NOTE(review): episode_id is also bumped at every eval, so
                # the printed episode count mixes env and eval increments.
                episode_id += 1
                ### log progress
                mean_episode_reward = np.mean(self.eval_episode_rewards[-10:])
                mean_episode_step = np.mean(self.eval_episode_steps[-10:])
                max_episode_reward = np.max(self.eval_episode_rewards[-10:])
                max_episode_step = np.max(self.eval_episode_steps[-10:])
                print("------------------------------------------------")
                print("episodes %d" % episode_id)
                print("timestep %d" % self.training_steps)
                print("exploration %f" % config['epsilon_fn'](self.training_steps,
                                                              config['epsilon_start'],
                                                              config['epsilon_decay_period'],
                                                              config['epsilon_end'],
                                                              config['min_replay_history']))
                print("learning_rate %f" % self.optimizer.lr(self.training_steps))
                print("mean reward (100 episodes) %f" % mean_episode_reward)
                print("max reward (100 episodes) %f" % max_episode_reward)
                print("mean step (100 episodes) %f" % mean_episode_step)
                print("max step (100 episodes) %f" % max_episode_step)
                if len(self.replay_buffer) > config['min_replay_history']:
                    print("mean q values %f" % meanq)
                sys.stdout.flush()
                if mean_episode_reward > config['target_mean_episode_reward']:
                    break
        # Persist any not-yet-saved trajectories before exiting.
        if len(self.replay_buffer._trajectory_storage) > 0:
            self.replay_buffer.save()
    def _learn_offline(self):
        """Train from the persisted replay buffer without env interaction."""
        config = self.config
        for step_id in range(config['max_training_steps']):
            if self.training_steps % config['update_period'] == 0:
                meanq = self._train()
            if self.training_steps % config['target_update_period'] == 0:
                self._update_target_weights()
            self.training_steps += 1
            if self.training_steps % config['training_steps_to_checkpoint'] == 0:
                # NOTE(review): both branches build the identical path; the
                # config['double'] distinction currently has no effect here.
                if config['double'] == False:
                    path = config['checkpoint_path'] + 'offline_kl_{}.ckpt'.format(self.training_steps)
                else:
                    path = config['checkpoint_path'] + 'offline_kl_{}.ckpt'.format(self.training_steps)
                self.save(path)
                print('saving model weights at {}'.format(path))
            if self.training_steps % config['training_steps_to_eval'] == 0:
                self._eval(5)
                ### log progress
                mean_episode_reward = np.mean(self.eval_episode_rewards[-10:])
                mean_episode_step = np.mean(self.eval_episode_steps[-10:])
                max_episode_reward = np.max(self.eval_episode_rewards[-10:])
                max_episode_step = np.max(self.eval_episode_steps[-10:])
                print("------------------------------------------------")
                print("timestep %d" % self.training_steps)
                print("learning_rate %f" % self.optimizer.lr(self.training_steps))
                print("mean reward (100 episodes) %f" % mean_episode_reward)
                print("max reward (100 episodes) %f" % max_episode_reward)
                print("mean step (100 episodes) %f" % mean_episode_step)
                print("max step (100 episodes) %f" % max_episode_step)
                print("mean q values %f" % meanq)
                sys.stdout.flush()
                if mean_episode_reward > config['target_mean_episode_reward']:
                    break
    def _select_action(self, state):
        """Epsilon-greedy action selection (epsilon_eval when in eval mode)."""
        config = self.config
        if config['eval_mode']:
            epsilon = config['epsilon_eval']
        else:
            epsilon = config['epsilon_fn'](
                self.training_steps,
                config['epsilon_start'],
                config['epsilon_decay_period'],
                config['epsilon_end'],
                config['min_replay_history'])
        if random.random() <= epsilon:
            return random.randint(0, self.num_actions - 1)
        else:
            q, _, _ = tf.stop_gradient(self.model(state[None]))
            action = tf.argmax(q, axis=-1)
            return int(action)
    def _update_target_weights(self):
        """Polyak (soft) update: tgt = tau * tgt + (1 - tau) * online."""
        config = self.config
        weights = self.model.get_weights()
        tgt_weights = self.target_model.get_weights()
        for idx in range(len(weights)):
            tgt_weights[idx] = config['tau'] * tgt_weights[idx] + (1 - config['tau']) * weights[idx]
        self.target_model.set_weights(tgt_weights)
    def _eval(self, n_episodes=5):
        """Run greedy-ish evaluation episodes and record rewards/steps."""
        env = gym.make(self.name)
        self.config['eval_mode'] = True
        for i in range(n_episodes):
            rewards, steps = 0, 0
            state = env.reset()
            # NOTE(review): this reads the module-level `config`, not
            # self.config — they differ if a custom config was passed in.
            for t in range(config['max_episode_steps']):
                action = self._select_action(state)
                next_state, reward, done, info = env.step(action)
                state = next_state
                rewards += reward
                steps += 1
                if done: break
            self.eval_episode_rewards.append(rewards)
            self.eval_episode_steps.append(steps)
        env.close()
        self.config['eval_mode'] = False
    def save(self, path):
        """Save online-model weights to `path`."""
        self.model.save_weights(path)
    def load(self, path):
        """Load weights into both the online and target models."""
        self.model.load_weights(path)
        self.target_model.load_weights(path)
    # def policy(self, states, actions):
    #     q_values = self.model(states).q_values
    #     q_actions = np.argmax(q_values, axis=1)
    #     return tf.cast(actions == q_actions, tf.float32)
    def greedy_actions(self, states):
        """Return argmax-Q actions for a batch of states (no gradients)."""
        q, _, _ = tf.stop_gradient(self.model(states))
        actions = tf.argmax(q, axis=-1)
        return actions
    def _train(self):
        """One gradient step on a sampled batch; returns mean Q for logging.

        Loss = Huber(TD target, Q) + lamda * KL(behavior || target policy)
               + cross-entropy imitation loss on the behavior head.
        Double-DQN style target: actions from the online net, values from
        the target net.
        """
        config = self.config
        transitions = self.replay_buffer.sample(config['batch_size'])
        states, actions, rewards = transitions[0], transitions[1], transitions[2]
        next_states, dones = transitions[3], transitions[4]
        is_non_terminal = 1. - tf.cast(dones, tf.float32)
        # print('state mean: {}, action mean: {}, reward mean: {}'.format(np.mean(states), np.mean(actions), np.mean(rewards)))
        with tf.GradientTape() as tape:
            q_tables_tp1, target_policy_probs, behavior_policy_probs = self.model(next_states)
            next_actions = tf.argmax(q_tables_tp1, axis=-1)
            q_tables_tp1, _, _ = tf.stop_gradient(self.target_model(next_states))
            q_values_tp1 = self.model.get_q_values(q_tables_tp1, next_actions)
            # print(f'values shape: {q_values.shape}, rewards shape: {rewards.shape}, done shape: {is_non_terminal.shape}')
            # compute targets
            targets = rewards + self.gamma * q_values_tp1 * is_non_terminal
            # Get current Q estimate
            q_tables, _, _ = self.model(states)
            q_values = self.model.get_q_values(q_tables, actions)
            # Critic loss
            q_loss = tf.losses.Huber(delta=1.0)(targets, q_values)
            # constraints between behavior and target policy
            kl_loss = tf.reduce_mean(tf.losses.KLD(behavior_policy_probs, target_policy_probs))
            # Actor-Critic loss
            actor_critic_loss = q_loss + self.lamda * kl_loss
            # imitation_loss
            i_loss = tf.reduce_mean(tf.losses.sparse_categorical_crossentropy(actions, behavior_policy_probs, from_logits=False))
            final_loss = actor_critic_loss + i_loss
            # final_loss = actor_critic_loss
        # minimize loss
        grads = tape.gradient(final_loss, self.model.trainable_variables)
        grads = [tf.clip_by_value(grad, -config['grad_clip'], config['grad_clip']) for grad in grads]
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return tf.math.reduce_mean(q_values)
# def main():
# import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set(style="darkgrid")
# config['online'] = False
# config['hiddens'] = [256, 256]
# config['max_training_steps'] = 500000
# config['lr'] = 5e-4
# config['decay_steps'] = 1000000
# config['episode_counts_to_save'] = 100
# config['persistent_directory'] = 'offline/'
# config['checkpoint_path'] = 'offline/ckpts/'
# config['constraint_hyperparm'] = 0.3
# agent = KLAgent(name='LunarLander-v2', num_actions=4, config=config)
# agent.learn()
# if __name__ == '__main__':
# main()
| [
"tensorflow.losses.Huber",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"copy.deepcopy",
"tensorflow.cast",
"gym.make",
"numpy.mean",
"tensorflow.keras.optimizers.schedules.InverseTimeDecay",
"numpy.max",
"tensorflow.math.reduce_mean",
"tensorflow.clip_by_value",
"tensorflow.los... | [((458, 571), 'collections.namedtuple', 'collections.namedtuple', (['"""cross_entropy_network"""', "['q_values', 'target_policy_probs', 'behavior_policy_probs']"], {}), "('cross_entropy_network', ['q_values',\n 'target_policy_probs', 'behavior_policy_probs'])\n", (480, 571), False, 'import collections\n'), ((928, 984), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_actions'], {'activation': '"""softmax"""'}), "(num_actions, activation='softmax')\n", (949, 984), True, 'import tensorflow as tf\n'), ((1007, 1058), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_actions'], {'activation': 'None'}), '(num_actions, activation=None)\n', (1028, 1058), True, 'import tensorflow as tf\n'), ((1484, 1540), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_actions'], {'activation': '"""softmax"""'}), "(num_actions, activation='softmax')\n", (1505, 1540), True, 'import tensorflow as tf\n'), ((1658, 1684), 'tensorflow.cast', 'tf.cast', (['state', 'tf.float32'], {}), '(state, tf.float32)\n', (1665, 1684), True, 'import tensorflow as tf\n'), ((1746, 1769), 'copy.deepcopy', 'copy.deepcopy', (['features'], {}), '(features)\n', (1759, 1769), False, 'import copy\n'), ((2427, 2466), 'tensorflow.gather_nd', 'tf.gather_nd', (['q_tables'], {'indices': 'indices'}), '(q_tables, indices=indices)\n', (2439, 2466), True, 'import tensorflow as tf\n'), ((2709, 2723), 'gym.make', 'gym.make', (['name'], {}), '(name)\n', (2717, 2723), False, 'import gym\n'), ((3247, 3361), 'tensorflow.keras.optimizers.schedules.InverseTimeDecay', 'tf.keras.optimizers.schedules.InverseTimeDecay', (["config['lr']"], {'decay_steps': "config['decay_steps']", 'decay_rate': '(1)'}), "(config['lr'], decay_steps=\n config['decay_steps'], decay_rate=1)\n", (3293, 3361), True, 'import tensorflow as tf\n'), ((3382, 3436), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr_schedule'], {'epsilon': '(0.00015)'}), '(lr_schedule, 
epsilon=0.00015)\n', (3406, 3436), True, 'import tensorflow as tf\n'), ((11479, 11498), 'gym.make', 'gym.make', (['self.name'], {}), '(self.name)\n', (11487, 11498), False, 'import gym\n'), ((12631, 12652), 'tensorflow.argmax', 'tf.argmax', (['q'], {'axis': '(-1)'}), '(q, axis=-1)\n', (12640, 12652), True, 'import tensorflow as tf\n'), ((15116, 15145), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['q_values'], {}), '(q_values)\n', (15135, 15145), True, 'import tensorflow as tf\n'), ((811, 852), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden', 'activation'], {}), '(hidden, activation)\n', (832, 852), True, 'import tensorflow as tf\n'), ((1369, 1410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden', 'activation'], {}), '(hidden, activation)\n', (1390, 1410), True, 'import tensorflow as tf\n'), ((3647, 3998), 'peal.replay_buffers.replay_buffer.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', ([], {'size': "config['buffer_size']", 'alpha': "config['prioritized_replay_alpha']", 'beta': "config['prioritized_replay_beta']", 'online': "config['online']", 'persistent_directory': "config['persistent_directory']", 'episode_counts_to_save': "config['episode_counts_to_save']", 'sample_steps_to_refresh': "config['sample_steps_to_refresh']"}), "(size=config['buffer_size'], alpha=config[\n 'prioritized_replay_alpha'], beta=config['prioritized_replay_beta'],\n online=config['online'], persistent_directory=config[\n 'persistent_directory'], episode_counts_to_save=config[\n 'episode_counts_to_save'], sample_steps_to_refresh=config[\n 'sample_steps_to_refresh'])\n", (3670, 3998), False, 'from peal.replay_buffers.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((4271, 4517), 'peal.replay_buffers.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'size': "config['buffer_size']", 'online': "config['online']", 'persistent_directory': "config['persistent_directory']", 'episode_counts_to_save': 
"config['episode_counts_to_save']", 'sample_steps_to_refresh': "config['sample_steps_to_refresh']"}), "(size=config['buffer_size'], online=config['online'],\n persistent_directory=config['persistent_directory'],\n episode_counts_to_save=config['episode_counts_to_save'],\n sample_steps_to_refresh=config['sample_steps_to_refresh'])\n", (4283, 4517), False, 'from peal.replay_buffers.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((10810, 10825), 'random.random', 'random.random', ([], {}), '()\n', (10823, 10825), False, 'import random\n'), ((10857, 10896), 'random.randint', 'random.randint', (['(0)', '(self.num_actions - 1)'], {}), '(0, self.num_actions - 1)\n', (10871, 10896), False, 'import random\n'), ((10997, 11018), 'tensorflow.argmax', 'tf.argmax', (['q'], {'axis': '(-1)'}), '(q, axis=-1)\n', (11006, 11018), True, 'import tensorflow as tf\n'), ((13002, 13028), 'tensorflow.cast', 'tf.cast', (['dones', 'tf.float32'], {}), '(dones, tf.float32)\n', (13009, 13028), True, 'import tensorflow as tf\n'), ((13188, 13205), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (13203, 13205), True, 'import tensorflow as tf\n'), ((13363, 13395), 'tensorflow.argmax', 'tf.argmax', (['q_tables_tp1'], {'axis': '(-1)'}), '(q_tables_tp1, axis=-1)\n', (13372, 13395), True, 'import tensorflow as tf\n'), ((14924, 14989), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', "(-config['grad_clip'])", "config['grad_clip']"], {}), "(grad, -config['grad_clip'], config['grad_clip'])\n", (14940, 14989), True, 'import tensorflow as tf\n'), ((2295, 2337), 'tensorflow.range', 'tf.range', (['actions.shape[0]'], {'dtype': 'tf.int64'}), '(actions.shape[0], dtype=tf.int64)\n', (2303, 2337), True, 'import tensorflow as tf\n'), ((6623, 6663), 'numpy.mean', 'np.mean', (['self.eval_episode_rewards[-10:]'], {}), '(self.eval_episode_rewards[-10:])\n', (6630, 6663), True, 'import numpy as np\n'), ((6700, 6738), 'numpy.mean', 'np.mean', (['self.eval_episode_steps[-10:]'], 
{}), '(self.eval_episode_steps[-10:])\n', (6707, 6738), True, 'import numpy as np\n'), ((6776, 6815), 'numpy.max', 'np.max', (['self.eval_episode_rewards[-10:]'], {}), '(self.eval_episode_rewards[-10:])\n', (6782, 6815), True, 'import numpy as np\n'), ((6852, 6889), 'numpy.max', 'np.max', (['self.eval_episode_steps[-10:]'], {}), '(self.eval_episode_steps[-10:])\n', (6858, 6889), True, 'import numpy as np\n'), ((8040, 8058), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8056, 8058), False, 'import sys\n'), ((9346, 9386), 'numpy.mean', 'np.mean', (['self.eval_episode_rewards[-10:]'], {}), '(self.eval_episode_rewards[-10:])\n', (9353, 9386), True, 'import numpy as np\n'), ((9423, 9461), 'numpy.mean', 'np.mean', (['self.eval_episode_steps[-10:]'], {}), '(self.eval_episode_steps[-10:])\n', (9430, 9461), True, 'import numpy as np\n'), ((9499, 9538), 'numpy.max', 'np.max', (['self.eval_episode_rewards[-10:]'], {}), '(self.eval_episode_rewards[-10:])\n', (9505, 9538), True, 'import numpy as np\n'), ((9574, 9611), 'numpy.max', 'np.max', (['self.eval_episode_steps[-10:]'], {}), '(self.eval_episode_steps[-10:])\n', (9580, 9611), True, 'import numpy as np\n'), ((10203, 10221), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10219, 10221), False, 'import sys\n'), ((14097, 14123), 'tensorflow.losses.Huber', 'tf.losses.Huber', ([], {'delta': '(1.0)'}), '(delta=1.0)\n', (14112, 14123), True, 'import tensorflow as tf\n'), ((14276, 14333), 'tensorflow.losses.KLD', 'tf.losses.KLD', (['behavior_policy_probs', 'target_policy_probs'], {}), '(behavior_policy_probs, target_policy_probs)\n', (14289, 14333), True, 'import tensorflow as tf\n'), ((14545, 14641), 'tensorflow.losses.sparse_categorical_crossentropy', 'tf.losses.sparse_categorical_crossentropy', (['actions', 'behavior_policy_probs'], {'from_logits': '(False)'}), '(actions, behavior_policy_probs,\n from_logits=False)\n', (14586, 14641), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib.pylab as plt
import sys
def run():
    """Plot the activation function named on the command line.

    Reads the target name from ``sys.argv[1]``. Recognised values are
    'step', 'sigmoid', 'relu' and 'all' (which overlays all three curves
    in that order). Any other value is echoed but produces no plot.
    """
    visualizeTarget = sys.argv[1]
    print(visualizeTarget)
    # The original computed this identical abscissa in every branch
    # (three times for 'all'); compute it once instead.
    x = np.arange(-5.0, 5.0, 0.1)
    if visualizeTarget == 'step':
        plt.plot(x, step(x))
        plt.ylim(-0.1, 1.1)
        plt.show()
    elif visualizeTarget == 'sigmoid':
        plt.plot(x, sigmoid(x))
        plt.ylim(-0.1, 1.1)
        plt.show()
    elif visualizeTarget == 'relu':
        # No y-limit here: relu is unbounded above.
        plt.plot(x, relu(x))
        plt.show()
    elif visualizeTarget == 'all':
        # Overlay step, sigmoid and relu on a single figure.
        for activation in (step, sigmoid, relu):
            plt.plot(x, activation(x))
        plt.show()
# for x in sys.argv:
# print(x)
class variable():
    """Minimal wrapper holding a single value on the ``data`` attribute."""

    def __init__(self, value):
        # Expose the wrapped value directly as a public attribute.
        self.data = value

    def read(self):
        """Return the stored value unchanged."""
        return self.data
def test():
    """Smoke test: check the ``variable`` wrapper and print a sigmoid sample."""
    wrapped = variable(424)
    print(wrapped.read() == 424)  # expected to print True
    sample = np.array([2, 3, 1, 4, 2])
    print(sample)
    print(sigmoid(sample))
def TestSimpleANDGate():
    """Print the full truth table of the threshold-based AND gate."""
    print('simple AND gate test')
    for a, b in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(SimpleANDGate(a, b))
def SimpleANDGate(x1, x2):
    """AND gate via a weighted sum compared against a fixed threshold.

    Returns 1 only when both inputs are 1, otherwise 0.
    """
    w1, w2, theta = 0.5, 0.5, 0.7
    weighted_sum = x1 * w1 + x2 * w2
    # Fires (returns 1) only when the weighted sum exceeds the threshold.
    return 1 if weighted_sum > theta else 0
def TestANDGate():
    """Print the full truth table of the bias-based AND gate."""
    print('and gate test')
    for pair in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(ANDGate(*pair))
def ANDGate(x1, x2):
    """Perceptron-style AND gate: weighted sum plus bias, thresholded at 0."""
    inputs = np.array([x1, x2])
    weights = np.array([0.5, 0.5])
    bias = -0.7
    activation = np.sum(weights * inputs) + bias
    # Only (1, 1) pushes the activation above zero.
    return 0 if activation <= 0 else 1
def TestNANDGate():
    """Print the full truth table of the NAND gate."""
    print('nand gate test')
    for pair in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(NANDGate(*pair))
def NANDGate(x1, x2):
    """Perceptron-style NAND gate (AND gate with negated weights and bias)."""
    inputs = np.array([x1, x2])
    weights = np.array([-0.5, -0.5])
    bias = 0.7
    activation = np.sum(weights * inputs) + bias
    # Only (1, 1) drives the activation to or below zero.
    return 0 if activation <= 0 else 1
def TestORGate():
    """Print the full truth table of the OR gate."""
    print('OR gate test')
    for pair in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(ORGate(*pair))
def ORGate(x1, x2):
    """Perceptron-style OR gate: same weights as AND but a weaker bias."""
    inputs = np.array([x1, x2])
    weights = np.array([0.5, 0.5])
    bias = -0.2
    activation = np.sum(weights * inputs) + bias
    # Any non-zero input lifts the activation above zero.
    return 0 if activation <= 0 else 1
def XORGate(x1, x2):
    """XOR built as a two-layer circuit: AND(OR(x1, x2), NAND(x1, x2))."""
    or_out = ORGate(x1, x2)
    nand_out = NANDGate(x1, x2)
    return ANDGate(or_out, nand_out)
def step(x):
    """Heaviside step function: 1 where x > 0, else 0 (element-wise).

    Accepts a NumPy array and returns an integer array of the same shape.
    """
    # The np.int alias was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` requests the default integer dtype and is the
    # supported spelling.
    return (x > 0).astype(int)
def simple_step(value):
    """Scalar step function: 0 for inputs at or below zero, otherwise 1."""
    return 0 if value <= 0 else 1
def sigmoid(value):
    """Logistic sigmoid 1 / (1 + e^(-value)); element-wise on arrays."""
    return 1.0 / (1.0 + np.exp(-value))
def relu(x):
    """Rectified linear unit: element-wise maximum of x and zero."""
    return np.maximum(x, 0)
class MultiplyLayer:
    """Multiplication node for backpropagation practice.

    ``forward`` caches its two operands so that ``backward`` can route the
    upstream gradient to each input (d(xy)/dx = y, d(xy)/dy = x).
    """

    def __init__(self):
        # Operands are cached by forward(); None until the first call.
        self.x = None
        self.y = None

    def forward(self, x, y):
        """Multiply the operands and remember them for the backward pass."""
        self.x = x
        self.y = y
        return x * y

    def backward(self, dout):
        """Return (dx, dy): upstream gradient times the *other* operand."""
        return dout * self.y, dout * self.x
def matrixTest1():
    """Print a 3x2 array together with its dimensionality and shape."""
    print('mat')
    sample = np.array([[1, 2], [3, 4], [5, 6]])
    print(sample)
    print(np.ndim(sample))   # number of array dimensions
    print(sample.shape)     # length of every dimension
def matrixMultiplyTest():
    """Demonstrate np.dot for matrix-matrix, matrix-vector and vector-matrix."""
    print('multiply')
    a = np.array([[1, 2], [3, 4]])
    print(a.shape)
    b = np.array([[5, 6], [7, 8]])
    print(b.shape)
    print(np.dot(a, b))  # 2x2 times 2x2 matrix product
    a = np.array([[1, 2], [3, 4], [5, 6]])
    print(a.shape)
    b = np.array([7, 8])
    print(b.shape)
    print(np.dot(a, b))  # 3x2 matrix times length-2 vector
    x = np.array([1, 2])
    W = np.array([[1, 3, 5], [2, 4, 6]])
    y = np.dot(x, W)
    print(y)
# Script entry point.  The original guard compared __name__ against
# 'main', which never matches: when a module is executed directly its
# __name__ is '__main__'.  As written, run() was never called.
if __name__ == '__main__':
    # test()
    # TestSimpleANDGate()
    # matrixTest1()
    # matrixMultiplyTest()
    run()
| [
"matplotlib.pylab.ylim",
"numpy.ndim",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.sum",
"matplotlib.pylab.show",
"matplotlib.pylab.plot",
"numpy.maximum",
"numpy.arange"
] | [((1188, 1213), 'numpy.array', 'np.array', (['[2, 3, 1, 4, 2]'], {}), '([2, 3, 1, 4, 2])\n', (1196, 1213), True, 'import numpy as np\n'), ((1735, 1753), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (1743, 1753), True, 'import numpy as np\n'), ((1759, 1779), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (1767, 1779), True, 'import numpy as np\n'), ((2036, 2054), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (2044, 2054), True, 'import numpy as np\n'), ((2060, 2082), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (2068, 2082), True, 'import numpy as np\n'), ((2324, 2342), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (2332, 2342), True, 'import numpy as np\n'), ((2348, 2368), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (2356, 2368), True, 'import numpy as np\n'), ((2739, 2755), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (2749, 2755), True, 'import numpy as np\n'), ((3089, 3123), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3097, 3123), True, 'import numpy as np\n'), ((3268, 3294), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3276, 3294), True, 'import numpy as np\n'), ((3317, 3343), 'numpy.array', 'np.array', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (3325, 3343), True, 'import numpy as np\n'), ((3401, 3435), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3409, 3435), True, 'import numpy as np\n'), ((3456, 3472), 'numpy.array', 'np.array', (['[7, 8]'], {}), '([7, 8])\n', (3464, 3472), True, 'import numpy as np\n'), ((3522, 3538), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3530, 3538), True, 'import numpy as np\n'), ((3546, 3578), 'numpy.array', 'np.array', (['[[1, 3, 5], [2, 4, 6]]'], {}), '([[1, 3, 5], [2, 4, 6]])\n', (3554, 3578), True, 'import numpy as np\n'), 
((3582, 3594), 'numpy.dot', 'np.dot', (['x', 'W'], {}), '(x, W)\n', (3588, 3594), True, 'import numpy as np\n'), ((182, 207), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (191, 207), True, 'import numpy as np\n'), ((232, 246), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (240, 246), True, 'import matplotlib.pylab as plt\n'), ((254, 273), 'matplotlib.pylab.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (262, 273), True, 'import matplotlib.pylab as plt\n'), ((281, 291), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (289, 291), True, 'import matplotlib.pylab as plt\n'), ((1798, 1811), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (1804, 1811), True, 'import numpy as np\n'), ((2100, 2113), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (2106, 2113), True, 'import numpy as np\n'), ((2387, 2400), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (2393, 2400), True, 'import numpy as np\n'), ((3142, 3152), 'numpy.ndim', 'np.ndim', (['b'], {}), '(b)\n', (3149, 3152), True, 'import numpy as np\n'), ((3370, 3382), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (3376, 3382), True, 'import numpy as np\n'), ((3501, 3513), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (3507, 3513), True, 'import numpy as np\n'), ((340, 365), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (349, 365), True, 'import numpy as np\n'), ((393, 407), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (401, 407), True, 'import matplotlib.pylab as plt\n'), ((415, 434), 'matplotlib.pylab.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (423, 434), True, 'import matplotlib.pylab as plt\n'), ((442, 452), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (450, 452), True, 'import matplotlib.pylab as plt\n'), ((2698, 2712), 'numpy.exp', 'np.exp', (['(-value)'], {}), '(-value)\n', (2704, 2712), True, 'import numpy as 
np\n'), ((498, 523), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (507, 523), True, 'import numpy as np\n'), ((548, 562), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (556, 562), True, 'import matplotlib.pylab as plt\n'), ((599, 609), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (607, 609), True, 'import matplotlib.pylab as plt\n'), ((654, 679), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (663, 679), True, 'import numpy as np\n'), ((704, 718), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (712, 718), True, 'import matplotlib.pylab as plt\n'), ((757, 782), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (766, 782), True, 'import numpy as np\n'), ((810, 824), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (818, 824), True, 'import matplotlib.pylab as plt\n'), ((834, 859), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (843, 859), True, 'import numpy as np\n'), ((884, 898), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (892, 898), True, 'import matplotlib.pylab as plt\n'), ((935, 945), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (943, 945), True, 'import matplotlib.pylab as plt\n')] |
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Version: 2019-10-03
"""
import numpy as np
from scipy.linalg import expm
#from pykalman import KalmanFilter as KF
if (__name__ == '__main__'):
import config
else:
import myModules.config as config
class EKF:
    """Extended Kalman filter for a point-reactor kinetics model.

    The state vector is
    [n, c_1, ..., c_6, rho, beta_1, ..., beta_6, lambda_1, ..., lambda_6, Lambda]
    (see ``predict``/``jacobian_pred``).  Only the first component is
    observed, as encoded by ``obs_mat`` in ``filter``.
    """
    def __init__(self, point_reactor, tstep):
        # point_reactor: model providing rod_drop(), PRKE_matrix(), nvars
        # and the index/slice attributes (ind_rho, slc_beta_l, ...).
        self.point_reactor = point_reactor
        # tstep: prediction horizon between successive observations.
        self.tstep = tstep
    def nvars(self):
        """Number of dependent state variables (n and the precursor groups)."""
        return self.point_reactor.nvars
    def filter(self, observations, state_initial, COV_initial):
        """Assimilate ``observations`` one by one; return (states, COVs).

        The first observation updates the initial guess directly; every
        later one is preceded by a model prediction over one ``tstep``.
        """
        # Observation matrix: only state[0] (the count rate n) is measured.
        self.obs_mat = np.zeros(self.point_reactor.state_dims)
        self.obs_mat[0] = 1
        # Start by updating the initial guess with the initial measurement:
        state_upd, COV_upd = self.update(state_initial, COV_initial, observations[0])
        states = [state_upd]
        COVs = [COV_upd]
        for obs in observations[1:]:
            # Prediction step:
            state_pred = self.predict(states[-1])
            Jac = self.jacobian_pred(states[-1])
            # COV_pred = Jac @ COVs[-1] @ Jac.T + COV_transition
            COV_pred = Jac @ COVs[-1] @ Jac.T
            # Transition noise is added to the dependent variables only,
            # with standard deviation proportional to the predicted state.
            COV_pred[:self.nvars(), :self.nvars()] = COV_pred[:self.nvars(), :self.nvars()] + np.diag( np.power(
                config.stdev_transition_dep * state_pred[:self.nvars()]
                , 2))
            # Update step:
            state_upd, COV_upd = self.update(state_pred, COV_pred, obs)
            states.append(state_upd)
            COVs.append(COV_upd)
        return np.array(states), np.array(COVs)
    def update(self, state_pred, COV_pred, obs):
        """Update step (eqs are same as linear KF):"""
        # Counting statistics: the measurement is Poisson distributed, so
        # its variance is taken equal to the measured value itself.
        COV_obs = obs # obs = Poiss(obs)
        # Kalman gain; the denominator is a scalar since a single value
        # (the count rate) is observed.
        K = COV_pred @ self.obs_mat.T / (self.obs_mat @ COV_pred @ self.obs_mat.T + COV_obs)
        state_upd = state_pred + np.dot(K, obs - self.obs_mat @ state_pred)
        COV_upd = COV_pred - K.reshape((K.size,1)) @ self.obs_mat.reshape((1, self.obs_mat.size)) @ COV_pred
        return state_upd, COV_upd
    def predict(self, state):
        """
        INPUT:
            state at t0 = [n, c_1, ... c_6, rho, beta_1, ..., beta_6, lambda_1, ..., lambda_6, Lambda]
        OUTPUT:
            state at t0 + self.tstep
        """
        # Propagate the state through the point-reactor model for one step;
        # only the final state is needed, so intermediate steps are dropped.
        return self.point_reactor.rod_drop(
                (0, self.tstep),
                initial_state=state,
                store_time_steps=False,
                )
    def jacobian_pred(self, state):
        """
        Based on state = [n, c_1, ..., c_6, rho, beta_1, ..., beta_6, lambda_1, ..., lambda_6, Lambda]^T
        """
        # We have equations for dx/dt = f(x), for which we can compute the Jacobian. We obtain the Jacobian PHI with x^{(i|i-1)} approx.= PHI @ x^{(i-1|i-1)} by taking the matrix exponential of Jac_of_dx_dt*tstep .
        # The Jacobian will be of dimension state_dims x state_dims
        Jac_of_dx_dt = np.zeros([self.point_reactor.state_dims]*2)
        # dd/dt = A @ d where d = x[:nvars] are the dependent variables
        A = self.point_reactor.PRKE_matrix(state=state)
        # So del (dd/dt) / del d = A
        Jac_of_dx_dt[:self.nvars(), :self.nvars()] = A
        # Still missing del (dd/dt) / del e, del (de/dt) / del d, and del (de/dt) / del e, where e = x[nvars:] are the independent variables (estimated "parameters")
        # e are the independent variables. They are assumed constant. Hence:
        # de/dt = 0 --> del (de/dt) / del x = 0 <-- already 0 in Jac
        # dd/dt = A @ d, where A is a (nonlinear) function of the independent variables e. So we derived the expressions for the terms of del (dd/dt) / del e (by hand).
        slc_c_l = slice(1, self.nvars())
        n = state[0]
        c_l = state[slc_c_l]
        rho = state[self.point_reactor.ind_rho]
        beta_l = state[self.point_reactor.slc_beta_l]
        Lambda = state[self.point_reactor.ind_Lambda]
        # dn/dt = (rho - beta) / Lambda * n + lambda_l @ beta_l
        # => del (dn/dt) / del rho = n / Lambda ...etc
        Jac_of_dx_dt[0, self.point_reactor.ind_rho] = n / Lambda
        Jac_of_dx_dt[0, self.point_reactor.slc_beta_l] = - n / Lambda
        Jac_of_dx_dt[0, self.point_reactor.slc_lambda_l] = c_l
        Jac_of_dx_dt[0, self.point_reactor.ind_Lambda] = (beta_l.sum() - rho) * state[0] / Lambda**2
        # dc_l/dt = beta_l / Lambda * n - lambda_l * c_l
        Jac_of_dx_dt[slc_c_l, self.point_reactor.slc_beta_l] = n / Lambda * np.eye(self.point_reactor.fuel.ngroups())
        Jac_of_dx_dt[slc_c_l, self.point_reactor.slc_lambda_l] = np.diag(- c_l)
        Jac_of_dx_dt[slc_c_l, self.point_reactor.ind_Lambda] = - n / Lambda**2 * beta_l
        # PHI = matrix exponential of (Jac_of_dx_dt * tstep)
        return expm(Jac_of_dx_dt * self.tstep)
if __name__ == '__main__':
    # Ad-hoc smoke test: configure the shared `config` module, build a
    # point-reactor model from MATLAB-exported kinetics parameters, and
    # run the EKF once on a short observation sequence.
    config.include_reactivity = True
    config.process_noise_on_params = False
    config.noise_before = False
    config.compare_purely_model_based = True
    config.use_EKF = False
    config.use_UKF = True
    config.stdev_initial_factor = 0.5
    config.stdev_transition_dep = 1e-3
    import PRK as prk
    reactivity_unitless = 0.00112
    # Delayed-neutron parameters (mean and std for beta_l, lambda_l,
    # lifetime and generation time) copied from a MATLAB .mat export
    # (see the '__header__' entry).
    params = {'__header__': b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed May 15 13:30:01 2019', '__version__': '1.0', '__globals__': [], 'BETA_MEAN': np.array([[7.354466e-03, 2.381485e-04, 1.261007e-03, 1.228657e-03,
        2.838228e-03, 1.263191e-03, 5.252351e-04]]), 'BETA_STD': np.array([[7.245201e-05, 1.211003e-05, 2.883927e-05, 2.757947e-05,
        4.144158e-05, 2.897984e-05, 1.774963e-05]]), 'LAMBDA_MEAN': np.array([[4.986124e-01, 1.335352e-02, 3.261235e-02, 1.210585e-01,
        3.056654e-01, 8.610382e-01, 2.892019e+00]]), 'LAMBDA_STD': np.array([[6.465619e-03, 3.066375e-06, 9.880134e-06, 2.044211e-05,
        1.411160e-04, 6.096978e-04, 2.933295e-03]]), 'LIFETIME_MEAN': np.array([[4.717137e-05, 4.715002e-05, 5.005594e-05]]), 'LIFETIME_STD': np.array([[1.827880e-07, 1.833994e-07, 4.855028e-07]]), 'GEN_TIME_MEAN': np.array([[4.686777e-05, 4.684656e-05, 4.973400e-05]]), 'GEN_TIME_STD': np.array([[9.682227e-08, 9.715106e-08, 4.796570e-07]])}
    # [1:] drops the first array entry — presumably an aggregate/total
    # value rather than a per-group one; confirm against PRK.
    crocus = prk.PointReactor(params['LAMBDA_MEAN'].flatten()[1:], params['BETA_MEAN'].flatten()[1:], params['GEN_TIME_MEAN'].flatten()[0])
    crocus.set_include_params(True)
    uncertainties = prk.Fuel(params['LAMBDA_STD'].flatten()[1:], params['BETA_STD'].flatten()[1:], params['GEN_TIME_STD'].flatten()[0])
    ekf = EKF(crocus, 1e-1)
    state_initial = crocus.stationary_PRKE_solution(24, reactivity=reactivity_unitless)
    print(state_initial)
    jac_initial = ekf.jacobian_pred(state_initial)
    observations = [24, 25, 26, 28]
    # Initial covariance: squared uncertainties — a configurable fraction
    # of the dependent variables, concatenated with the parameter stds.
    COV_initial = np.diag( np.power(
        np.concatenate((
            state_initial[:crocus.nvars] * config.stdev_initial_factor,
            uncertainties.param_values(5.883572956799998e-05)
        )),
        2) )
    states, COVs = ekf.filter(observations, state_initial, COV_initial)
### The following is wrong!
# fac1 = (rho - beta_l.sum()) * self.tstep / Lambda
# fac2 = state[0] * self.tstep / Lambda
# dn_by_dlambda_l = c_l * self.tstep * np.exp(lambda_l * self.tstep)
# dc_l_by_dbeta_l = fac2 * np.exp(beta_l * self.tstep / Lambda)
#
# Jac[0, self.point_reactor.ind_rho] = fac2 * np.exp(fac1)
# Jac[0, self.point_reactor.slc_beta_l] = -1 * Jac[0, self.point_reactor.ind_rho]
# Jac[0, self.point_reactor.slc_lambda_l] = dn_by_dlambda_l
# Jac[0, self.point_reactor.ind_Lambda] = -1 * Jac[0, self.point_reactor.ind_rho] * fac1 / self.tstep
#
# Jac[1:self.nvars(), self.point_reactor.slc_beta_l] = np.diag( dc_l_by_dbeta_l )
# Jac[1:self.nvars(), self.point_reactor.slc_lambda_l] = - dn_by_dlambda_l
# Jac[1:self.nvars(), self.point_reactor.ind_Lambda] = -1 * beta_l / Lambda * dc_l_by_dbeta_l
# return Jac | [
"numpy.diag",
"numpy.array",
"scipy.linalg.expm",
"numpy.zeros",
"numpy.dot"
] | [((539, 578), 'numpy.zeros', 'np.zeros', (['self.point_reactor.state_dims'], {}), '(self.point_reactor.state_dims)\n', (547, 578), True, 'import numpy as np\n'), ((2951, 2996), 'numpy.zeros', 'np.zeros', (['([self.point_reactor.state_dims] * 2)'], {}), '([self.point_reactor.state_dims] * 2)\n', (2959, 2996), True, 'import numpy as np\n'), ((4682, 4695), 'numpy.diag', 'np.diag', (['(-c_l)'], {}), '(-c_l)\n', (4689, 4695), True, 'import numpy as np\n'), ((4870, 4901), 'scipy.linalg.expm', 'expm', (['(Jac_of_dx_dt * self.tstep)'], {}), '(Jac_of_dx_dt * self.tstep)\n', (4874, 4901), False, 'from scipy.linalg import expm\n'), ((5455, 5564), 'numpy.array', 'np.array', (['[[0.007354466, 0.0002381485, 0.001261007, 0.001228657, 0.002838228, \n 0.001263191, 0.0005252351]]'], {}), '([[0.007354466, 0.0002381485, 0.001261007, 0.001228657, 0.002838228,\n 0.001263191, 0.0005252351]])\n', (5463, 5564), True, 'import numpy as np\n'), ((5587, 5702), 'numpy.array', 'np.array', (['[[7.245201e-05, 1.211003e-05, 2.883927e-05, 2.757947e-05, 4.144158e-05, \n 2.897984e-05, 1.774963e-05]]'], {}), '([[7.245201e-05, 1.211003e-05, 2.883927e-05, 2.757947e-05, \n 4.144158e-05, 2.897984e-05, 1.774963e-05]])\n', (5595, 5702), True, 'import numpy as np\n'), ((5722, 5817), 'numpy.array', 'np.array', (['[[0.4986124, 0.01335352, 0.03261235, 0.1210585, 0.3056654, 0.8610382, 2.892019]\n ]'], {}), '([[0.4986124, 0.01335352, 0.03261235, 0.1210585, 0.3056654, \n 0.8610382, 2.892019]])\n', (5730, 5817), True, 'import numpy as np\n'), ((5856, 5968), 'numpy.array', 'np.array', (['[[0.006465619, 3.066375e-06, 9.880134e-06, 2.044211e-05, 0.000141116, \n 0.0006096978, 0.002933295]]'], {}), '([[0.006465619, 3.066375e-06, 9.880134e-06, 2.044211e-05, \n 0.000141116, 0.0006096978, 0.002933295]])\n', (5864, 5968), True, 'import numpy as np\n'), ((5993, 6047), 'numpy.array', 'np.array', (['[[4.717137e-05, 4.715002e-05, 5.005594e-05]]'], {}), '([[4.717137e-05, 4.715002e-05, 5.005594e-05]])\n', (6001, 6047), True, 
'import numpy as np\n'), ((6065, 6118), 'numpy.array', 'np.array', (['[[1.82788e-07, 1.833994e-07, 4.855028e-07]]'], {}), '([[1.82788e-07, 1.833994e-07, 4.855028e-07]])\n', (6073, 6118), True, 'import numpy as np\n'), ((6138, 6190), 'numpy.array', 'np.array', (['[[4.686777e-05, 4.684656e-05, 4.9734e-05]]'], {}), '([[4.686777e-05, 4.684656e-05, 4.9734e-05]])\n', (6146, 6190), True, 'import numpy as np\n'), ((6210, 6263), 'numpy.array', 'np.array', (['[[9.682227e-08, 9.715106e-08, 4.79657e-07]]'], {}), '([[9.682227e-08, 9.715106e-08, 4.79657e-07]])\n', (6218, 6263), True, 'import numpy as np\n'), ((1556, 1572), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (1564, 1572), True, 'import numpy as np\n'), ((1574, 1588), 'numpy.array', 'np.array', (['COVs'], {}), '(COVs)\n', (1582, 1588), True, 'import numpy as np\n'), ((1865, 1907), 'numpy.dot', 'np.dot', (['K', '(obs - self.obs_mat @ state_pred)'], {}), '(K, obs - self.obs_mat @ state_pred)\n', (1871, 1907), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" This file contains a class which handles the `teili` / `brian2` side
of the high-level network compiler to the ORCA processor.
The ORCA processor is described [here](https://www.frontiersin.org/articles/10.3389/fnins.2018.00213/full).
A documentation on `teili` can be found [here](https://teili.readthedocs.io/en/latest/)
`Teili` uses `brian2` to simulate Spiking Neural Networks. More information on `brian2`
can be found [here](https://brian2.readthedocs.io/en/stable/).
To install teili with all dependencies please run:
**pip install teili**
for more information please refer to the [installation instructions](https://teili.readthedocs.io/en/latest/scripts/Getting%20started.html#install-python-requirements).
"""
# @Author: schlowm0 (<NAME>)
# @AuthorEmail: <EMAIL>
# @Date: 12/09/2019
import os
import numpy as np
import pickle
import collections
from brian2 import Network, Synapses, NeuronGroup
from brian2 import SpikeGeneratorGroup, PoissonGroup
from teili import TeiliNetwork, Connections, Neurons
class Speed(TeiliNetwork, Network):
    """This class provides the first iteration of a high-level
    interface to the ORCA Neuromorphic Signal Processor (NSP) developed
    at ICNS, Western Sydney University.
    The ORCA NSP a fully digital FPGA neural network simulator for very-large
    scale event-based Spiking Neural Networks suited to operate on event-based
    data as provided by the Dynamic Vision Sensor.
    Attributes:
        conn_groups (dict): Contains a dictionary of all synaptic groups
            which are present in the network. Each group has a unique ID.
        neuron_groups (dict): Contains a dictionary of all neuron groups
            which are present in the network. Each group has a unique ID.
        spikegen_groups (dict): Contains a dictionary of all
            `SpikeGeneratorGroup`s which are present in the network.
        poisson_groups (dict): Contains a dictionary of all
            `PoissonGroup`s which are present in the network.
        total_num_neurons (int): Total number of neurons in the network.
        total_num_synapses (int): Total number of synapses in the network.
        neuron_populations (dict): {'unique population ID': N}
        neuron_params (dict): {'unique population ID': parameters}
        synapse_populations (dict): {'unique synapse ID': [ID_pre, ID_post]}
        synapse_params (dict): {'unique synapse ID': parameters}
        synapse_tags (dict): Tags help to identify synaptic properties. Tags
            consist of
            {'sign', 'target_sign', 'p_connection', 'plastic', 'mean', 'std'}
            where
            * sign (str) : 'exc' | 'inh'
            * target_sign (str) : 'exc' | 'inh'
            * p_connection (float): [0, 1]
            * plastic (bool) : True | False
            * mean (float): [0, 1]
            * std (float): [0, 1]
        net_dict (dict): Dictionary containing all necessary information
            to program the ORCA processor. The dictionary is structured
            as following:
            * total_n (int): Total number of neurons
            * total_s (int): Total number of synapses
            * n_pop (dict): {'unique population ID': N}
            * s_pop (dict): {'unique synapse ID': [ID_pre, ID_post]}
            * s_tags (dict): {'unique synapse ID': synapse_tags}
            * n_params (dict): {'unique population ID': parameters}
            * s_params (dict): {'unique synapse ID': parameters}
    """
    def __init__(self, net):
        """Sort the objects of ``net`` by type and build ``net_dict``.
        Args:
            net (TeiliNetwork or brian2.Network): Fully constructed
                network; its ``objects`` are scanned for neuron, synapse,
                Poisson and spike-generator groups.
        """
        # Bucket every network object by concrete type, keyed by its
        # (unique) group name.
        self.neuron_groups = {att.name: att for att in net.__dict__['objects']
                              if type(att) == Neurons or type(att) == NeuronGroup}
        self.conn_groups = {att.name: att for att in net.__dict__['objects']
                            if type(att) == Connections or type(att) == Synapses}
        self.poisson_groups = {att.name: att for att in net.__dict__['objects']
                               if type(att) == PoissonGroup}
        self.spikegen_groups = {att.name: att for att in net.__dict__['objects']
                                if type(att) == SpikeGeneratorGroup}
        # Accumulate network-wide totals over all groups.
        self.total_num_neurons = 0
        self.total_num_synapses = 0
        for group_key in self.neuron_groups:
            self.total_num_neurons += self.neuron_groups[group_key].N
        for group_key in self.conn_groups:
            self.total_num_synapses += len(self.conn_groups[group_key])
        # NOTE(review): the OrderedDict is immediately replaced by the
        # plain dict literal on the next statement.
        self.net_dict = collections.OrderedDict()
        self.net_dict = {'n_total': self.total_num_neurons,
                         's_total': self.total_num_synapses,
                         'n_pop': self.extract_neuron_groups(),
                         's_pop': self.extract_synapse_groups(),
                         's_tags': self.extract_synapse_tags(),
                         'n_params': self.extract_neuron_parameters(),
                         's_params': self.extract_synapse_parameters(),
                         }
    def extract_neuron_groups(self):
        """ This function extracts all present `NeuronGroup`/`Neurons` from
        a provided network.
        Returns:
            dict: {'unique population ID': N}
        """
        self.neuron_populations = {}
        for group_key in self.neuron_groups:
            num_neurons = self.neuron_groups[group_key].N
            self.neuron_populations.update({group_key: num_neurons})
        return self.neuron_populations
    def extract_synapse_groups(self):
        """This function returns dictionary that contains the name synapse
        identifier and corresponding pre, post population identifier.
        Returns:
            dict: {'unique synapse ID': [ID_pre, ID_post]}
        """
        self.synapse_populations = {}
        for group_key in self.conn_groups:
            # Record source and target population by their group names.
            pre_post = [self.conn_groups[group_key].source.name,
                        self.conn_groups[group_key].target.name]
            self.synapse_populations.update({group_key: pre_post})
        return self.synapse_populations
    def extract_synapse_tags(self):
        """This function collects important meta information of a given
        synapse group, given the synapse identifier, and returns a
        dictionary
        Returns:
            dict: Tags help to identify synaptic properties.
            Tags consist of
            {'sign', 'target_sign', 'p_connection', 'plastic', 'mean', 'std'}
            where
            * sign (str) : 'exc' | 'inh'
            * target_sign (str) : 'exc' | 'inh'
            * p_connection (float): [0, 1]
            * plastic (bool) : True | False
            * mean (float): [0, 1]
            * std (float): [0, 1]
        """
        self.synapse_tags = {}
        for group_key in self.conn_groups:
            current_tags = self.conn_groups[group_key]._tags
            # Remove tags that are not part of the exported description.
            # NOTE(review): this pops from the group's _tags dict in place,
            # i.e. the original group object is modified.
            current_tags.pop('mismatch', None)
            current_tags.pop('noise', None)
            current_tags.pop('level', None)
            current_tags.pop('num_inputs', None)
            current_tags.pop('bb_type', None)
            current_tags.pop('group_type', None)
            current_tags.pop('connection_type', None)
            # A group is treated as plastic when its initial parameters
            # include the STDP time constant 'taupre'.
            if 'taupre' in self.conn_groups[group_key]._init_parameters:
                current_tags.update({'plastic': True})
            else:
                current_tags.update({'plastic': False})
            # Realised connection probability: existing synapses divided
            # by the size of the full bipartite graph (N_pre * N_post).
            p_connection = len(self.conn_groups[group_key]) / \
                (self.conn_groups[group_key].source.N *
                 self.conn_groups[group_key].target.N)
            current_tags.update({'p_connection': p_connection})
            # Mean and std of the plastic weights, rounded to 4 decimals.
            current_tags.update({'mean':
                                 np.round(np.mean(
                                     self.conn_groups
                                     [group_key].w_plast), 4)})
            current_tags.update({'std':
                                 np.round(np.std(
                                     self.conn_groups
                                     [group_key].w_plast), 4)})
            self.synapse_tags.update({group_key: current_tags})
        return self.synapse_tags
    def extract_neuron_parameters(self):
        """ This function extracts the initial neuron parameters.
        At the moment we assume that the network **does not** have
        heterogeneous parameters.
        Returns:
            dict: {'unique population ID': parameters}
        """
        self.neuron_params = {}
        for group_key in self.neuron_groups:
            self.neuron_params.update({group_key:
                                       self.neuron_groups
                                       [group_key]._init_parameters})
        return self.neuron_params
    def extract_synapse_parameters(self):
        """This function extracts the initial synapse parameters.
        At the moment we assume that the network **does not** have
        heterogeneous parameters.
        Returns:
            dict: {'unique synapse ID': parameters}
        """
        self.synapse_params = {}
        for group_key in self.conn_groups:
            self.synapse_params.update({group_key:
                                        self.conn_groups
                                        [group_key]._init_parameters})
        return self.synapse_params
    def save_to_file(self, filename, directory=None):
        """Simple wrapper function to save network description to a
        pickle file.
        Args:
            filename (str): Desired name to store the network dictionary as
            directory (str, optional): Desired directory to save the network file.
                If no directory is provided the file is saved to:
                `~/teiliApps/output/`
        """
        if directory is None:
            # Default output location: ~/teiliApps/output/
            directory = os.path.join(os.path.expanduser('~'),
                                     'teiliApps',
                                     'output')
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Keep a recognised pickle extension; otherwise append '.p'.
        if filename[-2:] == '.p' or filename[-7:] == '.pickle':
            filename = os.path.join(directory,
                                    filename)
        else:
            filename = filename + '.p'
            filename = os.path.join(directory,
                                    filename)
        with open(filename, 'wb') as handle:
            pickle.dump(self.net_dict, handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
        print('Network description saved to: \n {}'
              .format(filename))
    def load_from_file(self, filename):
        """ Wrapper function to load previously exported network.
        Args:
            filename (str): Filename with full path and file extension.
        """
        with open(filename, 'rb') as handle:
            # Replaces the current network description entirely.
            self.net_dict = pickle.load(handle)
    def print_network(self):
        """Simple print function for quick check
        """
        # Walk up to three dictionary levels deep, indenting each level.
        for k,v in self.net_dict.items():
            if type(v) == dict:
                print(k)
                for kk, vv in v.items():
                    if type(vv) == dict:
                        print('  ', kk)
                        for kkk, vvv in vv.items():
                            print('    ', kkk, ': ', vvv)
                    else:
                        print('  ', kk, ': ', vv)
            else:
                print(k, v)
| [
"os.path.exists",
"collections.OrderedDict",
"numpy.mean",
"pickle.dump",
"os.makedirs",
"pickle.load",
"os.path.join",
"numpy.std",
"os.path.expanduser"
] | [((4598, 4623), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4621, 4623), False, 'import collections\n'), ((10282, 10315), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (10294, 10315), False, 'import os\n'), ((10428, 10461), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (10440, 10461), False, 'import os\n'), ((10556, 10624), 'pickle.dump', 'pickle.dump', (['self.net_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.net_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (10567, 10624), False, 'import pickle\n'), ((11022, 11041), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (11033, 11041), False, 'import pickle\n'), ((9987, 10010), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (10005, 10010), False, 'import os\n'), ((10128, 10153), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (10142, 10153), False, 'import os\n'), ((10171, 10193), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (10182, 10193), False, 'import os\n'), ((7892, 7936), 'numpy.mean', 'np.mean', (['self.conn_groups[group_key].w_plast'], {}), '(self.conn_groups[group_key].w_plast)\n', (7899, 7936), True, 'import numpy as np\n'), ((8101, 8144), 'numpy.std', 'np.std', (['self.conn_groups[group_key].w_plast'], {}), '(self.conn_groups[group_key].w_plast)\n', (8107, 8144), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.